text
stringlengths 5
1.04M
|
|---|
/***
*
* Copyright (c) 1996-2001, Valve LLC. All rights reserved.
*
* This product contains software technology licensed from Id
* Software, Inc. ("Id Technology"). Id Technology (c) 1996 Id Software, Inc.
* All Rights Reserved.
*
* Use, distribution, and modification of this source code and/or resulting
* object code is restricted to non-commercial enhancements to products from
* Valve LLC. All other use, distribution, or modification is prohibited
* without written permission from Valve LLC.
*
****/
/*
===== buttons.cpp ========================================================
button-related code
*/
#include "extdll.h"
#include "util.h"
#include "cbase.h"
#include "saverestore.h"
#include "doors.h"
#if !defined ( _WIN32 )
#include <string.h> // memset())))
#endif
#define SF_BUTTON_DONTMOVE 1
#define SF_ROTBUTTON_NOTSOLID 1
#define SF_BUTTON_TOGGLE 32 // button stays pushed until reactivated
#define SF_BUTTON_SPARK_IF_OFF 64 // button sparks in OFF state
#define SF_BUTTON_TOUCH_ONLY 256 // button only fires as a result of USE key.
#define SF_GLOBAL_SET 1 // Set global state to initial state on spawn
// env_global: invisible point entity that reads/writes one named entry in the
// cross-level global state table (gGlobalState) when triggered.
class CEnvGlobal : public CPointEntity
{
public:
void Spawn( void );
void KeyValue( KeyValueData *pkvd );
void Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value );
virtual int Save( CSave &save );
virtual int Restore( CRestore &restore );
static TYPEDESCRIPTION m_SaveData[];
string_t m_globalstate; // name of the global this entity controls
int m_triggermode; // 0=off, 1=on, 2=dead, anything else=toggle (see Use)
int m_initialstate; // state seeded at spawn when SF_GLOBAL_SET is set
};
// Save/restore field descriptors for CEnvGlobal.
TYPEDESCRIPTION CEnvGlobal::m_SaveData[] =
{
DEFINE_FIELD( CEnvGlobal, m_globalstate, FIELD_STRING ),
DEFINE_FIELD( CEnvGlobal, m_triggermode, FIELD_INTEGER ),
DEFINE_FIELD( CEnvGlobal, m_initialstate, FIELD_INTEGER ),
};
IMPLEMENT_SAVERESTORE( CEnvGlobal, CBaseEntity );
LINK_ENTITY_TO_CLASS( env_global, CEnvGlobal );
// Parse the entity's map keys. Unknown keys are handed to the base class,
// which overwrites fHandled itself, so pre-setting it here is safe.
void CEnvGlobal::KeyValue( KeyValueData *pkvd )
{
	pkvd->fHandled = TRUE;

	if ( FStrEq( pkvd->szKeyName, "globalstate" ) )       // name of the global to control
	{
		m_globalstate = ALLOC_STRING( pkvd->szValue );
	}
	else if ( FStrEq( pkvd->szKeyName, "triggermode" ) )  // how Use() changes the state
	{
		m_triggermode = atoi( pkvd->szValue );
	}
	else if ( FStrEq( pkvd->szKeyName, "initialstate" ) ) // state applied at spawn
	{
		m_initialstate = atoi( pkvd->szValue );
	}
	else
	{
		CPointEntity::KeyValue( pkvd );
	}
}
// Validate the entity and optionally seed the global table.
void CEnvGlobal::Spawn( void )
{
	// A global controller without a state name is useless; remove it.
	if ( !m_globalstate )
	{
		REMOVE_ENTITY( ENT(pev) );
		return;
	}

	// When flagged, write our initial state into the table — but only if this
	// global has not already been created by another map.
	if ( FBitSet( pev->spawnflags, SF_GLOBAL_SET ) && !gGlobalState.EntityInTable( m_globalstate ) )
		gGlobalState.EntityAdd( m_globalstate, gpGlobals->mapname, (GLOBALESTATE)m_initialstate );
}
// Apply m_triggermode to the named global: 0=off, 1=on, 2=dead,
// 3 (or any other value) toggles between on and off.
void CEnvGlobal::Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
	GLOBALESTATE oldState = gGlobalState.EntityGetState( m_globalstate );
	GLOBALESTATE newState;

	if ( m_triggermode == 0 )
	{
		newState = GLOBAL_OFF;
	}
	else if ( m_triggermode == 1 )
	{
		newState = GLOBAL_ON;
	}
	else if ( m_triggermode == 2 )
	{
		newState = GLOBAL_DEAD;
	}
	else // toggle mode (3 and every unrecognized value)
	{
		if ( oldState == GLOBAL_ON )
			newState = GLOBAL_OFF;
		else if ( oldState == GLOBAL_OFF )
			newState = GLOBAL_ON;
		else
			newState = oldState; // GLOBAL_DEAD is sticky under toggle
	}

	// Create the entry on first use; update it afterwards.
	if ( gGlobalState.EntityInTable( m_globalstate ) )
		gGlobalState.EntitySetState( m_globalstate, newState );
	else
		gGlobalState.EntityAdd( m_globalstate, gpGlobals->mapname, newState );
}
// Save/restore field descriptors for CMultiSource (class declared elsewhere).
TYPEDESCRIPTION CMultiSource::m_SaveData[] =
{
//!!!BUGBUG FIX
DEFINE_ARRAY( CMultiSource, m_rgEntities, FIELD_EHANDLE, MS_MAX_TARGETS ),
DEFINE_ARRAY( CMultiSource, m_rgTriggered, FIELD_INTEGER, MS_MAX_TARGETS ),
DEFINE_FIELD( CMultiSource, m_iTotal, FIELD_INTEGER ),
DEFINE_FIELD( CMultiSource, m_globalstate, FIELD_STRING ),
};
IMPLEMENT_SAVERESTORE( CMultiSource, CBaseEntity );
LINK_ENTITY_TO_CLASS( multisource, CMultiSource );
//
// Cache user-entity-field values until spawn is called.
//
void CMultiSource::KeyValue( KeyValueData *pkvd )
{
	if ( FStrEq( pkvd->szKeyName, "globalstate" ) )
	{
		// Name of a global that must be GLOBAL_ON before this multisource fires.
		m_globalstate = ALLOC_STRING( pkvd->szValue );
		pkvd->fHandled = TRUE;
		return;
	}

	// Editor-emitted keys we deliberately swallow so the base class doesn't warn.
	if ( FStrEq( pkvd->szKeyName, "style" ) ||
	     FStrEq( pkvd->szKeyName, "height" ) ||
	     FStrEq( pkvd->szKeyName, "killtarget" ) ||
	     FStrEq( pkvd->szKeyName, "value1" ) ||
	     FStrEq( pkvd->szKeyName, "value2" ) ||
	     FStrEq( pkvd->szKeyName, "value3" ) )
	{
		pkvd->fHandled = TRUE;
		return;
	}

	CPointEntity::KeyValue( pkvd );
}
#define SF_MULTI_INIT 1
// Set up the multisource as an invisible logic entity and schedule the
// one-shot Register() scan for after every other entity has spawned.
void CMultiSource::Spawn()
{
	pev->solid = SOLID_NOT;
	pev->movetype = MOVETYPE_NONE;

	// Flag ourselves uninitialized; IsTriggered() returns 0 until Register()
	// clears this.
	pev->spawnflags |= SF_MULTI_INIT;
	pev->nextthink = gpGlobals->time + 0.1;
	SetThink(&CMultiSource::Register);
}
// Toggle the input slot belonging to pCaller and, if every input is now on,
// fire our targets.
void CMultiSource::Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
	int i;

	// Find the caller in our list of registered inputs.
	for ( i = 0; i < m_iTotal; i++ )
	{
		if ( m_rgEntities[i] == pCaller )
			break;
	}

	// BUGFIX: the original searched with a post-increment, leaving i == m_iTotal
	// both when the caller was the last member and when it was absent, so the
	// old "if (i > m_iTotal)" non-member check could never fire and unknown
	// callers silently toggled the last slot. With the loop above, i == m_iTotal
	// now means exactly "not found".
	if ( i == m_iTotal )
	{
		ALERT(at_console, "MultiSrc:Used by non member %s.\n", STRING(pCaller->pev->classname));
		return;
	}

	// CONSIDER: a Use input to the multisource always toggles. Could check useType for ON/OFF/TOGGLE
	m_rgTriggered[i] ^= 1;

	if ( IsTriggered( pActivator ) )
	{
		ALERT( at_aiconsole, "Multisource %s enabled (%d inputs)\n", STRING(pev->targetname), m_iTotal );
		// Renamed from the original's shadowing "useType" local.
		USE_TYPE targetUseType = USE_TOGGLE;
		if ( m_globalstate )
			targetUseType = USE_ON;
		SUB_UseTargets( NULL, targetUseType, 0 );
	}
}
// Returns 1 when every registered input is on (and the optional global gate,
// if named, is GLOBAL_ON); 0 otherwise or while still initializing.
BOOL CMultiSource::IsTriggered( CBaseEntity * )
{
	// Register() has not run yet — state is meaningless.
	if ( pev->spawnflags & SF_MULTI_INIT )
		return 0;

	// Every input slot must currently be triggered.
	for ( int slot = 0; slot < m_iTotal; slot++ )
	{
		if ( !m_rgTriggered[slot] )
			return 0;
	}

	// Optional global gate.
	if ( m_globalstate && gGlobalState.EntityGetState( m_globalstate ) != GLOBAL_ON )
		return 0;

	return 1;
}
// One-shot think: scan the map for our inputs — every entity whose "target"
// is our targetname, plus every multi_manager that lists us — then clear
// SF_MULTI_INIT so IsTriggered() becomes valid.
void CMultiSource::Register(void)
{
edict_t *pentTarget = NULL;
m_iTotal = 0;
memset( m_rgEntities, 0, MS_MAX_TARGETS * sizeof(EHANDLE) );
SetThink(&CMultiSource::SUB_DoNothing); // never think again after this scan
// search for all entities which target this multisource (pev->targetname)
pentTarget = FIND_ENTITY_BY_STRING(NULL, "target", STRING(pev->targetname));
while (!FNullEnt(pentTarget) && (m_iTotal < MS_MAX_TARGETS))
{
CBaseEntity *pTarget = CBaseEntity::Instance(pentTarget);
if ( pTarget )
m_rgEntities[m_iTotal++] = pTarget;
pentTarget = FIND_ENTITY_BY_STRING( pentTarget, "target", STRING(pev->targetname));
}
// multi_managers keep their targets in a private list, so ask each one
// whether it targets us instead of matching on the "target" field.
pentTarget = FIND_ENTITY_BY_STRING(NULL, "classname", "multi_manager");
while (!FNullEnt(pentTarget) && (m_iTotal < MS_MAX_TARGETS))
{
CBaseEntity *pTarget = CBaseEntity::Instance(pentTarget);
if ( pTarget && pTarget->HasTarget(pev->targetname) )
m_rgEntities[m_iTotal++] = pTarget;
pentTarget = FIND_ENTITY_BY_STRING( pentTarget, "classname", "multi_manager" );
}
pev->spawnflags &= ~SF_MULTI_INIT; // initialization complete
}
// CBaseButton
// Save/restore field descriptors for CBaseButton.
TYPEDESCRIPTION CBaseButton::m_SaveData[] =
{
DEFINE_FIELD( CBaseButton, m_fStayPushed, FIELD_BOOLEAN ),
DEFINE_FIELD( CBaseButton, m_fRotating, FIELD_BOOLEAN ),
DEFINE_FIELD( CBaseButton, m_sounds, FIELD_INTEGER ),
DEFINE_FIELD( CBaseButton, m_bLockedSound, FIELD_CHARACTER ),
DEFINE_FIELD( CBaseButton, m_bLockedSentence, FIELD_CHARACTER ),
DEFINE_FIELD( CBaseButton, m_bUnlockedSound, FIELD_CHARACTER ),
DEFINE_FIELD( CBaseButton, m_bUnlockedSentence, FIELD_CHARACTER ),
DEFINE_FIELD( CBaseButton, m_strChangeTarget, FIELD_STRING ),
// DEFINE_FIELD( CBaseButton, m_ls, FIELD_??? ), // This is restored in Precache()
};
IMPLEMENT_SAVERESTORE( CBaseButton, CBaseToggle );
// Precache spark/lock sounds and rebuild the m_ls lock-sound struct from the
// byte-sized sound/sentence indices (this doubles as its restore path — see
// the commented-out m_ls entry in m_SaveData).
void CBaseButton::Precache( void )
{
const char *pszSound;
if ( FBitSet ( pev->spawnflags, SF_BUTTON_SPARK_IF_OFF ) )// this button should spark in OFF state
{
PRECACHE_SOUND ("buttons/spark1.wav");
PRECACHE_SOUND ("buttons/spark2.wav");
PRECACHE_SOUND ("buttons/spark3.wav");
PRECACHE_SOUND ("buttons/spark4.wav");
PRECACHE_SOUND ("buttons/spark5.wav");
PRECACHE_SOUND ("buttons/spark6.wav");
}
// get door button sounds, for doors which require buttons to open
if (m_bLockedSound)
{
pszSound = ButtonSound( (int)m_bLockedSound );
PRECACHE_SOUND(pszSound);
m_ls.sLockedSound = ALLOC_STRING(pszSound);
}
if (m_bUnlockedSound)
{
pszSound = ButtonSound( (int)m_bUnlockedSound );
PRECACHE_SOUND(pszSound);
m_ls.sUnlockedSound = ALLOC_STRING(pszSound);
}
// get sentence group names, for doors which are directly 'touched' to open
switch (m_bLockedSentence)
{
case 1: m_ls.sLockedSentence = MAKE_STRING("NA"); break; // access denied
case 2: m_ls.sLockedSentence = MAKE_STRING("ND"); break; // security lockout
case 3: m_ls.sLockedSentence = MAKE_STRING("NF"); break; // blast door
case 4: m_ls.sLockedSentence = MAKE_STRING("NFIRE"); break; // fire door
case 5: m_ls.sLockedSentence = MAKE_STRING("NCHEM"); break; // chemical door
case 6: m_ls.sLockedSentence = MAKE_STRING("NRAD"); break; // radiation door
case 7: m_ls.sLockedSentence = MAKE_STRING("NCON"); break; // gen containment
case 8: m_ls.sLockedSentence = MAKE_STRING("NH"); break; // maintenance door
case 9: m_ls.sLockedSentence = MAKE_STRING("NG"); break; // broken door
default: m_ls.sLockedSentence = 0; break;
}
switch (m_bUnlockedSentence)
{
case 1: m_ls.sUnlockedSentence = MAKE_STRING("EA"); break; // access granted
case 2: m_ls.sUnlockedSentence = MAKE_STRING("ED"); break; // security door
case 3: m_ls.sUnlockedSentence = MAKE_STRING("EF"); break; // blast door
case 4: m_ls.sUnlockedSentence = MAKE_STRING("EFIRE"); break; // fire door
case 5: m_ls.sUnlockedSentence = MAKE_STRING("ECHEM"); break; // chemical door
case 6: m_ls.sUnlockedSentence = MAKE_STRING("ERAD"); break; // radiation door
case 7: m_ls.sUnlockedSentence = MAKE_STRING("ECON"); break; // gen containment
case 8: m_ls.sUnlockedSentence = MAKE_STRING("EH"); break; // maintenance door
default: m_ls.sUnlockedSentence = 0; break;
}
}
//
// Cache user-entity-field values until spawn is called.
//
void CBaseButton::KeyValue( KeyValueData *pkvd )
{
	if (FStrEq(pkvd->szKeyName, "changetarget"))
	{
		// Entity whose target is re-pointed when this button fires (door hack).
		m_strChangeTarget = ALLOC_STRING(pkvd->szValue);
		pkvd->fHandled = TRUE;
	}
	else if (FStrEq(pkvd->szKeyName, "locked_sound"))
	{
		// IDIOM FIX: these four keys are small integer indices stored in byte
		// fields; the original parsed them with atof(), silently truncating a
		// double to a char. atoi() parses the same map values directly and
		// yields identical results for valid integer input.
		m_bLockedSound = atoi(pkvd->szValue);
		pkvd->fHandled = TRUE;
	}
	else if (FStrEq(pkvd->szKeyName, "locked_sentence"))
	{
		m_bLockedSentence = atoi(pkvd->szValue);
		pkvd->fHandled = TRUE;
	}
	else if (FStrEq(pkvd->szKeyName, "unlocked_sound"))
	{
		m_bUnlockedSound = atoi(pkvd->szValue);
		pkvd->fHandled = TRUE;
	}
	else if (FStrEq(pkvd->szKeyName, "unlocked_sentence"))
	{
		m_bUnlockedSentence = atoi(pkvd->szValue);
		pkvd->fHandled = TRUE;
	}
	else if (FStrEq(pkvd->szKeyName, "sounds"))
	{
		// Index into ButtonSound()'s table.
		m_sounds = atoi(pkvd->szValue);
		pkvd->fHandled = TRUE;
	}
	else
		CBaseToggle::KeyValue( pkvd );
}
//
// ButtonShot
//
// Buttons with health are "pushed" by shooting them. Always returns 0 — the
// button never accumulates damage or dies.
int CBaseButton::TakeDamage( entvars_t* pevInflictor, entvars_t* pevAttacker, float flDamage, int bitsDamageType )
{
BUTTON_CODE code = ButtonResponseToTouch();
if ( code == BUTTON_NOTHING )
return 0;
// Temporarily disable the touch function, until movement is finished.
SetTouch( NULL );
m_hActivator = CBaseEntity::Instance( pevAttacker );
if ( m_hActivator == NULL )
return 0;
if ( code == BUTTON_RETURN )
{
EMIT_SOUND(ENT(pev), CHAN_VOICE, (char*)STRING(pev->noise), 1, ATTN_NORM);
// Toggle buttons fire when they get back to their "home" position
if ( !(pev->spawnflags & SF_BUTTON_TOGGLE) )
SUB_UseTargets( m_hActivator, USE_TOGGLE, 0 );
ButtonReturn();
}
else // code == BUTTON_ACTIVATE
ButtonActivate( );
return 0;
}
/*QUAKED func_button (0 .5 .8) ?
When a button is touched, it moves some distance in the direction of it's angle,
triggers all of it's targets, waits some time, then returns to it's original position
where it can be triggered again.
"angle" determines the opening direction
"target" all entities with a matching targetname will be used
"speed" override the default 40 speed
"wait" override the default 1 second wait (-1 = never return)
"lip" override the default 4 pixel lip remaining at end of move
"health" if set, the button must be killed instead of touched
"sounds"
0) steam metal
1) wooden clunk
2) metallic click
3) in-out
*/
LINK_ENTITY_TO_CLASS( func_button, CBaseButton );
// Spawn a sliding button: resolve its sound, compute its pressed/released
// positions from movedir and lip, and wire up touch/use/damage activation.
void CBaseButton::Spawn( )
{
const char *pszSound;
//----------------------------------------------------
//determine sounds for buttons
//a sound of 0 should not make a sound
//----------------------------------------------------
pszSound = ButtonSound( m_sounds );
PRECACHE_SOUND(pszSound);
pev->noise = ALLOC_STRING(pszSound);
Precache();
if ( FBitSet ( pev->spawnflags, SF_BUTTON_SPARK_IF_OFF ) )// this button should spark in OFF state
{
SetThink ( &CBaseButton::ButtonSpark );
pev->nextthink = gpGlobals->time + 0.5;// no hurry, make sure everything else spawns
}
SetMovedir(pev);
pev->movetype = MOVETYPE_PUSH;
pev->solid = SOLID_BSP;
SET_MODEL(ENT(pev), STRING(pev->model));
if (pev->speed == 0)
pev->speed = 40;
// health > 0 makes the button shootable (see TakeDamage)
if (pev->health > 0)
{
pev->takedamage = DAMAGE_YES;
}
if (m_flWait == 0)
m_flWait = 1;
if (m_flLip == 0)
m_flLip = 4;
m_toggle_state = TS_AT_BOTTOM;
m_vecPosition1 = pev->origin;
// Subtract 2 from size because the engine expands bboxes by 1 in all directions making the size too big
m_vecPosition2 = m_vecPosition1 + (pev->movedir * (fabs( pev->movedir.x * (pev->size.x-2) ) + fabs( pev->movedir.y * (pev->size.y-2) ) + fabs( pev->movedir.z * (pev->size.z-2) ) - m_flLip));
// Is this a non-moving button?
if ( ((m_vecPosition2 - m_vecPosition1).Length() < 1) || (pev->spawnflags & SF_BUTTON_DONTMOVE) )
m_vecPosition2 = m_vecPosition1;
// wait == -1 means the button stays pressed forever
m_fStayPushed = (m_flWait == -1 ? TRUE : FALSE);
m_fRotating = FALSE;
// if the button is flagged for USE button activation only, take away it's touch function and add a use function
if ( FBitSet ( pev->spawnflags, SF_BUTTON_TOUCH_ONLY ) ) // touchable button
{
SetTouch( &CBaseButton::ButtonTouch );
}
else
{
SetTouch ( NULL );
SetUse ( &CBaseButton::ButtonUse );
}
}
// Button sound table.
// Maps a map-editor sound index to a .wav path. Indices 0-14 are button
// sounds, 21-25 are lever sounds (15-20 are reserved), and anything else
// falls back to button9. Also used by CBaseDoor for lock/unlock sounds.
const char *ButtonSound( int sound )
{
	static const char *buttonSounds[] =
	{
		"common/null.wav",          // 0: silent
		"buttons/button1.wav",
		"buttons/button2.wav",
		"buttons/button3.wav",
		"buttons/button4.wav",
		"buttons/button5.wav",
		"buttons/button6.wav",
		"buttons/button7.wav",
		"buttons/button8.wav",
		"buttons/button9.wav",
		"buttons/button10.wav",
		"buttons/button11.wav",
		"buttons/latchlocked1.wav",
		"buttons/latchunlocked1.wav",
		"buttons/lightswitch2.wav", // 14
	};
	// next 6 slots (15-20) reserved for any additional sliding button sounds
	static const char *leverSounds[] =
	{
		"buttons/lever1.wav",       // 21
		"buttons/lever2.wav",
		"buttons/lever3.wav",
		"buttons/lever4.wav",
		"buttons/lever5.wav",       // 25
	};

	if ( sound >= 0 && sound <= 14 )
		return buttonSounds[sound];
	if ( sound >= 21 && sound <= 25 )
		return leverSounds[sound - 21];
	return "buttons/button9.wav";
}
//
// Makes flagged buttons spark when turned off
//
void DoSpark(entvars_t *pev, const Vector &location )
{
	// Center the effect within the entity's bounding volume.
	Vector sparkPos = location + pev->size * 0.5;
	UTIL_Sparks( sparkPos );

	// Quiet random volume.
	float flVolume = RANDOM_FLOAT ( 0.25 , 0.75 ) * 0.4;

	static const char *sparkSounds[] =
	{
		"buttons/spark1.wav",
		"buttons/spark2.wav",
		"buttons/spark3.wav",
		"buttons/spark4.wav",
		"buttons/spark5.wav",
		"buttons/spark6.wav",
	};
	// Pick one of the six sounds; like the original switch, an index of 6
	// (RANDOM_FLOAT returning exactly 1) plays nothing.
	int soundIndex = (int)(RANDOM_FLOAT(0,1) * 6);
	if ( soundIndex >= 0 && soundIndex < 6 )
		EMIT_SOUND(ENT(pev), CHAN_VOICE, sparkSounds[soundIndex], flVolume, ATTN_NORM);
}
// Idle think for SF_BUTTON_SPARK_IF_OFF buttons: emit a spark at the mins
// corner and reschedule at a random interval.
void CBaseButton::ButtonSpark ( void )
{
SetThink ( &CBaseButton::ButtonSpark );
pev->nextthink = gpGlobals->time + ( 0.1 + RANDOM_FLOAT ( 0, 1.5 ) );// spark again at random interval
DoSpark( pev, pev->mins );
}
//
// Button's Use function
//
// +USE activation: press the button, or pop a pressed toggle button back out.
void CBaseButton::ButtonUse ( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
// Ignore touches if button is moving, or pushed-in and waiting to auto-come-out.
// UNDONE: Should this use ButtonResponseToTouch() too?
if (m_toggle_state == TS_GOING_UP || m_toggle_state == TS_GOING_DOWN )
return;
m_hActivator = pActivator;
if ( m_toggle_state == TS_AT_TOP)
{
// Only a releasable toggle button can be un-pressed by use.
if (!m_fStayPushed && FBitSet(pev->spawnflags, SF_BUTTON_TOGGLE))
{
EMIT_SOUND(ENT(pev), CHAN_VOICE, (char*)STRING(pev->noise), 1, ATTN_NORM);
//SUB_UseTargets( m_eoActivator );
ButtonReturn();
}
}
else
ButtonActivate( );
}
// Decide how the button reacts to a touch/shot given its current motion
// state: press it (ACTIVATE), pop it back out (RETURN), or do nothing.
CBaseButton::BUTTON_CODE CBaseButton::ButtonResponseToTouch( void )
{
	// A moving button ignores all interaction.
	if ( m_toggle_state == TS_GOING_UP || m_toggle_state == TS_GOING_DOWN )
		return BUTTON_NOTHING;

	// Resting at the bottom: any interaction presses it.
	if ( m_toggle_state != TS_AT_TOP )
		return BUTTON_ACTIVATE;

	// At the top: only a releasable toggle button can be popped back out.
	if ( !m_fStayPushed && FBitSet( pev->spawnflags, SF_BUTTON_TOGGLE ) )
		return BUTTON_RETURN;

	return BUTTON_NOTHING;
}
//
// Touching a button simply "activates" it.
//
void CBaseButton:: ButtonTouch( CBaseEntity *pOther )
{
// Ignore touches by anything but players
if (!FClassnameIs(pOther->pev, "player"))
return;
m_hActivator = pOther;
BUTTON_CODE code = ButtonResponseToTouch();
if ( code == BUTTON_NOTHING )
return;
// Master entity (multisource) gate: locked buttons only play a sound.
if (!UTIL_IsMasterTriggered(m_sMaster, pOther))
{
// play button locked sound
PlayLockSounds(pev, &m_ls, TRUE, TRUE);
return;
}
// Temporarily disable the touch function, until movement is finished.
SetTouch( NULL );
if ( code == BUTTON_RETURN )
{
EMIT_SOUND(ENT(pev), CHAN_VOICE, (char*)STRING(pev->noise), 1, ATTN_NORM);
SUB_UseTargets( m_hActivator, USE_TOGGLE, 0 );
ButtonReturn();
}
else // code == BUTTON_ACTIVATE
ButtonActivate( );
}
//
// Starts the button moving "in/up".
//
// Plays the button noise, checks the master gate, then starts the press
// motion (linear or angular) ending in TriggerAndWait().
void CBaseButton::ButtonActivate( )
{
EMIT_SOUND(ENT(pev), CHAN_VOICE, (char*)STRING(pev->noise), 1, ATTN_NORM);
if (!UTIL_IsMasterTriggered(m_sMaster, m_hActivator))
{
// button is locked, play locked sound
PlayLockSounds(pev, &m_ls, TRUE, TRUE);
return;
}
else
{
// button is unlocked, play unlocked sound
PlayLockSounds(pev, &m_ls, FALSE, TRUE);
}
ASSERT(m_toggle_state == TS_AT_BOTTOM);
m_toggle_state = TS_GOING_UP;
SetMoveDone( &CBaseButton::TriggerAndWait );
if (!m_fRotating)
LinearMove( m_vecPosition2, pev->speed);
else
AngularMove( m_vecAngle2, pev->speed);
}
//
// Button has reached the "in/up" position. Activate its "targets", and pause before "popping out".
//
void CBaseButton::TriggerAndWait( void )
{
ASSERT(m_toggle_state == TS_GOING_UP);
if (!UTIL_IsMasterTriggered(m_sMaster, m_hActivator))
return;
m_toggle_state = TS_AT_TOP;
// If button automatically comes back out, start it moving out.
// Else re-instate touch method
if (m_fStayPushed || FBitSet ( pev->spawnflags, SF_BUTTON_TOGGLE ) )
{
if ( !FBitSet ( pev->spawnflags, SF_BUTTON_TOUCH_ONLY ) ) // this button only works if USED, not touched!
{
// ALL buttons are now use only
SetTouch ( NULL );
}
else
SetTouch( &CBaseButton::ButtonTouch );
}
else
{
// auto-return after m_flWait seconds
pev->nextthink = pev->ltime + m_flWait;
SetThink( &CBaseButton::ButtonReturn );
}
pev->frame = 1; // use alternate textures
SUB_UseTargets( m_hActivator, USE_TOGGLE, 0 );
}
//
// Starts the button moving "out/down".
//
// The return motion ends in ButtonBackHome().
void CBaseButton::ButtonReturn( void )
{
ASSERT(m_toggle_state == TS_AT_TOP);
m_toggle_state = TS_GOING_DOWN;
SetMoveDone( &CBaseButton::ButtonBackHome );
if (!m_fRotating)
LinearMove( m_vecPosition1, pev->speed);
else
AngularMove( m_vecAngle1, pev->speed);
pev->frame = 0; // use normal textures
}
//
// Button has returned to start state. Quiesce it.
//
// Toggle buttons fire their targets on release; any multisource we target is
// notified so it can flip our input slot back off.
void CBaseButton::ButtonBackHome( void )
{
ASSERT(m_toggle_state == TS_GOING_DOWN);
m_toggle_state = TS_AT_BOTTOM;
if ( FBitSet(pev->spawnflags, SF_BUTTON_TOGGLE) )
{
//EMIT_SOUND(ENT(pev), CHAN_VOICE, (char*)STRING(pev->noise), 1, ATTN_NORM);
SUB_UseTargets( m_hActivator, USE_TOGGLE, 0 );
}
if (!FStringNull(pev->target))
{
edict_t* pentTarget = NULL;
for (;;)
{
pentTarget = FIND_ENTITY_BY_TARGETNAME(pentTarget, STRING(pev->target));
if (FNullEnt(pentTarget))
break;
// only multisources are re-notified here
if (!FClassnameIs(pentTarget, "multisource"))
continue;
CBaseEntity *pTarget = CBaseEntity::Instance( pentTarget );
if ( pTarget )
pTarget->Use( m_hActivator, this, USE_TOGGLE, 0 );
}
}
// Re-instate touch method, movement cycle is complete.
if ( !FBitSet ( pev->spawnflags, SF_BUTTON_TOUCH_ONLY ) ) // this button only works if USED, not touched!
{
// All buttons are now use only
SetTouch ( NULL );
}
else
SetTouch( &CBaseButton::ButtonTouch );
// reset think for a sparking button
if ( FBitSet ( pev->spawnflags, SF_BUTTON_SPARK_IF_OFF ) )
{
SetThink ( &CBaseButton::ButtonSpark );
pev->nextthink = gpGlobals->time + 0.5;// no hurry.
}
}
//
// Rotating button (aka "lever")
//
// Identical to CBaseButton except it rotates about an axis instead of
// sliding; only Spawn() differs.
class CRotButton : public CBaseButton
{
public:
void Spawn( void );
};
LINK_ENTITY_TO_CLASS( func_rot_button, CRotButton );
// Spawn a rotating button: resolve sound, set rotation axis/direction, and
// compute the pressed/released angles from m_flMoveDistance.
void CRotButton::Spawn( void )
{
const char *pszSound;
//----------------------------------------------------
//determine sounds for buttons
//a sound of 0 should not make a sound
//----------------------------------------------------
pszSound = ButtonSound( m_sounds );
PRECACHE_SOUND(pszSound);
pev->noise = ALLOC_STRING(pszSound);
// set the axis of rotation
CBaseToggle::AxisDir( pev );
// check for clockwise rotation
if ( FBitSet (pev->spawnflags, SF_DOOR_ROTATE_BACKWARDS) )
pev->movedir = pev->movedir * -1;
pev->movetype = MOVETYPE_PUSH;
if ( pev->spawnflags & SF_ROTBUTTON_NOTSOLID )
pev->solid = SOLID_NOT;
else
pev->solid = SOLID_BSP;
SET_MODEL(ENT(pev), STRING(pev->model));
if (pev->speed == 0)
pev->speed = 40;
if (m_flWait == 0)
m_flWait = 1;
if (pev->health > 0)
{
pev->takedamage = DAMAGE_YES;
}
m_toggle_state = TS_AT_BOTTOM;
m_vecAngle1 = pev->angles;
m_vecAngle2 = pev->angles + pev->movedir * m_flMoveDistance;
ASSERTSZ(m_vecAngle1 != m_vecAngle2, "rotating button start/end positions are equal");
m_fStayPushed = (m_flWait == -1 ? TRUE : FALSE);
m_fRotating = TRUE;
// if the button is flagged for USE button activation only, take away it's touch function and add a use function
if ( !FBitSet ( pev->spawnflags, SF_BUTTON_TOUCH_ONLY ) )
{
SetTouch ( NULL );
SetUse ( &CRotButton::ButtonUse );
}
else // touchable button
SetTouch( &CRotButton::ButtonTouch );
//SetTouch( ButtonTouch );
}
// Make this button behave like a door (HACKHACK)
// This will disable use and make the button solid
// rotating buttons were made SOLID_NOT by default since their were some
// collision problems with them...
#define SF_MOMENTARY_DOOR 0x0001
// momentary_rot_button: rotates only while being +USEd, reporting its
// fractional position (0..1) to its targets via USE_SET, and can optionally
// auto-return when released.
class CMomentaryRotButton : public CBaseToggle
{
public:
void Spawn ( void );
void KeyValue( KeyValueData *pkvd );
virtual int ObjectCaps( void )
{
// never carried across transitions; continuous +USE unless in door mode
int flags = CBaseToggle :: ObjectCaps() & (~FCAP_ACROSS_TRANSITION);
if ( pev->spawnflags & SF_MOMENTARY_DOOR )
return flags;
return flags | FCAP_CONTINUOUS_USE;
}
void Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value );
void EXPORT Off( void );
void EXPORT Return( void );
void UpdateSelf( float value );
void UpdateSelfReturn( float value );
void UpdateAllButtons( float value, int start );
void PlaySound( void );
void UpdateTarget( float value );
static CMomentaryRotButton *Instance( edict_t *pent ) { return (CMomentaryRotButton *)GET_PRIVATE(pent);};
virtual int Save( CSave &save );
virtual int Restore( CRestore &restore );
static TYPEDESCRIPTION m_SaveData[];
int m_lastUsed; // 1 while the button is being held/used
int m_direction; // +1 toward m_end, -1 toward m_start
float m_returnSpeed; // auto-return speed (0 disables return)
vec3_t m_start; // angles at fractional position 0
vec3_t m_end; // angles at fractional position 1
int m_sounds; // ButtonSound() index
};
// Save/restore field descriptors for CMomentaryRotButton.
TYPEDESCRIPTION CMomentaryRotButton::m_SaveData[] =
{
DEFINE_FIELD( CMomentaryRotButton, m_lastUsed, FIELD_INTEGER ),
DEFINE_FIELD( CMomentaryRotButton, m_direction, FIELD_INTEGER ),
DEFINE_FIELD( CMomentaryRotButton, m_returnSpeed, FIELD_FLOAT ),
DEFINE_FIELD( CMomentaryRotButton, m_start, FIELD_VECTOR ),
DEFINE_FIELD( CMomentaryRotButton, m_end, FIELD_VECTOR ),
DEFINE_FIELD( CMomentaryRotButton, m_sounds, FIELD_INTEGER ),
};
IMPLEMENT_SAVERESTORE( CMomentaryRotButton, CBaseToggle );
LINK_ENTITY_TO_CLASS( momentary_rot_button, CMomentaryRotButton );
// Compute the rotation range from m_flMoveDistance (a negative distance
// swaps start/end and the initial direction), choose solidity, and cache
// the button sound.
void CMomentaryRotButton::Spawn( void )
{
CBaseToggle::AxisDir( pev );
if ( pev->speed == 0 )
pev->speed = 100;
if ( m_flMoveDistance < 0 )
{
m_start = pev->angles + pev->movedir * m_flMoveDistance;
m_end = pev->angles;
m_direction = 1; // This will toggle to -1 on the first use()
m_flMoveDistance = -m_flMoveDistance;
}
else
{
m_start = pev->angles;
m_end = pev->angles + pev->movedir * m_flMoveDistance;
m_direction = -1; // This will toggle to +1 on the first use()
}
// door mode (HACKHACK) makes the button solid; see SF_MOMENTARY_DOOR above
if ( pev->spawnflags & SF_MOMENTARY_DOOR )
pev->solid = SOLID_BSP;
else
pev->solid = SOLID_NOT;
pev->movetype = MOVETYPE_PUSH;
UTIL_SetOrigin(pev, pev->origin);
SET_MODEL(ENT(pev), STRING(pev->model) );
const char *pszSound = ButtonSound( m_sounds );
PRECACHE_SOUND(pszSound);
pev->noise = ALLOC_STRING(pszSound);
m_lastUsed = 0;
}
// Parse map keys: "returnspeed" (auto-return speed) and "sounds"
// (ButtonSound index); everything else goes to the base class.
void CMomentaryRotButton::KeyValue( KeyValueData *pkvd )
{
	if ( FStrEq( pkvd->szKeyName, "returnspeed" ) )
	{
		m_returnSpeed = atof( pkvd->szValue );
		pkvd->fHandled = TRUE;
		return;
	}

	if ( FStrEq( pkvd->szKeyName, "sounds" ) )
	{
		m_sounds = atoi( pkvd->szValue );
		pkvd->fHandled = TRUE;
		return;
	}

	CBaseToggle::KeyValue( pkvd );
}
// Play the button's cached noise (set from ButtonSound() in Spawn).
void CMomentaryRotButton::PlaySound( void )
{
EMIT_SOUND(ENT(pev), CHAN_VOICE, (char*)STRING(pev->noise), 1, ATTN_NORM);
}
// BUGBUG: This design causes a latency. When the button is retriggered, the first impulse
// will send the target in the wrong direction because the parameter is calculated based on the
// current, not future position. The predicted "dest" below compensates for this.
void CMomentaryRotButton::Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
// fractional position (0..1) based on current angles; stashed in ideal_yaw
pev->ideal_yaw = CBaseToggle::AxisDelta( pev->spawnflags, pev->angles, m_start ) / m_flMoveDistance;
UpdateAllButtons( pev->ideal_yaw, 1 );
// Calculate destination angle and use it to predict value, this prevents sending target in wrong direction on retriggering
Vector dest = pev->angles + pev->avelocity * (pev->nextthink - pev->ltime);
float value1 = CBaseToggle::AxisDelta( pev->spawnflags, dest, m_start ) / m_flMoveDistance;
UpdateTarget( value1 );
}
// Drive every momentary_rot_button sharing our target to the same fractional
// position, so linked levers stay in sync. start!=0 means "being pushed"
// (UpdateSelf); start==0 means auto-returning (UpdateSelfReturn).
void CMomentaryRotButton::UpdateAllButtons( float value, int start )
{
// Update all rot buttons attached to the same target
edict_t *pentTarget = NULL;
for (;;)
{
pentTarget = FIND_ENTITY_BY_STRING(pentTarget, "target", STRING(pev->target));
if (FNullEnt(pentTarget))
break;
if ( FClassnameIs( VARS(pentTarget), "momentary_rot_button" ) )
{
CMomentaryRotButton *pEntity = CMomentaryRotButton::Instance(pentTarget);
if ( pEntity )
{
if ( start )
pEntity->UpdateSelf( value );
else
pEntity->UpdateSelfReturn( value );
}
}
}
}
// Advance this button one use-impulse: reverse direction on a fresh grab,
// clamp at either end of travel, and keep rotating otherwise. Off() runs
// when the player stops +USEing and the think isn't re-bumped.
void CMomentaryRotButton::UpdateSelf( float value )
{
BOOL fplaysound = FALSE;
// first impulse of a new grab: flip direction and play the sound once
if ( !m_lastUsed )
{
fplaysound = TRUE;
m_direction = -m_direction;
}
m_lastUsed = 1;
pev->nextthink = pev->ltime + 0.1;
// clamp at the end of travel in the current direction
if ( m_direction > 0 && value >= 1.0 )
{
pev->avelocity = g_vecZero;
pev->angles = m_end;
return;
}
else if ( m_direction < 0 && value <= 0 )
{
pev->avelocity = g_vecZero;
pev->angles = m_start;
return;
}
if (fplaysound)
PlaySound();
// HACKHACK -- If we're going slow, we'll get multiple player packets per frame, bump nexthink on each one to avoid stalling
if ( pev->nextthink < pev->ltime )
pev->nextthink = pev->ltime + 0.1;
else
pev->nextthink += 0.1;
pev->avelocity = (m_direction * pev->speed) * pev->movedir;
SetThink( &CMomentaryRotButton::Off );
}
// Send our fractional position (0..1) to every target entity via USE_SET.
void CMomentaryRotButton::UpdateTarget( float value )
{
	// Nothing to drive when no target is set.
	if ( FStringNull( pev->target ) )
		return;

	edict_t *pentTarget = FIND_ENTITY_BY_TARGETNAME( NULL, STRING(pev->target) );
	while ( !FNullEnt( pentTarget ) )
	{
		CBaseEntity *pEntity = CBaseEntity::Instance( pentTarget );
		if ( pEntity )
			pEntity->Use( this, this, USE_SET, value );
		pentTarget = FIND_ENTITY_BY_TARGETNAME( pentTarget, STRING(pev->target) );
	}
}
// Player released +USE (think was no longer re-bumped): stop rotating and,
// if flagged with auto-return and a positive return speed, start returning.
void CMomentaryRotButton::Off( void )
{
pev->avelocity = g_vecZero;
m_lastUsed = 0;
if ( FBitSet( pev->spawnflags, SF_PENDULUM_AUTO_RETURN ) && m_returnSpeed > 0 )
{
SetThink( &CMomentaryRotButton::Return );
pev->nextthink = pev->ltime + 0.1;
m_direction = -1; // head back toward m_start
}
else
SetThink( NULL );
}
// Auto-return think: recompute our fractional position and propagate it to
// all linked buttons and (while still above 0) to our targets.
void CMomentaryRotButton::Return( void )
{
float value = CBaseToggle::AxisDelta( pev->spawnflags, pev->angles, m_start ) / m_flMoveDistance;
UpdateAllButtons( value, 0 ); // This will end up calling UpdateSelfReturn() n times, but it still works right
if ( value > 0 )
UpdateTarget( value );
}
// Return-motion step: snap to m_start and stop once the fractional position
// reaches 0; otherwise keep rotating backwards at m_returnSpeed.
void CMomentaryRotButton::UpdateSelfReturn( float value )
{
if ( value <= 0 )
{
pev->avelocity = g_vecZero;
pev->angles = m_start;
pev->nextthink = -1; // cancel any pending think
SetThink( NULL );
}
else
{
pev->avelocity = -m_returnSpeed * pev->movedir;
pev->nextthink = pev->ltime + 0.1;
}
}
//----------------------------------------------------------------
// Spark
//----------------------------------------------------------------
// env_spark / env_debris: emits a spark effect at its origin at random
// intervals; can optionally be toggled on/off via +USE (see Spawn flags).
class CEnvSpark : public CBaseEntity
{
public:
void Spawn(void);
void Precache(void);
void EXPORT SparkThink(void);
void EXPORT SparkStart(CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value );
void EXPORT SparkStop(CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value );
void KeyValue(KeyValueData *pkvd);
virtual int Save( CSave &save );
virtual int Restore( CRestore &restore );
static TYPEDESCRIPTION m_SaveData[];
float m_flDelay; // upper bound of the random delay between sparks
};
// Save/restore field descriptors for CEnvSpark.
TYPEDESCRIPTION CEnvSpark::m_SaveData[] =
{
DEFINE_FIELD( CEnvSpark, m_flDelay, FIELD_FLOAT),
};
IMPLEMENT_SAVERESTORE( CEnvSpark, CBaseEntity );
LINK_ENTITY_TO_CLASS(env_spark, CEnvSpark);
LINK_ENTITY_TO_CLASS(env_debris, CEnvSpark);
// Wire up think/use according to the spawnflags. NOTE: the magic numbers 32
// ("toggleable via +USE") and 64 ("start on") are raw spawnflag bits with no
// named constants in this file.
void CEnvSpark::Spawn(void)
{
SetThink( NULL );
SetUse( NULL );
if (FBitSet(pev->spawnflags, 32)) // Use for on/off
{
if (FBitSet(pev->spawnflags, 64)) // Start on
{
SetThink(&CEnvSpark::SparkThink); // start sparking
SetUse(&CEnvSpark::SparkStop); // set up +USE to stop sparking
}
else
SetUse(&CEnvSpark::SparkStart);
}
else
SetThink(&CEnvSpark::SparkThink);
pev->nextthink = gpGlobals->time + ( 0.1 + RANDOM_FLOAT ( 0, 1.5 ) );
// fall back to a sane delay if the map didn't set MaxDelay
if (m_flDelay <= 0)
m_flDelay = 1.5;
Precache( );
}
// Precache the six spark sounds used by DoSpark().
void CEnvSpark::Precache(void)
{
PRECACHE_SOUND( "buttons/spark1.wav" );
PRECACHE_SOUND( "buttons/spark2.wav" );
PRECACHE_SOUND( "buttons/spark3.wav" );
PRECACHE_SOUND( "buttons/spark4.wav" );
PRECACHE_SOUND( "buttons/spark5.wav" );
PRECACHE_SOUND( "buttons/spark6.wav" );
}
// Parse "MaxDelay" (random spark interval upper bound) and swallow
// editor-only keys; everything else goes to the base class.
void CEnvSpark::KeyValue( KeyValueData *pkvd )
{
	if ( FStrEq( pkvd->szKeyName, "MaxDelay" ) )
	{
		m_flDelay = atof( pkvd->szValue );
		pkvd->fHandled = TRUE;
		return;
	}

	// Editor-emitted keys we deliberately ignore.
	if ( FStrEq( pkvd->szKeyName, "style" ) ||
	     FStrEq( pkvd->szKeyName, "height" ) ||
	     FStrEq( pkvd->szKeyName, "killtarget" ) ||
	     FStrEq( pkvd->szKeyName, "value1" ) ||
	     FStrEq( pkvd->szKeyName, "value2" ) ||
	     FStrEq( pkvd->szKeyName, "value3" ) )
	{
		pkvd->fHandled = TRUE;
		return;
	}

	CBaseEntity::KeyValue( pkvd );
}
// Emit one spark at our origin and reschedule after a random delay.
void EXPORT CEnvSpark::SparkThink(void)
{
pev->nextthink = gpGlobals->time + 0.1 + RANDOM_FLOAT (0, m_flDelay);
DoSpark( pev, pev->origin );
}
// +USE handler while off: begin sparking and flip +USE to stop us.
void EXPORT CEnvSpark::SparkStart(CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
SetUse(&CEnvSpark::SparkStop);
SetThink(&CEnvSpark::SparkThink);
pev->nextthink = gpGlobals->time + (0.1 + RANDOM_FLOAT ( 0, m_flDelay));
}
// +USE handler while on: stop sparking and flip +USE to start us again.
void EXPORT CEnvSpark::SparkStop(CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
SetUse(&CEnvSpark::SparkStart);
SetThink(NULL);
}
#define SF_BTARGET_USE 0x0001 // can be activated with +USE
#define SF_BTARGET_ON 0x0002 // starts in the "on" state (frame 1)
// button_target: shootable/usable brush that toggles between two texture
// frames and fires USE_ON/USE_OFF at its targets accordingly.
class CButtonTarget : public CBaseEntity
{
public:
void Spawn( void );
void Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value );
int TakeDamage( entvars_t* pevInflictor, entvars_t* pevAttacker, float flDamage, int bitsDamageType );
int ObjectCaps( void );
};
LINK_ENTITY_TO_CLASS( button_target, CButtonTarget );
// Solid, shootable brush; pev->frame doubles as the on/off state.
void CButtonTarget::Spawn( void )
{
pev->movetype = MOVETYPE_PUSH;
pev->solid = SOLID_BSP;
SET_MODEL(ENT(pev), STRING(pev->model));
pev->takedamage = DAMAGE_YES;
if ( FBitSet( pev->spawnflags, SF_BTARGET_ON ) )
pev->frame = 1;
}
// Flip the on/off state (kept in pev->frame) and fire our targets with
// USE_ON when turning on, USE_OFF when turning off.
void CButtonTarget::Use( CBaseEntity *pActivator, CBaseEntity *pCaller, USE_TYPE useType, float value )
{
	if ( !ShouldToggle( useType, (int)pev->frame ) )
		return;

	pev->frame = pev->frame ? 0 : 1;
	SUB_UseTargets( pActivator, pev->frame ? USE_ON : USE_OFF, 0 );
}
// Never carried across level transitions; advertises direct +USE
// interaction only when SF_BTARGET_USE is set.
int CButtonTarget :: ObjectCaps( void )
{
	int caps = CBaseEntity::ObjectCaps() & ~FCAP_ACROSS_TRANSITION;
	if ( FBitSet(pev->spawnflags, SF_BTARGET_USE) )
		caps |= FCAP_IMPULSE_USE;
	return caps;
}
// Any hit toggles the button: the attacker becomes the activator and the
// damage amount/type are ignored. Always reports the damage as absorbed.
int CButtonTarget::TakeDamage( entvars_t* pevInflictor, entvars_t* pevAttacker, float flDamage, int bitsDamageType )
{
	Use( Instance(pevAttacker), this, USE_TOGGLE, 0 );
	return 1;
}
|
#include "gmPythonNodeOption.hpp"
#include <QRegularExpression>
namespace gm
{
namespace Python
{
namespace Node
{
// Option parameter for a Python node: a list of option labels plus the
// index of the selected one. Seeded with one default entry so the list
// is never empty (removeOption also enforces this).
Option::Option(Object* node) : Param(gm::Component::Type::Option, node)
{
    this->setName("option param");
    this->m_options.append("option 1");
}
// Select the option at the given index; emits indexChanged only when the
// selection actually moves.
auto Option::setIndex(int index) -> void
{
    if (this->m_index == index)
    {
        return;
    }
    this->m_index = index;
    emit this->indexChanged();
}
// Rename the option at the given index.
// Guards against out-of-range indices first — QList::replace requires a
// valid index and asserts otherwise. On success both signals are emitted
// so views refresh the displayed label even though the selection itself
// did not move.
auto Option::changeOption(const QString& label, int index) -> void
{
    if (index < 0 || index >= this->m_options.count())
    {
        return; // ignore invalid indices instead of asserting/crashing
    }
    this->m_options.replace(index, label);
    emit this->optionsChanged();
    emit this->indexChanged();
}
// Append a new auto-numbered label ("option N") and make it the selection.
auto Option::addOption() -> void
{
    const auto label = "option " + QString::number(this->m_options.count() + 1);
    this->m_options.append(label);
    emit this->optionsChanged();
    this->setIndex(this->m_options.count() - 1);
}
// Remove the currently selected option, moving the selection to the
// previous entry (clamped at 0). The last remaining option is never
// removed, so the list stays non-empty.
auto Option::removeOption() -> void
{
    if (this->m_options.count() <= 1)
    {
        return;
    }
    this->m_options.removeAt(this->m_index);
    emit this->optionsChanged();
    this->setIndex(std::max(0, this->m_index - 1));
}
// Returns the full list of option labels.
auto Option::getOptions() -> QStringList
{
    return this->m_options;
}
// Returns the index of the currently selected option.
auto Option::getIndex() -> int
{
    return this->m_index;
}
}
}
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/paint/text_painter_base.h"
#include "third_party/blink/renderer/core/dom/document.h"
#include "third_party/blink/renderer/core/layout/text_decoration_offset_base.h"
#include "third_party/blink/renderer/core/paint/applied_decoration_painter.h"
#include "third_party/blink/renderer/core/paint/box_painter_base.h"
#include "third_party/blink/renderer/core/paint/paint_info.h"
#include "third_party/blink/renderer/core/paint/selection_painting_utils.h"
#include "third_party/blink/renderer/core/paint/text_decoration_info.h"
#include "third_party/blink/renderer/core/style/computed_style.h"
#include "third_party/blink/renderer/core/style/shadow_list.h"
#include "third_party/blink/renderer/platform/fonts/font.h"
#include "third_party/blink/renderer/platform/geometry/length_functions.h"
#include "third_party/blink/renderer/platform/graphics/graphics_context.h"
#include "third_party/blink/renderer/platform/graphics/graphics_context_state_saver.h"
#include "third_party/blink/renderer/platform/wtf/assertions.h"
#include "third_party/blink/renderer/platform/wtf/text/character_names.h"
namespace blink {
namespace {
// We usually use the text decoration thickness to determine how far
// ink-skipped text decorations should be away from the glyph
// contours. Cap this at 5 CSS px in each direction when thickness
// growths larger than that. A value of 13 closely matches FireFox'
// implementation.
constexpr float kDecorationClipMaxDilation = 13;
// Offset between the two lines of a "double" text decoration: one pixel
// more than the line thickness. Callers negate it for overlines and floor
// it for line-throughs (see the paint functions below).
float DoubleOffsetFromThickness(float thickness_pixels) {
  return thickness_pixels + 1.0f;
}
} // anonymous namespace
// Captures the drawing context and the text's origin/frame geometry.
// Per-paint state (emphasis mark, combined text flag, ellipsis offset)
// starts at its defaults and is configured separately.
TextPainterBase::TextPainterBase(GraphicsContext& context,
                                 const Font& font,
                                 const PhysicalOffset& text_origin,
                                 const PhysicalRect& text_frame_rect,
                                 bool horizontal)
    : graphics_context_(context),
      font_(font),
      text_origin_(text_origin),
      text_frame_rect_(text_frame_rect),
      horizontal_(horizontal),
      has_combined_text_(false),
      emphasis_mark_offset_(0),
      ellipsis_offset_(0) {}
TextPainterBase::~TextPainterBase() = default;
// Stores the emphasis mark and computes emphasis_mark_offset_, the signed
// baseline-relative distance at which the mark is drawn: negative (above
// the text) for over-marks, positive (below) for under-marks. Which
// positions count as "over" depends on the writing mode (horizontal_).
void TextPainterBase::SetEmphasisMark(const AtomicString& emphasis_mark,
                                      TextEmphasisPosition position) {
  emphasis_mark_ = emphasis_mark;
  const SimpleFontData* font_data = font_.PrimaryFont();
  DCHECK(font_data);
  if (!font_data || emphasis_mark.IsNull()) {
    emphasis_mark_offset_ = 0;
  } else if ((horizontal_ && (position == TextEmphasisPosition::kOverRight ||
                              position == TextEmphasisPosition::kOverLeft)) ||
             (!horizontal_ &&
              (position == TextEmphasisPosition::kOverRight ||
               position == TextEmphasisPosition::kUnderRight))) {
    // Mark above the text: shift up by our ascent plus the mark's descent.
    emphasis_mark_offset_ = -font_data->GetFontMetrics().Ascent() -
                            font_.EmphasisMarkDescent(emphasis_mark);
  } else {
    DCHECK(position == TextEmphasisPosition::kUnderRight ||
           position == TextEmphasisPosition::kUnderLeft ||
           position == TextEmphasisPosition::kOverLeft);
    // Mark below the text: shift down by our descent plus the mark's ascent.
    emphasis_mark_offset_ = font_data->GetFontMetrics().Descent() +
                            font_.EmphasisMarkAscent(emphasis_mark);
  }
}
// static
// Applies |text_style| to |context|, saving prior state into |state_saver|
// only when something actually changes. Enables stroke mode when a stroke
// width is set, updates fill/stroke color and thickness lazily, and
// installs a shadow draw-looper when the style carries shadows.
void TextPainterBase::UpdateGraphicsContext(
    GraphicsContext& context,
    const TextPaintStyle& text_style,
    bool horizontal,
    GraphicsContextStateSaver& state_saver) {
  TextDrawingModeFlags mode = context.TextDrawingMode();
  if (text_style.stroke_width > 0) {
    TextDrawingModeFlags new_mode = mode | kTextModeStroke;
    if (mode != new_mode) {
      state_saver.SaveIfNeeded();
      context.SetTextDrawingMode(new_mode);
      mode = new_mode;
    }
  }
  // Only push colors/thickness that differ from the context's current state.
  if (mode & kTextModeFill && text_style.fill_color != context.FillColor())
    context.SetFillColor(text_style.fill_color);
  if (mode & kTextModeStroke) {
    if (text_style.stroke_color != context.StrokeColor())
      context.SetStrokeColor(text_style.stroke_color);
    if (text_style.stroke_width != context.StrokeThickness())
      context.SetStrokeThickness(text_style.stroke_width);
  }
  if (text_style.shadow) {
    state_saver.SaveIfNeeded();
    context.SetDrawLooper(text_style.shadow->CreateDrawLooper(
        DrawLooperBuilder::kShadowIgnoresAlpha, text_style.current_color,
        text_style.color_scheme, horizontal));
  }
}
// Returns |text_color| darkened if it is too close to white to be legible
// on a white (print-economy) background, otherwise unchanged.
Color TextPainterBase::TextColorForWhiteBackground(Color text_color) {
  // 65025 == 255^2, chosen semi-arbitrarily after a few tests.
  const int distance_from_white = DifferenceSquared(text_color, Color::kWhite);
  if (distance_from_white > 65025)
    return text_color;
  return text_color.Dark();
}
// static
// Builds the fill/stroke/emphasis/shadow style used to paint text. In the
// kTextClip phase only coverage matters, so every color collapses to black
// with no shadow; otherwise the colors come from the (visited-dependent)
// computed style and may be darkened for print economy on white paper.
TextPaintStyle TextPainterBase::TextPaintingStyle(const Document& document,
                                                  const ComputedStyle& style,
                                                  const PaintInfo& paint_info) {
  TextPaintStyle text_style;
  text_style.stroke_width = style.TextStrokeWidth();
  text_style.color_scheme = style.UsedColorScheme();
  bool is_printing = paint_info.IsPrinting();
  if (paint_info.phase == PaintPhase::kTextClip) {
    // When we use the text as a clip, we only care about the alpha, thus we
    // make all the colors black.
    text_style.current_color = Color::kBlack;
    text_style.fill_color = Color::kBlack;
    text_style.stroke_color = Color::kBlack;
    text_style.emphasis_mark_color = Color::kBlack;
    text_style.shadow = nullptr;
  } else {
    text_style.current_color =
        style.VisitedDependentColor(GetCSSPropertyColor());
    text_style.fill_color =
        style.VisitedDependentColor(GetCSSPropertyWebkitTextFillColor());
    text_style.stroke_color =
        style.VisitedDependentColor(GetCSSPropertyWebkitTextStrokeColor());
    text_style.emphasis_mark_color =
        style.VisitedDependentColor(GetCSSPropertyWebkitTextEmphasisColor());
    text_style.shadow = style.TextShadow();
    // Adjust text color when printing with a white background.
    DCHECK_EQ(document.Printing(), is_printing);
    bool force_background_to_white =
        BoxPainterBase::ShouldForceWhiteBackgroundForPrintEconomy(document,
                                                                  style);
    if (force_background_to_white) {
      text_style.fill_color =
          TextColorForWhiteBackground(text_style.fill_color);
      text_style.stroke_color =
          TextColorForWhiteBackground(text_style.stroke_color);
      text_style.emphasis_mark_color =
          TextColorForWhiteBackground(text_style.emphasis_mark_color);
    }
  }
  return text_style;
}
// Thin forwarder to SelectionPaintingUtils, which resolves the paint style
// for |node|'s selected text starting from |text_style|.
TextPaintStyle TextPainterBase::SelectionPaintingStyle(
    const Document& document,
    const ComputedStyle& style,
    Node* node,
    bool have_selection,
    const PaintInfo& paint_info,
    const TextPaintStyle& text_style) {
  return SelectionPaintingUtils::SelectionPaintingStyle(
      document, style, node, have_selection, text_style, paint_info);
}
// Clips out (ClipOut) one rectangle per glyph intercept so ink-skipped
// decorations are not drawn through glyphs. |upper| is the stripe's top
// relative to the text origin; |dilation| widens each hole horizontally.
void TextPainterBase::DecorationsStripeIntercepts(
    float upper,
    float stripe_width,
    float dilation,
    const Vector<Font::TextIntercept>& text_intercepts) {
  for (auto intercept : text_intercepts) {
    FloatPoint clip_origin(text_origin_);
    FloatRect clip_rect(
        clip_origin + FloatPoint(intercept.begin_, upper),
        FloatSize(intercept.end_ - intercept.begin_, stripe_width));
    clip_rect.InflateX(dilation);
    // We need to ensure the clip rectangle is covering the full underline
    // extent. For horizontal drawing, using enclosingIntRect would be
    // sufficient, since we can clamp to full device pixels that way. However,
    // for vertical drawing, we have a transformation applied, which breaks the
    // integers-equal-device pixels assumption, so vertically inflating by 1
    // pixel makes sure we're always covering. This should only be done on the
    // clipping rectangle, not when computing the glyph intersects.
    clip_rect.InflateY(1.0);
    if (!clip_rect.IsFinite())
      continue;
    graphics_context_.ClipOut(clip_rect);
  }
}
// Paints underlines and overlines for every applied decoration, handling
// text-underline-position flipping and combined-text rotation. Whether any
// decoration also needs a line-through is reported via
// |has_line_through_decoration| so the caller can paint it after the text.
void TextPainterBase::PaintDecorationsExceptLineThrough(
    const TextDecorationOffsetBase& decoration_offset,
    TextDecorationInfo& decoration_info,
    const PaintInfo& paint_info,
    const Vector<AppliedTextDecoration>& decorations,
    const TextPaintStyle& text_style,
    bool* has_line_through_decoration) {
  GraphicsContext& context = paint_info.context;
  GraphicsContextStateSaver state_saver(context);
  UpdateGraphicsContext(context, text_style, horizontal_, state_saver);
  if (has_combined_text_)
    context.ConcatCTM(Rotation(text_frame_rect_, kClockwise));
  // text-underline-position may flip underline and overline.
  ResolvedUnderlinePosition underline_position =
      decoration_info.UnderlinePosition();
  bool flip_underline_and_overline = false;
  if (underline_position == ResolvedUnderlinePosition::kOver) {
    flip_underline_and_overline = true;
    underline_position = ResolvedUnderlinePosition::kUnder;
  }
  for (size_t applied_decoration_index = 0;
       applied_decoration_index < decorations.size();
       ++applied_decoration_index) {
    const AppliedTextDecoration& decoration =
        decorations[applied_decoration_index];
    TextDecoration lines = decoration.Lines();
    bool has_underline = EnumHasFlags(lines, TextDecoration::kUnderline);
    bool has_overline = EnumHasFlags(lines, TextDecoration::kOverline);
    if (flip_underline_and_overline)
      std::swap(has_underline, has_overline);
    decoration_info.SetDecorationIndex(applied_decoration_index);
    float resolved_thickness = decoration_info.ResolvedThickness();
    context.SetStrokeThickness(resolved_thickness);
    if (has_underline && decoration_info.FontData()) {
      const int paint_underline_offset =
          decoration_offset.ComputeUnderlineOffset(
              underline_position, decoration_info.Style().ComputedFontSize(),
              decoration_info.FontData()->GetFontMetrics(),
              decoration.UnderlineOffset(), resolved_thickness);
      decoration_info.SetPerLineData(
          TextDecoration::kUnderline, paint_underline_offset,
          DoubleOffsetFromThickness(resolved_thickness), 1);
      PaintDecorationUnderOrOverLine(context, decoration_info,
                                     TextDecoration::kUnderline);
    }
    if (has_overline && decoration_info.FontData()) {
      FontVerticalPositionType position =
          flip_underline_and_overline ? FontVerticalPositionType::TopOfEmHeight
                                      : FontVerticalPositionType::TextTop;
      const int paint_overline_offset =
          decoration_offset.ComputeUnderlineOffsetForUnder(
              decoration_info.Style().TextUnderlineOffset(),
              decoration_info.Style().ComputedFontSize(), resolved_thickness,
              position);
      decoration_info.SetPerLineData(
          TextDecoration::kOverline, paint_overline_offset,
          -DoubleOffsetFromThickness(resolved_thickness), 1);
      PaintDecorationUnderOrOverLine(context, decoration_info,
                                     TextDecoration::kOverline);
    }
    // We could instead build a vector of the TextDecoration instances needing
    // line-through but this is a rare case so better to avoid vector overhead.
    *has_line_through_decoration |=
        EnumHasFlags(lines, TextDecoration::kLineThrough);
  }
  // Restore rotation as needed.
  if (has_combined_text_)
    context.ConcatCTM(Rotation(text_frame_rect_, kCounterclockwise));
}
// Paints only the line-through decorations (painted after the text, unlike
// under/overlines), applying combined-text rotation around the paint.
void TextPainterBase::PaintDecorationsOnlyLineThrough(
    TextDecorationInfo& decoration_info,
    const PaintInfo& paint_info,
    const Vector<AppliedTextDecoration>& decorations,
    const TextPaintStyle& text_style) {
  GraphicsContext& context = paint_info.context;
  GraphicsContextStateSaver state_saver(context);
  UpdateGraphicsContext(context, text_style, horizontal_, state_saver);
  if (has_combined_text_)
    context.ConcatCTM(Rotation(text_frame_rect_, kClockwise));
  for (size_t applied_decoration_index = 0;
       applied_decoration_index < decorations.size();
       ++applied_decoration_index) {
    const AppliedTextDecoration& decoration =
        decorations[applied_decoration_index];
    TextDecoration lines = decoration.Lines();
    if (EnumHasFlags(lines, TextDecoration::kLineThrough)) {
      decoration_info.SetDecorationIndex(applied_decoration_index);
      float resolved_thickness = decoration_info.ResolvedThickness();
      context.SetStrokeThickness(resolved_thickness);
      // For increased line thickness, the line-through decoration needs to grow
      // in both directions from its origin, subtract half the thickness to keep
      // it centered at the same origin.
      const float line_through_offset =
          2 * decoration_info.Baseline() / 3 - resolved_thickness / 2;
      // Floor double_offset in order to avoid double-line gap to appear
      // of different size depending on position where the double line
      // is drawn because of rounding downstream in
      // GraphicsContext::DrawLineForText.
      decoration_info.SetPerLineData(
          TextDecoration::kLineThrough, line_through_offset,
          floorf(DoubleOffsetFromThickness(resolved_thickness)), 0);
      AppliedDecorationPainter decoration_painter(context, decoration_info,
                                                  TextDecoration::kLineThrough);
      // No skip: ink for line-through,
      // compare https://github.com/w3c/csswg-drafts/issues/711
      decoration_painter.Paint();
    }
  }
  // Restore rotation as needed.
  if (has_combined_text_)
    context.ConcatCTM(Rotation(text_frame_rect_, kCounterclockwise));
}
// Paints a single under- or over-line. When text-decoration-skip-ink is
// auto, first clips out stripes where glyphs intersect the decoration,
// dilated by the line thickness capped at kDecorationClipMaxDilation.
void TextPainterBase::PaintDecorationUnderOrOverLine(
    GraphicsContext& context,
    TextDecorationInfo& decoration_info,
    TextDecoration line) {
  AppliedDecorationPainter decoration_painter(context, decoration_info, line);
  if (decoration_info.Style().TextDecorationSkipInk() ==
      ETextDecorationSkipInk::kAuto) {
    FloatRect decoration_bounds = decoration_info.BoundsForLine(line);
    ClipDecorationsStripe(
        decoration_info.InkSkipClipUpper(decoration_bounds.Y()),
        decoration_bounds.Height(),
        std::min(decoration_info.ResolvedThickness(),
                 kDecorationClipMaxDilation));
  }
  decoration_painter.Paint();
}
} // namespace blink
|
/*************************************************************************/
/* visual_shader.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "visual_shader.h"
#include "core/vmap.h"
#include "servers/rendering/shader_types.h"
#include "visual_shader_nodes.h"
// Whether this node's generated code is a single simple declaration
// (set by subclasses; custom script nodes set it to false).
bool VisualShaderNode::is_simple_decl() const {
	return simple_decl;
}
// Selects which output port is previewed in the editor; -1 means none.
void VisualShaderNode::set_output_port_for_preview(int p_index) {
	port_preview = p_index;
}
int VisualShaderNode::get_output_port_for_preview() const {
	return port_preview;
}
// Stores the fallback value used when the input port has no incoming
// connection, then notifies listeners that the node changed.
void VisualShaderNode::set_input_port_default_value(int p_port, const Variant &p_value) {
	default_input_values[p_port] = p_value;
	emit_changed();
}
// Returns the stored default for the port, or a NIL Variant if none exists.
Variant VisualShaderNode::get_input_port_default_value(int p_port) const {
	if (default_input_values.has(p_port)) {
		return default_input_values[p_port];
	}
	return Variant();
}
// Base implementation: no port separators; subclasses may override.
bool VisualShaderNode::is_port_separator(int p_index) const {
	return false;
}
// Base implementations of the code-generation hooks: no default textures
// and no extra global/per-node/per-function code. Subclasses override.
Vector<VisualShader::DefaultTextureParam> VisualShaderNode::get_default_texture_parameters(VisualShader::Type p_type, int p_id) const {
	return Vector<VisualShader::DefaultTextureParam>();
}
String VisualShaderNode::generate_global(Shader::Mode p_mode, VisualShader::Type p_type, int p_id) const {
	return String();
}
String VisualShaderNode::generate_global_per_node(Shader::Mode p_mode, VisualShader::Type p_type, int p_id) const {
	return String();
}
String VisualShaderNode::generate_global_per_func(Shader::Mode p_mode, VisualShader::Type p_type, int p_id) const {
	return String();
}
// Base implementation: nothing editable; subclasses may override.
Vector<StringName> VisualShaderNode::get_editable_properties() const {
	return Vector<StringName>();
}
// Serializes default_input_values as a flat [port, value, port, value, ...]
// array (backing the "default_input_values" property).
Array VisualShaderNode::get_default_input_values() const {
	Array ret;
	for (Map<int, Variant>::Element *E = default_input_values.front(); E; E = E->next()) {
		ret.push_back(E->key());
		ret.push_back(E->get());
	}
	return ret;
}
// Inverse of get_default_input_values(). A malformed (odd-sized) array is
// ignored entirely, but the changed signal is emitted regardless.
void VisualShaderNode::set_default_input_values(const Array &p_values) {
	if (p_values.size() % 2 == 0) {
		for (int i = 0; i < p_values.size(); i += 2) {
			default_input_values[p_values[i + 0]] = p_values[i + 1];
		}
	}
	emit_changed();
}
// Base implementation: no warning text for this node.
String VisualShaderNode::get_warning(Shader::Mode p_mode, VisualShader::Type p_type) const {
	return String();
}
// Base implementation: no editor hint text for input port defaults.
String VisualShaderNode::get_input_port_default_hint(int p_port) const {
	return "";
}
// Script API registration: exposes the preview port and default-input-value
// accessors, the editor refresh signal, and the PortType enum constants.
void VisualShaderNode::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_output_port_for_preview", "port"), &VisualShaderNode::set_output_port_for_preview);
	ClassDB::bind_method(D_METHOD("get_output_port_for_preview"), &VisualShaderNode::get_output_port_for_preview);
	ClassDB::bind_method(D_METHOD("set_input_port_default_value", "port", "value"), &VisualShaderNode::set_input_port_default_value);
	ClassDB::bind_method(D_METHOD("get_input_port_default_value", "port"), &VisualShaderNode::get_input_port_default_value);
	ClassDB::bind_method(D_METHOD("set_default_input_values", "values"), &VisualShaderNode::set_default_input_values);
	ClassDB::bind_method(D_METHOD("get_default_input_values"), &VisualShaderNode::get_default_input_values);
	ADD_PROPERTY(PropertyInfo(Variant::INT, "output_port_for_preview"), "set_output_port_for_preview", "get_output_port_for_preview");
	ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "default_input_values", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR | PROPERTY_USAGE_INTERNAL), "set_default_input_values", "get_default_input_values");
	ADD_SIGNAL(MethodInfo("editor_refresh_request"));
	BIND_ENUM_CONSTANT(PORT_TYPE_SCALAR);
	BIND_ENUM_CONSTANT(PORT_TYPE_SCALAR_INT);
	BIND_ENUM_CONSTANT(PORT_TYPE_VECTOR);
	BIND_ENUM_CONSTANT(PORT_TYPE_BOOLEAN);
	BIND_ENUM_CONSTANT(PORT_TYPE_TRANSFORM);
	BIND_ENUM_CONSTANT(PORT_TYPE_SAMPLER);
	BIND_ENUM_CONSTANT(PORT_TYPE_MAX);
}
VisualShaderNode::VisualShaderNode() {
	port_preview = -1; // no output preview by default
	simple_decl = true;
}
/////////////////////////////////////////////////////////
void VisualShaderNodeCustom::update_ports() {
ERR_FAIL_COND(!get_script_instance());
input_ports.clear();
if (get_script_instance()->has_method("_get_input_port_count")) {
int input_port_count = (int)get_script_instance()->call("_get_input_port_count");
bool has_name = get_script_instance()->has_method("_get_input_port_name");
bool has_type = get_script_instance()->has_method("_get_input_port_type");
for (int i = 0; i < input_port_count; i++) {
Port port;
if (has_name) {
port.name = (String)get_script_instance()->call("_get_input_port_name", i);
} else {
port.name = "in" + itos(i);
}
if (has_type) {
port.type = (int)get_script_instance()->call("_get_input_port_type", i);
} else {
port.type = (int)PortType::PORT_TYPE_SCALAR;
}
input_ports.push_back(port);
}
}
output_ports.clear();
if (get_script_instance()->has_method("_get_output_port_count")) {
int output_port_count = (int)get_script_instance()->call("_get_output_port_count");
bool has_name = get_script_instance()->has_method("_get_output_port_name");
bool has_type = get_script_instance()->has_method("_get_output_port_type");
for (int i = 0; i < output_port_count; i++) {
Port port;
if (has_name) {
port.name = (String)get_script_instance()->call("_get_output_port_name", i);
} else {
port.name = "out" + itos(i);
}
if (has_type) {
port.type = (int)get_script_instance()->call("_get_output_port_type", i);
} else {
port.type = (int)PortType::PORT_TYPE_SCALAR;
}
output_ports.push_back(port);
}
}
}
// Editor caption, supplied by the script's _get_name callback when present.
String VisualShaderNodeCustom::get_caption() const {
	ERR_FAIL_COND_V(!get_script_instance(), "");
	if (!get_script_instance()->has_method("_get_name")) {
		return "Unnamed";
	}
	return (String)get_script_instance()->call("_get_name");
}
// Port accessors backed by the lists built in update_ports(). Out-of-range
// indices fail with PORT_TYPE_SCALAR / empty-string fallbacks.
int VisualShaderNodeCustom::get_input_port_count() const {
	return input_ports.size();
}
VisualShaderNodeCustom::PortType VisualShaderNodeCustom::get_input_port_type(int p_port) const {
	ERR_FAIL_INDEX_V(p_port, input_ports.size(), PORT_TYPE_SCALAR);
	return (PortType)input_ports[p_port].type;
}
String VisualShaderNodeCustom::get_input_port_name(int p_port) const {
	ERR_FAIL_INDEX_V(p_port, input_ports.size(), "");
	return input_ports[p_port].name;
}
int VisualShaderNodeCustom::get_output_port_count() const {
	return output_ports.size();
}
VisualShaderNodeCustom::PortType VisualShaderNodeCustom::get_output_port_type(int p_port) const {
	ERR_FAIL_INDEX_V(p_port, output_ports.size(), PORT_TYPE_SCALAR);
	return (PortType)output_ports[p_port].type;
}
String VisualShaderNodeCustom::get_output_port_name(int p_port) const {
	ERR_FAIL_INDEX_V(p_port, output_ports.size(), "");
	return output_ports[p_port].name;
}
// Wraps the script-provided _get_code body in an indented "\t{ ... }" block
// so its local declarations do not leak into the surrounding shader
// function scope. Input/output variable names are handed to the script as
// arrays.
String VisualShaderNodeCustom::generate_code(Shader::Mode p_mode, VisualShader::Type p_type, int p_id, const String *p_input_vars, const String *p_output_vars, bool p_for_preview) const {
	ERR_FAIL_COND_V(!get_script_instance(), "");
	ERR_FAIL_COND_V(!get_script_instance()->has_method("_get_code"), "");
	Array input_vars;
	for (int i = 0; i < get_input_port_count(); i++) {
		input_vars.push_back(p_input_vars[i]);
	}
	Array output_vars;
	for (int i = 0; i < get_output_port_count(); i++) {
		output_vars.push_back(p_output_vars[i]);
	}
	String code = "\t{\n";
	String _code = (String)get_script_instance()->call("_get_code", input_vars, output_vars, (int)p_mode, (int)p_type);
	bool nend = _code.ends_with("\n");
	// Indent every line of the script code by two tabs.
	_code = _code.insert(0, "\t\t");
	_code = _code.replace("\n", "\n\t\t");
	code += _code;
	if (!nend) {
		code += "\n\t}";
	} else {
		// The replace above appended indentation after the final newline;
		// trim before closing the block.
		code.remove(code.size() - 1);
		code += "}";
	}
	code += "\n";
	return code;
}
// Emits the script's optional _get_global_code output, preceded by a
// comment line carrying the node caption.
String VisualShaderNodeCustom::generate_global_per_node(Shader::Mode p_mode, VisualShader::Type p_type, int p_id) const {
	ERR_FAIL_COND_V(!get_script_instance(), "");
	if (!get_script_instance()->has_method("_get_global_code")) {
		return "";
	}
	String code = "// " + get_caption() + "\n";
	code += (String)get_script_instance()->call("_get_global_code", (int)p_mode);
	code += "\n";
	return code;
}
// Declares the virtual script callbacks a custom node script may implement
// (port counts/names/types, code generation, metadata).
void VisualShaderNodeCustom::_bind_methods() {
	BIND_VMETHOD(MethodInfo(Variant::STRING, "_get_name"));
	BIND_VMETHOD(MethodInfo(Variant::STRING, "_get_description"));
	BIND_VMETHOD(MethodInfo(Variant::STRING, "_get_category"));
	BIND_VMETHOD(MethodInfo(Variant::INT, "_get_return_icon_type"));
	BIND_VMETHOD(MethodInfo(Variant::INT, "_get_input_port_count"));
	BIND_VMETHOD(MethodInfo(Variant::INT, "_get_input_port_type", PropertyInfo(Variant::INT, "port")));
	BIND_VMETHOD(MethodInfo(Variant::STRING_NAME, "_get_input_port_name", PropertyInfo(Variant::INT, "port")));
	BIND_VMETHOD(MethodInfo(Variant::INT, "_get_output_port_count"));
	BIND_VMETHOD(MethodInfo(Variant::INT, "_get_output_port_type", PropertyInfo(Variant::INT, "port")));
	BIND_VMETHOD(MethodInfo(Variant::STRING_NAME, "_get_output_port_name", PropertyInfo(Variant::INT, "port")));
	BIND_VMETHOD(MethodInfo(Variant::STRING, "_get_code", PropertyInfo(Variant::ARRAY, "input_vars"), PropertyInfo(Variant::ARRAY, "output_vars"), PropertyInfo(Variant::INT, "mode"), PropertyInfo(Variant::INT, "type")));
	BIND_VMETHOD(MethodInfo(Variant::STRING, "_get_global_code", PropertyInfo(Variant::INT, "mode")));
	BIND_VMETHOD(MethodInfo(Variant::BOOL, "_is_highend"));
}
VisualShaderNodeCustom::VisualShaderNodeCustom() {
	simple_decl = false; // script-generated code is wrapped in a block, not a simple declaration
}
/////////////////////////////////////////////////////////
// Version string used by update_version() to migrate pre-versioned shaders.
void VisualShader::set_version(const String &p_version) {
	version = p_version;
}
String VisualShader::get_version() const {
	return version;
}
// One-shot migration for shaders saved before versioning existed (empty
// version string): PORT_TYPE_SCALAR_INT was inserted into the PortType
// enum right after SCALAR, so every stored expression port type and
// compare comparison type above 0 must be shifted up by one.
void VisualShader::update_version(const String &p_new_version) {
	if (version == "") {
		for (int i = 0; i < TYPE_MAX; i++) {
			for (Map<int, Node>::Element *E = graph[i].nodes.front(); E; E = E->next()) {
				Ref<VisualShaderNodeExpression> expression = Object::cast_to<VisualShaderNodeExpression>(E->get().node.ptr());
				if (expression.is_valid()) {
					for (int j = 0; j < expression->get_input_port_count(); j++) {
						int type = expression->get_input_port_type(j);
						if (type > 0) { // + PORT_TYPE_SCALAR_INT
							type += 1;
						}
						expression->set_input_port_type(j, type);
					}
					for (int j = 0; j < expression->get_output_port_count(); j++) {
						int type = expression->get_output_port_type(j);
						if (type > 0) { // + PORT_TYPE_SCALAR_INT
							type += 1;
						}
						expression->set_output_port_type(j, type);
					}
				}
				Ref<VisualShaderNodeCompare> compare = Object::cast_to<VisualShaderNodeCompare>(E->get().node.ptr());
				if (compare.is_valid()) {
					int ctype = int(compare->get_comparison_type());
					if (int(ctype) > 0) { // + PORT_TYPE_SCALAR_INT
						ctype += 1;
					}
					compare->set_comparison_type(VisualShaderNodeCompare::ComparisonType(ctype));
				}
			}
		}
	}
	set_version(p_new_version);
}
// Inserts p_node into the graph for p_type under id p_id (ids below 2 are
// rejected — presumably reserved for the built-in input/output nodes) and
// wires up uniform-name validation, input-node metadata and the "changed"
// signal that triggers shader regeneration.
void VisualShader::add_node(Type p_type, const Ref<VisualShaderNode> &p_node, const Vector2 &p_position, int p_id) {
	ERR_FAIL_COND(p_node.is_null());
	ERR_FAIL_COND(p_id < 2);
	ERR_FAIL_INDEX(p_type, TYPE_MAX);
	Graph *g = &graph[p_type];
	ERR_FAIL_COND(g->nodes.has(p_id));
	Node n;
	n.node = p_node;
	n.position = p_position;
	Ref<VisualShaderNodeUniform> uniform = n.node;
	if (uniform.is_valid()) {
		// Normalize the uniform name through the shader's validator.
		String valid_name = validate_uniform_name(uniform->get_uniform_name(), uniform);
		uniform->set_uniform_name(valid_name);
	}
	Ref<VisualShaderNodeInput> input = n.node;
	if (input.is_valid()) {
		input->shader_mode = shader_mode;
		input->shader_type = p_type;
		input->connect("input_type_changed", callable_mp(this, &VisualShader::_input_type_changed), varray(p_type, p_id));
	}
	n.node->connect("changed", callable_mp(this, &VisualShader::_queue_update));
	Ref<VisualShaderNodeCustom> custom = n.node;
	if (custom.is_valid()) {
		custom->update_ports();
	}
	g->nodes[p_id] = n;
	_queue_update();
}
// Editor position of a node within its graph (does not affect generation).
void VisualShader::set_node_position(Type p_type, int p_id, const Vector2 &p_position) {
	ERR_FAIL_INDEX(p_type, TYPE_MAX);
	Graph *g = &graph[p_type];
	ERR_FAIL_COND(!g->nodes.has(p_id));
	g->nodes[p_id].position = p_position;
}
Vector2 VisualShader::get_node_position(Type p_type, int p_id) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, Vector2());
	const Graph *g = &graph[p_type];
	ERR_FAIL_COND_V(!g->nodes.has(p_id), Vector2());
	return g->nodes[p_id].position;
}
// Returns the node with the given id, or a null Ref if type/id is invalid.
Ref<VisualShaderNode> VisualShader::get_node(Type p_type, int p_id) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, Ref<VisualShaderNode>());
	const Graph *g = &graph[p_type];
	ERR_FAIL_COND_V(!g->nodes.has(p_id), Ref<VisualShaderNode>());
	return g->nodes[p_id].node;
}
// Returns all node ids in the graph (Map iteration order, i.e. ascending).
Vector<int> VisualShader::get_node_list(Type p_type) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, Vector<int>());
	const Graph *g = &graph[p_type];
	Vector<int> ret;
	for (Map<int, Node>::Element *E = g->nodes.front(); E; E = E->next()) {
		ret.push_back(E->key());
	}
	return ret;
}
// Next free node id: one past the highest existing id, never below 2
// (ids 0/1 are rejected by add_node/remove_node).
int VisualShader::get_valid_node_id(Type p_type) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, NODE_ID_INVALID);
	const Graph *g = &graph[p_type];
	return g->nodes.size() ? MAX(2, g->nodes.back()->key() + 1) : 2;
}
// Linear search for the id under which p_node is stored;
// NODE_ID_INVALID when the node is not in this graph.
int VisualShader::find_node_id(Type p_type, const Ref<VisualShaderNode> &p_node) const {
	for (const Map<int, Node>::Element *E = graph[p_type].nodes.front(); E; E = E->next()) {
		if (E->get().node == p_node)
			return E->key();
	}
	return NODE_ID_INVALID;
}
// Removes a node (ids below 2 are protected) along with every connection
// that references it, and detaches its change-propagation signals.
void VisualShader::remove_node(Type p_type, int p_id) {
	ERR_FAIL_INDEX(p_type, TYPE_MAX);
	ERR_FAIL_COND(p_id < 2);
	Graph *g = &graph[p_type];
	ERR_FAIL_COND(!g->nodes.has(p_id));
	Ref<VisualShaderNodeInput> input = g->nodes[p_id].node;
	if (input.is_valid()) {
		input->disconnect("input_type_changed", callable_mp(this, &VisualShader::_input_type_changed));
	}
	g->nodes[p_id].node->disconnect("changed", callable_mp(this, &VisualShader::_queue_update));
	g->nodes.erase(p_id);
	for (List<Connection>::Element *E = g->connections.front(); E;) {
		List<Connection>::Element *N = E->next();
		if (E->get().from_node == p_id || E->get().to_node == p_id) {
			// Fix the destination's back-references *before* erasing E:
			// List::erase() frees the element, so reading E->get() after
			// the erase was a use-after-free.
			if (E->get().from_node == p_id) {
				g->nodes[E->get().to_node].prev_connected_nodes.erase(p_id);
			}
			g->connections.erase(E);
		}
		E = N;
	}
	_queue_update();
}
// True when an exact connection (same nodes and same ports) already exists.
bool VisualShader::is_node_connection(Type p_type, int p_from_node, int p_from_port, int p_to_node, int p_to_port) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, false);
	const Graph *g = &graph[p_type];
	for (const List<Connection>::Element *E = g->connections.front(); E; E = E->next()) {
		if (E->get().from_node == p_from_node && E->get().from_port == p_from_port && E->get().to_node == p_to_node && E->get().to_port == p_to_port) {
			return true;
		}
	}
	return false;
}
// Depth-first walk over prev_connected_nodes: true when p_target is
// reachable upstream from p_node. Used by can_connect_nodes for cycle
// prevention.
// NOTE(review): the recursion assumes the existing graph is acyclic; a
// pre-existing cycle (e.g. via connect_nodes_forced) would recurse forever.
bool VisualShader::is_nodes_connected_relatively(const Graph *p_graph, int p_node, int p_target) const {
	bool result = false;
	const VisualShader::Node &node = p_graph->nodes[p_node];
	for (const List<int>::Element *E = node.prev_connected_nodes.front(); E; E = E->next()) {
		if (E->get() == p_target) {
			return true;
		}
		result = is_nodes_connected_relatively(p_graph, E->get(), p_target);
		if (result) {
			break;
		}
	}
	return result;
}
// Full validation used before creating a connection: both nodes and ports
// must exist, port types must be compatible, the connection must not
// already exist, and it must not create a cycle (checked through the
// prev_connected_nodes back-references).
bool VisualShader::can_connect_nodes(Type p_type, int p_from_node, int p_from_port, int p_to_node, int p_to_port) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, false);
	const Graph *g = &graph[p_type];
	if (!g->nodes.has(p_from_node))
		return false;
	if (p_from_node == p_to_node)
		return false;
	if (p_from_port < 0 || p_from_port >= g->nodes[p_from_node].node->get_output_port_count())
		return false;
	if (!g->nodes.has(p_to_node))
		return false;
	if (p_to_port < 0 || p_to_port >= g->nodes[p_to_node].node->get_input_port_count())
		return false;
	VisualShaderNode::PortType from_port_type = g->nodes[p_from_node].node->get_output_port_type(p_from_port);
	VisualShaderNode::PortType to_port_type = g->nodes[p_to_node].node->get_input_port_type(p_to_port);
	if (!is_port_types_compatible(from_port_type, to_port_type)) {
		return false;
	}
	for (const List<Connection>::Element *E = g->connections.front(); E; E = E->next()) {
		if (E->get().from_node == p_from_node && E->get().from_port == p_from_port && E->get().to_node == p_to_node && E->get().to_port == p_to_port) {
			return false;
		}
	}
	if (is_nodes_connected_relatively(g, p_from_node, p_to_node))
		return false;
	return true;
}
// Two port types are compatible when both fall in the auto-convertible
// group (enum values 0-3: scalar, scalar-int, vector, boolean — all
// clamped to 0 by the MAX) or are the exact same "special" type
// (transform, sampler).
bool VisualShader::is_port_types_compatible(int p_a, int p_b) const {
	return MAX(0, p_a - 3) == (MAX(0, p_b - 3));
}
// Registers a connection without any validation. Used when restoring a
// saved shader (see _set "connections"), where the data is trusted.
void VisualShader::connect_nodes_forced(Type p_type, int p_from_node, int p_from_port, int p_to_node, int p_to_port) {
	ERR_FAIL_INDEX(p_type, TYPE_MAX);
	Graph *g = &graph[p_type];

	Connection conn;
	conn.from_node = p_from_node;
	conn.from_port = p_from_port;
	conn.to_node = p_to_node;
	conn.to_port = p_to_port;
	g->connections.push_back(conn);

	// Keep the reverse-reachability index in sync for cycle detection.
	g->nodes[p_to_node].prev_connected_nodes.push_back(p_from_node);

	_queue_update();
}
// Validated connection: fails with an Error code when an endpoint or port
// is invalid, the types are incompatible, or the connection already exists.
// On success the connection is stored and a shader rebuild is queued.
Error VisualShader::connect_nodes(Type p_type, int p_from_node, int p_from_port, int p_to_node, int p_to_port) {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, ERR_CANT_CONNECT);
	Graph *g = &graph[p_type];

	ERR_FAIL_COND_V(!g->nodes.has(p_from_node), ERR_INVALID_PARAMETER);
	ERR_FAIL_INDEX_V(p_from_port, g->nodes[p_from_node].node->get_output_port_count(), ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(!g->nodes.has(p_to_node), ERR_INVALID_PARAMETER);
	ERR_FAIL_INDEX_V(p_to_port, g->nodes[p_to_node].node->get_input_port_count(), ERR_INVALID_PARAMETER);

	VisualShaderNode::PortType out_type = g->nodes[p_from_node].node->get_output_port_type(p_from_port);
	VisualShaderNode::PortType in_type = g->nodes[p_to_node].node->get_input_port_type(p_to_port);
	ERR_FAIL_COND_V_MSG(!is_port_types_compatible(out_type, in_type), ERR_INVALID_PARAMETER, "Incompatible port types (scalar/vec/bool) with transform.");

	// Duplicate connections are an error, unlike connect_nodes_forced().
	for (List<Connection>::Element *E = g->connections.front(); E; E = E->next()) {
		const Connection &existing = E->get();
		if (existing.from_node == p_from_node && existing.from_port == p_from_port && existing.to_node == p_to_node && existing.to_port == p_to_port) {
			ERR_FAIL_V(ERR_ALREADY_EXISTS);
		}
	}

	Connection conn;
	conn.from_node = p_from_node;
	conn.from_port = p_from_port;
	conn.to_node = p_to_node;
	conn.to_port = p_to_port;
	g->connections.push_back(conn);
	g->nodes[p_to_node].prev_connected_nodes.push_back(p_from_node);

	_queue_update();
	return OK;
}
// Removes the matching connection (if present), updates the reverse
// reachability index, and queues a rebuild. A non-existent connection is
// silently ignored.
void VisualShader::disconnect_nodes(Type p_type, int p_from_node, int p_from_port, int p_to_node, int p_to_port) {
	ERR_FAIL_INDEX(p_type, TYPE_MAX);
	Graph *g = &graph[p_type];

	for (List<Connection>::Element *E = g->connections.front(); E; E = E->next()) {
		const Connection &c = E->get();
		if (c.from_node != p_from_node || c.from_port != p_from_port || c.to_node != p_to_node || c.to_port != p_to_port) {
			continue;
		}
		g->connections.erase(E);
		g->nodes[p_to_node].prev_connected_nodes.erase(p_from_node);
		_queue_update();
		return;
	}
}
// Script-facing variant of get_node_connections(): returns the connection
// list as an Array of Dictionaries with keys
// "from_node"/"from_port"/"to_node"/"to_port".
Array VisualShader::_get_node_connections(Type p_type) const {
	ERR_FAIL_INDEX_V(p_type, TYPE_MAX, Array());
	const Graph *g = &graph[p_type];

	Array ret;
	for (const List<Connection>::Element *E = g->connections.front(); E; E = E->next()) {
		const Connection &c = E->get();
		Dictionary d;
		d["from_node"] = c.from_node;
		d["from_port"] = c.from_port;
		d["to_node"] = c.to_node;
		d["to_port"] = c.to_port;
		ret.push_back(d);
	}
	return ret;
}
// Appends a copy of every connection of the given graph type to
// `r_connections`. The output list is not cleared first.
void VisualShader::get_node_connections(Type p_type, List<Connection> *r_connections) const {
	ERR_FAIL_INDEX(p_type, TYPE_MAX);
	for (const List<Connection>::Element *E = graph[p_type].connections.front(); E; E = E->next()) {
		r_connections->push_back(E->get());
	}
}
// Switches the shader's mode (Spatial, CanvasItem, ...). All stored render
// modes/flags are reset, every input node is retargeted to the new mode,
// and connections touching input/output nodes (whose port layout depends
// on the mode) or missing nodes are discarded.
void VisualShader::set_mode(Mode p_mode) {
	if (shader_mode == p_mode) {
		return;
	}

	//erase input/output connections
	modes.clear();
	flags.clear();
	shader_mode = p_mode;
	for (int i = 0; i < TYPE_MAX; i++) {
		// Retarget input nodes: the Ref cast below yields an invalid Ref for
		// non-input node types, so only inputs are touched.
		for (Map<int, Node>::Element *E = graph[i].nodes.front(); E; E = E->next()) {
			Ref<VisualShaderNodeInput> input = E->get().node;
			if (input.is_valid()) {
				input->shader_mode = shader_mode;
				//input->input_index = 0;
			}
		}

		Ref<VisualShaderNodeOutput> output = graph[i].nodes[NODE_ID_OUTPUT].node;
		output->shader_mode = shader_mode;

		// clear connections since they are no longer valid
		for (List<Connection>::Element *E = graph[i].connections.front(); E;) {
			bool keep = true;

			// Grab the successor first: E may be erased below.
			List<Connection>::Element *N = E->next();

			int from = E->get().from_node;
			int to = E->get().to_node;

			if (!graph[i].nodes.has(from)) {
				keep = false;
			} else {
				Ref<VisualShaderNode> from_node = graph[i].nodes[from].node;
				if (from_node->is_class("VisualShaderNodeOutput") || from_node->is_class("VisualShaderNodeInput")) {
					keep = false;
				}
			}

			if (!graph[i].nodes.has(to)) {
				keep = false;
			} else {
				Ref<VisualShaderNode> to_node = graph[i].nodes[to].node;
				if (to_node->is_class("VisualShaderNodeOutput") || to_node->is_class("VisualShaderNodeInput")) {
					keep = false;
				}
			}

			if (!keep) {
				graph[i].connections.erase(E);
			}
			E = N;
		}
	}

	_queue_update();
	_change_notify();
}
// Stores the editor graph scroll offset (serialized with the resource).
void VisualShader::set_graph_offset(const Vector2 &p_offset) {
	graph_offset = p_offset;
}
// Returns the editor graph scroll offset.
Vector2 VisualShader::get_graph_offset() const {
	return graph_offset;
}
// Returns the current shader mode (spatial, canvas_item, ...).
Shader::Mode VisualShader::get_mode() const {
	return shader_mode;
}
// Visual shaders are graph-based, not hand-written text shaders.
bool VisualShader::is_text_shader() const {
	return false;
}
// Builds a small canvas_item shader that routes the value of one output
// port of one node into COLOR.rgb, so the editor can render a live preview.
// Transform ports are rejected up front (they cannot be shown as a color).
// The node's upstream subgraph is emitted via _write_node with
// for_preview=true.
String VisualShader::generate_preview_shader(Type p_type, int p_node, int p_port, Vector<DefaultTextureParam> &default_tex_params) const {
	Ref<VisualShaderNode> node = get_node(p_type, p_node);
	ERR_FAIL_COND_V(!node.is_valid(), String());
	ERR_FAIL_COND_V(p_port < 0 || p_port >= node->get_output_port_count(), String());
	ERR_FAIL_COND_V(node->get_output_port_type(p_port) == VisualShaderNode::PORT_TYPE_TRANSFORM, String());

	StringBuilder global_code;
	StringBuilder global_code_per_node;
	Map<Type, StringBuilder> global_code_per_func;
	StringBuilder code;
	Set<StringName> classes;

	// Previews always render as a canvas_item shader regardless of mode.
	global_code += String() + "shader_type canvas_item;\n";

	// Collect global expressions from ALL graph types, since the previewed
	// subgraph may reference them.
	String global_expressions;
	for (int i = 0, index = 0; i < TYPE_MAX; i++) {
		for (Map<int, Node>::Element *E = graph[i].nodes.front(); E; E = E->next()) {
			Ref<VisualShaderNodeGlobalExpression> global_expression = Object::cast_to<VisualShaderNodeGlobalExpression>(E->get().node.ptr());
			if (global_expression.is_valid()) {
				String expr = "";
				expr += "// " + global_expression->get_caption() + ":" + itos(index++) + "\n";
				expr += global_expression->generate_global(get_mode(), Type(i), -1);
				expr = expr.replace("\n", "\n\t");
				expr += "\n";
				global_expressions += expr;
			}
		}
	}

	global_code += "\n";
	global_code += global_expressions;

	//make it faster to go around through shader
	VMap<ConnectionKey, const List<Connection>::Element *> input_connections;
	VMap<ConnectionKey, const List<Connection>::Element *> output_connections;

	for (const List<Connection>::Element *E = graph[p_type].connections.front(); E; E = E->next()) {
		ConnectionKey from_key;
		from_key.node = E->get().from_node;
		from_key.port = E->get().from_port;

		output_connections.insert(from_key, E);

		ConnectionKey to_key;
		to_key.node = E->get().to_node;
		to_key.port = E->get().to_port;

		input_connections.insert(to_key, E);
	}

	code += "\nvoid fragment() {\n";

	Set<int> processed;
	Error err = _write_node(p_type, global_code, global_code_per_node, global_code_per_func, code, default_tex_params, input_connections, output_connections, p_node, processed, true, classes);
	ERR_FAIL_COND_V(err != OK, String());

	// Convert the previewed port's value to an RGB color depending on type.
	if (node->get_output_port_type(p_port) == VisualShaderNode::PORT_TYPE_SCALAR) {
		code += "\tCOLOR.rgb = vec3(n_out" + itos(p_node) + "p" + itos(p_port) + " );\n";
	} else if (node->get_output_port_type(p_port) == VisualShaderNode::PORT_TYPE_SCALAR_INT) {
		code += "\tCOLOR.rgb = vec3(float(n_out" + itos(p_node) + "p" + itos(p_port) + "));\n";
	} else if (node->get_output_port_type(p_port) == VisualShaderNode::PORT_TYPE_BOOLEAN) {
		code += "\tCOLOR.rgb = vec3(n_out" + itos(p_node) + "p" + itos(p_port) + " ? 1.0 : 0.0);\n";
	} else {
		code += "\tCOLOR.rgb = n_out" + itos(p_node) + "p" + itos(p_port) + ";\n";
	}
	code += "}\n";

	//set code secretly
	global_code += "\n\n";
	String final_code = global_code;
	final_code += global_code_per_node;
	final_code += code;
	return final_code;
}
// True for characters allowed to START an identifier (ASCII letters only).
#define IS_INITIAL_CHAR(m_d) (((m_d) >= 'a' && (m_d) <= 'z') || ((m_d) >= 'A' && (m_d) <= 'Z'))
// True for characters allowed INSIDE an identifier (letters, digits, '_').
#define IS_SYMBOL_CHAR(m_d) (((m_d) >= 'a' && (m_d) <= 'z') || ((m_d) >= 'A' && (m_d) <= 'Z') || ((m_d) >= '0' && (m_d) <= '9') || (m_d) == '_')
// Sanitizes a user-supplied port name: strips leading characters until the
// name starts with an ASCII letter, keeps only [A-Za-z0-9_] (spaces become
// '_'), then returns "" if the result collides with an existing input or
// output port name. Otherwise returns the sanitized name.
//
// Fix: removed the dead local `String valid_name = name;` that the original
// declared after the sanitizing pass but never read.
String VisualShader::validate_port_name(const String &p_name, const List<String> &p_input_ports, const List<String> &p_output_ports) const {
	String name = p_name;

	// Drop leading characters until the first ASCII letter.
	while (name.length() && !IS_INITIAL_CHAR(name[0])) {
		name = name.substr(1, name.length() - 1);
	}

	// Keep only identifier characters; map spaces to underscores.
	if (name != String()) {
		String valid_name;
		for (int i = 0; i < name.length(); i++) {
			if (IS_SYMBOL_CHAR(name[i])) {
				valid_name += String::chr(name[i]);
			} else if (name[i] == ' ') {
				valid_name += "_";
			}
		}
		name = valid_name;
	}

	// Reject (return "") a name that duplicates an existing port.
	bool is_equal = false;
	for (int i = 0; i < p_input_ports.size(); i++) {
		if (name == p_input_ports[i]) {
			is_equal = true;
			break;
		}
	}
	if (!is_equal) {
		for (int i = 0; i < p_output_ports.size(); i++) {
			if (name == p_output_ports[i]) {
				is_equal = true;
				break;
			}
		}
	}
	if (is_equal) {
		name = "";
	}
	return name;
}
// Sanitizes a uniform name (same character rules as validate_port_name),
// falls back to the node's caption when nothing survives, then makes the
// name unique across all graphs by stripping trailing digits and appending
// an incrementing attempt number. `p_uniform` itself is skipped so a node
// may keep its current name.
String VisualShader::validate_uniform_name(const String &p_name, const Ref<VisualShaderNodeUniform> &p_uniform) const {
	String name = p_name; //validate name first
	while (name.length() && !IS_INITIAL_CHAR(name[0])) {
		name = name.substr(1, name.length() - 1);
	}
	if (name != String()) {
		String filtered;
		for (int i = 0; i < name.length(); i++) {
			if (IS_SYMBOL_CHAR(name[i])) {
				filtered += String::chr(name[i]);
			} else if (name[i] == ' ') {
				filtered += "_";
			}
		}
		name = filtered;
	}
	if (name == String()) {
		name = p_uniform->get_caption();
	}

	int attempt = 1;
	while (true) {
		// Scan every graph for another uniform already using this name.
		bool exists = false;
		for (int i = 0; i < TYPE_MAX && !exists; i++) {
			for (const Map<int, Node>::Element *E = graph[i].nodes.front(); E; E = E->next()) {
				Ref<VisualShaderNodeUniform> other = E->get().node;
				if (other == p_uniform) { // Do not test against self.
					continue;
				}
				if (other.is_valid() && other->get_uniform_name() == name) {
					exists = true;
					break;
				}
			}
		}
		if (!exists) {
			break;
		}
		// Collision: strip trailing digits and append the next attempt number.
		attempt++;
		while (name.length() && name[name.length() - 1] >= '0' && name[name.length() - 1] <= '9') {
			name = name.substr(0, name.length() - 1);
		}
		ERR_FAIL_COND_V(name == String(), String());
		name += itos(attempt);
	}
	return name;
}
// Render-mode option prefixes that form mutually-exclusive enums in the
// inspector (e.g. options starting with "blend" collapse into one "blend"
// enum property). The nullptr string terminates iteration.
VisualShader::RenderModeEnums VisualShader::render_mode_enums[] = {
	{ Shader::MODE_SPATIAL, "blend" },
	{ Shader::MODE_SPATIAL, "depth_draw" },
	{ Shader::MODE_SPATIAL, "cull" },
	{ Shader::MODE_SPATIAL, "diffuse" },
	{ Shader::MODE_SPATIAL, "specular" },
	{ Shader::MODE_CANVAS_ITEM, "blend" },
	{ Shader::MODE_CANVAS_ITEM, nullptr }
};
// Property-path segment for each function graph, indexed by VisualShader::Type
// (used in the "nodes/<type>/..." property scheme of _set/_get).
static const char *type_string[VisualShader::TYPE_MAX] = {
	"vertex",
	"fragment",
	"light"
};
// Custom property setter backing the serialized format:
//   "mode"                        -> shader mode
//   "flags/<flag>"                -> boolean render-mode flag
//   "modes/<name>"                -> enum render-mode option index
//   "nodes/<type>/connections"    -> flat int array [from, fport, to, tport, ...]
//   "nodes/<type>/<id>/node"      -> the VisualShaderNode resource
//   "nodes/<type>/<id>/position"  -> editor position
//   "nodes/<type>/<id>/size|input_ports|output_ports|expression"
//                                 -> group/expression node extras
bool VisualShader::_set(const StringName &p_name, const Variant &p_value) {
	String name = p_name;
	if (name == "mode") {
		set_mode(Shader::Mode(int(p_value)));
		return true;
	} else if (name.begins_with("flags/")) {
		StringName flag = name.get_slicec('/', 1);
		bool enable = p_value;
		if (enable) {
			flags.insert(flag);
		} else {
			flags.erase(flag);
		}
		_queue_update();
		return true;
	} else if (name.begins_with("modes/")) {
		String mode = name.get_slicec('/', 1);
		int value = p_value;
		if (value == 0) {
			modes.erase(mode); //means it's default anyway, so don't store it
		} else {
			modes[mode] = value;
		}
		_queue_update();
		return true;
	} else if (name.begins_with("nodes/")) {
		// Unknown type strings fall back to TYPE_VERTEX.
		String typestr = name.get_slicec('/', 1);
		Type type = TYPE_VERTEX;
		for (int i = 0; i < TYPE_MAX; i++) {
			if (typestr == type_string[i]) {
				type = Type(i);
				break;
			}
		}

		String index = name.get_slicec('/', 2);
		if (index == "connections") {
			// Connections arrive as a flat array of int quadruples.
			Vector<int> conns = p_value;
			if (conns.size() % 4 == 0) {
				for (int i = 0; i < conns.size(); i += 4) {
					connect_nodes_forced(type, conns[i + 0], conns[i + 1], conns[i + 2], conns[i + 3]);
				}
			}
			return true;
		}

		int id = index.to_int();
		String what = name.get_slicec('/', 3);
		if (what == "node") {
			add_node(type, p_value, Vector2(), id);
			return true;
		} else if (what == "position") {
			set_node_position(type, id, p_value);
			return true;
		} else if (what == "size") {
			// NOTE(review): the casts below assume the node at `id` really is
			// a group/expression node — no null or type check is performed.
			((VisualShaderNodeGroupBase *)get_node(type, id).ptr())->set_size(p_value);
			return true;
		} else if (what == "input_ports") {
			((VisualShaderNodeGroupBase *)get_node(type, id).ptr())->set_inputs(p_value);
			return true;
		} else if (what == "output_ports") {
			((VisualShaderNodeGroupBase *)get_node(type, id).ptr())->set_outputs(p_value);
			return true;
		} else if (what == "expression") {
			((VisualShaderNodeExpression *)get_node(type, id).ptr())->set_expression(p_value);
			return true;
		}
	}
	return false;
}
// Custom property getter — mirror of _set(); see the path scheme documented
// there.
bool VisualShader::_get(const StringName &p_name, Variant &r_ret) const {
	String name = p_name;
	if (name == "mode") {
		r_ret = get_mode();
		return true;
	} else if (name.begins_with("flags/")) {
		StringName flag = name.get_slicec('/', 1);
		r_ret = flags.has(flag);
		return true;
	} else if (name.begins_with("modes/")) {
		String mode = name.get_slicec('/', 1);
		if (modes.has(mode)) {
			r_ret = modes[mode];
		} else {
			// Unset means the default option (index 0).
			r_ret = 0;
		}
		return true;
	} else if (name.begins_with("nodes/")) {
		// Unknown type strings fall back to TYPE_VERTEX.
		String typestr = name.get_slicec('/', 1);
		Type type = TYPE_VERTEX;
		for (int i = 0; i < TYPE_MAX; i++) {
			if (typestr == type_string[i]) {
				type = Type(i);
				break;
			}
		}

		String index = name.get_slicec('/', 2);
		if (index == "connections") {
			// Serialize connections as a flat array of int quadruples.
			Vector<int> conns;
			for (const List<Connection>::Element *E = graph[type].connections.front(); E; E = E->next()) {
				conns.push_back(E->get().from_node);
				conns.push_back(E->get().from_port);
				conns.push_back(E->get().to_node);
				conns.push_back(E->get().to_port);
			}
			r_ret = conns;
			return true;
		}

		int id = index.to_int();
		String what = name.get_slicec('/', 3);
		if (what == "node") {
			r_ret = get_node(type, id);
			return true;
		} else if (what == "position") {
			r_ret = get_node_position(type, id);
			return true;
		} else if (what == "size") {
			// NOTE(review): assumes the node at `id` is a group/expression
			// node, matching the properties advertised in _get_property_list.
			r_ret = ((VisualShaderNodeGroupBase *)get_node(type, id).ptr())->get_size();
			return true;
		} else if (what == "input_ports") {
			r_ret = ((VisualShaderNodeGroupBase *)get_node(type, id).ptr())->get_inputs();
			return true;
		} else if (what == "output_ports") {
			r_ret = ((VisualShaderNodeGroupBase *)get_node(type, id).ptr())->get_outputs();
			return true;
		} else if (what == "expression") {
			r_ret = ((VisualShaderNodeExpression *)get_node(type, id).ptr())->get_expression();
			return true;
		}
	}
	return false;
}
// Advertises the dynamic properties handled by _set/_get: the shader mode,
// per-mode render-mode enums and flags, and one property group per node.
void VisualShader::_get_property_list(List<PropertyInfo> *p_list) const {
	//mode
	p_list->push_back(PropertyInfo(Variant::INT, "mode", PROPERTY_HINT_ENUM, "Node3D,CanvasItem,Particles,Sky"));
	//render modes
	Map<String, String> blend_mode_enums;
	Set<String> toggles;

	// Partition the available render modes: those matching a prefix from
	// render_mode_enums are merged into one enum property per prefix; the
	// rest become individual boolean toggle properties.
	for (int i = 0; i < ShaderTypes::get_singleton()->get_modes(RenderingServer::ShaderMode(shader_mode)).size(); i++) {
		String mode = ShaderTypes::get_singleton()->get_modes(RenderingServer::ShaderMode(shader_mode))[i];
		int idx = 0;
		bool in_enum = false;
		while (render_mode_enums[idx].string) {
			if (mode.begins_with(render_mode_enums[idx].string)) {
				String begin = render_mode_enums[idx].string;
				String option = mode.replace_first(begin + "_", "");
				if (!blend_mode_enums.has(begin)) {
					blend_mode_enums[begin] = option;
				} else {
					blend_mode_enums[begin] += "," + option;
				}
				in_enum = true;
				break;
			}
			idx++;
		}
		if (!in_enum) {
			toggles.insert(mode);
		}
	}

	for (Map<String, String>::Element *E = blend_mode_enums.front(); E; E = E->next()) {
		p_list->push_back(PropertyInfo(Variant::INT, "modes/" + E->key(), PROPERTY_HINT_ENUM, E->get()));
	}

	for (Set<String>::Element *E = toggles.front(); E; E = E->next()) {
		p_list->push_back(PropertyInfo(Variant::BOOL, "flags/" + E->get()));
	}

	// One property set per node, per graph type.
	for (int i = 0; i < TYPE_MAX; i++) {
		for (Map<int, Node>::Element *E = graph[i].nodes.front(); E; E = E->next()) {
			String prop_name = "nodes/";
			prop_name += type_string[i];
			prop_name += "/" + itos(E->key());

			// The output node is created by the constructor, so it is never
			// stored as a "node" property.
			if (E->key() != NODE_ID_OUTPUT) {
				p_list->push_back(PropertyInfo(Variant::OBJECT, prop_name + "/node", PROPERTY_HINT_RESOURCE_TYPE, "VisualShaderNode", PROPERTY_USAGE_NOEDITOR | PROPERTY_USAGE_DO_NOT_SHARE_ON_DUPLICATE));
			}
			p_list->push_back(PropertyInfo(Variant::VECTOR2, prop_name + "/position", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR));

			if (Object::cast_to<VisualShaderNodeGroupBase>(E->get().node.ptr()) != nullptr) {
				p_list->push_back(PropertyInfo(Variant::VECTOR2, prop_name + "/size", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR));
				p_list->push_back(PropertyInfo(Variant::STRING, prop_name + "/input_ports", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR));
				p_list->push_back(PropertyInfo(Variant::STRING, prop_name + "/output_ports", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR));
			}
			if (Object::cast_to<VisualShaderNodeExpression>(E->get().node.ptr()) != nullptr) {
				p_list->push_back(PropertyInfo(Variant::STRING, prop_name + "/expression", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR));
			}
		}
		p_list->push_back(PropertyInfo(Variant::PACKED_INT32_ARRAY, "nodes/" + String(type_string[i]) + "/connections", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR));
	}
}
// Recursively emits shader code for `node` and everything upstream of it.
// Connected inputs resolve to the upstream output variable "n_out<N>p<P>",
// with automatic conversions between scalar/int/vector/bool; unconnected
// inputs get an inline local "n_in<N>p<P>" holding the port's default
// value. Per-node/per-function global code is emitted once per node class
// via `r_classes`; `processed` prevents visiting any node twice.
Error VisualShader::_write_node(Type type, StringBuilder &global_code, StringBuilder &global_code_per_node, Map<Type, StringBuilder> &global_code_per_func, StringBuilder &code, Vector<VisualShader::DefaultTextureParam> &def_tex_params, const VMap<ConnectionKey, const List<Connection>::Element *> &input_connections, const VMap<ConnectionKey, const List<Connection>::Element *> &output_connections, int node, Set<int> &processed, bool for_preview, Set<StringName> &r_classes) const {
	const Ref<VisualShaderNode> vsnode = graph[type].nodes[node].node;

	//check inputs recursively first
	int input_count = vsnode->get_input_port_count();
	for (int i = 0; i < input_count; i++) {
		ConnectionKey ck;
		ck.node = node;
		ck.port = i;

		if (input_connections.has(ck)) {
			int from_node = input_connections[ck]->get().from_node;
			if (processed.has(from_node)) {
				continue;
			}

			Error err = _write_node(type, global_code, global_code_per_node, global_code_per_func, code, def_tex_params, input_connections, output_connections, from_node, processed, for_preview, r_classes);
			if (err)
				return err;
		}
	}

	// then this node

	code += "// " + vsnode->get_caption() + ":" + itos(node) + "\n";
	Vector<String> input_vars;

	input_vars.resize(vsnode->get_input_port_count());
	String *inputs = input_vars.ptrw();

	for (int i = 0; i < input_count; i++) {
		ConnectionKey ck;
		ck.node = node;
		ck.port = i;

		if (input_connections.has(ck)) {
			//connected to something, use that output
			int from_node = input_connections[ck]->get().from_node;
			int from_port = input_connections[ck]->get().from_port;

			VisualShaderNode::PortType in_type = vsnode->get_input_port_type(i);
			VisualShaderNode::PortType out_type = graph[type].nodes[from_node].node->get_output_port_type(from_port);

			String src_var = "n_out" + itos(from_node) + "p" + itos(from_port);

			if (in_type == VisualShaderNode::PORT_TYPE_SAMPLER && out_type == VisualShaderNode::PORT_TYPE_SAMPLER) {
				// Samplers cannot be assigned in the shading language, so the
				// source node's uniform/input name is forwarded instead of a
				// generated variable.
				VisualShaderNode *ptr = const_cast<VisualShaderNode *>(graph[type].nodes[from_node].node.ptr());
				if (ptr->has_method("get_input_real_name")) {
					inputs[i] = ptr->call("get_input_real_name");
				} else if (ptr->has_method("get_uniform_name")) {
					inputs[i] = ptr->call("get_uniform_name");
				} else {
					inputs[i] = "";
				}
			} else if (in_type == out_type) {
				inputs[i] = src_var;
			} else if (in_type == VisualShaderNode::PORT_TYPE_SCALAR && out_type == VisualShaderNode::PORT_TYPE_VECTOR) {
				// Implicit conversions between the scalar-like port types.
				inputs[i] = "dot(" + src_var + ", vec3(0.333333, 0.333333, 0.333333))";
			} else if (in_type == VisualShaderNode::PORT_TYPE_SCALAR_INT && out_type == VisualShaderNode::PORT_TYPE_VECTOR) {
				inputs[i] = "dot(float(" + src_var + "), vec3(0.333333, 0.333333, 0.333333))";
			} else if (in_type == VisualShaderNode::PORT_TYPE_VECTOR && out_type == VisualShaderNode::PORT_TYPE_SCALAR) {
				inputs[i] = "vec3(" + src_var + ")";
			} else if (in_type == VisualShaderNode::PORT_TYPE_VECTOR && out_type == VisualShaderNode::PORT_TYPE_SCALAR_INT) {
				inputs[i] = "vec3(float(" + src_var + "))";
			} else if (in_type == VisualShaderNode::PORT_TYPE_BOOLEAN && out_type == VisualShaderNode::PORT_TYPE_VECTOR) {
				inputs[i] = "all(bvec3(" + src_var + "))";
			} else if (in_type == VisualShaderNode::PORT_TYPE_BOOLEAN && out_type == VisualShaderNode::PORT_TYPE_SCALAR) {
				inputs[i] = src_var + " > 0.0 ? true : false";
			} else if (in_type == VisualShaderNode::PORT_TYPE_BOOLEAN && out_type == VisualShaderNode::PORT_TYPE_SCALAR_INT) {
				inputs[i] = src_var + " > 0 ? true : false";
			} else if (in_type == VisualShaderNode::PORT_TYPE_SCALAR && out_type == VisualShaderNode::PORT_TYPE_BOOLEAN) {
				inputs[i] = src_var + " ? 1.0 : 0.0";
			} else if (in_type == VisualShaderNode::PORT_TYPE_SCALAR_INT && out_type == VisualShaderNode::PORT_TYPE_BOOLEAN) {
				inputs[i] = src_var + " ? 1 : 0";
			} else if (in_type == VisualShaderNode::PORT_TYPE_VECTOR && out_type == VisualShaderNode::PORT_TYPE_BOOLEAN) {
				inputs[i] = "vec3(" + src_var + " ? 1.0 : 0.0)";
			} else if (in_type == VisualShaderNode::PORT_TYPE_SCALAR && out_type == VisualShaderNode::PORT_TYPE_SCALAR_INT) {
				inputs[i] = "float(" + src_var + ")";
			} else if (in_type == VisualShaderNode::PORT_TYPE_SCALAR_INT && out_type == VisualShaderNode::PORT_TYPE_SCALAR) {
				inputs[i] = "int(" + src_var + ")";
			}
		} else {
			// Unconnected: declare a local initialized to the port default.
			Variant defval = vsnode->get_input_port_default_value(i);
			if (defval.get_type() == Variant::FLOAT) {
				float val = defval;
				inputs[i] = "n_in" + itos(node) + "p" + itos(i);
				code += "\tfloat " + inputs[i] + " = " + vformat("%.5f", val) + ";\n";
			} else if (defval.get_type() == Variant::INT) {
				int val = defval;
				inputs[i] = "n_in" + itos(node) + "p" + itos(i);
				code += "\tint " + inputs[i] + " = " + itos(val) + ";\n";
			} else if (defval.get_type() == Variant::BOOL) {
				bool val = defval;
				inputs[i] = "n_in" + itos(node) + "p" + itos(i);
				code += "\tbool " + inputs[i] + " = " + (val ? "true" : "false") + ";\n";
			} else if (defval.get_type() == Variant::VECTOR3) {
				Vector3 val = defval;
				inputs[i] = "n_in" + itos(node) + "p" + itos(i);
				code += "\tvec3 " + inputs[i] + " = " + vformat("vec3(%.5f, %.5f, %.5f);\n", val.x, val.y, val.z);
			} else if (defval.get_type() == Variant::TRANSFORM) {
				Transform val = defval;
				// Shading-language matrices are column-major; transpose the
				// basis before serializing row by row.
				val.basis.transpose();
				inputs[i] = "n_in" + itos(node) + "p" + itos(i);
				Array values;
				for (int j = 0; j < 3; j++) {
					values.push_back(val.basis[j].x);
					values.push_back(val.basis[j].y);
					values.push_back(val.basis[j].z);
				}
				values.push_back(val.origin.x);
				values.push_back(val.origin.y);
				values.push_back(val.origin.z);
				bool err = false;
				code += "\tmat4 " + inputs[i] + " = " + String("mat4(vec4(%.5f, %.5f, %.5f, 0.0), vec4(%.5f, %.5f, %.5f, 0.0), vec4(%.5f, %.5f, %.5f, 0.0), vec4(%.5f, %.5f, %.5f, 1.0));\n").sprintf(values, &err);
			} else {
				//will go empty, node is expected to know what it is doing at this point and handle it
			}
		}
	}

	int output_count = vsnode->get_output_port_count();
	Vector<String> output_vars;
	output_vars.resize(vsnode->get_output_port_count());
	String *outputs = output_vars.ptrw();

	if (vsnode->is_simple_decl()) { // less code to generate for some simple_decl nodes
		// The node's generate_code() emits "<decl> = <expr>;" directly.
		for (int i = 0; i < output_count; i++) {
			String var_name = "n_out" + itos(node) + "p" + itos(i);
			switch (vsnode->get_output_port_type(i)) {
				case VisualShaderNode::PORT_TYPE_SCALAR: outputs[i] = "float " + var_name; break;
				case VisualShaderNode::PORT_TYPE_SCALAR_INT: outputs[i] = "int " + var_name; break;
				case VisualShaderNode::PORT_TYPE_VECTOR: outputs[i] = "vec3 " + var_name; break;
				case VisualShaderNode::PORT_TYPE_BOOLEAN: outputs[i] = "bool " + var_name; break;
				case VisualShaderNode::PORT_TYPE_TRANSFORM: outputs[i] = "mat4 " + var_name; break;
				default: {
				}
			}
		}
	} else {
		// Declare the output variables up front; generate_code() assigns them.
		for (int i = 0; i < output_count; i++) {
			outputs[i] = "n_out" + itos(node) + "p" + itos(i);
			switch (vsnode->get_output_port_type(i)) {
				case VisualShaderNode::PORT_TYPE_SCALAR: code += String() + "\tfloat " + outputs[i] + ";\n"; break;
				case VisualShaderNode::PORT_TYPE_SCALAR_INT: code += String() + "\tint " + outputs[i] + ";\n"; break;
				case VisualShaderNode::PORT_TYPE_VECTOR: code += String() + "\tvec3 " + outputs[i] + ";\n"; break;
				case VisualShaderNode::PORT_TYPE_BOOLEAN: code += String() + "\tbool " + outputs[i] + ";\n"; break;
				case VisualShaderNode::PORT_TYPE_TRANSFORM: code += String() + "\tmat4 " + outputs[i] + ";\n"; break;
				default: {
				}
			}
		}
	}

	Vector<VisualShader::DefaultTextureParam> params = vsnode->get_default_texture_parameters(type, node);
	for (int i = 0; i < params.size(); i++) {
		def_tex_params.push_back(params[i]);
	}

	// Input nodes skip global code in previews (preview substitutes values).
	Ref<VisualShaderNodeInput> input = vsnode;
	bool skip_global = input.is_valid() && for_preview;

	if (!skip_global) {
		global_code += vsnode->generate_global(get_mode(), type, node);

		String class_name = vsnode->get_class_name();
		if (class_name == "VisualShaderNodeCustom") {
			class_name = vsnode->get_script_instance()->get_script()->get_language()->get_global_class_name(vsnode->get_script_instance()->get_script()->get_path());
		}
		// Per-class global code must only be emitted once per shader.
		if (!r_classes.has(class_name)) {
			global_code_per_node += vsnode->generate_global_per_node(get_mode(), type, node);
			for (int i = 0; i < TYPE_MAX; i++) {
				global_code_per_func[Type(i)] += vsnode->generate_global_per_func(get_mode(), Type(i), node);
			}
			r_classes.insert(class_name);
		}
	}

	code += vsnode->generate_code(get_mode(), type, node, inputs, outputs, for_preview);

	code += "\n"; //
	processed.insert(node);

	return OK;
}
// Regenerates the full shader source from the graph (no-op unless dirty):
// header + render modes, global expressions, then one function per graph
// type written via _write_node starting at the output node. Per-function
// global code is spliced in afterwards at the offsets recorded in
// `insertion_pos`. Emits "changed" only if the generated code differs.
void VisualShader::_update_shader() const {
	if (!dirty)
		return;

	dirty = false;

	StringBuilder global_code;
	StringBuilder global_code_per_node;
	Map<Type, StringBuilder> global_code_per_func;
	StringBuilder code;
	Vector<VisualShader::DefaultTextureParam> default_tex_params;
	Set<StringName> classes;
	Map<int, int> insertion_pos;
	static const char *shader_mode_str[Shader::MODE_MAX] = { "spatial", "canvas_item", "particles", "sky" };

	global_code += String() + "shader_type " + shader_mode_str[shader_mode] + ";\n";

	String render_mode;

	{
		//fill render mode enums
		// For each enum prefix, the stored option index selects which of the
		// matching render modes (in declaration order) is emitted.
		int idx = 0;
		while (render_mode_enums[idx].string) {
			if (shader_mode == render_mode_enums[idx].mode) {
				if (modes.has(render_mode_enums[idx].string)) {
					int which = modes[render_mode_enums[idx].string];
					int count = 0;
					for (int i = 0; i < ShaderTypes::get_singleton()->get_modes(RenderingServer::ShaderMode(shader_mode)).size(); i++) {
						String mode = ShaderTypes::get_singleton()->get_modes(RenderingServer::ShaderMode(shader_mode))[i];
						if (mode.begins_with(render_mode_enums[idx].string)) {
							if (count == which) {
								if (render_mode != String()) {
									render_mode += ", ";
								}
								render_mode += mode;
								break;
							}
							count++;
						}
					}
				}
			}
			idx++;
		}

		//fill render mode flags
		for (int i = 0; i < ShaderTypes::get_singleton()->get_modes(RenderingServer::ShaderMode(shader_mode)).size(); i++) {
			String mode = ShaderTypes::get_singleton()->get_modes(RenderingServer::ShaderMode(shader_mode))[i];
			if (flags.has(mode)) {
				if (render_mode != String()) {
					render_mode += ", ";
				}
				render_mode += mode;
			}
		}
	}

	if (render_mode != String()) {
		global_code += "render_mode " + render_mode + ";\n\n";
	}

	static const char *func_name[TYPE_MAX] = { "vertex", "fragment", "light" };

	// Collect global expressions from graph types supported by this mode.
	String global_expressions;
	for (int i = 0, index = 0; i < TYPE_MAX; i++) {
		if (!ShaderTypes::get_singleton()->get_functions(RenderingServer::ShaderMode(shader_mode)).has(func_name[i])) {
			continue;
		}

		for (Map<int, Node>::Element *E = graph[i].nodes.front(); E; E = E->next()) {
			Ref<VisualShaderNodeGlobalExpression> global_expression = Object::cast_to<VisualShaderNodeGlobalExpression>(E->get().node.ptr());
			if (global_expression.is_valid()) {
				String expr = "";
				expr += "// " + global_expression->get_caption() + ":" + itos(index++) + "\n";
				expr += global_expression->generate_global(get_mode(), Type(i), -1);
				expr = expr.replace("\n", "\n\t");
				expr += "\n";
				global_expressions += expr;
			}
		}
	}

	for (int i = 0; i < TYPE_MAX; i++) {
		if (!ShaderTypes::get_singleton()->get_functions(RenderingServer::ShaderMode(shader_mode)).has(func_name[i])) {
			continue;
		}

		//make it faster to go around through shader
		VMap<ConnectionKey, const List<Connection>::Element *> input_connections;
		VMap<ConnectionKey, const List<Connection>::Element *> output_connections;

		for (const List<Connection>::Element *E = graph[i].connections.front(); E; E = E->next()) {
			ConnectionKey from_key;
			from_key.node = E->get().from_node;
			from_key.port = E->get().from_port;

			output_connections.insert(from_key, E);

			ConnectionKey to_key;
			to_key.node = E->get().to_node;
			to_key.port = E->get().to_port;

			input_connections.insert(to_key, E);
		}

		code += "\nvoid " + String(func_name[i]) + "() {\n";

		Set<int> processed;
		Error err = _write_node(Type(i), global_code, global_code_per_node, global_code_per_func, code, default_tex_params, input_connections, output_connections, NODE_ID_OUTPUT, processed, false, classes);
		ERR_FAIL_COND(err != OK);

		// Remember where this function's body ends so per-function global
		// code can be inserted just before the closing brace below.
		insertion_pos.insert(i, code.get_string_length());

		code += "}\n";
	}

	//set code secretly
	global_code += "\n\n";

	String final_code = global_code;
	final_code += global_code_per_node;
	final_code += global_expressions;
	String tcode = code;
	for (int i = 0; i < TYPE_MAX; i++) {
		if (!ShaderTypes::get_singleton()->get_functions(RenderingServer::ShaderMode(shader_mode)).has(func_name[i])) {
			continue;
		}
		tcode = tcode.insert(insertion_pos[i], global_code_per_func[Type(i)]);
	}
	final_code += tcode;

	const_cast<VisualShader *>(this)->set_code(final_code);
	for (int i = 0; i < default_tex_params.size(); i++) {
		const_cast<VisualShader *>(this)->set_default_texture_param(default_tex_params[i].name, default_tex_params[i].param);
	}
	if (previous_code != final_code) {
		const_cast<VisualShader *>(this)->emit_signal("changed");
	}
	previous_code = final_code;
}
// Marks the generated shader as dirty and schedules a deferred rebuild.
// Calls while already dirty are no-ops, so many graph edits collapse into
// a single _update_shader() pass.
void VisualShader::_queue_update() {
	if (!dirty) {
		dirty = true;
		call_deferred("_update_shader");
	}
}
// Erases every connection originating from node `p_id`, whose output port
// types just changed and may no longer match their targets.
//
// Fix: the original erased the list element and THEN read E->get().to_node
// from it — a use-after-free, since List::erase() deletes the element.
// The target node id is now read before erasing.
void VisualShader::_input_type_changed(Type p_type, int p_id) {
	//erase connections using this input, as type changed
	Graph *g = &graph[p_type];

	for (List<Connection>::Element *E = g->connections.front(); E;) {
		List<Connection>::Element *N = E->next();
		if (E->get().from_node == p_id) {
			int to_node = E->get().to_node; // Read before erase invalidates E.
			g->connections.erase(E);
			g->nodes[to_node].prev_connected_nodes.erase(p_id);
		}
		E = N;
	}
}
// Forces an immediate, synchronous regeneration of the shader code,
// bypassing the deferred _queue_update() path.
void VisualShader::rebuild() {
	dirty = true;
	_update_shader();
}
// Registers the script-visible API: graph editing methods, serialized
// properties, and the Type/node-id constants.
void VisualShader::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_mode", "mode"), &VisualShader::set_mode);

	ClassDB::bind_method(D_METHOD("add_node", "type", "node", "position", "id"), &VisualShader::add_node);
	ClassDB::bind_method(D_METHOD("get_node", "type", "id"), &VisualShader::get_node);

	ClassDB::bind_method(D_METHOD("set_node_position", "type", "id", "position"), &VisualShader::set_node_position);
	ClassDB::bind_method(D_METHOD("get_node_position", "type", "id"), &VisualShader::get_node_position);

	ClassDB::bind_method(D_METHOD("get_node_list", "type"), &VisualShader::get_node_list);
	ClassDB::bind_method(D_METHOD("get_valid_node_id", "type"), &VisualShader::get_valid_node_id);

	ClassDB::bind_method(D_METHOD("remove_node", "type", "id"), &VisualShader::remove_node);

	ClassDB::bind_method(D_METHOD("is_node_connection", "type", "from_node", "from_port", "to_node", "to_port"), &VisualShader::is_node_connection);
	ClassDB::bind_method(D_METHOD("can_connect_nodes", "type", "from_node", "from_port", "to_node", "to_port"), &VisualShader::can_connect_nodes);

	ClassDB::bind_method(D_METHOD("connect_nodes", "type", "from_node", "from_port", "to_node", "to_port"), &VisualShader::connect_nodes);
	ClassDB::bind_method(D_METHOD("disconnect_nodes", "type", "from_node", "from_port", "to_node", "to_port"), &VisualShader::disconnect_nodes);
	ClassDB::bind_method(D_METHOD("connect_nodes_forced", "type", "from_node", "from_port", "to_node", "to_port"), &VisualShader::connect_nodes_forced);

	// Note: the script-facing "get_node_connections" binds the Array-based
	// _get_node_connections helper, not the List-based C++ overload.
	ClassDB::bind_method(D_METHOD("get_node_connections", "type"), &VisualShader::_get_node_connections);

	ClassDB::bind_method(D_METHOD("set_version", "version"), &VisualShader::set_version);
	ClassDB::bind_method(D_METHOD("get_version"), &VisualShader::get_version);

	ClassDB::bind_method(D_METHOD("set_graph_offset", "offset"), &VisualShader::set_graph_offset);
	ClassDB::bind_method(D_METHOD("get_graph_offset"), &VisualShader::get_graph_offset);

	// Bound so call_deferred("_update_shader") from _queue_update() works.
	ClassDB::bind_method(D_METHOD("_update_shader"), &VisualShader::_update_shader);

	ADD_PROPERTY(PropertyInfo(Variant::VECTOR2, "graph_offset", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR), "set_graph_offset", "get_graph_offset");
	ADD_PROPERTY(PropertyInfo(Variant::STRING, "version", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR), "set_version", "get_version");

	BIND_ENUM_CONSTANT(TYPE_VERTEX);
	BIND_ENUM_CONSTANT(TYPE_FRAGMENT);
	BIND_ENUM_CONSTANT(TYPE_LIGHT);
	BIND_ENUM_CONSTANT(TYPE_MAX);

	BIND_CONSTANT(NODE_ID_INVALID);
	BIND_CONSTANT(NODE_ID_OUTPUT);
}
// Constructs a spatial-mode shader with one output node per graph type at
// a default editor position; the generated code is built lazily (dirty).
VisualShader::VisualShader() {
	shader_mode = Shader::MODE_SPATIAL;

	for (int i = 0; i < TYPE_MAX; i++) {
		Ref<VisualShaderNodeOutput> out;
		out.instance();
		out->shader_type = Type(i);
		out->shader_mode = shader_mode;

		Node &entry = graph[i].nodes[NODE_ID_OUTPUT];
		entry.node = out;
		entry.position = Vector2(400, 150);
	}

	dirty = true;
}
///////////////////////////////////////////////////////////
// Lookup table mapping (shader mode, function type, input name) to the
// built-in shader-language expression emitted for the input node's output
// port. Scan loops in this file stop at the trailing MODE_MAX sentinel row.
// Fix: the spatial-vertex "viewport_size" entry used an integer literal
// ("vec3(VIEWPORT_SIZE, 0)") while every other entry uses a float literal;
// normalized to "0.0" to match the fragment/light rows.
const VisualShaderNodeInput::Port VisualShaderNodeInput::ports[] = {
	// Spatial, Vertex
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "vertex", "VERTEX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "tangent", "TANGENT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "binormal", "BINORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv2", "vec3(UV2, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "point_size", "POINT_SIZE" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "world", "WORLD_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "modelview", "MODELVIEW_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "camera", "CAMERA_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "inv_camera", "INV_CAMERA_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "projection", "PROJECTION_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "inv_projection", "INV_PROJECTION_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "viewport_size", "vec3(VIEWPORT_SIZE, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_BOOLEAN, "output_is_srgb", "OUTPUT_IS_SRGB" },
	// Spatial, Fragment
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "fragcoord", "FRAGCOORD.xyz" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "vertex", "VERTEX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "tangent", "TANGENT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "binormal", "BINORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "view", "VIEW" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "uv2", "vec3(UV2, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "point_coord", "vec3(POINT_COORD, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "side", "float(FRONT_FACING ? 1.0 : 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_TRANSFORM, "world", "WORLD_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_TRANSFORM, "inv_camera", "INV_CAMERA_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_TRANSFORM, "camera", "CAMERA_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_TRANSFORM, "projection", "PROJECTION_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_TRANSFORM, "inv_projection", "INV_PROJECTION_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "viewport_size", "vec3(VIEWPORT_SIZE, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "output_is_srgb", "OUTPUT_IS_SRGB" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "front_facing", "FRONT_FACING" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SAMPLER, "screen_texture", "SCREEN_TEXTURE" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SAMPLER, "depth_texture", "DEPTH_TEXTURE" },
	// Spatial, Light
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "fragcoord", "FRAGCOORD.xyz" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "view", "VIEW" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light", "LIGHT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light_color", "LIGHT_COLOR" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "attenuation", "ATTENUATION" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "albedo", "ALBEDO" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "transmission", "TRANSMISSION" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "diffuse", "DIFFUSE_LIGHT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "specular", "SPECULAR_LIGHT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "roughness", "ROUGHNESS" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_TRANSFORM, "world", "WORLD_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_TRANSFORM, "inv_camera", "INV_CAMERA_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_TRANSFORM, "camera", "CAMERA_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_TRANSFORM, "projection", "PROJECTION_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_TRANSFORM, "inv_projection", "INV_PROJECTION_MATRIX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "viewport_size", "vec3(VIEWPORT_SIZE, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_BOOLEAN, "output_is_srgb", "OUTPUT_IS_SRGB" },
	// Canvas Item, Vertex
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "vertex", "vec3(VERTEX, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV,0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "point_size", "POINT_SIZE" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "texture_pixel_size", "vec3(TEXTURE_PIXEL_SIZE, 1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "world", "WORLD_MATRIX" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "projection", "PROJECTION_MATRIX" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "extra", "EXTRA_MATRIX" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "light_pass", "float(AT_LIGHT_PASS ? 1.0 : 0.0)" },
	// Canvas Item, Fragment
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "fragcoord", "FRAGCOORD.xyz" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "texture_pixel_size", "vec3(TEXTURE_PIXEL_SIZE, 1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_pixel_size", "vec3(SCREEN_PIXEL_SIZE, 1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "point_coord", "vec3(POINT_COORD, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "light_pass", "float(AT_LIGHT_PASS ? 1.0 : 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SAMPLER, "texture", "TEXTURE" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SAMPLER, "normal_texture", "NORMAL_TEXTURE" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SAMPLER, "screen_texture", "SCREEN_TEXTURE" },
	// Canvas Item, Light
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "fragcoord", "FRAGCOORD.xyz" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light_vec", "vec3(LIGHT_VEC, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "light_height", "LIGHT_HEIGHT" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light_color", "LIGHT_COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light_alpha", "LIGHT_COLOR.a" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light_uv", "vec3(LIGHT_UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "shadow_color", "SHADOW_COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "texture_pixel_size", "vec3(TEXTURE_PIXEL_SIZE, 1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "point_coord", "vec3(POINT_COORD, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SAMPLER, "texture", "TEXTURE" },
	// Particles, Vertex
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "velocity", "VELOCITY" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "restart", "float(RESTART ? 1.0 : 0.0)" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "active", "float(ACTIVE ? 1.0 : 0.0)" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "custom", "CUSTOM.rgb" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "custom_alpha", "CUSTOM.a" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "transform", "TRANSFORM" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "delta", "DELTA" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "lifetime", "LIFETIME" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR_INT, "index", "INDEX" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "emission_transform", "EMISSION_TRANSFORM" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	// Sky, Fragment
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "at_cubemap_pass", "AT_CUBEMAP_PASS" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "at_half_res_pass", "AT_HALF_RES_PASS" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "at_quarter_res_pass", "AT_QUARTER_RES_PASS" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "eyedir", "EYEDIR" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "half_res_color", "HALF_RES_COLOR.rgb" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "half_res_alpha", "HALF_RES_COLOR.a" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light0_color", "LIGHT0_COLOR" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light0_direction", "LIGHT0_DIRECTION" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "light0_enabled", "LIGHT0_ENABLED" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "light0_energy", "LIGHT0_ENERGY" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light1_color", "LIGHT1_COLOR" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light1_direction", "LIGHT1_DIRECTION" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "light1_enabled", "LIGHT1_ENABLED" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "light1_energy", "LIGHT1_ENERGY" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light2_color", "LIGHT2_COLOR" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light2_direction", "LIGHT2_DIRECTION" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "light2_enabled", "LIGHT2_ENABLED" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "light2_energy", "LIGHT2_ENERGY" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light3_color", "LIGHT3_COLOR" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "light3_direction", "LIGHT3_DIRECTION" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_BOOLEAN, "light3_enabled", "LIGHT3_ENABLED" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "light3_energy", "LIGHT3_ENERGY" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "position", "POSITION" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "quarter_res_color", "QUARTER_RES_COLOR.rgb" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "quarter_res_alpha", "QUARTER_RES_COLOR.a" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SAMPLER, "radiance", "RADIANCE" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "sky_coords", "vec3(SKY_COORDS, 0.0)" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	// Sentinel row: MODE_MAX terminates the linear scans above.
	{ Shader::MODE_MAX, VisualShader::TYPE_MAX, VisualShaderNode::PORT_TYPE_TRANSFORM, nullptr, nullptr },
};
// Substitute expressions used when generating preview shaders, where the
// real engine built-ins are not available. Same row layout and MODE_MAX
// sentinel convention as `ports` above; inputs with no row here fall back
// to the type-based defaults in generate_code().
// Fix: the first "Spatial, Light" row was declared TYPE_FRAGMENT, which
// byte-duplicated the fragment "normal" row above it and could never be
// reached (the scan returns the first match); it is now TYPE_LIGHT so the
// light-function preview of "normal" gets a usable default.
const VisualShaderNodeInput::Port VisualShaderNodeInput::preview_ports[] = {
	// Spatial, Fragment
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "vec3(0.0, 0.0, 1.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "tangent", "vec3(0.0, 1.0, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "binormal", "vec3(1.0, 0.0, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "uv2", "vec3(UV, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "vec3(1.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "1.0" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "side", "1.0" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "viewport_size", "vec3(1.0,1.0, 0.0)" },
	// Spatial, Light
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "vec3(0.0, 0.0, 1.0)" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "viewport_size", "vec3(1.0, 1.0, 0.0)" },
	// Canvas Item, Vertex
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "vertex", "vec3(VERTEX, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "vec3(1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "1.0" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	// Canvas Item, Fragment
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "vec3(1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "1.0" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	// Canvas Item, Light
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "vec3(UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "vec3(0.0, 0.0, 1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "vec3(1.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "1.0" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "screen_uv", "vec3(SCREEN_UV, 0.0)" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	// Particles, Vertex
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "vec3(1.0)" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "1.0" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "velocity", "vec3(0.0, 0.0, 1.0)" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "time", "TIME" },
	// Sentinel row: MODE_MAX terminates the linear scans.
	{ Shader::MODE_MAX, VisualShader::TYPE_MAX, VisualShaderNode::PORT_TYPE_TRANSFORM, nullptr, nullptr },
};
// An input node only emits a value; it has no input ports of its own.
int VisualShaderNodeInput::get_input_port_count() const {
	return 0;
}
// No input ports exist (see get_input_port_count), so this is a dummy value.
VisualShaderNodeInput::PortType VisualShaderNodeInput::get_input_port_type(int p_port) const {
	return PORT_TYPE_SCALAR;
}
// No input ports exist, so there is no name to return.
String VisualShaderNodeInput::get_input_port_name(int p_port) const {
	return "";
}
// An input node exposes exactly one output port carrying the selected input.
int VisualShaderNodeInput::get_output_port_count() const {
	return 1;
}
// The single output port's type follows the currently selected input name
// (PORT_TYPE_SCALAR when the name has no entry in the ports table).
VisualShaderNodeInput::PortType VisualShaderNodeInput::get_output_port_type(int p_port) const {
	return get_input_type_by_name(input_name);
}
// The output port is unlabeled; the node caption/selector conveys the name.
String VisualShaderNodeInput::get_output_port_name(int p_port) const {
	return "";
}
// Caption shown in the visual shader graph editor.
String VisualShaderNodeInput::get_caption() const {
	return "Input";
}
// Emits the assignment that copies the selected built-in input into this
// node's output variable. For previews, preview_ports substitutes are used
// and unknown inputs fall back to a neutral constant of the port's type;
// for real shaders, unknown inputs fall back to a scalar 0.0.
String VisualShaderNodeInput::generate_code(Shader::Mode p_mode, VisualShader::Type p_type, int p_id, const String *p_input_vars, const String *p_output_vars, bool p_for_preview) const {
	// Samplers are referenced by name elsewhere; no assignment is emitted.
	if (get_output_port_type(0) == PORT_TYPE_SAMPLER) {
		return "";
	}
	// Both tables share the same layout and sentinel, so a single scan works.
	const Port *table = p_for_preview ? preview_ports : ports;
	String assignment;
	for (int i = 0; table[i].mode != Shader::MODE_MAX; i++) {
		if (table[i].mode == shader_mode && table[i].shader_type == shader_type && table[i].name == input_name) {
			assignment = "\t" + p_output_vars[0] + " = " + table[i].string + ";\n";
			break;
		}
	}
	if (assignment != String()) {
		return assignment;
	}
	if (!p_for_preview) {
		return "\t" + p_output_vars[0] + " = 0.0;\n"; //default (none found) is scalar
	}
	// Preview fallback: a neutral constant matching the output port type.
	switch (get_output_port_type(0)) {
		case PORT_TYPE_SCALAR: {
			assignment = "\t" + p_output_vars[0] + " = 0.0;\n";
		} break;
		case PORT_TYPE_SCALAR_INT: {
			assignment = "\t" + p_output_vars[0] + " = 0;\n";
		} break;
		case PORT_TYPE_VECTOR: {
			assignment = "\t" + p_output_vars[0] + " = vec3(0.0);\n";
		} break;
		case PORT_TYPE_TRANSFORM: {
			assignment = "\t" + p_output_vars[0] + " = mat4( vec4(1.0,0.0,0.0,0.0), vec4(0.0,1.0,0.0,0.0), vec4(0.0,0.0,1.0,0.0), vec4(0.0,0.0,0.0,1.0) );\n";
		} break;
		case PORT_TYPE_BOOLEAN: {
			assignment = "\t" + p_output_vars[0] + " = false;\n";
		} break;
		default: //default (none found) is scalar
			break;
	}
	return assignment;
}
// Selects which built-in input this node represents. Notifies listeners via
// emit_changed(), and additionally fires "input_type_changed" when the new
// name maps to a different output port type.
void VisualShaderNodeInput::set_input_name(String p_name) {
	const PortType previous_type = get_input_type_by_name(input_name);
	input_name = p_name;
	const PortType current_type = get_input_type_by_name(input_name);
	emit_changed();
	if (current_type != previous_type) {
		emit_signal("input_type_changed");
	}
}
// Returns the currently selected input name (see set_input_name).
String VisualShaderNodeInput::get_input_name() const {
	return input_name;
}
// Resolves the selected input name to the shader-language expression stored
// in the ports table, or "" when no row matches the current mode/type/name.
String VisualShaderNodeInput::get_input_real_name() const {
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode == shader_mode && ports[i].shader_type == shader_type && ports[i].name == input_name) {
			return String(ports[i].string);
		}
	}
	return "";
}
// Looks up the port type for an input name under the current mode/type;
// defaults to PORT_TYPE_SCALAR when the name has no table entry.
VisualShaderNodeInput::PortType VisualShaderNodeInput::get_input_type_by_name(String p_name) const {
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode == shader_mode && ports[i].shader_type == shader_type && ports[i].name == p_name) {
			return ports[i].type;
		}
	}
	return PORT_TYPE_SCALAR;
}
// Counts how many ports-table rows apply to the current shader mode/type.
int VisualShaderNodeInput::get_input_index_count() const {
	int matches = 0;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode == shader_mode && ports[i].shader_type == shader_type) {
			matches++;
		}
	}
	return matches;
}
// Returns the port type of the p_index-th table row matching the current
// mode/type; PORT_TYPE_SCALAR when p_index is out of range.
VisualShaderNodeInput::PortType VisualShaderNodeInput::get_input_index_type(int p_index) const {
	int matches = 0;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode != shader_mode || ports[i].shader_type != shader_type) {
			continue;
		}
		if (matches == p_index) {
			return ports[i].type;
		}
		matches++;
	}
	return PORT_TYPE_SCALAR;
}
// Returns the input name of the p_index-th table row matching the current
// mode/type; "" when p_index is out of range.
String VisualShaderNodeInput::get_input_index_name(int p_index) const {
	int matches = 0;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode != shader_mode || ports[i].shader_type != shader_type) {
			continue;
		}
		if (matches == p_index) {
			return ports[i].name;
		}
		matches++;
	}
	return "";
}
// Populates the inspector enum hint for "input_name" with the comma-joined
// list of inputs valid for the current shader mode/type.
void VisualShaderNodeInput::_validate_property(PropertyInfo &property) const {
	if (property.name != "input_name") {
		return;
	}
	String hint;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode == shader_mode && ports[i].shader_type == shader_type) {
			if (hint != String()) {
				hint += ",";
			}
			hint += ports[i].name;
		}
	}
	if (hint == "") {
		hint = TTR("None");
	}
	property.hint_string = hint;
}
// Only "input_name" is editable directly on the node in the graph editor.
Vector<StringName> VisualShaderNodeInput::get_editable_properties() const {
	Vector<StringName> props;
	props.push_back("input_name");
	return props;
}
// Registers script-accessible methods, the "input_name" property (its enum
// hint string is filled in dynamically by _validate_property), and the
// "input_type_changed" signal fired by set_input_name.
void VisualShaderNodeInput::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_input_name", "name"), &VisualShaderNodeInput::set_input_name);
	ClassDB::bind_method(D_METHOD("get_input_name"), &VisualShaderNodeInput::get_input_name);
	ClassDB::bind_method(D_METHOD("get_input_real_name"), &VisualShaderNodeInput::get_input_real_name);
	ADD_PROPERTY(PropertyInfo(Variant::STRING_NAME, "input_name", PROPERTY_HINT_ENUM, ""), "set_input_name", "get_input_name");
	ADD_SIGNAL(MethodInfo("input_type_changed"));
}
VisualShaderNodeInput::VisualShaderNodeInput() {
	input_name = "[None]";
	// MODE_MAX/TYPE_MAX act as "unset" placeholders; they are replaced when
	// the node is attached to a shader graph.
	shader_mode = Shader::MODE_MAX;
	shader_type = VisualShader::TYPE_MAX;
}
////////////////////////////////////////////
// Lookup table mapping (shader mode, function type) to the output node's
// input ports and the built-in they write to. A ":xy" suffix on the target
// expression marks a swizzled write (only those components are assigned).
// The trailing MODE_MAX row is the sentinel terminating the scan loops.
const VisualShaderNodeOutput::Port VisualShaderNodeOutput::ports[] = {
	// Spatial, Vertex
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "vertex", "VERTEX" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "tangent", "TANGENT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "binormal", "BINORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "UV:xy" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv2", "UV2:xy" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "roughness", "ROUGHNESS" },
	// Spatial, Fragment
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "albedo", "ALBEDO" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "ALPHA" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "metallic", "METALLIC" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "roughness", "ROUGHNESS" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "specular", "SPECULAR" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "emission", "EMISSION" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "ao", "AO" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "normalmap", "NORMALMAP" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "normalmap_depth", "NORMALMAP_DEPTH" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "rim", "RIM" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "rim_tint", "RIM_TINT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "clearcoat", "CLEARCOAT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "clearcoat_gloss", "CLEARCOAT_GLOSS" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "anisotropy", "ANISOTROPY" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "anisotropy_flow", "ANISOTROPY_FLOW:xy" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "subsurf_scatter", "SSS_STRENGTH" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "transmission", "TRANSMISSION" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha_scissor", "ALPHA_SCISSOR" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "ao_light_affect", "AO_LIGHT_AFFECT" },
	// Spatial, Light
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "diffuse", "DIFFUSE_LIGHT" },
	{ Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "specular", "SPECULAR_LIGHT" },
	// Canvas Item, Vertex
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "vertex", "VERTEX:xy" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "uv", "UV:xy" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	// Canvas Item, Fragment
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "normal", "NORMAL" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "normalmap", "NORMALMAP" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "normalmap_depth", "NORMALMAP_DEPTH" },
	// Canvas Item, Light
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR, "light", "LIGHT.rgb" },
	{ Shader::MODE_CANVAS_ITEM, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "light_alpha", "LIGHT.a" },
	// Particles, Vertex
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR.rgb" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "COLOR.a" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "velocity", "VELOCITY" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_VECTOR, "custom", "CUSTOM.rgb" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_SCALAR, "custom_alpha", "CUSTOM.a" },
	{ Shader::MODE_PARTICLES, VisualShader::TYPE_VERTEX, VisualShaderNode::PORT_TYPE_TRANSFORM, "transform", "TRANSFORM" },
	// Sky, Fragment
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_VECTOR, "color", "COLOR" },
	{ Shader::MODE_SKY, VisualShader::TYPE_FRAGMENT, VisualShaderNode::PORT_TYPE_SCALAR, "alpha", "ALPHA" },
	{ Shader::MODE_MAX, VisualShader::TYPE_MAX, VisualShaderNode::PORT_TYPE_TRANSFORM, nullptr, nullptr },
};
// Counts entries of the static `ports` table that match the node's current
// shader mode and function type; each match is one visible input port.
int VisualShaderNodeOutput::get_input_port_count() const {
	int matching = 0;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode == shader_mode && ports[i].shader_type == shader_type) {
			matching++;
		}
	}
	return matching;
}
// Returns the declared type of the p_port-th matching entry in `ports`
// for the current mode/type; defaults to scalar when p_port is out of range.
VisualShaderNodeOutput::PortType VisualShaderNodeOutput::get_input_port_type(int p_port) const {
	int matched = 0;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode != shader_mode || ports[i].shader_type != shader_type) {
			continue;
		}
		if (matched == p_port) {
			return ports[i].type;
		}
		matched++;
	}
	return PORT_TYPE_SCALAR;
}
// Returns the display name of the p_port-th matching entry in `ports`,
// capitalized for the editor UI; empty when p_port is out of range.
String VisualShaderNodeOutput::get_input_port_name(int p_port) const {
	int matched = 0;
	for (int i = 0; ports[i].mode != Shader::MODE_MAX; i++) {
		if (ports[i].mode != shader_mode || ports[i].shader_type != shader_type) {
			continue;
		}
		if (matched == p_port) {
			return String(ports[i].name).capitalize();
		}
		matched++;
	}
	return String();
}
// The output node's inputs have no default values; unconnected ports simply
// emit nothing in generate_code().
Variant VisualShaderNodeOutput::get_input_port_default_value(int p_port) const {
	return Variant();
}
// The output node is a sink: it exposes no output ports.
int VisualShaderNodeOutput::get_output_port_count() const {
	return 0;
}
// No output ports exist; the returned type is a placeholder.
VisualShaderNodeOutput::PortType VisualShaderNodeOutput::get_output_port_type(int p_port) const {
	return PORT_TYPE_SCALAR;
}
// No output ports exist; the returned name is a placeholder.
String VisualShaderNodeOutput::get_output_port_name(int p_port) const {
	return String();
}
// Draws a visual separator line before selected ports in the editor.
// Only the spatial fragment function groups its ports this way.
bool VisualShaderNodeOutput::is_port_separator(int p_index) const {
	if (shader_mode == Shader::MODE_SPATIAL && shader_type == VisualShader::TYPE_FRAGMENT) {
		String name = get_input_port_name(p_index);
		return (name == "Normal" || name == "Rim" || name == "Alpha Scissor");
	}
	return false;
}
// Title shown on the node in the graph editor.
String VisualShaderNodeOutput::get_caption() const {
	return "Output";
}
// Emits one assignment per connected input port, writing the incoming
// expression into the corresponding built-in shader variable from `ports`.
// Table entries like "VERTEX:xy" mean "assign into that swizzle of the
// built-in" (e.g. `VERTEX.xy = <expr>.xy;`).
String VisualShaderNodeOutput::generate_code(Shader::Mode p_mode, VisualShader::Type p_type, int p_id, const String *p_input_vars, const String *p_output_vars, bool p_for_preview) const {
	int idx = 0;
	int count = 0;
	String code;
	while (ports[idx].mode != Shader::MODE_MAX) {
		if (ports[idx].mode == shader_mode && ports[idx].shader_type == shader_type) {
			// Empty input var means the port is unconnected: emit nothing.
			if (p_input_vars[count] != String()) {
				String s = ports[idx].string;
				if (s.find(":") != -1) {
					// "NAME:swizzle" — assign through the swizzle on both sides.
					code += "\t" + s.get_slicec(':', 0) + " = " + p_input_vars[count] + "." + s.get_slicec(':', 1) + ";\n";
				} else {
					code += "\t" + s + " = " + p_input_vars[count] + ";\n";
				}
			}
			count++;
		}
		idx++;
	}
	return code;
}
VisualShaderNodeOutput::VisualShaderNodeOutput() {
}
///////////////////////////
// Sets the shader uniform identifier exposed by this node and notifies
// listeners (the graph editor re-labels the node on "name_changed").
void VisualShaderNodeUniform::set_uniform_name(const String &p_name) {
	uniform_name = p_name;
	emit_signal("name_changed");
	emit_changed();
}
String VisualShaderNodeUniform::get_uniform_name() const {
	return uniform_name;
}
// Script/editor bindings for the uniform_name property.
void VisualShaderNodeUniform::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_uniform_name", "name"), &VisualShaderNodeUniform::set_uniform_name);
	ClassDB::bind_method(D_METHOD("get_uniform_name"), &VisualShaderNodeUniform::get_uniform_name);
	ADD_PROPERTY(PropertyInfo(Variant::STRING_NAME, "uniform_name"), "set_uniform_name", "get_uniform_name");
}
VisualShaderNodeUniform::VisualShaderNodeUniform() {
}
////////////// GroupBase
// Title shown on the node in the graph editor.
String VisualShaderNodeGroupBase::get_caption() const {
	return "Group";
}
// Editor-facing size of the group node's frame (not shader-relevant).
void VisualShaderNodeGroupBase::set_size(const Vector2 &p_size) {
	size = p_size;
}
Vector2 VisualShaderNodeGroupBase::get_size() const {
	return size;
}
// Rebuilds the input port map from its serialized string form.
// Format: "idx,type,name;" repeated — one comma triple per port,
// semicolon-terminated. The string is kept as the canonical store; the
// input_ports map is derived from it.
void VisualShaderNodeGroupBase::set_inputs(const String &p_inputs) {
	if (inputs == p_inputs)
		return;
	clear_input_ports();
	inputs = p_inputs;
	Vector<String> input_strings = inputs.split(";", false);
	int input_port_count = input_strings.size();
	for (int i = 0; i < input_port_count; i++) {
		Vector<String> arr = input_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		int port_idx = arr[0].to_int();
		int port_type = arr[1].to_int();
		String port_name = arr[2];
		Port port;
		port.type = (PortType)port_type;
		port.name = port_name;
		// Keyed by the serialized index, not the loop counter.
		input_ports[port_idx] = port;
	}
}
// Returns the serialized "idx,type,name;..." form of the input ports.
String VisualShaderNodeGroupBase::get_inputs() const {
	return inputs;
}
// Rebuilds the output port map from its serialized string form.
// Mirrors set_inputs(): "idx,type,name;" triples, string is canonical.
void VisualShaderNodeGroupBase::set_outputs(const String &p_outputs) {
	if (outputs == p_outputs)
		return;
	clear_output_ports();
	outputs = p_outputs;
	Vector<String> output_strings = outputs.split(";", false);
	int output_port_count = output_strings.size();
	for (int i = 0; i < output_port_count; i++) {
		Vector<String> arr = output_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		int port_idx = arr[0].to_int();
		int port_type = arr[1].to_int();
		String port_name = arr[2];
		Port port;
		port.type = (PortType)port_type;
		port.name = port_name;
		// Keyed by the serialized index, not the loop counter.
		output_ports[port_idx] = port;
	}
}
// Returns the serialized "idx,type,name;..." form of the output ports.
String VisualShaderNodeGroupBase::get_outputs() const {
	return outputs;
}
// A port name is usable when it is a legal identifier and is not already
// taken by any existing input or output port of this group.
bool VisualShaderNodeGroupBase::is_valid_port_name(const String &p_name) const {
	if (!p_name.is_valid_identifier()) {
		return false;
	}
	const int input_count = get_input_port_count();
	for (int idx = 0; idx < input_count; idx++) {
		if (p_name == get_input_port_name(idx)) {
			return false;
		}
	}
	const int output_count = get_output_port_count();
	for (int idx = 0; idx < output_count; idx++) {
		if (p_name == get_output_port_name(idx)) {
			return false;
		}
	}
	return true;
}
// Inserts a new "id,type,name;" triple into the serialized `inputs` string
// at position p_id (or appends when p_id is past the end), then rewrites
// every triple's leading id so ids stay equal to their ordinal position.
void VisualShaderNodeGroupBase::add_input_port(int p_id, int p_type, const String &p_name) {
	String str = itos(p_id) + "," + itos(p_type) + "," + p_name + ";";
	Vector<String> inputs_strings = inputs.split(";", false);
	int index = 0;
	if (p_id < inputs_strings.size()) {
		// Walk to the character offset of the p_id-th triple and insert there.
		// NOTE(review): the offset math relies on String::size() counting one
		// extra element beyond length(), which compensates for the ';'
		// stripped by split() — confirm against the String implementation.
		for (int i = 0; i < inputs_strings.size(); i++) {
			if (i == p_id) {
				inputs = inputs.insert(index, str);
				break;
			}
			index += inputs_strings[i].size();
		}
	} else {
		inputs += str;
	}
	// Renumber: replace each triple's id field with its position i.
	inputs_strings = inputs.split(";", false);
	index = 0;
	for (int i = 0; i < inputs_strings.size(); i++) {
		int count = 0;
		// Length of the id field = characters before the first ','.
		for (int j = 0; j < inputs_strings[i].size(); j++) {
			if (inputs_strings[i][j] == ',') {
				break;
			}
			count++;
		}
		inputs.erase(index, count);
		inputs = inputs.insert(index, itos(i));
		index += inputs_strings[i].size();
	}
	// Rebuild the port maps from the updated serialized string.
	_apply_port_changes();
}
// Deletes the triple whose id equals p_id from the serialized `inputs`
// string, then renumbers the ids of the triples that followed it.
void VisualShaderNodeGroupBase::remove_input_port(int p_id) {
	ERR_FAIL_COND(!has_input_port(p_id));
	Vector<String> inputs_strings = inputs.split(";", false);
	int count = 0;
	int index = 0;
	// Locate the character offset and length of the triple to remove.
	for (int i = 0; i < inputs_strings.size(); i++) {
		Vector<String> arr = inputs_strings[i].split(",");
		if (arr[0].to_int() == p_id) {
			count = inputs_strings[i].size();
			break;
		}
		index += inputs_strings[i].size();
	}
	inputs.erase(index, count);
	// Shift the ids of all later ports down by one.
	inputs_strings = inputs.split(";", false);
	for (int i = p_id; i < inputs_strings.size(); i++) {
		inputs = inputs.replace_first(inputs_strings[i].split(",")[0], itos(i));
	}
	_apply_port_changes();
}
// Number of input ports currently parsed into the map.
int VisualShaderNodeGroupBase::get_input_port_count() const {
	return input_ports.size();
}
// True when an input port with the given id exists.
bool VisualShaderNodeGroupBase::has_input_port(int p_id) const {
	return input_ports.has(p_id);
}
// Output-side twin of add_input_port(): inserts an "id,type,name;" triple
// at position p_id in the serialized `outputs` string, then renumbers all
// leading ids to match their ordinal positions.
void VisualShaderNodeGroupBase::add_output_port(int p_id, int p_type, const String &p_name) {
	String str = itos(p_id) + "," + itos(p_type) + "," + p_name + ";";
	Vector<String> outputs_strings = outputs.split(";", false);
	int index = 0;
	if (p_id < outputs_strings.size()) {
		// Walk to the character offset of the p_id-th triple and insert there.
		// NOTE(review): offset math assumes String::size() includes one extra
		// element beyond length(), compensating for the stripped ';' — confirm.
		for (int i = 0; i < outputs_strings.size(); i++) {
			if (i == p_id) {
				outputs = outputs.insert(index, str);
				break;
			}
			index += outputs_strings[i].size();
		}
	} else {
		outputs += str;
	}
	// Renumber: replace each triple's id field with its position i.
	outputs_strings = outputs.split(";", false);
	index = 0;
	for (int i = 0; i < outputs_strings.size(); i++) {
		int count = 0;
		// Length of the id field = characters before the first ','.
		for (int j = 0; j < outputs_strings[i].size(); j++) {
			if (outputs_strings[i][j] == ',') {
				break;
			}
			count++;
		}
		outputs.erase(index, count);
		outputs = outputs.insert(index, itos(i));
		index += outputs_strings[i].size();
	}
	_apply_port_changes();
}
// Output-side twin of remove_input_port(): deletes the triple with id p_id
// from the serialized `outputs` string and renumbers later triples.
void VisualShaderNodeGroupBase::remove_output_port(int p_id) {
	ERR_FAIL_COND(!has_output_port(p_id));
	Vector<String> outputs_strings = outputs.split(";", false);
	int count = 0;
	int index = 0;
	// Locate the character offset and length of the triple to remove.
	for (int i = 0; i < outputs_strings.size(); i++) {
		Vector<String> arr = outputs_strings[i].split(",");
		if (arr[0].to_int() == p_id) {
			count = outputs_strings[i].size();
			break;
		}
		index += outputs_strings[i].size();
	}
	outputs.erase(index, count);
	// Shift the ids of all later ports down by one.
	outputs_strings = outputs.split(";", false);
	for (int i = p_id; i < outputs_strings.size(); i++) {
		outputs = outputs.replace_first(outputs_strings[i].split(",")[0], itos(i));
	}
	_apply_port_changes();
}
// Number of output ports currently parsed into the map.
int VisualShaderNodeGroupBase::get_output_port_count() const {
	return output_ports.size();
}
// True when an output port with the given id exists.
bool VisualShaderNodeGroupBase::has_output_port(int p_id) const {
	return output_ports.has(p_id);
}
// Drops the parsed input port map (the serialized string is untouched).
void VisualShaderNodeGroupBase::clear_input_ports() {
	input_ports.clear();
}
// Drops the parsed output port map (the serialized string is untouched).
void VisualShaderNodeGroupBase::clear_output_ports() {
	output_ports.clear();
}
// Rewrites the type field of port p_id inside the serialized `inputs`
// string in place, then re-parses the port maps.
void VisualShaderNodeGroupBase::set_input_port_type(int p_id, int p_type) {
	ERR_FAIL_COND(!has_input_port(p_id));
	ERR_FAIL_COND(p_type < 0 || p_type >= PORT_TYPE_MAX);
	if (input_ports[p_id].type == p_type)
		return;
	Vector<String> inputs_strings = inputs.split(";", false);
	int count = 0;
	int index = 0;
	// Find the character offset of the type field: skip past the id field
	// (arr[0].size() covers the id plus its trailing ',' — see the
	// String::size() convention noted in add_input_port).
	for (int i = 0; i < inputs_strings.size(); i++) {
		Vector<String> arr = inputs_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		if (arr[0].to_int() == p_id) {
			index += arr[0].size();
			count = arr[1].size() - 1;
			break;
		}
		index += inputs_strings[i].size();
	}
	inputs.erase(index, count);
	inputs = inputs.insert(index, itos(p_type));
	_apply_port_changes();
}
// Type of input port p_id; scalar (enum value 0) when the id is unknown.
VisualShaderNodeGroupBase::PortType VisualShaderNodeGroupBase::get_input_port_type(int p_id) const {
	ERR_FAIL_COND_V(!input_ports.has(p_id), (PortType)0);
	return input_ports[p_id].type;
}
// Rewrites the name field of port p_id inside the serialized `inputs`
// string in place, then re-parses the port maps. Rejects names that are
// not valid identifiers or collide with existing port names.
void VisualShaderNodeGroupBase::set_input_port_name(int p_id, const String &p_name) {
	ERR_FAIL_COND(!has_input_port(p_id));
	ERR_FAIL_COND(!is_valid_port_name(p_name));
	if (input_ports[p_id].name == p_name)
		return;
	Vector<String> inputs_strings = inputs.split(";", false);
	int count = 0;
	int index = 0;
	// Offset of the name field = past the id and type fields (their sizes
	// include the trailing ',' separators — see add_input_port's note).
	for (int i = 0; i < inputs_strings.size(); i++) {
		Vector<String> arr = inputs_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		if (arr[0].to_int() == p_id) {
			index += arr[0].size() + arr[1].size();
			count = arr[2].size() - 1;
			break;
		}
		index += inputs_strings[i].size();
	}
	inputs.erase(index, count);
	inputs = inputs.insert(index, p_name);
	_apply_port_changes();
}
// Name of input port p_id; empty when the id is unknown.
String VisualShaderNodeGroupBase::get_input_port_name(int p_id) const {
	ERR_FAIL_COND_V(!input_ports.has(p_id), "");
	return input_ports[p_id].name;
}
// Output-side twin of set_input_port_type(): rewrites the type field of
// port p_id inside the serialized `outputs` string, then re-parses.
void VisualShaderNodeGroupBase::set_output_port_type(int p_id, int p_type) {
	ERR_FAIL_COND(!has_output_port(p_id));
	ERR_FAIL_COND(p_type < 0 || p_type >= PORT_TYPE_MAX);
	if (output_ports[p_id].type == p_type)
		return;
	Vector<String> output_strings = outputs.split(";", false);
	int count = 0;
	int index = 0;
	// Find the character offset of the type field (id field size includes
	// its trailing ',' — see add_input_port's note).
	for (int i = 0; i < output_strings.size(); i++) {
		Vector<String> arr = output_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		if (arr[0].to_int() == p_id) {
			index += arr[0].size();
			count = arr[1].size() - 1;
			break;
		}
		index += output_strings[i].size();
	}
	outputs.erase(index, count);
	outputs = outputs.insert(index, itos(p_type));
	_apply_port_changes();
}
// Type of output port p_id; scalar (enum value 0) when the id is unknown.
VisualShaderNodeGroupBase::PortType VisualShaderNodeGroupBase::get_output_port_type(int p_id) const {
	ERR_FAIL_COND_V(!output_ports.has(p_id), (PortType)0);
	return output_ports[p_id].type;
}
// Output-side twin of set_input_port_name(): rewrites the name field of
// port p_id inside the serialized `outputs` string, then re-parses.
void VisualShaderNodeGroupBase::set_output_port_name(int p_id, const String &p_name) {
	ERR_FAIL_COND(!has_output_port(p_id));
	ERR_FAIL_COND(!is_valid_port_name(p_name));
	if (output_ports[p_id].name == p_name)
		return;
	Vector<String> output_strings = outputs.split(";", false);
	int count = 0;
	int index = 0;
	// Offset of the name field = past the id and type fields (their sizes
	// include the trailing ',' separators — see add_input_port's note).
	for (int i = 0; i < output_strings.size(); i++) {
		Vector<String> arr = output_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		if (arr[0].to_int() == p_id) {
			index += arr[0].size() + arr[1].size();
			count = arr[2].size() - 1;
			break;
		}
		index += output_strings[i].size();
	}
	outputs.erase(index, count);
	outputs = outputs.insert(index, p_name);
	_apply_port_changes();
}
// Name of output port p_id; empty when the id is unknown.
String VisualShaderNodeGroupBase::get_output_port_name(int p_id) const {
	ERR_FAIL_COND_V(!output_ports.has(p_id), "");
	return output_ports[p_id].name;
}
// Next unused input port id. Ids are kept equal to ordinal positions by
// add/remove, so the count is always the first free id.
int VisualShaderNodeGroupBase::get_free_input_port_id() const {
	return input_ports.size();
}
// Next unused output port id (same invariant as above).
int VisualShaderNodeGroupBase::get_free_output_port_id() const {
	return output_ports.size();
}
// Associates an editor Control widget with slot p_index.
// NOTE(review): stores a raw pointer — lifetime is presumably managed by
// the editor that registers the control; confirm against the caller.
void VisualShaderNodeGroupBase::set_control(Control *p_control, int p_index) {
	controls[p_index] = p_control;
}
// Returns the Control registered at p_index, or null when none is set.
Control *VisualShaderNodeGroupBase::get_control(int p_index) {
	ERR_FAIL_COND_V(!controls.has(p_index), nullptr);
	return controls[p_index];
}
// Re-derives input_ports/output_ports from the canonical serialized
// strings. Ports are re-keyed by position i (not by the stored id field),
// which matches the renumbering invariant maintained by add/remove.
void VisualShaderNodeGroupBase::_apply_port_changes() {
	Vector<String> inputs_strings = inputs.split(";", false);
	Vector<String> outputs_strings = outputs.split(";", false);
	clear_input_ports();
	clear_output_ports();
	for (int i = 0; i < inputs_strings.size(); i++) {
		Vector<String> arr = inputs_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		Port port;
		port.type = (PortType)arr[1].to_int();
		port.name = arr[2];
		input_ports[i] = port;
	}
	for (int i = 0; i < outputs_strings.size(); i++) {
		Vector<String> arr = outputs_strings[i].split(",");
		ERR_FAIL_COND(arr.size() != 3);
		Port port;
		port.type = (PortType)arr[1].to_int();
		port.name = arr[2];
		output_ports[i] = port;
	}
}
// Controls whether the group's ports can be edited in the graph editor.
void VisualShaderNodeGroupBase::set_editable(bool p_enabled) {
	editable = p_enabled;
}
bool VisualShaderNodeGroupBase::is_editable() const {
	return editable;
}
// Script/editor bindings. Only `size` is exposed as a property; the port
// strings and port-manipulation calls are bound as plain methods.
void VisualShaderNodeGroupBase::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_size", "size"), &VisualShaderNodeGroupBase::set_size);
	ClassDB::bind_method(D_METHOD("get_size"), &VisualShaderNodeGroupBase::get_size);
	ClassDB::bind_method(D_METHOD("set_inputs", "inputs"), &VisualShaderNodeGroupBase::set_inputs);
	ClassDB::bind_method(D_METHOD("get_inputs"), &VisualShaderNodeGroupBase::get_inputs);
	ClassDB::bind_method(D_METHOD("set_outputs", "outputs"), &VisualShaderNodeGroupBase::set_outputs);
	ClassDB::bind_method(D_METHOD("get_outputs"), &VisualShaderNodeGroupBase::get_outputs);
	ClassDB::bind_method(D_METHOD("is_valid_port_name", "name"), &VisualShaderNodeGroupBase::is_valid_port_name);
	ClassDB::bind_method(D_METHOD("add_input_port", "id", "type", "name"), &VisualShaderNodeGroupBase::add_input_port);
	ClassDB::bind_method(D_METHOD("remove_input_port", "id"), &VisualShaderNodeGroupBase::remove_input_port);
	ClassDB::bind_method(D_METHOD("get_input_port_count"), &VisualShaderNodeGroupBase::get_input_port_count);
	ClassDB::bind_method(D_METHOD("has_input_port", "id"), &VisualShaderNodeGroupBase::has_input_port);
	ClassDB::bind_method(D_METHOD("clear_input_ports"), &VisualShaderNodeGroupBase::clear_input_ports);
	ClassDB::bind_method(D_METHOD("add_output_port", "id", "type", "name"), &VisualShaderNodeGroupBase::add_output_port);
	ClassDB::bind_method(D_METHOD("remove_output_port", "id"), &VisualShaderNodeGroupBase::remove_output_port);
	ClassDB::bind_method(D_METHOD("get_output_port_count"), &VisualShaderNodeGroupBase::get_output_port_count);
	ClassDB::bind_method(D_METHOD("has_output_port", "id"), &VisualShaderNodeGroupBase::has_output_port);
	ClassDB::bind_method(D_METHOD("clear_output_ports"), &VisualShaderNodeGroupBase::clear_output_ports);
	ClassDB::bind_method(D_METHOD("set_input_port_name", "id", "name"), &VisualShaderNodeGroupBase::set_input_port_name);
	ClassDB::bind_method(D_METHOD("set_input_port_type", "id", "type"), &VisualShaderNodeGroupBase::set_input_port_type);
	ClassDB::bind_method(D_METHOD("set_output_port_name", "id", "name"), &VisualShaderNodeGroupBase::set_output_port_name);
	ClassDB::bind_method(D_METHOD("set_output_port_type", "id", "type"), &VisualShaderNodeGroupBase::set_output_port_type);
	ClassDB::bind_method(D_METHOD("get_free_input_port_id"), &VisualShaderNodeGroupBase::get_free_input_port_id);
	ClassDB::bind_method(D_METHOD("get_free_output_port_id"), &VisualShaderNodeGroupBase::get_free_output_port_id);
	ADD_PROPERTY(PropertyInfo(Variant::VECTOR2, "size"), "set_size", "get_size");
}
// The base group emits no shader code; concrete subclasses (e.g. the
// expression nodes below) override generation behavior.
String VisualShaderNodeGroupBase::generate_code(Shader::Mode p_mode, VisualShader::Type p_type, int p_id, const String *p_input_vars, const String *p_output_vars, bool p_for_preview) const {
	return "";
}
VisualShaderNodeGroupBase::VisualShaderNodeGroupBase() {
	size = Size2(0, 0);
	inputs = "";
	outputs = "";
	editable = false;
	simple_decl = false;
}
////////////// Expression
// Title shown on the node in the graph editor.
String VisualShaderNodeExpression::get_caption() const {
	return "Expression";
}
// Stores the user-authored shader-language snippet verbatim; it is only
// processed at code-generation time.
void VisualShaderNodeExpression::set_expression(const String &p_expression) {
	expression = p_expression;
}
String VisualShaderNodeExpression::get_expression() const {
	return expression;
}
// Emits the user expression as a brace-enclosed statement block, with each
// port name textually rewritten to its generated shader variable. Port
// names are only replaced when bracketed by non-identifier symbols on both
// sides (pre_symbols/post_symbols), a cheap stand-in for real tokenizing so
// that e.g. port "uv" is not replaced inside "uv2". Outputs are first
// zero-initialized so the shader compiles even if the expression never
// assigns them.
String VisualShaderNodeExpression::generate_code(Shader::Mode p_mode, VisualShader::Type p_type, int p_id, const String *p_input_vars, const String *p_output_vars, bool p_for_preview) const {
	String _expression = expression;
	// Leading newline guarantees the first token has a pre-symbol ('\t'
	// after re-indentation below) so it can be matched for replacement.
	_expression = _expression.insert(0, "\n");
	_expression = _expression.replace("\n", "\n\t\t");
	// Symbols that may legally precede an identifier.
	static Vector<String> pre_symbols;
	if (pre_symbols.empty()) {
		pre_symbols.push_back("\t");
		pre_symbols.push_back(",");
		pre_symbols.push_back(";");
		pre_symbols.push_back("{");
		pre_symbols.push_back("[");
		pre_symbols.push_back("]");
		pre_symbols.push_back("(");
		pre_symbols.push_back(" ");
		pre_symbols.push_back("-");
		pre_symbols.push_back("*");
		pre_symbols.push_back("/");
		pre_symbols.push_back("+");
		pre_symbols.push_back("=");
		pre_symbols.push_back("&");
		pre_symbols.push_back("|");
		pre_symbols.push_back("!");
	}
	// Symbols that may legally follow an identifier.
	static Vector<String> post_symbols;
	if (post_symbols.empty()) {
		post_symbols.push_back("\t");
		post_symbols.push_back("\n");
		post_symbols.push_back(",");
		post_symbols.push_back(";");
		post_symbols.push_back("}");
		post_symbols.push_back("[");
		post_symbols.push_back("]");
		post_symbols.push_back(")");
		post_symbols.push_back(" ");
		post_symbols.push_back(".");
		post_symbols.push_back("-");
		post_symbols.push_back("*");
		post_symbols.push_back("/");
		post_symbols.push_back("+");
		post_symbols.push_back("=");
		post_symbols.push_back("&");
		post_symbols.push_back("|");
		post_symbols.push_back("!");
	}
	// Substitute every symbol-bracketed occurrence of each input port name
	// with the incoming variable for that port.
	for (int i = 0; i < get_input_port_count(); i++) {
		for (int j = 0; j < pre_symbols.size(); j++) {
			for (int k = 0; k < post_symbols.size(); k++) {
				_expression = _expression.replace(pre_symbols[j] + get_input_port_name(i) + post_symbols[k], pre_symbols[j] + p_input_vars[i] + post_symbols[k]);
			}
		}
	}
	// Same substitution for output port names.
	for (int i = 0; i < get_output_port_count(); i++) {
		for (int j = 0; j < pre_symbols.size(); j++) {
			for (int k = 0; k < post_symbols.size(); k++) {
				_expression = _expression.replace(pre_symbols[j] + get_output_port_name(i) + post_symbols[k], pre_symbols[j] + p_output_vars[i] + post_symbols[k]);
			}
		}
	}
	// Default-initialize every output so unassigned outputs are defined.
	String output_initializer;
	for (int i = 0; i < get_output_port_count(); i++) {
		int port_type = get_output_port_type(i);
		String tk = "";
		switch (port_type) {
			case PORT_TYPE_SCALAR:
				tk = "0.0";
				break;
			case PORT_TYPE_SCALAR_INT:
				tk = "0";
				break;
			case PORT_TYPE_VECTOR:
				tk = "vec3(0.0, 0.0, 0.0)";
				break;
			case PORT_TYPE_BOOLEAN:
				tk = "false";
				break;
			case PORT_TYPE_TRANSFORM:
				tk = "mat4(1.0)";
				break;
			default:
				continue;
		}
		output_initializer += "\t" + p_output_vars[i] + " = " + tk + ";\n";
	}
	// Wrap the expression in its own scope so its locals cannot clash with
	// the surrounding generated function.
	String code;
	code += output_initializer;
	code += "\t{";
	code += _expression;
	code += "\n\t}\n";
	return code;
}
// Script/editor bindings for the expression property.
void VisualShaderNodeExpression::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_expression", "expression"), &VisualShaderNodeExpression::set_expression);
	ClassDB::bind_method(D_METHOD("get_expression"), &VisualShaderNodeExpression::get_expression);
	ADD_PROPERTY(PropertyInfo(Variant::STRING, "expression"), "set_expression", "get_expression");
}
VisualShaderNodeExpression::VisualShaderNodeExpression() {
	expression = "";
	// Expression nodes allow port editing in the graph editor.
	set_editable(true);
}
////////////// Global Expression
String VisualShaderNodeGlobalExpression::get_caption() const {
	return "GlobalExpression";
}
// Global expressions are emitted verbatim at shader global scope rather
// than inside a function body.
String VisualShaderNodeGlobalExpression::generate_global(Shader::Mode p_mode, VisualShader::Type p_type, int p_id) const {
	return expression;
}
VisualShaderNodeGlobalExpression::VisualShaderNodeGlobalExpression() {
	// Global expressions have no ports to edit.
	set_editable(false);
}
|
/*
* Copyright (C) 2012 Victor Carbune (victor@rosedu.org)
* Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#if ENABLE(VIDEO)
#include "RenderVTTCue.h"
#include "RenderInline.h"
#include "RenderLayoutState.h"
#include "RenderView.h"
#include "TextTrackCueGeneric.h"
#include "VTTCue.h"
#include <wtf/IsoMallocInlines.h>
#include <wtf/StackStats.h>
namespace WebCore {
WTF_MAKE_ISO_ALLOCATED_IMPL(RenderVTTCue);
// Renderer for one WebVTT cue box; caches the owning VTTCue so the layout
// steps below can read its positioning settings.
RenderVTTCue::RenderVTTCue(VTTCueBox& element, RenderStyle&& style)
    : RenderBlockFlow(element, WTFMove(style))
    , m_cue(downcast<VTTCue>(element.getCue()))
{
    ASSERT(m_cue);
}
// Runs normal block layout, then applies the WebVTT cue positioning
// algorithm: snap-to-lines or not for standard WebVTT cues, a simpler
// path for generic (in-band) cues.
void RenderVTTCue::layout()
{
    StackStats::LayoutCheckPoint layoutCheckPoint;
    RenderBlockFlow::layout();
    // If WebVTT Regions are used, the regular WebVTT layout algorithm is no
    // longer necessary, since cues having the region parameter set do not have
    // any positioning parameters. Also, in this case, the regions themselves
    // have positioning information.
    if (!m_cue->regionId().isEmpty())
        return;
    LayoutStateMaintainer statePusher(*this, locationOffset(), hasTransform() || hasReflection() || style().isFlippedBlocksWritingMode());
    if (m_cue->cueType()== TextTrackCue::WebVTT) {
        if (m_cue->snapToLines())
            repositionCueSnapToLinesSet();
        else
            repositionCueSnapToLinesNotSet();
    } else
        repositionGenericCue();
}
// Steps 1-7 of the WebVTT snap-to-lines positioning algorithm: derives the
// per-line step, the computed line position, and the initial offset.
// Returns false when positioning should be abandoned (no children, or a
// zero step).
bool RenderVTTCue::initializeLayoutParameters(InlineFlowBox*& firstLineBox, LayoutUnit& step, LayoutUnit& position)
{
    ASSERT(firstChild());
    if (!firstChild())
        return false;
    RenderBlock* parentBlock = containingBlock();
    // firstChild() returns the wrapping (backdrop) <div>. The cue object is
    // the <div>'s first child.
    RenderObject& firstChild = *this->firstChild();
    RenderElement& backdropElement = downcast<RenderElement>(firstChild);
    firstLineBox = downcast<RenderInline>(*backdropElement.firstChild()).firstLineBox();
    if (!firstLineBox)
        firstLineBox = this->firstRootBox();
    // 1. Horizontal: Let step be the height of the first line box in boxes.
    //    Vertical: Let step be the width of the first line box in boxes.
    step = m_cue->getWritingDirection() == VTTCue::Horizontal ? firstLineBox->height() : firstLineBox->width();
    // 2. If step is zero, then jump to the step labeled done positioning below.
    if (!step)
        return false;
    // 3. Let line position be the text track cue computed line position.
    int linePosition = m_cue->calculateComputedLinePosition();
    // 4. Vertical Growing Left: Add one to line position then negate it.
    if (m_cue->getWritingDirection() == VTTCue::VerticalGrowingLeft)
        linePosition = -(linePosition + 1);
    // 5. Let position be the result of multiplying step and line position.
    position = step * linePosition;
    // 6. Vertical Growing Left: Decrease position by the width of the
    // bounding box of the boxes in boxes, then increase position by step.
    if (m_cue->getWritingDirection() == VTTCue::VerticalGrowingLeft) {
        position -= width();
        position += step;
    }
    // 7. If line position is less than zero...
    if (linePosition < 0) {
        // Horizontal / Vertical: ... then increase position by the
        // height / width of the video's rendering area ...
        position += m_cue->getWritingDirection() == VTTCue::Horizontal ? parentBlock->height() : parentBlock->width();
        // ... and negate step.
        step = -step;
    }
    return true;
}
// Steps 8-10 of the snap-to-lines algorithm: offset the cue box by the
// computed position along the writing-direction axis, and remember this
// spot as the fallback to return to if the search direction is switched.
void RenderVTTCue::placeBoxInDefaultPosition(LayoutUnit position, bool& switched)
{
    // 8. Move all boxes in boxes ...
    if (m_cue->getWritingDirection() == VTTCue::Horizontal)
        // Horizontal: ... down by the distance given by position
        setY(y() + position);
    else
        // Vertical: ... right by the distance given by position
        setX(x() + position);
    // 9. Default: Remember the position of all the boxes in boxes as their
    // default position.
    m_fallbackPosition = FloatPoint(x(), y());
    // 10. Let switched be false.
    switched = false;
}
// True when the cue's content box extends beyond its container.
bool RenderVTTCue::isOutside() const
{
    return !rectIsWithinContainer(absoluteContentBox());
}
bool RenderVTTCue::rectIsWithinContainer(const IntRect& rect) const
{
    return containingBlock()->absoluteBoundingBoxRect().contains(rect);
}
// True when the cue's bounding box intersects an earlier sibling cue.
bool RenderVTTCue::isOverlapping() const
{
    return overlappingObject();
}
RenderObject* RenderVTTCue::overlappingObject() const
{
    return overlappingObjectForRect(absoluteBoundingBoxRect());
}
// Returns the first earlier-rendered sibling whose absolute bounding box
// intersects |rect|, or nullptr when nothing overlaps. Only earlier
// siblings are checked: later cues are positioned after this one.
RenderObject* RenderVTTCue::overlappingObjectForRect(const IntRect& rect) const
{
    for (RenderObject* box = previousSibling(); box; box = box->previousSibling()) {
        IntRect boxRect = box->absoluteBoundingBoxRect();
        if (rect.intersects(boxRect))
            return box;
    }
    // Modern idiom: nullptr instead of the literal 0 for pointer returns.
    return nullptr;
}
// Step 12 of the snap-to-lines algorithm: decides whether the stepping
// search has run off the edge of the rendering area in the current
// direction (checked along the axis that matches the writing direction).
bool RenderVTTCue::shouldSwitchDirection(InlineFlowBox* firstLineBox, LayoutUnit step) const
{
    LayoutUnit top = y();
    LayoutUnit left = x();
    LayoutUnit bottom { top + firstLineBox->height() };
    LayoutUnit right { left + firstLineBox->width() };
    // 12. Horizontal: If step is negative and the top of the first line
    // box in boxes is now above the top of the video's rendering area,
    // or if step is positive and the bottom of the first line box in
    // boxes is now below the bottom of the video's rendering area, jump
    // to the step labeled switch direction.
    LayoutUnit parentHeight = containingBlock()->height();
    if (m_cue->getWritingDirection() == VTTCue::Horizontal && ((step < 0 && top < 0) || (step > 0 && bottom > parentHeight)))
        return true;
    // 12. Vertical: If step is negative and the left edge of the first line
    // box in boxes is now to the left of the left edge of the video's
    // rendering area, or if step is positive and the right edge of the
    // first line box in boxes is now to the right of the right edge of
    // the video's rendering area, jump to the step labeled switch direction.
    LayoutUnit parentWidth = containingBlock()->width();
    if (m_cue->getWritingDirection() != VTTCue::Horizontal && ((step < 0 && left < 0) || (step > 0 && right > parentWidth)))
        return true;
    return false;
}
// Step 13 of the snap-to-lines algorithm: advance the cue box by one step
// along the axis that matches the writing direction.
void RenderVTTCue::moveBoxesByStep(LayoutUnit step)
{
    // 13. Horizontal: Move all the boxes in boxes down by the distance
    // given by step. (If step is negative, then this will actually
    // result in an upwards movement of the boxes in absolute terms.)
    if (m_cue->getWritingDirection() == VTTCue::Horizontal)
        setY(y() + step);
    // 13. Vertical: Move all the boxes in boxes right by the distance
    // given by step. (If step is negative, then this will actually
    // result in a leftwards movement of the boxes in absolute terms.)
    else
        setX(x() + step);
}
// Steps 15-18 of the snap-to-lines algorithm: reset to the remembered
// default position and reverse the search direction. Returns false when
// both directions have already been tried (the caller stops searching).
bool RenderVTTCue::switchDirection(bool& switched, LayoutUnit& step)
{
    // 15. Switch direction: Move all the boxes in boxes back to their
    // default position as determined in the step above labeled default.
    setX(m_fallbackPosition.x());
    setY(m_fallbackPosition.y());
    // 16. If switched is true, jump to the step labeled done
    // positioning below.
    if (switched)
        return false;
    // 17. Negate step.
    step = -step;
    // 18. Set switched to true.
    switched = true;
    return true;
}
// Nudges the cue box back inside its container when it overflows on
// either axis; a box that already fits is left untouched.
void RenderVTTCue::moveIfNecessaryToKeepWithinContainer()
{
    IntRect containerRect = containingBlock()->absoluteBoundingBoxRect();
    IntRect cueRect = absoluteBoundingBoxRect();

    // Shift needed along one axis: positive when the cue sticks out before
    // the container's start edge, negative when it sticks out past the end
    // edge, zero when it already fits (start-edge overflow wins).
    auto axisAdjustment = [](int startOverflow, int endOverflow) -> int {
        if (startOverflow < 0)
            return -startOverflow;
        if (endOverflow < 0)
            return endOverflow;
        return 0;
    };

    int verticalAdjustment = axisAdjustment(cueRect.y() - containerRect.y(), containerRect.maxY() - cueRect.maxY());
    if (verticalAdjustment)
        setY(y() + verticalAdjustment);

    int horizontalAdjustment = axisAdjustment(cueRect.x() - containerRect.x(), containerRect.maxX() - cueRect.maxX());
    if (horizontalAdjustment)
        setX(x() + horizontalAdjustment);
}
// Searches for a nearby position where the cue neither overlaps an earlier
// sibling nor leaves the container: first sliding up/left past each
// overlapping box, then down/right. On success writes the new coordinates
// into newX/newY and returns true; otherwise leaves them at the current
// position and returns false.
bool RenderVTTCue::findNonOverlappingPosition(int& newX, int& newY) const
{
    newX = x();
    newY = y();
    IntRect srcRect = absoluteBoundingBoxRect();
    IntRect destRect = srcRect;
    // Move the box up, looking for a non-overlapping position:
    while (RenderObject* box = overlappingObjectForRect(destRect)) {
        if (m_cue->getWritingDirection() == VTTCue::Horizontal)
            destRect.setY(box->absoluteBoundingBoxRect().y() - destRect.height());
        else
            destRect.setX(box->absoluteBoundingBoxRect().x() - destRect.width());
    }
    if (rectIsWithinContainer(destRect)) {
        newX += destRect.x() - srcRect.x();
        newY += destRect.y() - srcRect.y();
        return true;
    }
    destRect = srcRect;
    // Move the box down, looking for a non-overlapping position:
    while (RenderObject* box = overlappingObjectForRect(destRect)) {
        if (m_cue->getWritingDirection() == VTTCue::Horizontal)
            destRect.setY(box->absoluteBoundingBoxRect().maxY());
        else
            destRect.setX(box->absoluteBoundingBoxRect().maxX());
    }
    if (rectIsWithinContainer(destRect)) {
        newX += destRect.x() - srcRect.x();
        newY += destRect.y() - srcRect.y();
        return true;
    }
    return false;
}
// Driver for the WebVTT snap-to-lines positioning algorithm: initialize,
// place at the default position, then step the box until it is inside the
// rendering area and clear of earlier cues (switching direction once
// before giving up).
void RenderVTTCue::repositionCueSnapToLinesSet()
{
    InlineFlowBox* firstLineBox;
    LayoutUnit step;
    LayoutUnit position;
    if (!initializeLayoutParameters(firstLineBox, step, position))
        return;
    bool switched;
    placeBoxInDefaultPosition(position, switched);
    // 11. Step loop: If none of the boxes in boxes would overlap any of the boxes
    // in output and all the boxes in output are within the video's rendering area
    // then jump to the step labeled done positioning.
    while (isOutside() || isOverlapping()) {
        if (!shouldSwitchDirection(firstLineBox, step))
            // 13. Move all the boxes in boxes ...
            // 14. Jump back to the step labeled step loop.
            moveBoxesByStep(step);
        else if (!switchDirection(switched, step))
            break;
        // 19. Jump back to the step labeled step loop.
    }
    // Accommodate extra top and bottom padding, border or margin.
    // Note: this is supported only for internal UA styling, not through the cue selector.
    if (hasInlineDirectionBordersPaddingOrMargin())
        moveIfNecessaryToKeepWithinContainer();
}
// Positioning for generic (in-band) cues: horizontally center default-
// positioned cues on their first line box, then fall through to the
// non-snap-to-lines overlap resolution.
void RenderVTTCue::repositionGenericCue()
{
    ASSERT(firstChild());
    // firstChild() returns the wrapping (backdrop) <div>. The cue object is
    // the <div>'s first child.
    RenderObject& firstChild = *this->firstChild();
    RenderElement& backdropElement = downcast<RenderElement>(firstChild);
    InlineFlowBox* firstLineBox = downcast<RenderInline>(*backdropElement.firstChild()).firstLineBox();
    if (downcast<TextTrackCueGeneric>(*m_cue).useDefaultPosition() && firstLineBox) {
        LayoutUnit parentWidth = containingBlock()->logicalWidth();
        LayoutUnit width { firstLineBox->width() };
        // Center the line box within the parent's logical width.
        LayoutUnit right = (parentWidth / 2) - (width / 2);
        setX(right);
    }
    repositionCueSnapToLinesNotSet();
}
void RenderVTTCue::repositionCueSnapToLinesNotSet()
{
    // WebVTT "snap-to-lines flag not set" positioning, step 3: if the boxes
    // neither overlap other output boxes nor fall outside the video's
    // rendering area, the cue stays where it is.
    if (!isOutside() && !isOverlapping())
        return;

    // Step 4: first pull the box back inside the rendering area, then look
    // for the closest position at which it overlaps nothing. Only commit the
    // move when such a position actually exists; otherwise leave the box at
    // its (container-clamped) position.
    moveIfNecessaryToKeepWithinContainer();

    int adjustedX = 0;
    int adjustedY = 0;
    if (findNonOverlappingPosition(adjustedX, adjustedY)) {
        setX(adjustedX);
        setY(adjustedY);
    }
}
} // namespace WebCore
#endif
|
/*
This file contains code adapted from p3d.py in
http://code.google.com/p/pythonisosurfaces/source/checkout
which was released under the new BSD license.
accessed 31 July 2012
*/
#include <math.h>
#include <stdio.h>
/* Marching cubes edge table. Indexed by the 8-bit cube configuration
   (one bit per corner whose field value is negative, i.e. inside the
   surface); each entry is a 12-bit mask of the cube edges crossed by the
   isosurface for that configuration. Consumed by find_triangles() below. */
const int edgeTable[] = {0x0, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0};
/* CTNG:tritable — marching cubes triangle table: for each of the 256 corner
   configurations, up to five triangles given as triples of cube-edge indices
   (matching the bits of edgeTable above), terminated by -1 entries. */
const int triTable[256][16] = {{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}};
/* CTNG:interpedge — vi() linearly interpolates the zero crossing of the
   scalar field along one cube edge. */
/* Interpolate the point where the scalar field crosses zero on the edge from
 * p1 to p2, given the field values v1 (at p1) and v2 (at p2). The result is
 * written to out[0..2].
 *
 * Edge cases: if either endpoint value is (almost) zero, that endpoint is
 * already on the surface and is returned as-is; if v1 and v2 are (almost)
 * equal, the edge is degenerate and p1 is returned, avoiding a division by a
 * vanishing v1 - v2. */
void vi(double* p1, double* p2, double v1, double v2, double* out) {
    const double kOnSurfaceEps = 0.000000000001; /* endpoint lies on surface */
    const double kDeltaEps = 0.0000000001;       /* |v1 - v2| too small      */

    if (fabs(v2) < kOnSurfaceEps) {
        out[0] = p2[0]; out[1] = p2[1]; out[2] = p2[2];
        return;
    }
    if (fabs(v1) < kOnSurfaceEps) {
        out[0] = p1[0]; out[1] = p1[1]; out[2] = p1[2];
        return;
    }

    double delta_v = v1 - v2;
    if (fabs(delta_v) < kDeltaEps) {
        out[0] = p1[0]; out[1] = p1[1]; out[2] = p1[2];
        return;
    }

    /* Fraction of the way from p1 toward p2 at which the field crosses zero:
     * solving v1 + mu * (v2 - v1) == 0 gives mu = v1 / (v1 - v2). */
    double mu = v1 / delta_v;
    out[0] = p1[0] + mu * (p2[0] - p1[0]);
    out[1] = p1[1] + mu * (p2[1] - p1[1]);
    out[2] = p1[2] + mu * (p2[2] - p1[2]);
}
/* Marching-cubes cell polygonizer. Classifies the cube spanned by
 * [x0,x1] x [y0,y1] x [z0,z1] from the eight corner field values
 * (value0..value7; negative means inside), interpolates a surface vertex on
 * every crossed edge, and appends the resulting triangles (9 doubles each)
 * to `out`. Returns the number of triangles written (0..5). */
extern "C" int find_triangles(double thresh, double value0, double value1, double value2, double value3, double value4, double value5, double value6, double value7, double x0, double x1, double y0, double y1, double z0, double z1, double* out) {
    double corner[8][3] = {{x0, y0, z0},
                           {x1, y0, z0},
                           {x1, y1, z0},
                           {x0, y1, z0},
                           {x0, y0, z1},
                           {x1, y0, z1},
                           {x1, y1, z1},
                           {x0, y1, z1}};
    double value[8] = {value0, value1, value2, value3,
                       value4, value5, value6, value7};
    /* CTNG:domarch */
    /* Build the 8-bit configuration index: bit c is set iff corner c is
     * inside the surface (negative field value). */
    int cubeIndex = 0;
    for (int c = 0; c < 8; ++c) {
        if (value[c] < 0)
            cubeIndex |= (1 << c);
    }
    /* Fully inside or fully outside: treat corners that sit within `thresh`
     * of the surface as crossed anyway, so thin features are not dropped. */
    if (cubeIndex == 0 || cubeIndex == 255) {
        for (int c = 0; c < 8; ++c) {
            if (fabs(value[c]) <= thresh)
                cubeIndex ^= (1 << c);
        }
    }
    int et = edgeTable[cubeIndex];
    if (et == 0)
        return 0;
    /* Corner pair delimiting each of the cube's 12 edges; edge e corresponds
     * to bit (1 << e) of the edgeTable mask. */
    static const int edgeCorner[12][2] = {
        {0, 1}, {1, 2}, {2, 3}, {3, 0},
        {4, 5}, {5, 6}, {6, 7}, {7, 4},
        {0, 4}, {1, 5}, {2, 6}, {3, 7}};
    double vertexList[12][3];
    for (int e = 0; e < 12; ++e) {
        if (et & (1 << e)) {
            int ca = edgeCorner[e][0];
            int cb = edgeCorner[e][1];
            vi(corner[ca], corner[cb], value[ca], value[cb], vertexList[e]);
        }
    }
    /* Emit the triangles listed for this configuration: triples of edge
     * indices, terminated by -1. */
    const int* tt = triTable[cubeIndex];
    int count = 0;
    for (int i = 0; i < 16 && tt[i] != -1; i += 3, ++count) {
        for (int k = 0; k < 3; ++k) {
            for (int j = 0; j < 3; ++j)
                out[j] = vertexList[tt[i + k]][j];
            out += 3;
        }
    }
    return count;
}
|
// RUN: mlir-clang %s --function=foo -S | FileCheck %s
extern "C" {
// Maps a character selector to an integer code: 'a' -> 20, 'A' -> 30, any
// other character -> -1. This fixture exercises switch-statement lowering in
// mlir-clang (see the RUN/CHECK lines around this function); its source
// shape must stay in sync with the CHECK expectations below.
int foo(char t) {
int n = 10;
switch (t) {
case 'a':
n = 20;
break;
case 'A':
n = 30;
break;
default:
// Unhandled selector: return early instead of falling through to `return n`.
return -1;
}
return n;
}
}
// TODO the select should be canonicalized better
// CHECK: func @foo(%arg0: i8) -> i32 attributes {llvm.linkage = #llvm.linkage<external>} {
// CHECK-NEXT: %c-1_i32 = arith.constant -1 : i32
// CHECK-NEXT: %c30_i32 = arith.constant 30 : i32
// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %c20_i32 = arith.constant 20 : i32
// CHECK-NEXT: %c10_i32 = arith.constant 10 : i32
// CHECK-NEXT: %true = arith.constant true
// CHECK-NEXT: %0 = llvm.mlir.undef : i32
// CHECK-NEXT: %1 = arith.extsi %arg0 : i8 to i32
// CHECK-NEXT: switch %1 : i32, [
// CHECK-NEXT: default: ^bb1(%c10_i32, %false, %c-1_i32 : i32, i1, i32),
// CHECK-NEXT: 97: ^bb1(%c20_i32, %true, %0 : i32, i1, i32),
// CHECK-NEXT: 65: ^bb1(%c30_i32, %true, %0 : i32, i1, i32)
// CHECK-NEXT: ]
// CHECK-NEXT: ^bb1(%2: i32, %3: i1, %4: i32): // 3 preds: ^bb0, ^bb0, ^bb0
// CHECK-NEXT: %5 = select %3, %2, %4 : i32
// CHECK-NEXT: return %5 : i32
// CHECK-NEXT: }
|
//===- HWStubExternalModules.cpp - HW Module Stubbing Pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation pass converts external modules to empty normal modules.
//
//===----------------------------------------------------------------------===//
#include "SVPassDetail.h"
#include "circt/Dialect/SV/SVPasses.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
using namespace circt;
//===----------------------------------------------------------------------===//
// HWStubExternalModules Pass
//===----------------------------------------------------------------------===//
namespace {
// Pass that replaces every external HW module (hw.module.extern) in the
// top-level module with an empty hw.module of the same name and ports,
// driving all outputs with X (see runOnOperation below).
struct HWStubExternalModulesPass
: public sv::HWStubExternalModulesBase<HWStubExternalModulesPass> {
void runOnOperation() override;
};
} // end anonymous namespace
void HWStubExternalModulesPass::runOnOperation() {
auto topModule = getOperation().getBody();
OpBuilder builder(topModule->getParentOp()->getContext());
// Replacement stub modules are appended at the end of the top-level block.
builder.setInsertionPointToEnd(topModule);
// early_inc_range: we erase the extern module inside the loop body, so the
// iterator must be advanced before the current op is destroyed.
for (auto &op : llvm::make_early_inc_range(*topModule))
if (auto module = dyn_cast<hw::HWModuleExternOp>(op)) {
SmallVector<hw::ModulePortInfo> ports = module.getPorts();
auto nameAttr = module.getNameAttr();
// Create an empty hw.module with the same symbol name and port list, so
// existing instances keep resolving.
auto newModule =
builder.create<hw::HWModuleOp>(module.getLoc(), nameAttr, ports);
auto outputOp = newModule.getBodyBlock()->getTerminator();
OpBuilder innerBuilder(outputOp);
SmallVector<Value, 8> outputs;
// All output ports need values, use x
for (auto &p : ports) {
if (p.isOutput())
outputs.push_back(
innerBuilder.create<sv::ConstantXOp>(outputOp->getLoc(), p.type));
}
outputOp->setOperands(outputs);
// Now update instances to drop parameters, since the stub (unlike the
// extern it replaces) is not parameterized.
auto useRange = SymbolTable::getSymbolUses(module, getOperation());
if (useRange)
for (auto &user : *useRange)
if (auto inst = dyn_cast<hw::InstanceOp>(user.getUser()))
inst->removeAttr("parameters");
// Done with the old module.
module.erase();
}
}
/// Factory for the HWStubExternalModules pass registered by the SV dialect.
std::unique_ptr<Pass> circt::sv::createHWStubExternalModulesPass() {
return std::make_unique<HWStubExternalModulesPass>();
}
|
#define FBGEMM_EXPORTS
#include "fbgemm/QuantUtils.h"
#include <cpuinfo.h>
#include "fbgemm/Fbgemm.h"
namespace fbgemm {
using namespace std;
// Real value corresponding to the smallest quantized value (0), i.e. the low
// end of the representable range for these quantization parameters.
float TensorQuantizationParams::Min() const {
return Dequantize(0, *this);
}
// Real value corresponding to the largest quantized value (2^precision - 1),
// i.e. the high end of the representable range for these parameters.
float TensorQuantizationParams::Max() const {
return Dequantize((1 << precision) - 1, *this);
}
// Chooses an affine quantization (scale, zero_point) that maps the real
// range [min, max] onto the quantized integer range [qmin, qmax]. The range
// is first widened to contain 0 so that the real value 0 is exactly
// representable. With preserve_sparsity and a range straddling zero, the
// range is symmetrized so that zero maps to the midpoint of [qmin, qmax];
// with force_scale_power_of_two, the scale is rounded up to a power of two.
TensorQuantizationParams ChooseQuantizationParams(
float min,
float max,
int32_t qmin,
int32_t qmax,
bool preserve_sparsity,
bool force_scale_power_of_two) {
// Symmetrize the range so that min/symmetric_qmin == max/symmetric_qmax,
// pinning the real value 0 to the middle of the quantized range (see the
// matching zero-point override further down).
if (min < 0 && max > 0 && preserve_sparsity) {
int symmetric_qmin = -((qmax - qmin) / 2 + 1);
int symmetric_qmax = (qmax - qmin) / 2;
double max_scale =
std::max(fabs(min / symmetric_qmin), fabs(max / symmetric_qmax));
min = max_scale * symmetric_qmin;
max = max_scale * symmetric_qmax;
}
// We extend the [min, max] interval to ensure that it contains 0.
// Otherwise, we would not meet the requirement that 0 be an exactly
// representable value.
min = std::min(min, 0.f);
max = std::max(max, 0.f);
// Use double precision for intermediate computation but use single precision
// in final number to reflect the actual number used during quantization.
float scale = (static_cast<double>(max) - min) / (qmax - qmin);
// If scale is 0 or too small so its reciprocal is infinity, we arbitrary
// adjust the scale to 0.1 . We want to avoid scale's reciprocal being
// infinity because some of fbgemm code pre-computes scale's reciprocal to do
// multiplication instead of division in the time critical part of code.
if (scale == 0.0f || isinf(1.0f / scale)) {
scale = 0.1;
}
assert(scale > 0);
// Round the scale up to the next power of two (a no-op when it already is
// one).
if (force_scale_power_of_two) {
if (scale < 1) {
scale = 1.0 / (1 << static_cast<int>(floor(log2(1.0 / scale))));
} else {
scale = 1 << static_cast<int>(ceil(log2(scale)));
}
}
// Zero-point computation.
// First the initial floating-point computation. The zero-point can be
// determined from solving an affine equation for any known pair
// (real value, corresponding quantized value).
// We know two such pairs: (rmin, qmin) and (rmax, qmax).
// The arithmetic error on the zero point computed from either pair
// will be roughly machine_epsilon * (sum of absolute values of terms)
// so we want to use the variant that adds the smaller terms.
double zero_point_from_min = qmin - min / static_cast<double>(scale);
double zero_point_from_max = qmax - max / static_cast<double>(scale);
double zero_point_from_min_error =
std::abs(qmin) + std::abs(min / static_cast<double>(scale));
double zero_point_from_max_error =
std::abs(qmax) + std::abs(max / static_cast<double>(scale));
double initial_zero_point =
zero_point_from_min_error < zero_point_from_max_error
? zero_point_from_min
: zero_point_from_max;
// for symmetric quantization (preserve_sparsity == true), we force zero_point
// to be a middle value between qmin and qmax.
// If either min or max is 0, then we just use 0 as zero_point.
if (min < 0 && max > 0 && preserve_sparsity) {
initial_zero_point = (qmin + qmax) / 2 + 1;
}
// Now we need to nudge the zero point to be an integer
// (our zero points are integer, and this is motivated by the requirement
// to be able to represent the real value "0" exactly as a quantized value,
// which is required in multiple places, for example in Im2col with zero
// padding).
int32_t nudged_zero_point = 0;
if (initial_zero_point < qmin) {
nudged_zero_point = qmin;
} else if (initial_zero_point > qmax) {
nudged_zero_point = qmax;
} else {
nudged_zero_point = nearbyint(initial_zero_point);
}
TensorQuantizationParams result;
result.scale = scale;
result.zero_point = nudged_zero_point;
return result;
}
/// Decomposes a nonzero real multiplier into a 32-bit fixed-point multiplier
/// and a right-shift amount, so requantization can use an integer multiply
/// followed by a shift instead of a floating-point multiply. The fixed-point
/// value uses all (requantization_multiplier_precision) bits except the sign
/// bit for the best accuracy; with the default precision of 31, a multiplier
/// already in [1/2, 1) gets the baseline right shift of 31.
void ChooseRequantizationMultiplier(
    float real_multiplier,
    int32_t* quantized_multiplier,
    int* right_shift,
    int requantization_multiplier_precision) {
  assert(real_multiplier != 0.f);
  // Normalize the multiplier into [1/2, 1): every doubling is compensated by
  // one extra bit of right shift, every halving by one fewer. Multiplying or
  // dividing a binary float by 2 is exact, so no precision is lost here.
  int shift = 31;
  if (real_multiplier > 0.f) {
    for (; real_multiplier < 0.5f; ++shift)
      real_multiplier *= 2.f;
    for (; real_multiplier > 1.f; --shift)
      real_multiplier /= 2.f;
  }
  // Convert the normalized multiplier to fixed point with
  // (requantization_multiplier_precision - 1) fractional bits.
  int64_t fixed_pt = std::nearbyint(
      real_multiplier * (1ll << (requantization_multiplier_precision - 1)));
  assert(fixed_pt <= (1ll << (requantization_multiplier_precision - 1)));
  // Special case: a multiplier so close to 1 that it rounded to exactly 1.0
  // in fixed point. Halve it and shift one bit less instead, keeping the
  // value inside the representable range.
  if (fixed_pt == (1ll << (requantization_multiplier_precision - 1))) {
    fixed_pt /= 2;
    --shift;
  }
  assert(shift >= 0);
  assert(fixed_pt >= 0);
  assert(fixed_pt <= std::numeric_limits<int32_t>::max());
  *quantized_multiplier = static_cast<int32_t>(fixed_pt);
  *right_shift = shift;
  assert(shift < 64);
}
////////////////////////////////////////////////////////////////////////////////
// Utility functions
// Scalar Quantize() specializations: partition [0, len) across num_threads
// with fbgemmPartition1D and quantize each element of this thread's slice
// independently. (No comments inside the macro body: a // comment would
// swallow the line-continuation backslash.)
#define FBGEMM_SPECIALIZED_QUANTIZE(T, LEGACY) \
template <> \
FBGEMM_API void Quantize<T, LEGACY>( \
const float* src, \
T* dst, \
const int len, \
const TensorQuantizationParams& qparams, \
int thread_id, \
int num_threads) { \
int i_begin, i_end; \
fbgemmPartition1D(thread_id, num_threads, len, i_begin, i_end); \
for (int i = i_begin; i < i_end; ++i) { \
dst[i] = Quantize<T, LEGACY>(src[i], qparams); \
} \
}
FBGEMM_SPECIALIZED_QUANTIZE(uint16_t, true)
FBGEMM_SPECIALIZED_QUANTIZE(int16_t, true)
FBGEMM_SPECIALIZED_QUANTIZE(int32_t, true)
FBGEMM_SPECIALIZED_QUANTIZE(uint16_t, false)
FBGEMM_SPECIALIZED_QUANTIZE(int16_t, false)
FBGEMM_SPECIALIZED_QUANTIZE(int32_t, false)
#undef FBGEMM_SPECIALIZED_QUANTIZE
// Quantize() specializations for 8-bit types: dispatch to the vectorized
// QuantizeAvx2 kernel when the CPU has AVX2 + FMA and the quantization
// precision is 8 bits; otherwise fall back to the scalar per-element loop.
// Fix: the fallback loop index was std::size_t while fbgemmPartition1D
// produces int bounds, causing a signed/unsigned comparison; use int for
// consistency with FBGEMM_SPECIALIZED_QUANTIZE above.
#define FBGEMM_SPECIALIZED_QUANTIZE_AVX2(T, LEGACY) \
template <> \
FBGEMM_API void Quantize<T, LEGACY>( \
const float* src, \
T* dst, \
int len, \
const TensorQuantizationParams& qparams, \
int thread_id, \
int num_threads) { \
bool avx2_support = cpuinfo_initialize() && fbgemmHasAvx2Support(); \
bool fma_support = cpuinfo_has_x86_fma3(); \
int i_begin, i_end; \
fbgemmPartition1D(thread_id, num_threads, len, i_begin, i_end); \
if (avx2_support && fma_support && qparams.precision == 8) { \
/* fast path */ \
QuantizeAvx2<T, LEGACY>( \
&src[i_begin], &dst[i_begin], i_end - i_begin, qparams); \
} else { \
for (int i = i_begin; i < i_end; ++i) { \
dst[i] = Quantize<T, LEGACY>(src[i], qparams); \
} \
} \
}
FBGEMM_SPECIALIZED_QUANTIZE_AVX2(int8_t, true)
FBGEMM_SPECIALIZED_QUANTIZE_AVX2(uint8_t, true)
FBGEMM_SPECIALIZED_QUANTIZE_AVX2(int8_t, false)
FBGEMM_SPECIALIZED_QUANTIZE_AVX2(uint8_t, false)
#undef FBGEMM_SPECIALIZED_QUANTIZE_AVX2
// Fused quantize-then-dequantize (fake quantization), with an AVX2 fast path
// for 8-bit types and a scalar fallback otherwise.
// Fix: the fallback loop previously iterated with `std::size_t i` while
// i_begin/i_end are `int`, causing a signed/unsigned comparison; use `int`.
#define FBGEMM_SPECIALIZED_FUSED_QUANTIZE_DEQUANTIZE_AVX2(T)            \
  template <>                                                           \
  FBGEMM_API void FusedQuantizeDequantize<T>(                           \
      const float* src,                                                 \
      float* dst,                                                       \
      int len,                                                          \
      const TensorQuantizationParams& qparams,                          \
      int thread_id,                                                    \
      int num_threads) {                                                \
    bool avx2_support = cpuinfo_initialize() && fbgemmHasAvx2Support(); \
    bool fma_support = cpuinfo_has_x86_fma3();                          \
    int i_begin, i_end;                                                 \
    fbgemmPartition1D(thread_id, num_threads, len, i_begin, i_end);     \
    if (avx2_support && fma_support && qparams.precision == 8) {        \
      /* fast path */                                                   \
      FusedQuantizeDequantizeAvx2<T>(                                   \
          &src[i_begin], &dst[i_begin], i_end - i_begin, qparams);      \
    } else {                                                            \
      for (int i = i_begin; i < i_end; ++i) {                           \
        dst[i] = FusedQuantizeDequantize<T>(src[i], qparams);           \
      }                                                                 \
    }                                                                   \
  }
FBGEMM_SPECIALIZED_FUSED_QUANTIZE_DEQUANTIZE_AVX2(int8_t)
FBGEMM_SPECIALIZED_FUSED_QUANTIZE_DEQUANTIZE_AVX2(uint8_t)
#undef FBGEMM_SPECIALIZED_FUSED_QUANTIZE_DEQUANTIZE_AVX2
// Scalar group-wise quantization for the KCX layout (channels before
// spatial): every group g of C_per_G consecutive channels shares
// scales[g] / zero_points[g].
// Consistency fix: the channel-loop bound recomputed `C / G` on every
// iteration even though `C_per_G` was already computed; use `C_per_G`.
#define FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKCX(T)                \
  template <>                                                     \
  FBGEMM_API void QuantizeGroupwise<T, layout_t::KCX>(            \
      const float* src,                                           \
      int N,                                                      \
      int C,                                                      \
      int X,                                                      \
      int G,                                                      \
      const float* scales,                                        \
      const std::int32_t* zero_points,                            \
      T* dst) {                                                   \
    assert(C % G == 0);                                           \
    int C_per_G = C / G;                                          \
    for (int i = 0; i < N; ++i) {                                 \
      for (int g = 0; g < G; ++g) {                               \
        float scale = scales[g];                                  \
        int32_t zero_point = zero_points[g];                      \
        for (int c = 0; c < C_per_G; ++c) {                       \
          for (int x = 0; x < X; ++x) {                           \
            dst[(i * C + g * C_per_G + c) * X + x] = Quantize<T>( \
                src[(i * C + g * C_per_G + c) * X + x],           \
                zero_point,                                       \
                scale,                                            \
                8 * sizeof(T));                                   \
          }                                                       \
        }                                                         \
      }                                                           \
    }                                                             \
  }
FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKCX(int8_t)
FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKCX(int32_t)
#undef FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKCX
// Group-wise quantization to uint8 in KCX layout.  Because a group's
// C_per_G * X elements are contiguous in this layout, each group can be
// handed wholesale to the vectorized AVX2 kernel when available; otherwise
// a scalar fallback is used.
template <>
FBGEMM_API void QuantizeGroupwise<uint8_t, layout_t::KCX>(
    const float* src,
    int K,
    int C,
    int X,
    int G,
    const float* scales,
    const std::int32_t* zero_points,
    uint8_t* dst) {
  // Channels must split evenly into groups.
  assert(C % G == 0);
  int C_per_G = C / G;
  fbgemm::TensorQuantizationParams qparams;
  qparams.precision = 8 * sizeof(uint8_t);
  // The AVX2 kernel needs both AVX2 and FMA3 support.
  bool takeFastPath =
      cpuinfo_initialize() && fbgemmHasAvx2Support() && cpuinfo_has_x86_fma3();

  for (int i = 0; i < K; ++i) {
    for (int g = 0; g < G; ++g) {
      // Each group has its own scale and zero point.
      qparams.scale = scales[g];
      qparams.zero_point = zero_points[g];
      if (takeFastPath) {
        QuantizeAvx2(
            src + (i * C + g * C_per_G) * X,
            dst + (i * C + g * C_per_G) * X,
            C_per_G * X,
            qparams);
      } else {
        // Scalar fallback.  Use the precomputed C_per_G for the bound
        // instead of recomputing C / G on every iteration.
        for (int c = 0; c < C_per_G; ++c) {
          for (int x = 0; x < X; ++x) {
            dst[(i * C + g * C_per_G + c) * X + x] = Quantize<uint8_t>(
                src[(i * C + g * C_per_G + c) * X + x],
                qparams.zero_point,
                qparams.scale,
                qparams.precision);
          }
        }
      }
    }
  }
}
// Scalar group-wise quantization for the KXC layout (spatial before
// channels): channels of one spatial position are contiguous, so groups are
// interleaved and there is no contiguous fast path.
// Consistency fix: the channel-loop bound recomputed `C / G` on every
// iteration even though `C_per_G` was already computed; use `C_per_G`.
#define FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKXC(T)                \
  template <>                                                     \
  FBGEMM_API void QuantizeGroupwise<T, layout_t::KXC>(            \
      const float* src,                                           \
      int K,                                                      \
      int C,                                                      \
      int X,                                                      \
      int G,                                                      \
      const float* scales,                                        \
      const std::int32_t* zero_points,                            \
      T* dst) {                                                   \
    assert(C % G == 0);                                           \
    int C_per_G = C / G;                                          \
    for (int i = 0; i < K; ++i) {                                 \
      for (int x = 0; x < X; ++x) {                               \
        for (int g = 0; g < G; ++g) {                             \
          float scale = scales[g];                                \
          int32_t zero_point = zero_points[g];                    \
          for (int c = 0; c < C_per_G; ++c) {                     \
            dst[(i * X + x) * C + g * C_per_G + c] = Quantize<T>( \
                src[(i * X + x) * C + g * C_per_G + c],           \
                zero_point,                                       \
                scale,                                            \
                8 * sizeof(T));                                   \
          }                                                       \
        }                                                         \
      }                                                           \
    }                                                             \
  }
FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKXC(int8_t)
FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKXC(uint8_t)
FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKXC(int32_t)
#undef FBGEMM_SPECIALIZED_QUANTIZEGROUPWISEKXC
////////////////////////////////////////////////////////////////////////////////
// Requantization (pure fixed-point)

/// Computes round(a * b / 2^right_shift) with round-half-up rounding.
/// The 32x32 product is formed in 64 bits so it cannot overflow.
/// right_shift must be in [0, 63].
/// Fix: the original unconditionally computed `1ll << (right_shift - 1)`,
/// which is undefined behavior for right_shift == 0; that case needs no
/// rounding, so the exact product is returned directly.
int64_t SaturatingRoundingMulWithShift(int32_t a, int32_t b, int right_shift) {
  int64_t a_64(a);
  int64_t b_64(b);
  int64_t ab_64 = a_64 * b_64;
  if (right_shift == 0) {
    return ab_64;
  }
  // Round half up: add 2^(right_shift-1) before shifting.
  int64_t nudge = 1ll << (right_shift - 1);
  return (ab_64 + nudge) >> right_shift;
}
// Scalar (reference) requantization: converts int32 accumulators in `src`
// to the narrower integer type T via the scalar Requantize<T>.  Each thread
// processes only its [i_begin, i_end) slice of the `len` elements.
#define FBGEMM_SPECIALIZED_REQUANTIZE(T)                            \
  template <>                                                       \
  FBGEMM_API void Requantize<T>(                                    \
      const int32_t* src,                                           \
      T* dst,                                                       \
      const int len,                                                \
      const RequantizationParams& params,                           \
      int thread_id,                                                \
      int num_threads) {                                            \
    int i_begin, i_end;                                             \
    fbgemmPartition1D(thread_id, num_threads, len, i_begin, i_end); \
    for (int i = i_begin; i < i_end; ++i) {                         \
      dst[i] = Requantize<T>(src[i], params);                       \
    }                                                               \
  }
FBGEMM_SPECIALIZED_REQUANTIZE(uint16_t)
FBGEMM_SPECIALIZED_REQUANTIZE(int32_t)
#undef FBGEMM_SPECIALIZED_REQUANTIZE
// Requantization to uint8: dispatches the thread's slice either to the
// vectorized AVX2 kernel (8-bit target precision on AVX2 hardware) or to
// the scalar Requantize<uint8_t> fallback.
template <>
FBGEMM_API void Requantize<uint8_t>(
    const int32_t* src,
    uint8_t* dst,
    const int len,
    const RequantizationParams& params,
    int thread_id,
    int num_threads) {
  // Slice [begin, end) of the input assigned to this thread.
  int begin, end;
  fbgemmPartition1D(thread_id, num_threads, len, begin, end);
  // The vectorized kernel only handles an 8-bit target on AVX2-capable CPUs.
  const bool use_avx2 = params.target_qparams.precision == 8 &&
      cpuinfo_initialize() && fbgemmHasAvx2Support();
  if (!use_avx2) {
    // Portable scalar fallback.
    for (int i = begin; i < end; ++i) {
      dst[i] = Requantize<uint8_t>(src[i], params);
    }
    return;
  }
  RequantizeAvx2(&src[begin], &dst[begin], end - begin, params);
}
// Fixed-point requantization for a generic target type T.  The AVX2 fast
// path exists only for uint8 with 8-bit target precision; all other cases
// take the scalar fallback.
template <typename T>
FBGEMM_API void RequantizeFixedPoint(
    const std::int32_t* src,
    T* dst,
    int len,
    const RequantizationParams& params,
    int thread_id,
    int num_threads) {
  // Slice [begin, end) of the input assigned to this thread.
  int begin, end;
  fbgemmPartition1D(thread_id, num_threads, len, begin, end);
  const bool use_avx2 = std::is_same<T, uint8_t>::value &&
      params.target_qparams.precision == 8 && cpuinfo_initialize() &&
      fbgemmHasAvx2Support();
  if (use_avx2) {
    RequantizeFixedPointAvx2(&src[begin], &dst[begin], end - begin, params);
    return;
  }
  // Portable scalar fallback.
  for (int i = begin; i < end; ++i) {
    dst[i] = RequantizeFixedPoint<T>(src[i], params);
  }
}
// Scalar fixed-point requantization specializations for the wider target
// types that have no vectorized kernel; each thread processes only its
// [i_begin, i_end) slice.
#define FBGEMM_SPECIALIZED_REQUANTIZE(T)                            \
  template <>                                                       \
  FBGEMM_API void RequantizeFixedPoint<T>(                          \
      const int32_t* src,                                           \
      T* dst,                                                       \
      const int len,                                                \
      const RequantizationParams& params,                           \
      int thread_id,                                                \
      int num_threads) {                                            \
    int i_begin, i_end;                                             \
    fbgemmPartition1D(thread_id, num_threads, len, i_begin, i_end); \
    for (int i = i_begin; i < i_end; ++i) {                         \
      dst[i] = RequantizeFixedPoint<T>(src[i], params);             \
    }                                                               \
  }
FBGEMM_SPECIALIZED_REQUANTIZE(uint16_t)
FBGEMM_SPECIALIZED_REQUANTIZE(int32_t)
#undef FBGEMM_SPECIALIZED_REQUANTIZE
// Fixed-point requantization to uint8: the thread's slice goes to the AVX2
// kernel when the target precision is 8 bits and the CPU supports AVX2,
// otherwise to the scalar fallback.
template <>
FBGEMM_API void RequantizeFixedPoint<uint8_t>(
    const int32_t* src,
    uint8_t* dst,
    const int len,
    const RequantizationParams& params,
    int thread_id,
    int num_threads) {
  // Slice [begin, end) of the input assigned to this thread.
  int begin, end;
  fbgemmPartition1D(thread_id, num_threads, len, begin, end);
  const bool use_avx2 = params.target_qparams.precision == 8 &&
      cpuinfo_initialize() && fbgemmHasAvx2Support();
  if (!use_avx2) {
    // Portable scalar fallback.
    for (int i = begin; i < end; ++i) {
      dst[i] = RequantizeFixedPoint<uint8_t>(src[i], params);
    }
    return;
  }
  RequantizeFixedPointAvx2(&src[begin], &dst[begin], end - begin, params);
}
} // namespace fbgemm
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Portions Copyright (c) 1993-2010 NVIDIA Corporation. All rights reserved.
* Portions Copyright (c) 2009 Mike Giles, Oxford University. All rights reserved.
* Portions Copyright (c) 2008 Frances Y. Kuo and Stephen Joe. All rights reserved.
*
* Sobol Quasi-random Number Generator example
*
* Based on CUDA code submitted by Mike Giles, Oxford University, United Kingdom
* http://people.maths.ox.ac.uk/~gilesm/
*
* and C code developed by Stephen Joe, University of Waikato, New Zealand
* and Frances Kuo, University of New South Wales, Australia
* http://web.maths.unsw.edu.au/~fkuo/sobol/
*
* For theoretical background see:
*
* P. Bratley and B.L. Fox.
* Implementing Sobol's quasirandom sequence generator
* http://portal.acm.org/citation.cfm?id=42288
* ACM Trans. on Math. Software, 14(1):88-100, 1988
*
* S. Joe and F. Kuo.
* Remark on algorithm 659: implementing Sobol's quasirandom sequence generator.
* http://portal.acm.org/citation.cfm?id=641879
* ACM Trans. on Math. Software, 29(1):49-57, 2003
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "sobol.h"
#include "sobol_gold.h"
#include "sobol_primitives.h"
#define k_2powneg32 2.3283064E-10F
// Windows does not provide ffs (find first set) so here is a
// fairly simple implementation.
// _WIN32 is defined on 32 and 64 bit Windows
#ifdef _WIN32
// Returns the 1-based index of the least significant set bit of i,
// or 0 if i is zero (the POSIX ffs() contract).
int ffs(const unsigned int &i)
{
    if (i == 0)
    {
        return 0;
    }

    // Shift the value right until its lowest set bit reaches bit 0,
    // counting positions as we go.
    unsigned int bits = i;
    int position = 1;

    while ((bits & 1u) == 0)
    {
        bits >>= 1;
        position++;
    }

    return position;
}
#endif
// Create the direction numbers, based on the primitive polynomials.
//
// `directions` must hold n_dimensions * n_directions entries; each dimension
// receives n_directions direction numbers stored in Q0.32 fixed-point format.
// Per-dimension data (degree d, initial m values, coefficient bits a) comes
// from the sobol_primitives table; the recurrence is the one given by
// Bratley & Fox.
void initSobolDirectionVectors(int n_dimensions, unsigned int *directions)
{
    // v walks through the output, one n_directions-sized slice per dimension.
    unsigned int *v = directions;

    for (int dim = 0 ; dim < n_dimensions ; dim++)
    {
        // First dimension is a special case
        if (dim == 0)
        {
            for (int i = 0 ; i < n_directions ; i++)
            {
                // All m's are 1
                v[i] = 1 << (31 - i);
            }
        }
        else
        {
            int d = sobol_primitives[dim].degree;

            // The first direction numbers (up to the degree of the polynomial)
            // are simply v[i] = m[i] / 2^i (stored in Q0.32 format)
            for (int i = 0 ; i < d ; i++)
            {
                v[i] = sobol_primitives[dim].m[i] << (31 - i);
            }

            // The remaining direction numbers are computed as described in
            // the Bratley and Fox paper.
            // v[i] = a[1]v[i-1] ^ a[2]v[i-2] ^ ... ^ a[v-1]v[i-d+1] ^ v[i-d] ^ v[i-d]/2^d
            for (int i = d ; i < n_directions ; i++)
            {
                // First do the v[i-d] ^ v[i-d]/2^d part
                v[i] = v[i - d] ^ (v[i - d] >> d);

                // Now do the a[1]v[i-1] ^ a[2]v[i-2] ^ ... part
                // Note that the coefficients a[] are zero or one and for compactness in
                // the input tables they are stored as bits of a single integer. To extract
                // the relevant bit we use right shift and mask with 1.
                // For example, for a 10 degree polynomial there are ten useful bits in a,
                // so to get a[2] we need to right shift 7 times (to get the 8th bit into
                // the LSB) and then mask with 1.
                for (int j = 1 ; j < d ; j++)
                {
                    v[i] ^= (((sobol_primitives[dim].a >> (d - 1 - j)) & 1) * v[i - j]);
                }
            }
        }

        // Advance to the next dimension's slice of the output.
        v += n_directions;
    }
}
// Reference model for generating Sobol numbers on the host
//
// Fills `output` with n_dimensions blocks of n_vectors floats in [0, 1).
// `directions` must contain n_directions direction numbers per dimension
// (as produced by initSobolDirectionVectors).  Layout is dimension-major:
// output[i + n_vectors * d] is point i of dimension d.
void sobolCPU(int n_vectors, int n_dimensions, unsigned int *directions, float *output)
{
    unsigned int *v = directions;

    for (int d = 0 ; d < n_dimensions ; d++)
    {
        // X accumulates the Q0.32 fixed-point Sobol integer via XORs
        // of direction numbers (gray-code ordering).
        unsigned int X = 0;

        // x[0] is zero (in all dimensions)
        output[n_vectors * d] = 0.0;

        for (int i = 1 ; i < n_vectors ; i++)
        {
            // x[i] = x[i-1] ^ v[c]
            // where c is the index of the rightmost zero bit in i
            // minus 1 (since C arrays count from zero)
            // In the Bratley and Fox paper this is equation (**)
            X ^= v[ffs(~(i - 1)) - 1];

            // Convert from Q0.32 fixed point to float in [0, 1);
            // k_2powneg32 is 2^-32.
            output[i + n_vectors * d] = (float)X * k_2powneg32;
        }

        // Next dimension's direction numbers.
        v += n_directions;
    }
}
|
//===--- SILLowerAggregateInstrs.cpp - Aggregate insts to Scalar insts ---===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// Simplify aggregate instructions into scalar instructions.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sil-lower-aggregate-instrs"
#include "swift/SIL/Projection.h"
#include "swift/SIL/SILBuilder.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SIL/SILModule.h"
#include "swift/SIL/SILVisitor.h"
#include "swift/SIL/TypeLowering.h"
#include "swift/SILOptimizer/PassManager/Passes.h"
#include "swift/SILOptimizer/PassManager/Transforms.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
using namespace swift;
using namespace swift::Lowering;
STATISTIC(NumExpand, "Number of instructions expanded");
//===----------------------------------------------------------------------===//
// Higher Level Operation Expansion
//===----------------------------------------------------------------------===//
/// Lower copy_addr into loads/stores/retain/release if we have a
/// non-address only type. We do this here so we can process the resulting
/// loads/stores.
///
/// This peephole implements the following optimizations:
///
/// copy_addr %0 to %1 : $*T
/// ->
/// %new = load %0 : $*T // Load the new value from the source
/// %old = load %1 : $*T // Load the old value from the destination
/// strong_retain %new : $T // Retain the new value
/// strong_release %old : $T // Release the old
/// store %new to %1 : $*T // Store the new value to the destination
///
/// copy_addr [take] %0 to %1 : $*T
/// ->
/// %new = load %0 : $*T
/// %old = load %1 : $*T
/// // no retain of %new!
/// strong_release %old : $T
/// store %new to %1 : $*T
///
/// copy_addr %0 to [initialization] %1 : $*T
/// ->
/// %new = load %0 : $*T
/// strong_retain %new : $T
/// // no load/release of %old!
/// store %new to %1 : $*T
///
/// copy_addr [take] %0 to [initialization] %1 : $*T
/// ->
/// %new = load %0 : $*T
/// // no retain of %new!
/// // no load/release of %old!
/// store %new to %1 : $*T
/// Implementation of the copy_addr lowering documented above.  Emits the
/// replacement instructions *before* CA; the caller is responsible for
/// erasing CA when this returns true.  Returns false (no change) for
/// address-only types, which cannot be loaded.
static bool expandCopyAddr(CopyAddrInst *CA) {
  SILModule &M = CA->getModule();
  SILFunction *F = CA->getFunction();
  SILValue Source = CA->getSrc();

  // If we have an address only type don't do anything.
  SILType SrcType = Source->getType();
  if (SrcType.isAddressOnly(M))
    return false;

  // Decide whether the copy/destroy below should be emitted on the leaf
  // fields of the aggregate or on the whole value at once.
  bool expand = shouldExpand(M, SrcType.getObjectType());
  using TypeExpansionKind = Lowering::TypeLowering::TypeExpansionKind;
  auto expansionKind = expand ? TypeExpansionKind::MostDerivedDescendents
                              : TypeExpansionKind::None;

  SILBuilderWithScope Builder(CA);

  // %new = load %0 : $*T
  LoadInst *New = Builder.createLoad(CA->getLoc(), Source,
                                     LoadOwnershipQualifier::Unqualified);

  SILValue Destination = CA->getDest();

  // If our object type is not trivial, we may need to release the old value and
  // retain the new one.
  auto &TL = F->getTypeLowering(SrcType);

  // If we have a non-trivial type...
  if (!TL.isTrivial()) {
    // If we are not initializing:
    // %old = load %1 : $*T
    IsInitialization_t IsInit = CA->isInitializationOfDest();
    LoadInst *Old = nullptr;
    if (IsInitialization_t::IsNotInitialization == IsInit) {
      Old = Builder.createLoad(CA->getLoc(), Destination,
                               LoadOwnershipQualifier::Unqualified);
    }

    // If we are not taking and have a reference type:
    //   strong_retain %new : $*T
    // or if we have a non-trivial non-reference type.
    //   retain_value %new : $*T
    IsTake_t IsTake = CA->isTakeOfSrc();
    if (IsTake_t::IsNotTake == IsTake) {
      TL.emitLoweredCopyValue(Builder, CA->getLoc(), New, expansionKind);
    }

    // If we are not initializing:
    //   strong_release %old : $*T
    //   *or*
    //   release_value %old : $*T
    if (Old) {
      TL.emitLoweredDestroyValue(Builder, CA->getLoc(), Old, expansionKind);
    }
  }

  // Create the store.
  Builder.createStore(CA->getLoc(), New, Destination,
                      StoreOwnershipQualifier::Unqualified);

  ++NumExpand;
  return true;
}
/// Lower destroy_addr into a load plus the appropriate lowered destroy when
/// the type is loadable.  Returns true if the caller should erase the
/// instruction — note this is also true for trivial types, whose
/// destroy_addr is a no-op and is simply deleted without emitting anything.
static bool expandDestroyAddr(DestroyAddrInst *DA) {
  SILFunction *F = DA->getFunction();
  SILModule &Module = DA->getModule();
  SILBuilderWithScope Builder(DA);

  // Strength reduce destroy_addr inst into release/store if
  // we have a non-address only type.
  SILValue Addr = DA->getOperand();

  // If we have an address only type, do nothing.
  SILType Type = Addr->getType();
  if (Type.isAddressOnly(Module))
    return false;

  // Whether to destroy leaf fields individually or the whole value at once.
  bool expand = shouldExpand(Module, Type.getObjectType());

  // If we have a non-trivial type...
  if (!Type.isTrivial(Module)) {
    // If we have a type with reference semantics, emit a load/strong release.
    LoadInst *LI = Builder.createLoad(DA->getLoc(), Addr,
                                      LoadOwnershipQualifier::Unqualified);
    auto &TL = F->getTypeLowering(Type);
    using TypeExpansionKind = Lowering::TypeLowering::TypeExpansionKind;
    auto expansionKind = expand ? TypeExpansionKind::MostDerivedDescendents
                                : TypeExpansionKind::None;
    TL.emitLoweredDestroyValue(Builder, DA->getLoc(), LI, expansionKind);
  }

  ++NumExpand;
  return true;
}
/// Lower release_value into the lowered destroy operations of the leaf
/// fields of its aggregate operand, when profitable.  Returns true if the
/// caller should erase the instruction.
/// Fixes: the assert relied on `&&` binding tighter than `||`
/// (triggering -Wparentheses); the leading comment was a copy-paste from
/// the destroy_addr expansion.
static bool expandReleaseValue(ReleaseValueInst *DV) {
  SILFunction *F = DV->getFunction();
  SILModule &Module = DV->getModule();
  SILBuilderWithScope Builder(DV);

  SILValue Value = DV->getOperand();

  SILType Type = Value->getType();
  // Under lowered-address conventions, release_value operands must be
  // loadable.  Parenthesized so the message attaches to the whole condition.
  assert((!SILModuleConventions(Module).useLoweredAddresses()
          || Type.isLoadable(Module)) &&
         "release_value should never be called on a non-loadable type.");

  // Only expand when the profitability heuristic says so.
  if (!shouldExpand(Module, Type.getObjectType()))
    return false;

  auto &TL = F->getTypeLowering(Type);
  TL.emitLoweredDestroyValueMostDerivedDescendents(Builder, DV->getLoc(),
                                                   Value);

  LLVM_DEBUG(llvm::dbgs() << "    Expanding Destroy Value: " << *DV);

  ++NumExpand;
  return true;
}
/// Lower retain_value into the lowered copy operations of the leaf fields
/// of its aggregate operand, when profitable.  Returns true if the caller
/// should erase the instruction.
/// Fixes: the assert relied on `&&` binding tighter than `||`
/// (triggering -Wparentheses); the leading comment was a copy-paste from
/// the destroy_addr expansion.
static bool expandRetainValue(RetainValueInst *CV) {
  SILFunction *F = CV->getFunction();
  SILModule &Module = CV->getModule();
  SILBuilderWithScope Builder(CV);

  SILValue Value = CV->getOperand();

  SILType Type = Value->getType();
  // Under lowered-address conventions, retain_value operands must be
  // loadable.  Parenthesized so the message attaches to the whole condition.
  assert((!SILModuleConventions(Module).useLoweredAddresses()
          || Type.isLoadable(Module)) &&
         "Copy Value can only be called on loadable types.");

  // Only expand when the profitability heuristic says so.
  if (!shouldExpand(Module, Type.getObjectType()))
    return false;

  auto &TL = F->getTypeLowering(Type);
  TL.emitLoweredCopyValueMostDerivedDescendents(Builder, CV->getLoc(), Value);

  LLVM_DEBUG(llvm::dbgs() << "    Expanding Copy Value: " << *CV);

  ++NumExpand;
  return true;
}
//===----------------------------------------------------------------------===//
// Top Level Driver
//===----------------------------------------------------------------------===//

/// Walk every instruction of \p Fn and expand the aggregate operations
/// (copy_addr, destroy_addr, retain_value, release_value) into scalar
/// loads/stores/retains/releases.  Returns true if anything changed.
static bool processFunction(SILFunction &Fn) {
  bool Changed = false;
  for (auto BI = Fn.begin(), BE = Fn.end(); BI != BE; ++BI) {
    // Manual iterator control: after a successful expansion the iterator is
    // advanced *before* the visited instruction is erased, so erasure never
    // invalidates II.
    auto II = BI->begin(), IE = BI->end();
    while (II != IE) {
      SILInstruction *Inst = &*II;

      LLVM_DEBUG(llvm::dbgs() << "Visiting: " << *Inst);

      if (auto *CA = dyn_cast<CopyAddrInst>(Inst))
        if (expandCopyAddr(CA)) {
          ++II;
          CA->eraseFromParent();
          Changed = true;
          continue;
        }

      if (auto *DA = dyn_cast<DestroyAddrInst>(Inst))
        if (expandDestroyAddr(DA)) {
          ++II;
          DA->eraseFromParent();
          Changed = true;
          continue;
        }

      if (auto *CV = dyn_cast<RetainValueInst>(Inst))
        if (expandRetainValue(CV)) {
          ++II;
          CV->eraseFromParent();
          Changed = true;
          continue;
        }

      if (auto *DV = dyn_cast<ReleaseValueInst>(Inst))
        if (expandReleaseValue(DV)) {
          ++II;
          DV->eraseFromParent();
          Changed = true;
          continue;
        }

      // No expansion applied; move to the next instruction.
      ++II;
    }
  }
  return Changed;
}
namespace {

/// Function pass that lowers aggregate SIL instructions (copy_addr,
/// destroy_addr, retain_value, release_value) into scalar operations.
class SILLowerAggregate : public SILFunctionTransform {

  /// The entry point to the transformation.
  void run() override {
    SILFunction *F = getFunction();
    // FIXME: Can we support ownership?
    if (F->hasOwnership())
      return;
    LLVM_DEBUG(llvm::dbgs() << "***** LowerAggregate on function: " <<
               F->getName() << " *****\n");
    bool Changed = processFunction(*F);
    if (Changed) {
      // Expansion replaces instructions, so instruction-level analyses
      // must be recomputed.
      invalidateAnalysis(SILAnalysis::InvalidationKind::CallsAndInstructions);
    }
  }

};
} // end anonymous namespace
/// Pass factory: returns a fresh instance of the LowerAggregateInstrs pass.
/// Ownership transfers to the pass manager.
SILTransform *swift::createLowerAggregateInstrs() {
  return new SILLowerAggregate();
}
|
#include <sys/types.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdlib>
#include <iostream>
#include <string.h>
#define BUF_SIZE 200
#define SERVER_ADDR "47.97.181.98"
#define SERVER_PORT 20000
// Nonblocking TCP client demo: connects to SERVER_ADDR:SERVER_PORT using a
// nonblocking connect(), waits for readability with select(), and prints the
// first message received.  Returns 0 on success, nonzero on error.
//
// Fixes relative to the original:
//  * `(clientfd = socket(...) == -1)` had unbalanced parentheses and, once
//    balanced naively, would have assigned the comparison result to clientfd;
//    the assignment is now separate from the test.
//  * O_NONBLOCK was never actually set, so connect() blocked and the
//    EINPROGRESS/select() machinery below was dead code.
//  * select() may modify the timeout (on Linux), so tv is re-armed each loop.
//  * recv() used the full buffer, so a max-size message was not
//    NUL-terminated before being streamed; one byte is now reserved.
//  * the socket was leaked on the recv() error path.
int main()
{
    // Create the TCP socket.
    int clientfd = socket(AF_INET, SOCK_STREAM, 0);
    if (clientfd == -1) {
        perror("create client socket error.");
        exit(-1);
    }

    // Switch to nonblocking mode so connect() returns immediately with
    // EINPROGRESS and completion can be awaited with select().
    int socket_flag = fcntl(clientfd, F_GETFL, 0);
    if (socket_flag == -1 ||
        fcntl(clientfd, F_SETFL, socket_flag | O_NONBLOCK) == -1) {
        close(clientfd);
        std::cout << "set socket to nonblock error." << std::endl;
        return -1;
    }

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = inet_addr(SERVER_ADDR);
    addr.sin_port = htons(SERVER_PORT);

    while (true) {
        int ret = connect(clientfd, (struct sockaddr*)&addr, sizeof(addr));
        if (ret == 0) {
            std::cout << "connect to server success." << std::endl;
            break;
        }
        if (ret == -1) {
            if (errno == EINTR) {
                // Interrupted by a signal before the attempt started; retry.
                std::cout << "connect interruptted." << std::endl;
                continue;
            } else if (errno == EINPROGRESS) {
                // Expected for a nonblocking connect; completion is detected
                // below via select() + SO_ERROR.
                break;
            } else {
                close(clientfd);
                return -1;
            }
        }
    }

    bool is_recv = false;
    while (!is_recv) {
        // select() may modify the timeout, so re-arm it on every iteration.
        struct timeval tv;
        tv.tv_sec = 1;
        tv.tv_usec = 0;

        fd_set readset;
        FD_ZERO(&readset);
        FD_SET(clientfd, &readset);
        int select_ret = select(clientfd + 1, &readset, nullptr, nullptr, &tv);
        if (select_ret == 0) {
            std::cout << "[select] timeout." << std::endl;
            continue;
        } else if (select_ret < 0) {
            std::cout << "[select] connect to server error." << std::endl;
            close(clientfd);
            return -1;
        }

        // Check for an asynchronous connect/socket error before reading.
        int err;
        socklen_t len = static_cast<socklen_t>(sizeof(err));
        if (::getsockopt(clientfd, SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
            close(clientfd);
            return -1;
        }
        if (err == 0) {
            if (FD_ISSET(clientfd, &readset)) {
                char buf[BUF_SIZE];
                memset(buf, 0, sizeof(buf));
                // Reserve one byte so buf is always NUL-terminated.
                int ret = recv(clientfd, buf, sizeof(buf) - 1, 0);
                if (ret == -1) {
                    std::cout << "recv data error." << std::endl;
                    close(clientfd);
                    return EWOULDBLOCK;
                } else {
                    std::cout << "recv data: " << buf << std::endl;
                    is_recv = true;
                }
            }
        } else {
            std::cout << "connect to server error." << std::endl;
        }
    }

    close(clientfd);
    return 0;
}
|
#include "std_lib_facilities.h"
#include <sstream> //maybe fix
// Reads every whitespace-separated token from the stream and returns the sum
// of the tokens that parse as integers; non-numeric tokens are skipped.
// Returns 0 for a stream that is not open.  The stream is closed on return.
//
// Fix: the inner loop was driven by `while (!ss.eof())`, so a line with
// trailing whitespace made the final extraction fail while leaving `tmp`
// holding the previous token — which was then parsed and added a second
// time.  Driving the loop off extraction success eliminates the duplicate.
int get_sum_of_ints(std::ifstream& ifs) {
    int sum = 0;
    if (ifs.is_open())
    {
        std::string line;
        while (std::getline(ifs, line))
        {
            std::stringstream ss(line);
            std::string tmp;
            // Extraction-success loop: tmp is only examined when a fresh
            // token was actually read.
            while (ss >> tmp)
            {
                int val = 0;
                if (std::stringstream(tmp) >> val) {
                    sum += val;
                }
            }
        }
        ifs.close();
    }
    return sum;
}
// Entry point: sums the integers found in input1.txt and prints the result.
// Fixes: removed the stray semicolon after main's closing brace, and an
// exception now yields a nonzero exit code instead of silently returning 0.
int main()
{
    try
    {
        string input_f = "input1.txt";
        ifstream ifs(input_f);
        cout << get_sum_of_ints(ifs) << endl;
    }
    catch (const std::exception& e) {
        cout << e.what() << endl;
        return 1;
    }
    return 0;
}
|
/*=============================================================================
Copyright (c) 2011-2019 Bolero MURAKAMI
https://github.com/bolero-MURAKAMI/Sprout
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPROUT_COMPOST_ANALYSES_DFT_HPP
#define SPROUT_COMPOST_ANALYSES_DFT_HPP
#include <sprout/config.hpp>
#include <sprout/range/adaptor/dft.hpp>
namespace sprout {
namespace compost {
namespace analyses {
//
// dft
//
// Exposes the DFT range adaptor (implemented in
// sprout/range/adaptor/dft.hpp) under the compost::analyses namespace,
// and re-exports it one level up as sprout::compost::dft.
using sprout::adaptors::dft;
} // namespace analyses
using sprout::compost::analyses::dft;
} // namespace compost
} // namespace sprout
#endif // #ifndef SPROUT_COMPOST_ANALYSES_DFT_HPP
|
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <memory>
#include <ngraph/ngraph.hpp>
#include "low_precision/layer_transformation.hpp"
#include "common/fake_quantize_on_data.hpp"
#include "common/dequantization_operations.hpp"
namespace ngraph {
namespace builder {
namespace subgraph {
// Factory of ngraph test subgraphs built around a Concat operation, used by
// the low-precision-transformation (LPT) tests.  The getOriginal* builders
// produce graphs with FakeQuantize operations as they look *before* the
// transformation; the getReference* builders produce the expected graphs
// *after* the transformation, with explicit dequantization operations and
// precisions.  All methods are stateless factories returning a new Function.
class ConcatFunction {
public:
    // --- Graphs before transformation -----------------------------------
    static std::shared_ptr<ngraph::Function> getOriginal(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fakeQuantize1,
        const FakeQuantizeOnData& fakeQuantize2);

    // Overload taking FakeQuantize descriptions with constant inputs.
    static std::shared_ptr<ngraph::Function> getOriginal(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnDataWithConstant& fakeQuantize1,
        const FakeQuantizeOnDataWithConstant& fakeQuantize2);

    static std::shared_ptr<ngraph::Function> getOriginalWithChildAndOutput(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fakeQuantize1,
        const FakeQuantizeOnData& fakeQuantize2);

    static std::shared_ptr<ngraph::Function> getOriginalWithNeighbors(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const FakeQuantizeOnData& fqOnData3,
        const std::string& neighborType,
        const std::string& additionalLayer);

    static std::shared_ptr<ngraph::Function> getOriginalWithIntermediate(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool transparentIntermediate,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

    static std::shared_ptr<ngraph::Function> getOriginalWithIntermediateAvgPool(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

    static std::shared_ptr<ngraph::Function> getOriginalWithSplitedIntermediate(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const bool addConvolution);

    static std::shared_ptr<ngraph::Function> getOriginalSelectionWithIntermediate(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool transparentIntermediate,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

    static std::shared_ptr<ngraph::Function> getOriginalWithStridedSlice(
        const ngraph::element::Type precision,
        const ngraph::Shape inputShape,
        const FakeQuantizeOnData& fq1,
        const FakeQuantizeOnData& fq2,
        const bool ssBeforeConcat,
        const bool ssAfterConcat);

    static std::shared_ptr<ngraph::Function> getOriginalWithDifferentPrecisionOnChildren(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

    static std::shared_ptr<ngraph::Function> getOriginalWithIntermediateWithConstant(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool transparentIntermediate,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

    static std::shared_ptr<ngraph::Function> getOriginalWithReshapeAtTheEndTransformation(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnDataWithConstant& fqOnData1,
        const FakeQuantizeOnDataWithConstant& fqOnData2,
        const FakeQuantizeOnDataWithConstant& fqOnData3);

    static std::shared_ptr<ngraph::Function> getOriginalWithIntermediateReshape(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const ngraph::Shape& reshapeOutputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2);

    // --- Expected graphs after transformation ----------------------------
    static std::shared_ptr<ngraph::Function> getReference(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fakeQuantize1,
        const FakeQuantizeOnData& fakeQuantize2,
        const DequantizationOperations& dequantizationOperations);

    // Fully parameterized builder: per-branch FakeQuantize, Convert, and
    // dequantization, plus the post-concat precision/dequantization and
    // the concatenation axis.
    static std::shared_ptr<ngraph::Function> get(
        const ngraph::element::Type inputPrecision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnDataWithConstant& fakeQuantize1,
        const DequantizationOperations::Convert& convert1,
        const DequantizationOperations& dequantization1,
        const FakeQuantizeOnDataWithConstant& fakeQuantize2,
        const DequantizationOperations::Convert& convert2,
        const DequantizationOperations& dequantization2,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationAfter,
        const std::int64_t& axis);

    static std::shared_ptr<ngraph::Function> getReferenceWithNeighbors(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const FakeQuantizeOnData& fqOnData3,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationOperations1,
        const DequantizationOperations& dequantizationOperations2,
        const std::string& neighborType,
        const std::string& additionalLayer);

    // TODO: refactor: dequantizationBefore2 <=> dequantizationOperations2
    static std::shared_ptr<ngraph::Function> getReferenceWithIntermediate(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool transparentIntermediate,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore1,
        const DequantizationOperations& dequantizationOperations2,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationOperations1,
        const DequantizationOperations& dequantizationBefore2);

    static std::shared_ptr<ngraph::Function> getReferenceWithIntermediateAvgPool(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore1,
        const DequantizationOperations& dequantizationBefore2,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationOperations1,
        const DequantizationOperations& dequantizationOperations2);

    static std::shared_ptr<ngraph::Function> getReferenceWithSplitedIntermediate(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore1,
        const DequantizationOperations& dequantizationBefore2,
        const ngraph::element::Type precisionAfterOperation,
        const bool addConvolution,
        const DequantizationOperations& dequantizationOperations1,
        const DequantizationOperations& dequantizationOperations2);

    static std::shared_ptr<ngraph::Function> getReferenceSelectionWithIntermediate(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool transparentIntermediate,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore1,
        const DequantizationOperations& dequantizationBefore2,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationOperations1,
        const DequantizationOperations& dequantizationOperations2);

    static std::shared_ptr<ngraph::Function> getReferenceWithStridedSlice(
        const ngraph::element::Type inputPrecision,
        const ngraph::Shape inputShape,
        const FakeQuantizeOnData& fq1,
        const FakeQuantizeOnData& fq2,
        const DequantizationOperations& deqBefore,
        const ngraph::element::Type precisionBeforeConcat,
        const ngraph::element::Type precisionAfterConcat,
        const bool ssBeforeConcat,
        const bool ssAfterConcat,
        const DequantizationOperations& deqAfter1,
        const DequantizationOperations& deqAfter2);

    static std::shared_ptr<ngraph::Function> getReferenceWithDifferentPrecisionOnChildren(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool multiChannel,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationAfter1,
        const DequantizationOperations& dequantizationAfter2);

    static std::shared_ptr<ngraph::Function> getReferenceWithIntermediateWithConstant(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const bool transparentIntermediate,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const ngraph::element::Type precisionBeforeOp,
        const DequantizationOperations& dequantizationBefore,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationAfter,
        const ngraph::element::Type precisionAfterDequantization);

    static std::shared_ptr<ngraph::Function> getReferenceWithReshapeAtTheEndTransformation(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const FakeQuantizeOnDataWithConstant& fqOnData1,
        const FakeQuantizeOnDataWithConstant& fqOnData2,
        const FakeQuantizeOnDataWithConstant& fqOnData3,
        const ngraph::element::Type precisionBeforeOp,
        const ngraph::element::Type precisionAfterOperation,
        const DequantizationOperations& dequantizationOperations);

    static std::shared_ptr<ngraph::Function> getReferenceWithIntermediateReshape(
        const ngraph::element::Type precision,
        const ngraph::Shape& inputShape,
        const ngraph::Shape& reshapeOutputShape,
        const FakeQuantizeOnData& fqOnData1,
        const FakeQuantizeOnData& fqOnData2,
        const DequantizationOperations& dequantizationAfter);

private:
    // Helper shared by the builders above to append a MaxPool node.
    static std::shared_ptr<Node> makeMaxPool(const Output<Node>& parent, const std::vector<size_t>& kernel);
};
} // namespace subgraph
} // namespace builder
} // namespace ngraph
|
/*
* Copyright (c) 2011-2014 ARM Limited
* Copyright (c) 2013 Advanced Micro Devices, Inc.
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Kevin Lim
* Korey Sewell
*/
#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
#define __CPU_O3_INST_QUEUE_IMPL_HH__
#include <limits>
#include <vector>
#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "debug/IQ.hh"
#include "enums/OpClass.hh"
#include "params/DerivO3CPU.hh"
#include "sim/core.hh"
// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::list;
// Construct the functional-unit completion event for @p _inst executing on
// FU @p fu_idx of instruction queue @p iq_ptr.  The event self-deletes after
// it fires (AutoDelete) and defaults to not freeing the FU on completion;
// setFreeFU() is called later for unpipelined units.
template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
    int fu_idx, InstructionQueue<Impl> *iq_ptr)
    : Event(Stat_Event_Pri, AutoDelete),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
}
// Event callback: hand the instruction back to the IQ for writeback
// scheduling.  Passes fuIdx only when freeFU was set (unpipelined FU),
// -1 otherwise so the pool does not free the unit twice.
template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    // Drop our reference so the DynInst can be reclaimed.
    inst = NULL;
}
// Human-readable event description used by event tracing/debugging.
template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}
// Build the instruction queue: size the dependency graph and register
// scoreboard from the physical register file configuration, initialize the
// per-thread memory dependence units, and apply the SMT IQ sharing policy
// ("dynamic", "partitioned" or "threshold") to set per-thread entry limits.
template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    numThreads = params->numThreads;

    // Set the number of total physical registers
    // As the vector registers have two addressing modes (full register and
    // per-element), they are added twice
    numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
                    params->numPhysVecRegs +
                    params->numPhysVecRegs * TheISA::NumVecElemPerVecReg +
                    params->numPhysCCRegs;

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    //Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        //Set Max Entries to Total ROB Capacity
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo:make work if part_amt doesnt divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide ROB up evenly
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n",part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold =  (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n",thresholdIQ);
    } else {
        // Unknown policy string from the config is a fatal misconfiguration.
        assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic,"
               "Partitioned, Threshold}");
    }
}
// Tear down the IQ; releases the dependency graph and, in debug builds,
// reports how many dependency-graph nodes were traversed/removed.
template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}
// Name of this IQ for statistics/debug output, derived from the owning CPU.
template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}
// Register all IQ statistics with the stats framework: insert/issue counts
// by instruction class, squash accounting, issue-rate formulas, FU busy
// counters, per-queue read/write/wakeup access counters and ALU access
// counters.  Also delegates to each thread's memory dependence unit.
// Commented-out distributions are legacy stats kept for reference.
template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Enums::Num_OpClass)
        .name(name() + ".FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    //  How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;
    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    // Formula stat: issued instructions per cycle.
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    // Formula stat: busy events per issued instruction.
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[tid].regStats();
    }

    intInstQueueReads
        .name(name() + ".int_inst_queue_reads")
        .desc("Number of integer instruction queue reads")
        .flags(total);

    intInstQueueWrites
        .name(name() + ".int_inst_queue_writes")
        .desc("Number of integer instruction queue writes")
        .flags(total);

    intInstQueueWakeupAccesses
        .name(name() + ".int_inst_queue_wakeup_accesses")
        .desc("Number of integer instruction queue wakeup accesses")
        .flags(total);

    fpInstQueueReads
        .name(name() + ".fp_inst_queue_reads")
        .desc("Number of floating instruction queue reads")
        .flags(total);

    fpInstQueueWrites
        .name(name() + ".fp_inst_queue_writes")
        .desc("Number of floating instruction queue writes")
        .flags(total);

    fpInstQueueWakeupAccesses
        .name(name() + ".fp_inst_queue_wakeup_accesses")
        .desc("Number of floating instruction queue wakeup accesses")
        .flags(total);

    vecInstQueueReads
        .name(name() + ".vec_inst_queue_reads")
        .desc("Number of vector instruction queue reads")
        .flags(total);

    vecInstQueueWrites
        .name(name() + ".vec_inst_queue_writes")
        .desc("Number of vector instruction queue writes")
        .flags(total);

    vecInstQueueWakeupAccesses
        .name(name() + ".vec_inst_queue_wakeup_accesses")
        .desc("Number of vector instruction queue wakeup accesses")
        .flags(total);

    intAluAccesses
        .name(name() + ".int_alu_accesses")
        .desc("Number of integer alu accesses")
        .flags(total);

    fpAluAccesses
        .name(name() + ".fp_alu_accesses")
        .desc("Number of floating point alu accesses")
        .flags(total);

    vecAluAccesses
        .name(name() + ".vec_alu_accesses")
        .desc("Number of vector alu accesses")
        .flags(total);
}
// Restore the IQ to its empty post-construction state: clear per-thread
// instruction lists and counts, free all entries, mark every physical
// register not-ready, and empty all ready/ordering/memory-instruction
// bookkeeping.  Used at construction and on takeOverFrom().
template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (ThreadID tid = 0; tid <numThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready.  However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename.  Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        // Drain each per-op-class priority queue of ready instructions.
        while (!readyInsts[i].empty())
            readyInsts[i].pop();

        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
    deferredMemInsts.clear();
    blockedMemInsts.clear();
    retryMemInsts.clear();
    wbOutstanding = 0;
}
// Record the pointer to the CPU's active-threads list (owned elsewhere);
// consulted by resetEntries() when redistributing IQ capacity.
template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}
// Record the time buffer through which issued instructions are communicated
// to the execute stage.
template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}
// Record the main backwards time buffer and grab a read wire positioned
// commitToIEWDelay cycles in the past so squash info from commit arrives
// with the configured delay.
template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}
// True when the IQ holds no state that would be lost across a drain: the
// dependency graph and execute list are empty, no writebacks are pending,
// and every thread's memory dependence unit reports drained.
template <class Impl>
bool
InstructionQueue<Impl>::isDrained() const
{
    if (!dependGraph.empty() || !instsToExecute.empty() || wbOutstanding != 0)
        return false;

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        if (!memDepUnit[tid].isDrained())
            return false;
    }

    return true;
}
// Debug-build verification that the IQ really is empty after a drain;
// also delegates the check to each thread's memory dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::drainSanityCheck() const
{
    assert(dependGraph.empty());
    assert(instsToExecute.empty());
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        memDepUnit[tid].drainSanityCheck();
}
// Reinitialize the IQ when this CPU takes over execution from another
// (CPU switching); equivalent to a full state reset.
template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    resetState();
}
// Number of IQ entries each of @p num_threads threads would receive.
// Only meaningful for the Partitioned policy; other policies return 0
// (no fixed per-thread share).
template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    return (iqPolicy == Partitioned) ? (numEntries / num_threads) : 0;
}
// Redistribute per-thread IQ capacity when the set of active threads
// changes.  Partitioned: split entries evenly among active threads.
// Threshold: give a lone active thread the whole queue.  Dynamic with a
// single thread needs no adjustment and is skipped entirely.
template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy == Dynamic && numThreads <= 1)
        return;

    const int active_threads = activeThreads->size();

    for (ThreadID tid : *activeThreads) {
        if (iqPolicy == Partitioned) {
            maxEntries[tid] = numEntries / active_threads;
        } else if (iqPolicy == Threshold && active_threads == 1) {
            maxEntries[tid] = numEntries;
        }
    }
}
// Total number of free IQ entries across all threads.
template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}
// Free entries available to thread @p tid under the current sharing
// policy (its cap minus what it currently occupies).
template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}
// Whether the IQ has no free entries at all.  Might want to do something
// more complex if it knows how many instructions will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    return freeEntries == 0;
}
// Whether thread @p tid has exhausted its share of IQ entries.
template <class Impl>
bool
InstructionQueue<Impl>::isFull(ThreadID tid)
{
    return numFreeEntries(tid) == 0;
}
// True if any instruction is ready to issue: either the issue ordering
// list is non-empty or some op class has a non-empty ready queue.
template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty())
        return true;

    for (int op_cls = 0; op_cls < Num_OpClasses; ++op_cls) {
        if (!readyInsts[op_cls].empty())
            return true;
    }

    return false;
}
// Insert a (speculative) instruction into the IQ: account a queue write by
// class, claim an entry, hook up source-register dependencies, register it
// as producer of its destinations, and either hand memory references to the
// memory dependence unit or mark the instruction ready if all sources are.
template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Stat accounting: count the write against the matching queue type.
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    // Caller must have checked isFull() first.
    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    // Invariant: free + occupied == total capacity.
    assert(freeEntries == (numEntries - countInsts()));
}
// Insert a non-speculative instruction.  Unlike insert(), the instruction
// is tracked in nonSpecInsts and is NOT wired into the source dependency
// graph or marked ready — it may only issue once commit signals it via
// scheduleNonSpec().  It still claims an entry and registers as producer
// of its destination registers.
template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }

    assert(new_inst);

    // Track it so scheduleNonSpec()/squash can find it by sequence number.
    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}
// Insert a memory barrier: register it with the thread's memory dependence
// unit first, then treat it as a non-speculative instruction.
template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}
// Pop and return the oldest instruction scheduled for execution, counting
// a queue read against the matching instruction class.  Caller must ensure
// instsToExecute is non-empty.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    if (inst->isFloating()) {
        fpInstQueueReads++;
    } else if (inst->isVector()) {
        vecInstQueueReads++;
    } else {
        intInstQueueReads++;
    }
    return inst;
}
// Add op class @p op_class to the age-ordered issue list.  The entry is
// keyed on the sequence number of the oldest ready instruction of that
// class and is inserted in ascending age order so issue scans oldest-first.
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    // Find the first entry younger than ours and insert before it.
    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}
// After the oldest ready instruction of an op class has been removed,
// re-place that class's ordering entry at the position matching its new
// oldest instruction.  Only scans forward (toward younger entries) since
// the new oldest is necessarily younger than the old one.
template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction.  If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}
// Called by an FUCompletion event when a multi-cycle FU operation finishes:
// decrement outstanding writebacks, wake the CPU (it may have slept during
// a long-latency op), free the FU if one was held (fu_idx > -1), and push
// the instruction into last cycle's issue-to-execute slot so it executes.
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    assert(!cpu->switchedOut());
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op).  Wake it if it was.  This may be overkill.
   --wbOutstanding;
    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(-1)->size++;

    instsToExecute.push_back(inst);
}
// @todo: Figure out a better way to remove the squashed items from the
// lists.  Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
//
// Main per-cycle issue loop.  First re-adds deferred (translation-complete)
// and cache-unblocked memory instructions to the ready queues, then walks
// the age-ordered list of op classes issuing up to totalWidth instructions:
// squashed entries are discarded, FU availability is checked per op class,
// single-cycle ops go straight to the execute queue while multi-cycle ops
// get an FUCompletion event.  Fix vs. original: the memory-instruction
// drain loops used a bare assignment in the while condition
// (while (mem_inst = ...)), which triggers -Wparentheses and fails
// -Werror builds; the comparison is now explicit.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    DynInstPtr mem_inst;
    // Re-add memory instructions whose address translation has completed.
    while ((mem_inst = getDeferredMemInstToExecute()) != nullptr) {
        addReadyMemInst(mem_inst);
    }

    // See if any cache blocked instructions are able to be executed
    while ((mem_inst = getBlockedMemInstToExecute()) != nullptr) {
        addReadyMemInst(mem_inst);
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    int total_issued = 0;
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();

    while (total_issued < totalWidth && order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        // Stat accounting: count the read against the matching queue type.
        if (issuing_inst->isFloating()) {
            fpInstQueueReads++;
        } else if (issuing_inst->isVector()) {
            vecInstQueueReads++;
        } else {
            intInstQueueReads++;
        }

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        // Drop squashed instructions without consuming issue bandwidth.
        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = FUPool::NoCapableFU;
        Cycles op_latency = Cycles(1);
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            if (issuing_inst->isFloating()) {
                fpAluAccesses++;
            } else if (issuing_inst->isVector()) {
                vecAluAccesses++;
            } else {
                intAluAccesses++;
            }
            if (idx > FUPool::NoFreeFU) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx != FUPool::NoFreeFU) {
            if (op_latency == Cycles(1)) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                bool pipelined = fuPool->isPipelined(op_class);
                // Generate completion event for the FU
                ++wbOutstanding;
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution,
                              cpu->clockEdge(Cycles(op_latency - 1)));

                if (!pipelined) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handeled due to
    // translation changes then the deferredMemInsts condition should be
    // removed from the code below.
    if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}
// Commit signals that non-speculative instruction @p inst may now execute.
// Marks it issuable, routes memory references through the memory dependence
// unit, and removes the tracking entry from nonSpecInsts.
template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    // Release our reference before erasing the map entry.
    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}
// Retire from thread @p tid's instruction list every instruction whose
// sequence number is at or below @p inst (the committed sequence number).
// The list is age-ordered, so committed instructions sit at the front.
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    while (!instList[tid].empty() &&
           instList[tid].front()->seqNum <= inst) {
        instList[tid].pop_front();
    }

    // Capacity invariant must still hold after retirement.
    assert(freeEntries == (numEntries - countInsts()));
}
// Wake all instructions waiting on the destination registers of
// @p completed_inst.  For memory references, also notifies the memory
// dependence unit and completes the memory instruction; for barriers,
// completes the barrier.  Then pops every consumer off each destination
// register's dependency chain, marks the source ready, and enqueues it if
// fully ready.  Returns the number of dependents woken.
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupAccesses++;
    } else if (completed_inst->isVector()) {
        vecInstQueueWakeupAccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        if (dest_reg->isFixedMapping()) {
            DPRINTF(IQ, "Reg %d [%s] is part of a fix mapping, skipping\n",
                    dest_reg->index(), dest_reg->className());
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i (%s).\n",
                dest_reg->index(),
                dest_reg->className());

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex());

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg->flatIndex());

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg->flatIndex()));
        dependGraph.clearInst(dest_reg->flatIndex());

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg->flatIndex()] = true;
    }
    return dependents;
}
// Put a memory instruction whose dependences are resolved onto the ready
// queue for its op class, and fix up the age-ordered issue list: add the
// class if absent, or re-insert its entry if this instruction is now the
// oldest ready one of that class.
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}
// Send a memory instruction back to the memory dependence unit to be
// rescheduled (e.g. after a failed attempt); clears its translation state
// and issuable flag so it goes through the pipeline again.
template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted(false);
    resched_inst->translationCompleted(false);

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}
// Ask the thread's memory dependence unit to replay rescheduled memory
// instructions (the unit decides which to replay).
template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay();
}
// A memory instruction finished its memory operation: release its IQ entry
// (memory refs keep their entry until completion, unlike other ops which
// free it at issue), mark the op done, and notify the memory dependence
// unit.
template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone(true);

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}
// Park a memory instruction until its address translation completes; it
// is re-examined by getDeferredMemInstToExecute() each cycle.
template <class Impl>
void
InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
{
    deferredMemInsts.push_back(deferred_inst);
}
// Park a memory instruction that hit a blocked cache: reset its
// translation and issue state so it can be reissued cleanly once
// cacheUnblocked() moves it to the retry list.
template <class Impl>
void
InstructionQueue<Impl>::blockMemInst(DynInstPtr &blocked_inst)
{
    blocked_inst->translationStarted(false);
    blocked_inst->translationCompleted(false);

    blocked_inst->clearIssued();
    blocked_inst->clearCanIssue();
    blockedMemInsts.push_back(blocked_inst);
}
// The cache is no longer blocked: move every blocked memory instruction
// onto the retry list in one splice and wake the CPU so they get retried.
template <class Impl>
void
InstructionQueue<Impl>::cacheUnblocked()
{
    retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
    // Get the CPU ticking again
    cpu->wakeCPU();
}
// Return (and remove) the first deferred memory instruction that is now
// executable — its translation has completed — or that was squashed and
// just needs to be drained.  Returns nullptr when none qualifies.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getDeferredMemInstToExecute()
{
    ListIt cand = deferredMemInsts.begin();
    const ListIt last = deferredMemInsts.end();

    for (; cand != last; ++cand) {
        const bool runnable =
            (*cand)->translationCompleted() || (*cand)->isSquashed();
        if (runnable) {
            DynInstPtr picked = *cand;
            deferredMemInsts.erase(cand);
            return picked;
        }
    }

    return nullptr;
}
// Pop and return the next memory instruction awaiting retry after a cache
// unblock, or nullptr when the retry list is empty.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getBlockedMemInstToExecute()
{
    if (retryMemInsts.empty())
        return nullptr;

    DynInstPtr next_retry = retryMemInsts.front();
    retryMemInsts.pop_front();
    return next_retry;
}
// Report a memory-ordering violation (store vs. an earlier-executed
// faulting load) to the store's thread's memory dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    intInstQueueWrites++;
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}
// Squash all instructions in thread @p tid younger than the done sequence
// number delivered from commit through the time buffer, then propagate the
// squash to the thread's memory dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    doSquash(tid);

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}
template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Walks the thread's instruction list from the tail (youngest) towards
    // the head, squashing every instruction whose sequence number is greater
    // than squashedSeqNum[tid] (set by squash() from commit's doneSeqNum).
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;
    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);
    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {
        DynInstPtr squashed_inst = (*squash_it);
        // Account the squash as an IQ write in the per-class stats.
        if (squashed_inst->isFloating()) {
            fpInstQueueWrites++;
        } else if (squashed_inst->isVector()) {
            vecInstQueueWrites++;
        } else {
            intInstQueueWrites++;
        }
        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }
        // Not-yet-issued instructions, and issued memory references whose
        // memory operation has not completed, still hold IQ resources that
        // must be released.
        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone())) {
            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->pcState());
            // A memory barrier that is a load, or a store that is not a
            // store-conditional, is treated like a plain instruction for
            // dependency-list cleanup below (acquire/release semantics).
            bool is_acq_rel = squashed_inst->isMemBarrier() &&
                         (squashed_inst->isLoad() ||
                          (squashed_inst->isStore() &&
                                 !squashed_inst->isStoreConditional()));
            // Remove the instruction from the dependency list.
            if (is_acq_rel ||
                (!squashed_inst->isNonSpeculative() &&
                 !squashed_inst->isStoreConditional() &&
                 !squashed_inst->isMemBarrier() &&
                 !squashed_inst->isWriteBarrier())) {
                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIdPtr src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);
                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.
                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.
                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        !src_reg->isFixedMapping()) {
                        dependGraph.remove(src_reg->flatIndex(),
                                           squashed_inst);
                    }
                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                // Non-speculative/barrier instructions live in nonSpecInsts
                // instead of the dependency graph; drop the entry if present.
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                // we remove non-speculative instructions from
                // nonSpecInsts already when they are ready, and so we
                // cannot always expect to find them
                if (ns_inst_it == nonSpecInsts.end()) {
                    // loads that became ready but stalled on a
                    // blocked cache are already removed from
                    // nonSpecInsts, and have not faulted
                    assert(squashed_inst->getFault() != NoFault ||
                           squashed_inst->isMemRef());
                } else {
                    (*ns_inst_it).second = NULL;
                    nonSpecInsts.erase(ns_inst_it);
                    ++iqSquashedNonSpecRemoved;
                }
            }
            // Might want to also clear out the head of the dependency graph.
            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();
            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();
            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;
            ++freeEntries;
        }
        // Erase returns nothing for std::list; post-decrement keeps the
        // iterator valid while erasing the current node.
        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}
template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Registers this instruction in the dependency graph for every source
    // register that is not yet ready.  Returns true if the instruction was
    // added for at least one source (i.e. it must wait), false if all of
    // its sources were ready or fixed-mapped.
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;
    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages.  Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg->isFixedMapping()) {
                // Fixed-mapping registers are never tracked in the graph.
                continue;
            } else if (!regScoreboard[src_reg->flatIndex()]) {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "is being added to the dependency chain.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());
                dependGraph.insert(src_reg->flatIndex(), new_inst);
                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "became ready before it reached the IQ.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }
    return return_val;
}
template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Records this instruction as the producer of each of its renamed
    // destination registers and marks those registers not-ready on the
    // IQ scoreboard.
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();
    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
        // Some registers have fixed mapping, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg->isFixedMapping()) {
            continue;
        }
        // With renaming, a physical register should have at most one
        // outstanding producer; a non-empty chain here is a rename bug.
        if (!dependGraph.empty(dest_reg->flatIndex())) {
            dependGraph.dump();
            panic("Dependency graph %i (%s) (flat: %i) not empty!",
                  dest_reg->index(), dest_reg->className(),
                  dest_reg->flatIndex());
        }
        dependGraph.setInst(dest_reg->flatIndex(), new_inst);
        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg->flatIndex()] = false;
    }
}
template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // Moves the instruction onto the appropriate ready list once all of its
    // source registers are available.  Memory references are instead handed
    // to the memory dependence unit, which decides when they may issue.
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {
        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {
            DPRINTF(IQ, "Checking if memory instruction can issue.\n");
            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);
            return;
        }
        OpClass op_class = inst->opClass();
        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %s opclass:%i [sn:%lli].\n",
                inst->pcState(), op_class, inst->seqNum);
        readyInsts[op_class].push(inst);
        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}
template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
    // Returns the number of instructions currently occupying IQ entries.
    // The cheap computation below relies on freeEntries being kept
    // consistent by add/issue/squash; the disabled #if 0 path recomputes
    // the count by brute-force walking every thread's instruction list.
#if 0
    //ksewell:This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        ListIt count_it = instList[tid].begin();
        while (count_it != instList[tid].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone()) {
                    // Fix: memOpDone is a member function elsewhere in this
                    // file (see doSquash/dumpInsts); the bare member access
                    // here would not compile if this path were enabled.
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }
            ++count_it;
        }
    }
    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}
template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    // Debug helper: prints the size of each per-opclass ready list, the
    // contents of the non-speculative map, and the issue order list.
    for (int op = 0; op < Num_OpClasses; ++op) {
        cprintf("Ready list %i size: %i\n", op, readyInsts[op].size());
        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    cprintf("Non speculative list: ");
    for (NonSpecMapIt ns_it = nonSpecInsts.begin();
         ns_it != nonSpecInsts.end(); ++ns_it) {
        cprintf("%s [sn:%lli]", (*ns_it).second->pcState(),
                (*ns_it).second->seqNum);
    }
    cprintf("\n");

    cprintf("List order: ");
    int pos = 1;
    for (ListOrderIt order_it = listOrder.begin();
         order_it != listOrder.end(); ++order_it, ++pos) {
        cprintf("%i OpClass:%i [sn:%lli] ", pos, (*order_it).queueType,
                (*order_it).oldestInst);
    }
    cprintf("\n");
}
template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    // Debug helper: prints every instruction in each thread's IQ list,
    // then the contents of the instsToExecute list.  valid_num counts the
    // instructions that still occupy an IQ entry (not issued, or issued
    // memory refs whose memory op is not done).
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[tid].begin();
        while (inst_list_it != instList[tid].end()) {
            cprintf("Instruction:%i\n", num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone()) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }
            cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->pcState(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());
            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
            }
            cprintf("\n");
            inst_list_it++;
            ++num;
        }
    }
    cprintf("Insts to Execute list:\n");
    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();
    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone()) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }
        cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->pcState(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());
        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
        }
        cprintf("\n");
        inst_list_it++;
        ++num;
    }
}
#endif//__CPU_O3_INST_QUEUE_IMPL_HH__
|
/*
* Copyright (c) 2008, AMT – The Association For Manufacturing Technology (“AMT”)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the AMT nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* DISCLAIMER OF WARRANTY. ALL MTCONNECT MATERIALS AND SPECIFICATIONS PROVIDED
* BY AMT, MTCONNECT OR ANY PARTICIPANT TO YOU OR ANY PARTY ARE PROVIDED "AS IS"
* AND WITHOUT ANY WARRANTY OF ANY KIND. AMT, MTCONNECT, AND EACH OF THEIR
* RESPECTIVE MEMBERS, OFFICERS, DIRECTORS, AFFILIATES, SPONSORS, AND AGENTS
* (COLLECTIVELY, THE "AMT PARTIES") AND PARTICIPANTS MAKE NO REPRESENTATION OR
* WARRANTY OF ANY KIND WHATSOEVER RELATING TO THESE MATERIALS, INCLUDING, WITHOUT
* LIMITATION, ANY EXPRESS OR IMPLIED WARRANTY OF NONINFRINGEMENT,
* MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
* LIMITATION OF LIABILITY. IN NO EVENT SHALL AMT, MTCONNECT, ANY OTHER AMT
* PARTY, OR ANY PARTICIPANT BE LIABLE FOR THE COST OF PROCURING SUBSTITUTE GOODS
* OR SERVICES, LOST PROFITS, LOSS OF USE, LOSS OF DATA OR ANY INCIDENTAL,
* CONSEQUENTIAL, INDIRECT, SPECIAL OR PUNITIVE DAMAGES OR OTHER DIRECT DAMAGES,
* WHETHER UNDER CONTRACT, TORT, WARRANTY OR OTHERWISE, ARISING IN ANY WAY OUT OF
* THIS AGREEMENT, USE OR INABILITY TO USE MTCONNECT MATERIALS, WHETHER OR NOT
* SUCH PARTY HAD ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES.
*/
#ifndef COMPONENT_EVENT_TEST_HPP
#define COMPONENT_EVENT_TEST_HPP
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS 1
#endif
#include <map>
#include <string>
#include <cppunit/TestFixture.h>
#include <cppunit/extensions/HelperMacros.h>
#include "adapter.hpp"
/// CppUnit fixture exercising the Adapter class declared in adapter.hpp.
class AdapterTest : public CppUnit::TestFixture
{
  CPPUNIT_TEST_SUITE(AdapterTest);
  CPPUNIT_TEST(testAdapter);
  CPPUNIT_TEST_SUITE_END();

public:
  void setUp();
  void tearDown();

protected:
  // Fix: the original declared `protected:` twice in a row; the duplicate
  // access specifier was redundant and has been removed.
  void testAdapter();
};
#endif
|
/*****************************************************************//**
* \file uncopyable_class.cpp
* \brief prevent copying - even with compiler generated
* copy-constructor and assignment operator
*
* declare the copy constructor and copy assignment operator
* private in a base class and deleberately NOT implement them
*
* \author Xuhua Huang
* \date March 2021
*********************************************************************/
#include <iostream>
#include <string>
/**
 * Base class that makes any derived class non-copyable.
 *
 * The copy constructor and copy assignment operator are explicitly
 * deleted, so any attempt to copy a derived object is rejected at
 * compile time.  (The original declared them private with empty
 * bodies; a defined copy-assignment operator with no return statement
 * is undefined behaviour if ever invoked by a member or friend.
 * `= delete` states the intent directly, gives better diagnostics,
 * and is available since C++11 — which this file already uses via
 * in-class member initializers.)
 */
class Uncopyable
{
protected:
    // allow construction and destruction of derived class objects
    Uncopyable() {}
    ~Uncopyable() {}

public:
    // copying is forbidden for this class and everything derived from it
    Uncopyable(const Uncopyable&) = delete;
    Uncopyable& operator=(const Uncopyable&) = delete;
};
// Class that inherits the private part of the base class 'Uncopyable',
// which makes Secret itself non-copyable.
class Secret : private Uncopyable
{
private:
    std::string secret = "MY SECRET";

public:
    // Take the argument by const reference: the original by-value parameter
    // was copied once into the parameter and again into the member.
    Secret(const std::string& argSecret) : secret(argSecret) {}
    ~Secret() {}
};
int main(void)
{
    // Construct a Secret directly; copying it fails to compile because
    // Uncopyable's copy operations are inaccessible.
    Secret topSecret("My secret");
    // Secret another(topSecret); // ERROR!
    // another = topSecret;       // ERROR!
    return 0;
}
|
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "Profiling.hpp"
#include "JsonPrinter.hpp"
#if ARMNN_STREAMLINE_ENABLED
#include <streamline_annotate.h>
#endif
#if ARMCOMPUTECL_ENABLED
#include <arm_compute/runtime/CL/CLFunctions.h>
#endif
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <map>
#include <stack>
#include <boost/algorithm/string.hpp>
#include <boost/core/ignore_unused.hpp>
namespace armnn
{
// Controls the amount of memory initially allocated to store profiling events.
// If chosen carefully, the profiling system will not make any additional allocations, thus minimizing its impact on
// measured times.
constexpr std::size_t g_ProfilingEventCountHint = 1024;
// Whether profiling reports should include the sequence of events together with their timings.
constexpr bool g_WriteProfilingEventSequence = true;
// Whether profiling reports should also report detailed information on events grouped by inference.
// This can spam the output stream, so use carefully (or adapt the code to just output information
// of interest).
constexpr bool g_AggregateProfilingEventsByInference = true;
// Whether a call to Profiler::AnalyzeEventsAndWriteResults() will be made when the Profiler is destroyed.
// It can be convenient for local tests.
constexpr bool g_WriteReportToStdOutOnProfilerDestruction = false;
// Whether events denoting operations running on the GPU should force a sync before/after the event.
// This is hardcoded to true for now as the profiling timings are not very useful without it.
#if ARMCOMPUTECL_ENABLED
constexpr bool g_ProfilingForceGpuSync = true;
#endif
Measurement FindMeasurement(const std::string& name, const Event* event)
{
    BOOST_ASSERT(event != nullptr);

    // Look for a measurement with the requested name on this event.
    const auto& recorded = event->GetMeasurements();
    for (auto it = recorded.begin(); it != recorded.end(); ++it)
    {
        if (it->m_Name == name)
        {
            return *it;
        }
    }

    // Not found: return an empty, zero-valued measurement.
    return Measurement{ "", 0.f, Measurement::Unit::TIME_MS };
}
std::vector<Measurement> FindKernelMeasurements(const Event* event)
{
BOOST_ASSERT(event != nullptr);
std::vector<Measurement> measurements;
// Search through the measurements.
for (const auto& measurement : event->GetMeasurements())
{
if (measurement.m_Name.rfind("OpenClKernelTimer", 0) == 0
|| measurement.m_Name.rfind("NeonKernelTimer", 0) == 0)
{
// Measurement found.
measurements.push_back(measurement);
}
}
return measurements;
}
// Aggregates the wall-clock duration of every recorded event per event
// name, producing total/min/max duration and occurrence count.
std::map<std::string, Profiler::ProfilingEventStats> Profiler::CalculateProfilingEventStats() const
{
    std::map<std::string, ProfilingEventStats> nameToStatsMap;
    for (const auto& event : m_EventSequence)
    {
        Measurement measurement = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME, event.get());
        double durationMs = measurement.m_Value;
        auto it = nameToStatsMap.find(event->GetName());
        if (it != nameToStatsMap.end())
        {
            // Fold this event into the running stats for its name.
            ProfilingEventStats& stats = it->second;
            stats.m_TotalMs += durationMs;
            stats.m_MinMs = std::min(stats.m_MinMs, durationMs);
            stats.m_MaxMs = std::max(stats.m_MaxMs, durationMs);
            ++stats.m_Count;
        }
        else
        {
            // First occurrence: total, min and max all start at this duration.
            nameToStatsMap.emplace(event->GetName(), ProfilingEventStats{ durationMs, durationMs, durationMs, 1 });
        }
    }
    return nameToStatsMap;
}
// Overloads that obtain a raw Event pointer from either a raw pointer or a
// unique_ptr, so AnalyzeEventSequenceAndWriteResults() can iterate over both.
const Event* GetEventPtr(const Event* ptr) { return ptr;}
const Event* GetEventPtr(const std::unique_ptr<Event>& ptr) {return ptr.get(); }
// Writes a human-readable report for the event range [first, last):
// optionally the full event sequence with timings, followed by per-name
// aggregated statistics.  The iterators may point to Event* or
// unique_ptr<Event> (see GetEventPtr overloads).
template<typename ItertType>
void Profiler::AnalyzeEventSequenceAndWriteResults(ItertType first, ItertType last, std::ostream& outStream) const
{
    // Outputs event sequence, if needed.
    if (g_WriteProfilingEventSequence)
    {
        // Makes sure timestamps are output with 6 decimals, and save old settings.
        std::streamsize oldPrecision = outStream.precision();
        outStream.precision(6);
        std::ios_base::fmtflags oldFlags = outStream.flags();
        outStream.setf(std::ios::fixed);
        // Outputs fields.
        outStream << "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" << std::endl;
        for (auto event = first; event != last; ++event)
        {
            const Event* eventPtr = GetEventPtr((*event));
            double startTimeMs = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME_START, eventPtr).m_Value;
            double stopTimeMs = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME_STOP, eventPtr).m_Value;
            // Find the WallClock measurement if there is one.
            double durationMs = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME, eventPtr).m_Value;
            outStream << std::setw(50) << eventPtr->GetName() << " "
                      << std::setw(20) << durationMs
                      << std::setw(20) << startTimeMs
                      << std::setw(20) << stopTimeMs
                      << std::setw(20) << GetComputeDeviceAsCString(eventPtr->GetComputeDevice())
                      << std::endl;
        }
        outStream << std::endl;
        // Restores previous precision settings.
        outStream.flags(oldFlags);
        outStream.precision(oldPrecision);
    }
    // Aggregates results per event name.
    std::map<std::string, ProfilingEventStats> nameToStatsMap = CalculateProfilingEventStats();
    // Outputs aggregated stats.
    outStream << "Event Stats - Name | Avg (ms) | Min (ms) | Max (ms) | Total (ms) | Count" << std::endl;
    for (const auto& pair : nameToStatsMap)
    {
        const std::string& eventLabel = pair.first;
        const ProfilingEventStats& eventStats = pair.second;
        const double avgMs = eventStats.m_TotalMs / double(eventStats.m_Count);
        outStream << "\t" << std::setw(50) << eventLabel << " " << std::setw(9) << avgMs << " "
                  << std::setw(9) << eventStats.m_MinMs << " " << std::setw(9) << eventStats.m_MaxMs << " "
                  << std::setw(9) << eventStats.m_TotalMs << " " << std::setw(9) << eventStats.m_Count << std::endl;
    }
    outStream << std::endl;
}
// Profiling starts disabled; see EnableProfiling().
Profiler::Profiler()
    : m_ProfilingEnabled(false)
{
    // Pre-allocate so recording events does not reallocate (and thereby
    // perturb the measured times) in the common case.
    m_EventSequence.reserve(g_ProfilingEventCountHint);
#if ARMNN_STREAMLINE_ENABLED
    // Initialises streamline annotations.
    ANNOTATE_SETUP;
#endif
}
Profiler::~Profiler()
{
    // Optionally dump the collected report to stdout on destruction —
    // handy for local testing (see g_WriteReportToStdOutOnProfilerDestruction).
    if (m_ProfilingEnabled && g_WriteReportToStdOutOnProfilerDestruction)
    {
        Print(std::cout);
    }

    // Un-register this profiler from the current thread.
    ProfilerManager::GetInstance().RegisterProfiler(nullptr);
}
// Returns whether this profiler is currently collecting data.
bool Profiler::IsProfilingEnabled()
{
    return m_ProfilingEnabled;
}

// Turns profiling on or off for this profiler instance.
void Profiler::EnableProfiling(bool enableProfiling)
{
    m_ProfilingEnabled = enableProfiling;
}
// Starts a new timed event on the calling profiler.  Events nest: the event
// currently on top of m_Parents becomes the new event's parent, and the new
// event stays on the stack until the matching EndEvent().
Event* Profiler::BeginEvent(Compute compute, const std::string& label, std::vector<InstrumentPtr>&& instruments)
{
    // We need to sync just before the begin event to not include time before the period we want to time.
    WaitForDevice(compute);
    Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
    m_EventSequence.push_back(std::make_unique<Event>(label, this, parent, compute, std::move(instruments)));
    Event* event = m_EventSequence.back().get();
    event->Start();
#if ARMNN_STREAMLINE_ENABLED
    ANNOTATE_CHANNEL_COLOR(m_Parents.size(), GetEventColor(compute), label.c_str());
#endif
    // Subsequent BeginEvent() calls will see this event as their parent.
    m_Parents.push(event);
    return event;
}
// Stops a previously begun event.  Events must be ended in LIFO order:
// the event being ended has to be the one on top of the parent stack.
void Profiler::EndEvent(Event* event)
{
    event->Stop();
    BOOST_ASSERT(!m_Parents.empty());
    BOOST_ASSERT(event == m_Parents.top());
    m_Parents.pop();
    // Sanity check: the recorded parent must match the stack's new top.
    Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
    boost::ignore_unused(parent);
    BOOST_ASSERT(event->GetParentEvent() == parent);
#if ARMNN_STREAMLINE_ENABLED
    ANNOTATE_CHANNEL_END(m_Parents.size());
#endif
}
int CalcLevel(const Event* eventPtr)
{
int level=0;
while (eventPtr != nullptr)
{
eventPtr = eventPtr->GetParentEvent();
level++;
}
return level;
}
// Collects every recorded "EnqueueWorkload" event (one per inference) into
// outInferences, and records the nesting level of the first one found in
// outBaseLevel (left untouched if already set, i.e. not -1).
void Profiler::PopulateInferences(std::vector<const Event*>& outInferences, int& outBaseLevel) const
{
    outInferences.reserve(m_EventSequence.size());
    for (const auto& event : m_EventSequence)
    {
        const Event* candidate = event.get();
        if (candidate->GetName() != "EnqueueWorkload")
        {
            continue;
        }
        if (outBaseLevel == -1)
        {
            outBaseLevel = CalcLevel(candidate);
        }
        outInferences.push_back(candidate);
    }
}
// Builds a parent -> direct-children map for all recorded events.
// Parentless (top-level) events do not appear as keys.
void Profiler::PopulateDescendants(std::map<const Event*, std::vector<const Event*>>& outDescendantsMap) const
{
    for (const auto& event : m_EventSequence)
    {
        const Event* eventPtrRaw = event.get();
        const Event* parent = eventPtrRaw->GetParentEvent();
        if (!parent)
        {
            continue;
        }
        // operator[] default-constructs the child vector on first sight of
        // this parent; this replaces the original find/emplace/push_back
        // sequence (two map lookups) with a single lookup, same behaviour.
        outDescendantsMap[parent].push_back(eventPtrRaw);
    }
}
// Emits the collected profiling data as JSON on outStream, structured as
// inference_measurements -> layer_measurements -> per-workload objects ->
// per-kernel objects.  Workload/kernel child objects are created from the
// first inference only; later inferences append measurements to them.
void Profiler::Print(std::ostream& outStream) const
{
    // Makes sure timestamps are output with 6 decimals, and save old settings.
    std::streamsize oldPrecision = outStream.precision();
    outStream.precision(6);
    std::ios_base::fmtflags oldFlags = outStream.flags();
    outStream.setf(std::ios::fixed);
    JsonPrinter printer(outStream);
    // First find all the "inference" Events and print out duration measurements.
    int baseLevel = -1;
    std::vector<const Event*> inferences;
    PopulateInferences(inferences, baseLevel);
    // Second map out descendants hierarchy
    std::map<const Event*, std::vector<const Event*>> descendantsMap;
    PopulateDescendants(descendantsMap);
    JsonChildObject inferenceObject{"inference_measurements"};
    JsonChildObject layerObject{"layer_measurements"};
    std::vector<JsonChildObject> workloadObjects;
    // Kernel objects are grouped per workload index and attached as children
    // of the corresponding workload object after the main loop.
    std::map<unsigned int, std::vector<JsonChildObject>> workloadToKernelObjects;
    for (unsigned int inferenceIndex = 0; inferenceIndex < inferences.size(); ++inferenceIndex)
    {
        auto inference = inferences[inferenceIndex];
        Measurement measurement = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME, inference);
        inferenceObject.SetUnit(measurement.m_Unit);
        inferenceObject.AddMeasurement(measurement.m_Value);
        auto layerEventsIt = descendantsMap.find(inference);
        // Assuming 1 Execute per inference
        if (layerEventsIt != descendantsMap.end())
        {
            auto layerEvent = layerEventsIt->second[0];
            Measurement measurement = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME, layerEvent);
            layerObject.SetUnit(measurement.m_Unit);
            layerObject.AddMeasurement(measurement.m_Value);
            // Get Descendant Events for Execute
            auto workloadEventsIt = descendantsMap.find(layerEvent);
            // NOTE(review): workloadEventsIt is dereferenced below without a
            // check against descendantsMap.end(); a layer event with no
            // descendants would make this loop undefined behaviour — confirm.
            for(unsigned int workloadIndex = 0; workloadIndex < workloadEventsIt->second.size(); ++workloadIndex)
            {
                auto workloadEvent = workloadEventsIt->second[workloadIndex];
                Measurement measurement = FindMeasurement(WallClockTimer::WALL_CLOCK_TIME, workloadEvent);
                std::vector<Measurement> kernelMeasurements = FindKernelMeasurements(workloadEvent);
                if (inferenceIndex == 0)
                {
                    // Only add second level once, in case of multiple inferences
                    JsonChildObject workloadObject{workloadEvent->GetName()};
                    workloadObject.SetUnit(measurement.m_Unit);
                    workloadObjects.push_back(workloadObject);
                }
                workloadObjects[workloadIndex].AddMeasurement(measurement.m_Value);
                for(unsigned int kernelIndex = 0; kernelIndex < kernelMeasurements.size(); ++kernelIndex)
                {
                    if (inferenceIndex == 0)
                    {
                        // Only add kernel measurement once, in case of multiple inferences
                        JsonChildObject kernelObject{kernelMeasurements[kernelIndex].m_Name};
                        kernelObject.SetUnit(kernelMeasurements[kernelIndex].m_Unit);
                        workloadToKernelObjects[workloadIndex].push_back(kernelObject);
                    }
                    workloadToKernelObjects[workloadIndex][kernelIndex].
                        AddMeasurement(kernelMeasurements[kernelIndex].m_Value);
                }
            }
        }
    }
    // Attach kernel objects to their workloads, then workloads to the layer.
    for (auto workloadToKernelPair : workloadToKernelObjects)
    {
        for (auto kernelObject : workloadToKernelPair.second)
        {
            workloadObjects[workloadToKernelPair.first].AddChild(kernelObject);
        }
    }
    for (auto workloadObject : workloadObjects)
    {
        layerObject.AddChild(workloadObject);
    }
    inferenceObject.AddChild(layerObject);
    printer.PrintHeader();
    printer.PrintArmNNHeader();
    // print inference object, also prints child layer and kernel measurements
    printer.PrintJsonChildObject(inferenceObject);
    // end of ArmNN
    printer.PrintNewLine();
    printer.PrintFooter();
    // end of main JSON object
    printer.PrintNewLine();
    printer.PrintFooter();
    printer.PrintNewLine();
    // Restores previous precision settings.
    outStream.flags(oldFlags);
    outStream.precision(oldPrecision);
}
// Writes the full-sequence report and, if enabled, a per-inference report
// to outStream.  Bails out (with an explanatory message) if BeginEvent /
// EndEvent calls were unbalanced, since the stats would then be untrustworthy.
void Profiler::AnalyzeEventsAndWriteResults(std::ostream& outStream) const
{
    // Stack should be empty now.
    const bool saneMarkerSequence = m_Parents.empty();
    // Abort if the sequence of markers was found to have incorrect information:
    // The stats cannot be trusted.
    if (!saneMarkerSequence)
    {
        outStream << "Cannot write profiling stats. "
            "Unexpected errors were found when analyzing the sequence of logged events, which may lead to plainly "
            "wrong stats. The profiling system may contain implementation issues or could have been used in an "
            "unsafe manner." << std::endl;
        return;
    }
    // Analyzes the full sequence of events.
    AnalyzeEventSequenceAndWriteResults(m_EventSequence.cbegin(),
                                        m_EventSequence.cend(),
                                        outStream);
    // Aggregates events by tag if requested (spams the output stream if done for all tags).
    if (g_AggregateProfilingEventsByInference)
    {
        outStream << std::endl;
        outStream << "***" << std::endl;
        outStream << "*** Per Inference Stats" << std::endl;
        outStream << "***" << std::endl;
        outStream << std::endl;
        int baseLevel = -1;
        std::vector<const Event*> inferences;
        PopulateInferences(inferences, baseLevel);
        // Second map out descendants hierarchy
        std::map<const Event*, std::vector<const Event*>> descendantsMap;
        PopulateDescendants(descendantsMap);
        // Recursive lambda that collects an event and its descendants (down
        // to workload-execution depth, i.e. baseLevel+2) into `sequence`.
        std::function<void (const Event*, std::vector<const Event*>&)>
            FindDescendantEvents = [&](const Event* eventPtr,
                                       std::vector<const Event*>& sequence)
            {
                sequence.push_back(eventPtr);
                if (CalcLevel(eventPtr) > baseLevel+2) //We only care about levels as deep as workload executions.
                {
                    return;
                }
                auto children = descendantsMap.find(eventPtr);
                if (children == descendantsMap.end())
                {
                    return;
                }
                // NOTE(review): the `return` below exits after recursing into
                // the FIRST child only, so siblings past the first are never
                // visited — confirm whether this single-chain traversal is
                // intentional or whether all children should be followed.
                for (const Event* child : children->second)
                {
                    return FindDescendantEvents(child, sequence);
                }
            };
        // Third, find events belonging to each inference
        int inferenceIdx = 0;
        for (auto inference : inferences)
        {
            std::vector<const Event*> sequence;
            //build sequence, depth first
            FindDescendantEvents(inference, sequence);
            outStream << "> Begin Inference: " << inferenceIdx << std::endl;
            outStream << std::endl;
            AnalyzeEventSequenceAndWriteResults(sequence.cbegin(),
                                                sequence.cend(),
                                                outStream);
            outStream << std::endl;
            outStream << "> End Inference: " << inferenceIdx << std::endl;
            inferenceIdx++;
        }
    }
}
// Forces a GPU sync (drains the OpenCL queue) before/after timed GPU events
// so measurements do not include previously queued work.  No-op for CPU
// devices or when built without ARMCOMPUTECL.
void Profiler::WaitForDevice(Compute compute) const
{
#if ARMCOMPUTECL_ENABLED
    if(compute == Compute::GpuAcc && g_ProfilingForceGpuSync)
    {
        arm_compute::CLScheduler::get().sync();
    }
#endif
}
// Maps a compute device to the Streamline annotation colour used for its
// events.
std::uint32_t Profiler::GetEventColor(Compute compute) const
{
    if (compute == Compute::CpuRef)
    {
        // Cyan
        return 0xffff001b;
    }
    if (compute == Compute::CpuAcc)
    {
        // Green
        return 0x00ff001b;
    }
    if (compute == Compute::GpuAcc)
    {
        // Purple
        return 0xff007f1b;
    }
    // Dark gray
    return 0x5555551b;
}
// The thread_local pointer to the profiler instance: each thread has at
// most one registered profiler.
thread_local Profiler* tl_Profiler = nullptr;

// Meyers-singleton accessor for the process-wide manager.
ProfilerManager& ProfilerManager::GetInstance()
{
    // Global reference to the single ProfileManager instance allowed.
    static ProfilerManager s_ProfilerManager;
    return s_ProfilerManager;
}

// Associates the given profiler (may be nullptr to un-register) with the
// calling thread.
void ProfilerManager::RegisterProfiler(Profiler* profiler)
{
    tl_Profiler = profiler;
}

// Returns the profiler registered for the calling thread, or nullptr.
Profiler* ProfilerManager::GetProfiler()
{
    return tl_Profiler;
}
} // namespace armnn
|
/**
* To compile this file use
* g++ PacketSniffer.cpp -lpcap
* Reference: http://www.tcpdump.org/pcap.htm
*/
#include <pcap.h>
#include <iostream>
#include <algorithm>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <vector>
#include <fstream>
#include <sstream>
#include <string>
#include <cstring>
/* Ethernet protocol ID's */
#define ETHERTYPE_IP 0x0800 /* IP */
#define ETHERTYPE_ARP 0x0806 /* ARP */
#define ETHERTYPE_RARP 0x8035 /* RARP */
/* default snap length (maximum bytes per packet to capture) */
#define SNAP_LEN 1518
/* ethernet headers are always exactly 14 bytes [1] */
#define SIZE_ETHERNET 14
/* Ethernet addresses are 6 bytes */
#define ETHER_ADDR_LEN 6
/* Ethernet header */
/* Ethernet header: 14 bytes on the wire (dst MAC, src MAC, EtherType);
 * ether_type is in network byte order. */
struct sniff_ethernet {
    u_char ether_dhost[ETHER_ADDR_LEN]; /* destination host address */
    u_char ether_shost[ETHER_ADDR_LEN]; /* source host address */
    u_int16_t ether_type; /* IP? ARP? RARP? etc */
};
/* IP header */
/* IPv4 header; multi-byte fields are in network byte order on the wire.
 * Use the IP_HL/IP_V macros below to unpack ip_vhl. */
struct sniff_ip {
    u_char ip_vhl; /* version << 4 | header length >> 2 */
    u_char ip_tos; /* type of service */
    u_short ip_len; /* total length */
    u_short ip_id; /* identification */
    u_short ip_off; /* fragment offset field */
#define IP_RF 0x8000 /* reserved fragment flag */
#define IP_DF 0x4000 /* dont fragment flag */
#define IP_MF 0x2000 /* more fragments flag */
#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
    u_char ip_ttl; /* time to live */
    u_char ip_p; /* protocol */
    u_short ip_sum; /* checksum */
    struct in_addr ip_src,ip_dst; /* source and dest address */
};
#define IP_HL(ip) (((ip)->ip_vhl) & 0x0f)
#define IP_V(ip) (((ip)->ip_vhl) >> 4)
/* ARP header (RFC 826).  Fix: the original declared haLength/paLength as
 * u_short and the opcode as char16_t; on the wire the hardware/protocol
 * address lengths are single bytes and the opcode is a 16-bit integer
 * (network byte order), so the struct did not match the packet layout. */
struct sniff_arp {
    u_int16_t hType;                      /* hardware type (1 = Ethernet) */
    u_int16_t pType;                      /* protocol type (0x0800 = IPv4) */
    u_char haLength;                      /* hardware address length (6) */
    u_char paLength;                      /* protocol address length (4) */
    u_int16_t opcode;                     /* 1 = request, 2 = reply */
    u_char senderHA_addr[ETHER_ADDR_LEN]; /* sender hardware address */
    struct in_addr sender_proto;          /* sender protocol (IP) address */
    u_char targetHA_addr[ETHER_ADDR_LEN]; /* target hardware address */
    struct in_addr target_proto;          /* target protocol (IP) address */
};
/* TCP header */
typedef u_int tcp_seq;
struct sniff_tcp {
u_short th_sport; /* source port */
u_short th_dport; /* destination port */
tcp_seq th_seq; /* sequence number */
tcp_seq th_ack; /* acknowledgement number */
u_char th_offx2; /* data offset, rsvd */
#define TH_OFF(th) (((th)->th_offx2 & 0xf0) >> 4)
u_char th_flags;
#define TH_FIN 0x01
#define TH_SYN 0x02
#define TH_RST 0x04
#define TH_PUSH 0x08
#define TH_ACK 0x10
#define TH_URG 0x20
#define TH_ECE 0x40
#define TH_CWR 0x80
#define TH_FLAGS (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG|TH_ECE|TH_CWR)
u_short th_win; /* window */
u_short th_sum; /* checksum */
u_short th_urp; /* urgent pointer */
};
using namespace std;
//const int BUFSIZ=65535;

// IP address lists consulted for every captured packet; presumably filled
// from files by readWhiteBlackLists() — its definition is not in view.
vector<string> whiteList,blackList;

// pcap callback invoked once per captured packet (see pcap_loop in main).
void displayPacketContents(u_char *args, const struct pcap_pkthdr *header,const u_char *packet);
void readWhiteBlackLists();
void displayEntirePacketInHexFormat(const u_char *packet);
void findNWAddr(string IP);
// Entry point: opens the default capture device, installs a "tcp" BPF
// filter, and dispatches up to 300 packets to displayPacketContents().
// Returns -1 on any setup failure.  (Removed the unused local
// `const u_char *packet` and the dead commented-out pcap_next code.)
int main(int argc, char** argv){
    // Load the IP white/black lists before any packets are processed.
    readWhiteBlackLists();

    char errbuf[PCAP_ERRBUF_SIZE];
    pcap_t *handle;
    struct bpf_program fp;
    bpf_u_int32 mask,net;
    char filter_exp[]="tcp"; // BPF expression: capture TCP traffic only

    char* dev=pcap_lookupdev(errbuf);
    if(dev == NULL){
        cout<<"Default Device not found"<<endl;
        return -1;
    }
    cout<<"Default Device: "<<dev<<endl;

    // Network number and mask are needed by pcap_compile; fall back to 0.
    if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
        cout<<"Couldn't get netmask for device"<<endl;
        net = 0;
        mask = 0;
    }

    // Promiscuous mode, SNAP_LEN byte snapshot, 1000 ms read timeout.
    handle = pcap_open_live(dev,SNAP_LEN,1,1000,errbuf);
    if(handle == NULL){
        cout<<"Couldn't open device "<<errbuf<<endl;
        return -1;
    }

    // The header parsing in displayPacketContents assumes Ethernet framing.
    if (pcap_datalink(handle) != DLT_EN10MB) {
        cout<<"Device doesn't provide Ethernet headers - not supported"<<endl;
        return -1;
    }

    if(pcap_compile(handle,&fp,filter_exp,0,net) == -1){
        cout<<"Couldn't parse filter "<<endl;
        return -1;
    }
    if (pcap_setfilter(handle, &fp) == -1) {
        cout<<"Couldn't apply filter "<<endl;
        return -1;
    }

    // Capture up to 300 packets, invoking the callback for each one.
    pcap_loop(handle,300,displayPacketContents,NULL);

    pcap_freecode(&fp);
    pcap_close(handle);
    return 0;
}
// pcap_loop() callback (pcap_handler signature): decodes one captured
// Ethernet frame and pretty-prints it to stdout.  IPv4 packets are first
// screened against the global black/white lists (blacklist wins); ARP
// frames are decoded separately.  `args` (the pcap user pointer) is unused.
void displayPacketContents(u_char *args, const struct pcap_pkthdr *header,const u_char *packet){
    static int pcktCount=0;   // persists across callbacks: running packet number
    pcktCount++;
    cout<<endl<<endl<<"-----------------------Packet "<<pcktCount<<"----------------------------"<<endl;
    cout<<" Packet Header Length: "<<header->len<<endl<<endl;
    const u_char *ch;
    struct sniff_ethernet *ethernet;
    struct sniff_ip *ip;
    struct sniff_tcp *tcp;    // referenced only by the commented-out TCP dump below
    u_char *payload;          // referenced only by the commented-out TCP dump below
    u_int size_ip;
    u_int size_tcp;
    ethernet = (struct sniff_ethernet*)(packet);
    cout<<" At Data Link Layer"<<endl;
    cout<<" Source MAC Address: ";
    ch = ethernet->ether_shost;
    for(int i = 0; i < 6; i++){ printf("%02x ", *ch);ch++;}
    cout<<endl;
    cout<<" Destination MAC Address: ";
    ch = ethernet->ether_dhost;
    for(int i = 0; i < 6; i++){ printf("%02x ", *ch);ch++;}
    cout<<endl;
    cout<<" Type: ("<<ethernet->ether_type<<")"<<endl;
    // ether_type is read without ntohs(), so the constants below are the
    // byte-swapped (little-endian host) values: 8 == htons(0x0800), IPv4.
    if(ethernet->ether_type == 8){
        ip = (struct sniff_ip*)(packet + SIZE_ETHERNET);
        auto srcIPWLCheck = find(whiteList.begin(),whiteList.end(),inet_ntoa(ip->ip_src));
        auto srcIPBLCheck = find(blackList.begin(),blackList.end(),inet_ntoa(ip->ip_src));
        auto destIPWLCheck = find(whiteList.begin(),whiteList.end(),inet_ntoa(ip->ip_dst));
        auto destIPBLCheck = find(blackList.begin),blackList.end(),inet_ntoa(ip->ip_dst));
        if(srcIPBLCheck != blackList.end() || destIPBLCheck != blackList.end()){
            cout<<endl<<"Packet is rejected since source/destination IP Address is in Blacklist"<<endl<<endl;
            // BUG FIX: the original announced rejection but then fell through
            // and printed the IP header anyway; rejected packets now stop here.
            return;
        }
        else if(srcIPWLCheck != whiteList.end() || destIPWLCheck != whiteList.end()){
            cout<<endl<<"Packet is accepted since source/destination IP Address is in Whitelist"<<endl<<endl;
        }
        else{
            cout<<endl<<"Packet is dropped since source/destination IP Address is neither in White or BlackList"<<endl<<endl;
            return;
        }
        cout<<" IP Packet Header Contents Found:"<<endl;
        // BUG FIX: the version lives in the high nibble of ip_vhl; the
        // original printed (ip_vhl & 0xf0) -- e.g. 64 instead of 4 -- by
        // omitting the >> 4 shift (compare the TH_OFF macro above).
        cout<<" IP Version: "<<((ip->ip_vhl & 0xf0) >> 4)<<endl;
        cout<<" Header Length: "<<(ip->ip_vhl & 0x0f)*4<<endl;
        cout<<" Type Of Service: ";
        // BUG FIX: the original used "%02x%02x" with ip_tos and ip_tos+1,
        // printing the TOS byte followed by TOS-plus-one.
        printf("%02x",ip->ip_tos);
        cout<<endl;
        cout<<" Identification: "<<ip->ip_id<<endl;
        cout<<" Offset: "<<ip->ip_off<<endl;
        cout<<" TTL: ";
        printf("%02x ", ip->ip_ttl);
        cout<<endl;
        cout<<" Protocol: ";
        printf("%02x ", ip->ip_p);
        cout<<endl;
        cout<<" IP Source Address: "<<inet_ntoa(ip->ip_src)<<endl;
        findNWAddr(inet_ntoa(ip->ip_src));
        cout<<" IP Destination Address: "<<inet_ntoa(ip->ip_dst)<<endl;
        findNWAddr(inet_ntoa(ip->ip_dst));
        /* TCP decoding intentionally left disabled, as in the original.
        NOTE(review): if re-enabled, the payload loop must not use
        sizeof(payload) (pointer size); derive the length from
        header->caplen - SIZE_ETHERNET - size_ip - size_tcp instead.
        if(ip->ip_p==0x06){
            size_ip = IP_HL(ip)*4;
            tcp = (struct sniff_tcp*)(packet + SIZE_ETHERNET + size_ip);
            cout<<" TCP Packet Header Contents:"<<endl;
            cout<<" Source Port:"<<tcp->th_sport<<endl;
            cout<<" Destination Port:"<<tcp->th_dport<<endl;
            cout<<" Sequence Number:"<<tcp->th_seq<<endl;
            cout<<" Acknowledgement Number:"<<tcp->th_ack<<endl;
            cout<<" Data Offset:";
            printf("%02x",tcp->th_offx2);
            cout<<endl;
            cout<<" Window Size:"<<tcp->th_win<<endl;
            size_tcp = TH_OFF(tcp)*4;
            payload = (u_char*)(packet + SIZE_ETHERNET + size_ip + size_tcp);
        }*/
    }
    else if(ethernet->ether_type == 1544){   // 1544 == htons(0x0806), ARP
        cout<<" ARP Packet Header Contents: "<<endl;
        // NOTE(review): the -2 offset presumably compensates for padding in
        // sniff_arp -- confirm against the struct's declaration/packing.
        struct sniff_arp *arp=(struct sniff_arp *)(packet + SIZE_ETHERNET-2);
        ch = (packet + SIZE_ETHERNET-2);
        // BUG FIX: sizeof(arp) is the size of the *pointer* (4/8 bytes), not
        // of the ARP header; dump the full structure instead.
        for(size_t i = 0; i < sizeof(*arp); i++){ printf("%02x ", *ch);ch++;}
        cout<<endl;
        printf(" Hardware Type: %04x\n",arp->hType);
        printf(" Protocol Type: %04x\n",arp->pType);
        // BUG FIX: streaming the one-byte length fields printed them as raw
        // characters (e.g. an unprintable control char for 6); cast to int.
        cout<< " Hardware Address Length: "<<(int)arp->haLength<<endl;
        cout<< " Protocol Address Length: "<<(int)arp->paLength<<endl;
        printf(" Opcode: %04x\n",arp->opcode);
        cout<< " Sender Hardware Address: ";
        ch = arp->senderHA_addr;
        for(int i = 0; i < 6; i++){ printf("%02x ", *ch);ch++;}
        cout<<endl;
        cout<< " Sender Protocol Address: "<<inet_ntoa(arp->sender_proto)<<endl;
        cout<< " Target Hardware Address: ";
        ch = arp->targetHA_addr;
        for(int i = 0; i < 6; i++){ printf("%02x ", *ch);ch++;}
        cout<<endl;
        cout<< " Target Protocol Address: "<<inet_ntoa(arp->target_proto)<<endl;
    }
}
void displayEntirePacketInHexFormat(const u_char *packet){
const u_char *ch;
ch = packet;
for(int i = 0; i < sizeof(packet); i++){
printf("%02x ", *ch);
ch++;
if(i%8==0){
cout<<endl;
}
}
}
void readWhiteBlackLists(){
ifstream fileInW,fileInB;
fileInW.open("whiteList.txt");
fileInB.open("blackList.txt");
string line;
while(getline(fileInW,line)){
whiteList.push_back(line);
}
while(getline(fileInB,line)){
blackList.push_back(line);
}
}
// Computes and prints the network address for a dotted-quad IPv4 string:
// addresses beginning with "192" are masked with 255.255.255.0 (/24) and
// those beginning with "172" with 255.255.254.0 (/23); any other address
// is silently ignored.  Output format matches the original (including the
// extra leading space in the network address).
// BUG FIX: the original tested `if (strncmp(...))`, which is TRUE when the
// prefix does NOT match (strncmp returns 0 on equality).  That inverted
// both branches: 192.* received the /23 mask and every non-192/172 address
// the /24 mask.  Both comparisons now test `== 0`.  The octet parsing also
// passed an end *index* as substr's *length* argument; it only worked
// because stoi stops at '.', and is now computed correctly.
void findNWAddr(std::string IP){
    std::string subnet;
    if(strncmp(IP.c_str(), "192", 3) == 0){
        subnet = "255.255.255.0";
    }
    else if(strncmp(IP.c_str(), "172", 3) == 0){
        subnet = "255.255.254.0";
    }
    else{
        return;   // unrecognised range: nothing to print
    }
    // Append a trailing '.' so every octet is terminated uniformly.
    IP += ".";
    subnet += ".";
    size_t prevSN = 0, prevAddr = 0;
    std::string netAddr = " ";   // original output begins with a leading space
    for(int i = 0; i < 4; i++){
        size_t snEnd = subnet.find('.', prevSN);
        int snMask = std::stoi(subnet.substr(prevSN, snEnd - prevSN));
        prevSN = snEnd + 1;
        size_t addrEnd = IP.find('.', prevAddr);
        int octet = std::stoi(IP.substr(prevAddr, addrEnd - prevAddr));
        prevAddr = addrEnd + 1;
        netAddr += std::to_string(octet & snMask);
        if(i != 3) netAddr += ".";
    }
    std::cout<<" Network Address: "<<netAddr<<std::endl;
}
|
// Copyright (c) 2001, Daniel C. Nuffer
// Copyright (c) 2001-2009 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(BOOST_SPIRIT_ITERATOR_BUF_ID_CHECK_POLICY_MAR_16_2007_1108AM)
#define BOOST_SPIRIT_ITERATOR_BUF_ID_CHECK_POLICY_MAR_16_2007_1108AM
#include <boost/spirit/home/support/iterators/multi_pass_fwd.hpp>
#include <boost/spirit/home/support/iterators/detail/multi_pass.hpp>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
#include <exception> // for std::exception
namespace boost { namespace spirit { namespace iterator_policies
{
///////////////////////////////////////////////////////////////////////////
// class illegal_backtracking
// Thrown by the buf_id_check CheckingPolicy when a multi_pass iterator is
// dereferenced after another copy has invalidated the shared queue.
///////////////////////////////////////////////////////////////////////////
class illegal_backtracking : public std::exception
{
public:
    illegal_backtracking() throw() {}
    ~illegal_backtracking() throw() {}

    // Fixed diagnostic string identifying the failure; never allocates.
    virtual char const* what() const throw()
    {
        return "boost::spirit::multi_pass::illegal_backtracking";
    }
};
///////////////////////////////////////////////////////////////////////////////
// class buf_id_check
// Implementation of the CheckingPolicy used by multi_pass
// This policy is most effective when used together with the std_deque
// StoragePolicy.
//
// If used with the fixed_size_queue StoragePolicy, it will not detect
// iterator dereferences that are out of the range of the queue.
///////////////////////////////////////////////////////////////////////////////
struct buf_id_check
{
    ///////////////////////////////////////////////////////////////////////
    // Per-iterator state: remembers which "generation" of the shared
    // buffer this iterator copy was created (or last cleared) against.
    struct unique //: detail::default_checking_policy
    {
        unique() : buf_id(0) {}
        unique(unique const& x) : buf_id(x.buf_id) {}

        void swap(unique& x)
        {
            spirit::detail::swap(buf_id, x.buf_id);
        }

        // called to verify that everything is ok.
        // Throws illegal_backtracking when this iterator's generation no
        // longer matches the shared one, i.e. another copy has cleared the
        // queue since this iterator was positioned.
        template <typename MultiPass>
        static void check(MultiPass const& mp)
        {
            if (mp.buf_id != mp.shared()->shared_buf_id)
                boost::throw_exception(illegal_backtracking());
        }

        // called from multi_pass::clear_queue, so we can increment the count
        // Bumping both counters keeps the clearing iterator itself valid
        // while invalidating every other outstanding copy.
        template <typename MultiPass>
        static void clear_queue(MultiPass& mp)
        {
            ++mp.shared()->shared_buf_id;
            ++mp.buf_id;
        }

        // No owned resources: nothing to release when the iterator dies.
        template <typename MultiPass>
        static void destroy(MultiPass&) {}

    protected:
        unsigned long buf_id;   // generation this copy is synchronized with
    };

    ///////////////////////////////////////////////////////////////////////
    // State shared by all copies of a multi_pass: the current buffer
    // generation, incremented on every clear_queue().
    struct shared
    {
        shared() : shared_buf_id(0) {}
        unsigned long shared_buf_id;
    };
};
}}}
#endif
|
/* Copyright 2017 R. Thomas
* Copyright 2017 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_MACHO_HEADER_H_
#define LIEF_MACHO_HEADER_H_
#include <iostream>
#include <set>
#include "LIEF/Visitable.hpp"
#include "LIEF/visibility.h"
#include "LIEF/Abstract/enums.hpp"
#include "LIEF/MachO/Structures.hpp"
namespace LIEF {
namespace MachO {
//! @brief Mach-O file header abstraction: wraps the raw 32/64-bit
//! mach_header structures and exposes typed getters/setters.
class DLL_PUBLIC Header : public Visitable {
  public:
  Header(void);
  //! @brief Build from a raw 64-bit Mach-O header
  Header(const mach_header_64 *header);
  //! @brief Build from a raw 32-bit Mach-O header
  Header(const mach_header *header);
  Header& operator=(const Header& copy);
  Header(const Header& copy);
  virtual ~Header(void);
  //! @brief Magic value identifying the Mach-O flavour
  MACHO_TYPES magic(void) const;
  //! @brief Target CPU family
  CPU_TYPES cpu_type(void) const;
  //! @brief CPU-specific subtype value
  uint32_t cpu_subtype(void) const;
  //! @brief Kind of Mach-O file
  FILE_TYPES file_type(void) const;
  //! @brief flags() decomposed into a set of individual flag values
  std::set<HEADER_FLAGS> flags_list(void) const;
  //! @brief Whether the given flag is present in flags()
  bool has(HEADER_FLAGS flag) const;
  //! @brief Number of load commands
  uint32_t nb_cmds(void) const;
  //! @brief Size of the load commands
  uint32_t sizeof_cmds(void) const;
  //! @brief Raw flags bit field
  uint32_t flags(void) const;
  //! @brief Reserved header field
  uint32_t reserved(void) const;
  //! @brief Add the given flag
  void add(HEADER_FLAGS flag);
  //! @brief LIEF abstract object type
  OBJECT_TYPES abstract_object_type(void) const;
  //! @brief Architecture and modes in LIEF's abstract representation
  std::pair<ARCHITECTURES, std::set<MODES>> abstract_architecture(void) const;
  //! @brief LIEF abstract endiannes
  ENDIANNESS abstract_endianness(void) const;
  // Setters mirroring the getters above.
  void magic(MACHO_TYPES magic);
  void cpu_type(CPU_TYPES cputype);
  void cpu_subtype(uint32_t cpusubtype);
  void file_type(FILE_TYPES filetype);
  void nb_cmds(uint32_t ncmds);
  void sizeof_cmds(uint32_t sizeofcmds);
  void flags(uint32_t flags);
  //! @brief Remove the given flag
  void remove(HEADER_FLAGS flag);
  void reserved(uint32_t reserved);
  //! @brief Flag add/remove operators (counterparts of add()/remove())
  Header& operator+=(HEADER_FLAGS c);
  Header& operator-=(HEADER_FLAGS c);
  bool operator==(const Header& rhs) const;
  bool operator!=(const Header& rhs) const;
  virtual void accept(Visitor& visitor) const override;
  DLL_PUBLIC friend std::ostream& operator<<(std::ostream& os, const Header& hdr);
  private:
  // Backing storage for the raw header fields (names mirror mach_header).
  MACHO_TYPES magic_;
  CPU_TYPES cputype_;
  uint32_t cpusubtype_;
  FILE_TYPES filetype_;
  uint32_t ncmds_;
  uint32_t sizeofcmds_;
  uint32_t flags_;
  uint32_t reserved_;
};
}
}
#endif
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/batchnorm_expander.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace xla {
namespace {
using absl::optional;
// BatchNormExpanderVisitor traverses the HLO computation and rewrites BatchNorm
// operations into smaller operations.
class BatchNormExpanderVisitor : public DfsHloVisitorWithDefault {
 public:
  // Default visitor action is to do nothing and return OK.
  Status DefaultAction(HloInstruction* /*hlo_instruction*/) override {
    return Status::OK();
  }

  Status HandleBatchNormTraining(HloInstruction* batch_norm) override;

  Status HandleBatchNormInference(HloInstruction* batch_norm) override;

  Status HandleBatchNormGrad(HloInstruction* batch_norm) override;

  // Runs the visitor on a computation.
  static bool Run(HloComputation* computation, bool rewrite_training_op,
                  bool rewrite_inference_op, bool rewrite_grad_op);

  // Returns whether any batch norm ops were rewritten.
  // FIX: dropped the meaningless top-level `const` from the original
  // `const bool` by-value return type (it has no effect for callers).
  bool changed() const { return changed_; }

  ~BatchNormExpanderVisitor() override = default;

 private:
  explicit BatchNormExpanderVisitor(HloComputation* computation,
                                    bool rewrite_training_op,
                                    bool rewrite_inference_op,
                                    bool rewrite_grad_op)
      : computation_(computation),
        rewrite_training_op_(rewrite_training_op),
        rewrite_inference_op_(rewrite_inference_op),
        rewrite_grad_op_(rewrite_grad_op) {}

  // Builds (and embeds in the module) a scalar x+y computation of the given
  // element type, used as the reduction function for the sums below.
  HloComputation* GetOrCreateScalarAddComputation(
      PrimitiveType primitive_type) {
    HloComputation::Builder b("scalar_add_computation");
    Shape shape = ShapeUtil::MakeShape(primitive_type, {});
    auto scalar_lhs = b.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "scalar_lhs"));
    auto scalar_rhs = b.AddInstruction(
        HloInstruction::CreateParameter(1, shape, "scalar_rhs"));
    auto scalar_op = b.AddInstruction(HloInstruction::CreateBinary(
        shape, HloOpcode::kAdd, scalar_lhs, scalar_rhs));
    return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
  }

  // Emits operand^(-0.5), i.e. 1/sqrt(operand), elementwise.  Intermediate
  // instructions are materialized through `add_instruction` so the caller
  // can track them; the final kPower is returned unmaterialized.
  std::unique_ptr<HloInstruction> Rsqrt(
      HloInstruction* operand,
      const std::function<HloInstruction*(std::unique_ptr<HloInstruction>)>&
          add_instruction) {
    HloInstruction* exponent = add_instruction(HloInstruction::CreateBroadcast(
        operand->shape(),
        add_instruction(HloInstruction::CreateConvert(
            ShapeUtil::MakeShape(operand->shape().element_type(), {}),
            add_instruction(HloInstruction::CreateConstant(
                LiteralUtil::CreateR0<float>(-0.5f))))),
        {}));
    return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kPower,
                                        operand, exponent);
  }

  // Emits operand * (1/element_count): turns a per-feature sum into a mean.
  std::unique_ptr<HloInstruction> Mean(
      int64 element_count, HloInstruction* operand,
      const std::function<HloInstruction*(std::unique_ptr<HloInstruction>)>&
          add_instruction) {
    HloInstruction* elem_count_recip =
        add_instruction(HloInstruction::CreateBroadcast(
            operand->shape(),
            add_instruction(HloInstruction::CreateConvert(
                ShapeUtil::MakeShape(operand->shape().element_type(), {}),
                add_instruction(HloInstruction::CreateConstant(
                    LiteralUtil::CreateR0<float>(1.0 / element_count))))),
            {}));
    return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kMultiply,
                                        operand, elem_count_recip);
  }

  // Replaces the existing HLO instruction old_instruction, with
  // new_instruction, and marks the optimizer status as changed.
  // Returns the Status representing the result of the replace operation.
  Status ReplaceWithNewInstruction(
      HloInstruction* old_instruction,
      std::unique_ptr<HloInstruction> new_instruction) {
    TF_RETURN_IF_ERROR(computation_->ReplaceWithNewInstruction(
        old_instruction, std::move(new_instruction)));
    changed_ = true;
    return Status::OK();
  }

  // Replaces the existing HLO instruction old_instruction, with
  // new_instruction, and marks the optimizer status as changed.
  // Returns the Status representing the result of the replace operation.
  Status ReplaceInstruction(HloInstruction* old_instruction,
                            HloInstruction* new_instruction) {
    TF_RETURN_IF_ERROR(
        computation_->ReplaceInstruction(old_instruction, new_instruction));
    changed_ = true;
    return Status::OK();
  }

  // Current HloComputation instance the BatchNormExpander is
  // traversing.
  HloComputation* computation_;

  bool rewrite_training_op_;
  bool rewrite_inference_op_;
  bool rewrite_grad_op_;

  // Whether rewrite has occurred.
  bool changed_ = false;
};
} // namespace
// Constructs a visitor with the requested rewrite switches, walks the
// computation with it, and reports whether anything was rewritten.
bool BatchNormExpanderVisitor::Run(HloComputation* computation,
                                   bool rewrite_training_op,
                                   bool rewrite_inference_op,
                                   bool rewrite_grad_op) {
  BatchNormExpanderVisitor batch_norm_visitor(
      computation,
      /*rewrite_training_op=*/rewrite_training_op,
      /*rewrite_inference_op=*/rewrite_inference_op,
      /*rewrite_grad_op=*/rewrite_grad_op);
  TF_CHECK_OK(computation->Accept(&batch_norm_visitor));
  return batch_norm_visitor.changed_;
}
// Rewrites a kBatchNormTraining instruction into a tuple of primitive HLO
// ops computing (normalized activations, per-feature mean, per-feature
// variance).  No-op unless rewrite_training_op_ is set.
Status BatchNormExpanderVisitor::HandleBatchNormTraining(
    HloInstruction* batch_norm) {
  if (!rewrite_training_op_) {
    return Status::OK();
  }
  // Every op added via `add` is recorded so sharding can be propagated to
  // it afterwards; metadata is copied from the original batch_norm.
  std::vector<HloInstruction*> added_instructions;
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
    added_inst->set_metadata(batch_norm->metadata());
    added_instructions.push_back(added_inst);
    return added_inst;
  };
  auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
                        HloInstruction* a, HloInstruction* b) {
    return add(HloInstruction::CreateBinary(shape, opcode, a, b));
  };
  int64 instruction_count_before = computation_->instruction_count();

  // Expand batch norm training into smaller HLO ops.
  HloInstruction* operand = batch_norm->mutable_operand(0);
  const Shape operand_shape = operand->shape();
  PrimitiveType ptype = operand_shape.element_type();
  int64 feature_index = batch_norm->feature_index();
  const int64 feature_count = operand_shape.dimensions(feature_index);
  const int64 size_in_elements = ShapeUtil::ElementsIn(operand_shape);
  // N in the mean/variance formulas: number of elements per feature channel.
  int64 elements_per_feature_int64 = size_in_elements / feature_count;

  HloInstruction* scale = batch_norm->mutable_operand(1);
  HloInstruction* offset = batch_norm->mutable_operand(2);
  const Shape feature_shape = scale->shape();

  // Constants are built as f32 then converted to the operand element type.
  auto zero_literal = LiteralUtil::CreateR0(0.0f);
  TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
  auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));

  auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
  TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
  auto epsilon = add(HloInstruction::CreateBroadcast(
      operand_shape,
      add(HloInstruction::CreateConstant(std::move(epsilon_literal))), {}));

  // Reduction dimensions: every dimension except the feature dimension.
  std::vector<int64> dimensions_without_feature;
  for (int64 i = 0; i < ShapeUtil::Rank(operand_shape); ++i) {
    if (i != feature_index) {
      dimensions_without_feature.push_back(i);
    }
  }

  auto scale_broadcasted = add(
      HloInstruction::CreateBroadcast(operand_shape, scale, {feature_index}));
  auto offset_broadcasted = add(
      HloInstruction::CreateBroadcast(operand_shape, offset, {feature_index}));

  HloComputation* add_reduce_computation =
      GetOrCreateScalarAddComputation(ptype);

  // X^2.
  auto operand_squared =
      add_binary(operand_shape, HloOpcode::kMultiply, operand, operand);
  // Sum[X].
  auto sum = add(HloInstruction::CreateReduce(feature_shape, operand, zero,
                                              dimensions_without_feature,
                                              add_reduce_computation));
  // Sum[X^2].
  auto squared_sum = add(HloInstruction::CreateReduce(
      feature_shape, operand_squared, zero, dimensions_without_feature,
      add_reduce_computation));

  // E[X].
  auto mean = add(Mean(elements_per_feature_int64, sum, add));
  auto mean_broadcasted = add(
      HloInstruction::CreateBroadcast(operand_shape, mean, {feature_index}));

  // E[X^2].
  auto square_mean = add(Mean(elements_per_feature_int64, squared_sum, add));

  // E^2[X].
  auto mean_square =
      add_binary(feature_shape, HloOpcode::kMultiply, mean, mean);

  // Var[X].  (Computed as E[X^2] - E^2[X].)
  auto var =
      add_binary(feature_shape, HloOpcode::kSubtract, square_mean, mean_square);

  auto var_broadcasted =
      add(HloInstruction::CreateBroadcast(operand_shape, var, {feature_index}));

  // Var[X] + epsilon.
  auto var_add_epsilon =
      add_binary(operand_shape, HloOpcode::kAdd, var_broadcasted, epsilon);

  // 1 / Sqrt[Var[X] + epsilon].
  auto rsqrt_var_add_epsilon = add(Rsqrt(var_add_epsilon, add));

  // X - E[X].
  auto operand_minus_mean = add_binary(operand_shape, HloOpcode::kSubtract,
                                       operand, mean_broadcasted);

  // (X - E[X]) / Sqrt[Var[X] + epsilon].
  auto normalized = add_binary(operand_shape, HloOpcode::kMultiply,
                               operand_minus_mean, rsqrt_var_add_epsilon);

  // (X - E[X]) / Sqrt[Var[X] + epsilon] * scale.
  auto scaled_normalized = add_binary(operand_shape, HloOpcode::kMultiply,
                                      normalized, scale_broadcasted);

  // (X - E[X]) / Sqrt[Var[X] + epsilon] * scale + offset.
  auto shifted_normalized = add_binary(operand_shape, HloOpcode::kAdd,
                                       scaled_normalized, offset_broadcasted);

  auto tuple = HloInstruction::CreateTuple({shifted_normalized, mean, var});

  if (batch_norm->has_sharding()) {
    // Sanity check that added_instructions saw every instruction created
    // since the snapshot, so the sharding loop below covers them all.
    int64 instruction_count_after = computation_->instruction_count();
    CHECK_EQ(instruction_count_after,
             instruction_count_before + added_instructions.size());
    const HloSharding& sharding = batch_norm->sharding();
    // Operand-shaped intermediates inherit the sharding of the tuple's first
    // element; everything else gets a per-device or replicated default.
    HloSharding operand_sharding =
        sharding.GetAsShapeTree(batch_norm->shape()).element({0});
    optional<int64> unique_device = batch_norm->sharding_unique_device();
    HloSharding default_sharding =
        unique_device.has_value()
            ? HloSharding::AssignDevice(unique_device.value())
            : HloSharding::Replicate();
    for (HloInstruction* inst : added_instructions) {
      if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
        inst->set_sharding(operand_sharding);
      } else {
        inst->set_sharding(default_sharding);
      }
    }
    tuple->set_sharding(sharding);
  }
  TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
  return Status::OK();
}
// Rewrites a kBatchNormInference instruction into primitive HLO ops that
// normalize the operand with the *provided* (precomputed) mean/variance:
//   (X - mean) * rsqrt(var + epsilon) * scale + offset.
// No-op unless rewrite_inference_op_ is set.
Status BatchNormExpanderVisitor::HandleBatchNormInference(
    HloInstruction* batch_norm) {
  if (!rewrite_inference_op_) {
    return Status::OK();
  }
  // Expand batch norm inference into smaller HLO ops.
  HloInstruction* operand = batch_norm->mutable_operand(0);
  const Shape operand_shape = operand->shape();
  int64 feature_index = batch_norm->feature_index();
  PrimitiveType ptype = operand_shape.element_type();

  HloInstruction* scale = batch_norm->mutable_operand(1);
  HloInstruction* offset = batch_norm->mutable_operand(2);
  HloInstruction* mean = batch_norm->mutable_operand(3);
  HloInstruction* var = batch_norm->mutable_operand(4);
  const Shape feature_shape = scale->shape();

  auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
  TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
  // epsilon is added directly (not through `add`) before the instruction
  // count snapshot below, so it is deliberately excluded from the
  // added_instructions bookkeeping and the sharding fixup loop.
  auto epsilon = computation_->AddInstruction(HloInstruction::CreateBroadcast(
      operand_shape,
      computation_->AddInstruction(
          HloInstruction::CreateConstant(std::move(epsilon_literal))),
      {}));

  // Reduction dimensions: every dimension except the feature dimension.
  std::vector<int64> dimensions_without_feature;
  for (int64 i = 0; i < ShapeUtil::Rank(operand_shape); ++i) {
    if (i != feature_index) {
      dimensions_without_feature.push_back(i);
    }
  }

  // Every op added via `add` is recorded so sharding can be propagated to
  // it afterwards; metadata is copied from the original batch_norm.
  std::vector<HloInstruction*> added_instructions;
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
    added_inst->set_metadata(batch_norm->metadata());
    added_instructions.push_back(added_inst);
    return added_inst;
  };
  auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
                        HloInstruction* a, HloInstruction* b) {
    return add(HloInstruction::CreateBinary(shape, opcode, a, b));
  };
  int64 instruction_count_before = computation_->instruction_count();

  auto scale_broadcasted = add(
      HloInstruction::CreateBroadcast(operand_shape, scale, {feature_index}));
  auto offset_broadcasted = add(
      HloInstruction::CreateBroadcast(operand_shape, offset, {feature_index}));
  auto mean_broadcasted = add(
      HloInstruction::CreateBroadcast(operand_shape, mean, {feature_index}));
  auto var_broadcasted =
      add(HloInstruction::CreateBroadcast(operand_shape, var, {feature_index}));

  // Var[X] + epsilon.
  auto var_add_epsilon =
      add_binary(operand_shape, HloOpcode::kAdd, var_broadcasted, epsilon);

  // 1 / Sqrt[Var[X] + epsilon].
  auto rsqrt_var_add_epsilon = add(Rsqrt(var_add_epsilon, add));

  // X - E[X].
  auto operand_minus_mean = add_binary(operand_shape, HloOpcode::kSubtract,
                                       operand, mean_broadcasted);

  // (X - E[X]) / Sqrt[Var[X] + epsilon].
  auto normalized = add_binary(operand_shape, HloOpcode::kMultiply,
                               operand_minus_mean, rsqrt_var_add_epsilon);

  // (X - E[X]) / Sqrt[Var[X] + epsilon] * scale.
  auto scaled_normalized = add_binary(operand_shape, HloOpcode::kMultiply,
                                      normalized, scale_broadcasted);

  // (X - E[X]) / Sqrt[Var[X] + epsilon] * scale + offset.
  // Created unmaterialized (not via `add`): it becomes the replacement for
  // batch_norm itself in ReplaceWithNewInstruction below.
  auto shifted_normalized = HloInstruction::CreateBinary(
      operand_shape, HloOpcode::kAdd, scaled_normalized, offset_broadcasted);

  // Sanity check that added_instructions saw every instruction created
  // since the snapshot, so the sharding loop below covers them all.
  int64 instruction_count_after = computation_->instruction_count();
  CHECK_EQ(instruction_count_after,
           instruction_count_before + added_instructions.size());
  if (batch_norm->has_sharding()) {
    const HloSharding& sharding = batch_norm->sharding();
    optional<int64> unique_device = batch_norm->sharding_unique_device();
    HloSharding default_sharding =
        unique_device.has_value()
            ? HloSharding::AssignDevice(unique_device.value())
            : HloSharding::Replicate();
    for (HloInstruction* inst : added_instructions) {
      if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
        inst->set_sharding(sharding);
      } else {
        inst->set_sharding(default_sharding);
      }
    }
    shifted_normalized->set_sharding(sharding);
  }
  TF_CHECK_OK(
      ReplaceWithNewInstruction(batch_norm, std::move(shifted_normalized)));
  return Status::OK();
}
// Rewrites a kBatchNormGrad instruction into primitive HLO ops producing
// the tuple (activation_grad, scale_grad, offset_grad).
// No-op unless rewrite_grad_op_ is set.
Status BatchNormExpanderVisitor::HandleBatchNormGrad(
    HloInstruction* batch_norm) {
  // Use the following formulas to calculate gradients:
  // scale_grad =
  //   sum(output_grad * (activation - mean(activation))) * rsqrt(var + epsilon)
  //
  // offset_grad =
  //   sum(output_grad)
  //
  // activation_grad =
  //   1/N * scale * rsqrt(var + epsilon) *
  //   (N * output_grad - sum(output_grad) - (activation - mean(activation)) *
  //   sum(output_grad * (activation - mean(activation))) / (variance +
  //   epsilon))
  if (!rewrite_grad_op_) {
    return Status::OK();
  }
  // Every op added via `add` is recorded so sharding can be propagated to
  // it afterwards; metadata is copied from the original batch_norm.
  std::vector<HloInstruction*> added_instructions;
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
    added_inst->set_metadata(batch_norm->metadata());
    added_instructions.push_back(added_inst);
    return added_inst;
  };
  auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
                        HloInstruction* a, HloInstruction* b) {
    return add(HloInstruction::CreateBinary(shape, opcode, a, b));
  };
  int64 instruction_count_before = computation_->instruction_count();

  HloInstruction* activation = batch_norm->mutable_operand(0);
  const Shape activation_shape = activation->shape();
  PrimitiveType ptype = activation_shape.element_type();
  HloInstruction* scale = batch_norm->mutable_operand(1);
  const Shape feature_shape = scale->shape();
  HloInstruction* mean = batch_norm->mutable_operand(2);
  HloInstruction* variance = batch_norm->mutable_operand(3);
  HloInstruction* grad_output = batch_norm->mutable_operand(4);
  int64 feature_index = batch_norm->feature_index();
  const int64 size_in_elements = ShapeUtil::ElementsIn(activation_shape);
  const int64 feature_count = activation_shape.dimensions(feature_index);
  // N in the formulas above: elements per feature channel.
  const int64 elements_per_feature_int64 = size_in_elements / feature_count;

  // Constants are built as f32 then converted to the activation type.
  auto zero_literal = LiteralUtil::CreateR0(0.0f);
  TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
  auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));

  auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
  TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
  auto epsilon_scalar =
      add(HloInstruction::CreateConstant(std::move(epsilon_literal)));
  // epsilon broadcast to both shapes it is combined with below.
  auto epsilon_activation = add(
      HloInstruction::CreateBroadcast(activation_shape, epsilon_scalar, {}));
  auto epsilon_feature =
      add(HloInstruction::CreateBroadcast(feature_shape, epsilon_scalar, {}));

  // Reduction dimensions: every dimension except the feature dimension.
  std::vector<int64> dimensions_without_feature;
  for (int64 i = 0; i < ShapeUtil::Rank(activation_shape); ++i) {
    if (i != feature_index) {
      dimensions_without_feature.push_back(i);
    }
  }

  auto scale_broadcasted = add(HloInstruction::CreateBroadcast(
      activation_shape, scale, {feature_index}));
  auto variance_broadcasted = add(HloInstruction::CreateBroadcast(
      activation_shape, variance, {feature_index}));

  // E[X].
  auto mean_broadcasted = add(
      HloInstruction::CreateBroadcast(activation_shape, mean, {feature_index}));

  // rsqrt[Var[X] + epsilon].
  auto rsqrt_var_add_epsilon_broadcasted =
      add(Rsqrt(add_binary(activation_shape, HloOpcode::kAdd,
                           variance_broadcasted, epsilon_activation),
                add));

  auto rsqrt_var_add_epsilon = add(Rsqrt(
      add_binary(feature_shape, HloOpcode::kAdd, variance, epsilon_feature),
      add));

  // X - E[X].
  auto activation_minus_mean = add_binary(
      activation_shape, HloOpcode::kSubtract, activation, mean_broadcasted);

  // Grad[Y] * (X - E[X]).
  auto grad_output_times_activiation_minus_mean =
      add_binary(activation_shape, HloOpcode::kMultiply, grad_output,
                 activation_minus_mean);

  HloComputation* add_reduce_computation =
      GetOrCreateScalarAddComputation(ptype);

  // sum(Grad[Y] * (X - E[X])).
  auto sum_grad_output_times_activiation_minus_mean =
      add(HloInstruction::CreateReduce(
          feature_shape, grad_output_times_activiation_minus_mean, zero,
          dimensions_without_feature, add_reduce_computation));

  // Grad[beta] = Sum(Grad[Y]).
  auto grad_beta = add(HloInstruction::CreateReduce(
      feature_shape, grad_output, zero, dimensions_without_feature,
      add_reduce_computation));

  // Grad[scale] = Sum(Grad[Y] * (X - E[X]) * rsqrt[Var[X] + epsilon]).
  auto grad_scale = add_binary(feature_shape, HloOpcode::kMultiply,
                               sum_grad_output_times_activiation_minus_mean,
                               rsqrt_var_add_epsilon);

  // I2 = Sum(Grad[Y])
  auto i2 = add(HloInstruction::CreateBroadcast(activation_shape, grad_beta,
                                                {feature_index}));

  // I3 = Sum(Grad[Y] * (X - E[X]))
  auto i3 = add(HloInstruction::CreateBroadcast(
      activation_shape, sum_grad_output_times_activiation_minus_mean,
      {feature_index}));

  // I4 = (X - E[X]) * I3
  auto i4 = add_binary(activation_shape, HloOpcode::kMultiply, i3,
                       activation_minus_mean);

  // I5 = I4 / (Var[X] + epsilon)
  auto i5 = add_binary(activation_shape, HloOpcode::kDivide, i4,
                       add_binary(activation_shape, HloOpcode::kAdd,
                                  variance_broadcasted, epsilon_activation));

  // scale * rsqrt[Var[X] + epsilon] * 1/N
  auto scale_times_rsqrt_var_add_epsilon =
      add_binary(activation_shape, HloOpcode::kMultiply, scale_broadcasted,
                 rsqrt_var_add_epsilon_broadcasted);
  // Folding the 1/N factor in via Mean().
  scale_times_rsqrt_var_add_epsilon = add(
      Mean(elements_per_feature_int64, scale_times_rsqrt_var_add_epsilon, add));

  auto elements_per_feature_literal =
      LiteralUtil::CreateR0<float>(elements_per_feature_int64);
  TF_ASSIGN_OR_RETURN(elements_per_feature_literal,
                      elements_per_feature_literal.Convert(ptype));
  auto elements_per_feature = add(
      HloInstruction::CreateConstant(std::move(elements_per_feature_literal)));
  // I1 = N * Grad[Y]
  auto i1 = add_binary(activation_shape, HloOpcode::kMultiply, grad_output,
                       add(HloInstruction::CreateBroadcast(
                           activation_shape, elements_per_feature, {})));

  // I6 = I1 - I2 - I5
  auto i6 = add_binary(
      activation_shape, HloOpcode::kSubtract,
      add_binary(activation_shape, HloOpcode::kSubtract, i1, i2), i5);

  // Grad[X] = scale * rsqrt[Var[X] + epsilon] * 1/N * I6.
  auto grad_activation = add_binary(activation_shape, HloOpcode::kMultiply,
                                    scale_times_rsqrt_var_add_epsilon, i6);
  auto tuple =
      HloInstruction::CreateTuple({grad_activation, grad_scale, grad_beta});
  if (batch_norm->has_sharding()) {
    const HloSharding& sharding = batch_norm->sharding();
    // Sanity check that added_instructions saw every instruction created
    // since the snapshot, so the sharding loop below covers them all.
    int64 instruction_count_after = computation_->instruction_count();
    CHECK_EQ(instruction_count_after,
             instruction_count_before + added_instructions.size());
    // Activation-shaped intermediates inherit the sharding of the tuple's
    // first element; everything else gets a device/replicated default.
    HloSharding activation_sharding =
        sharding.GetAsShapeTree(batch_norm->shape()).element({0});
    auto unique_device = batch_norm->sharding_unique_device();
    HloSharding default_sharding =
        unique_device.has_value()
            ? HloSharding::AssignDevice(unique_device.value())
            : HloSharding::Replicate();
    for (HloInstruction* inst : added_instructions) {
      if (ShapeUtil::Equal(inst->shape(), activation_shape)) {
        inst->set_sharding(activation_sharding);
      } else {
        inst->set_sharding(default_sharding);
      }
    }
    tuple->set_sharding(sharding);
  }
  TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
  return Status::OK();
}
// Expands batch-norm ops in every non-fusion computation of `module`,
// honoring the pass's rewrite_{training,inference,grad}_op_ switches.
// Returns true iff any instruction was rewritten.
StatusOr<bool> BatchNormExpander::Run(HloModule* module) {
  XLA_VLOG_LINES(2, "BatchNormExpander::Run(), before:\n" + module->ToString());
  bool any_rewritten = false;
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    any_rewritten |= BatchNormExpanderVisitor::Run(
        computation, rewrite_training_op_, rewrite_inference_op_,
        rewrite_grad_op_);
  }
  XLA_VLOG_LINES(2, "BatchNormExpander::Run(), after:\n" + module->ToString());
  return any_rewritten;
}
} // namespace xla
|
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup freestyle
*/
#include "BPy_CurvePoint.h"
#include "../BPy_Convert.h"
#include "../Interface0D/BPy_SVertex.h"
#ifdef __cplusplus
extern "C" {
#endif
///////////////////////////////////////////////////////////////////////////////////////////
/*----------------------CurvePoint methods----------------------------*/
// User-visible class docstring for CurvePoint.
// Fixes: "over loaded" -> "overloaded" and a missing sentence-ending period.
PyDoc_STRVAR(CurvePoint_doc,
             "Class hierarchy: :class:`Interface0D` > :class:`CurvePoint`\n"
             "\n"
             "Class to represent a point of a curve. A CurvePoint can be any point\n"
             "of a 1D curve (it doesn't have to be a vertex of the curve). Any\n"
             ":class:`Interface1D` is built upon ViewEdges, themselves built upon\n"
             "FEdges. Therefore, a curve is basically a polyline made of a list of\n"
             ":class:`SVertex` objects. Thus, a CurvePoint is built by linearly\n"
             "interpolating two :class:`SVertex` instances. CurvePoint can be used\n"
             "as virtual points while querying 0D information along a curve at a\n"
             "given resolution.\n"
             "\n"
             ".. method:: __init__()\n"
             "            __init__(brother)\n"
             "            __init__(first_vertex, second_vertex, t2d)\n"
             "            __init__(first_point, second_point, t2d)\n"
             "\n"
             "   Builds a CurvePoint using the default constructor, copy constructor,\n"
             "   or one of the overloaded constructors. The overloaded constructors\n"
             "   can either take two :class:`SVertex` or two :class:`CurvePoint`\n"
             "   objects and an interpolation parameter.\n"
             "\n"
             "   :arg brother: A CurvePoint object.\n"
             "   :type brother: :class:`CurvePoint`\n"
             "   :arg first_vertex: The first SVertex.\n"
             "   :type first_vertex: :class:`SVertex`\n"
             "   :arg second_vertex: The second SVertex.\n"
             "   :type second_vertex: :class:`SVertex`\n"
             "   :arg first_point: The first CurvePoint.\n"
             "   :type first_point: :class:`CurvePoint`\n"
             "   :arg second_point: The second CurvePoint.\n"
             "   :type second_point: :class:`CurvePoint`\n"
             "   :arg t2d: A 2D interpolation parameter used to linearly interpolate\n"
             "             first_vertex and second_vertex or first_point and second_point.\n"
             "   :type t2d: float\n");
// Initializer for CurvePoint: dispatches on the Python arguments to one of
// four C++ constructors (default, copy, two SVertex + t2d, two CurvePoint
// + t2d). Returns 0 on success, -1 with a Python exception set on failure.
static int CurvePoint_init(BPy_CurvePoint *self, PyObject *args, PyObject *kwds)
{
  static const char *kwlist_1[] = {"brother", nullptr};
  static const char *kwlist_2[] = {"first_vertex", "second_vertex", "t2d", nullptr};
  static const char *kwlist_3[] = {"first_point", "second_point", "t2d", nullptr};
  PyObject *obj1 = nullptr, *obj2 = nullptr;
  float t2d;
  // Overload 1: no argument (default constructor) or one CurvePoint
  // (copy constructor). "|O!" makes the argument optional.
  if (PyArg_ParseTupleAndKeywords(args, kwds, "|O!", (char **)kwlist_1, &CurvePoint_Type, &obj1)) {
    if (!obj1) {
      self->cp = new CurvePoint();
    }
    else {
      self->cp = new CurvePoint(*(((BPy_CurvePoint *)obj1)->cp));
    }
  }
  // Overload 2: two SVertex objects and an interpolation parameter.
  // The comma operator clears the error left by the previous failed parse
  // before attempting this one.
  else if ((void)PyErr_Clear(),
           PyArg_ParseTupleAndKeywords(args,
                                       kwds,
                                       "O!O!f",
                                       (char **)kwlist_2,
                                       &SVertex_Type,
                                       &obj1,
                                       &SVertex_Type,
                                       &obj2,
                                       &t2d)) {
    self->cp = new CurvePoint(((BPy_SVertex *)obj1)->sv, ((BPy_SVertex *)obj2)->sv, t2d);
  }
  // Overload 3: two CurvePoint objects and an interpolation parameter.
  else if ((void)PyErr_Clear(),
           PyArg_ParseTupleAndKeywords(args,
                                       kwds,
                                       "O!O!f",
                                       (char **)kwlist_3,
                                       &CurvePoint_Type,
                                       &obj1,
                                       &CurvePoint_Type,
                                       &obj2,
                                       &t2d)) {
    CurvePoint *cp1 = ((BPy_CurvePoint *)obj1)->cp;
    CurvePoint *cp2 = ((BPy_CurvePoint *)obj2)->cp;
    // Reject wrappers whose underlying CurvePoint lacks both SVertices,
    // since the C++ constructor dereferences them.
    if (!cp1 || cp1->A() == nullptr || cp1->B() == nullptr) {
      PyErr_SetString(PyExc_TypeError, "argument 1 is an invalid CurvePoint object");
      return -1;
    }
    if (!cp2 || cp2->A() == nullptr || cp2->B() == nullptr) {
      PyErr_SetString(PyExc_TypeError, "argument 2 is an invalid CurvePoint object");
      return -1;
    }
    self->cp = new CurvePoint(cp1, cp2, t2d);
  }
  else {
    PyErr_SetString(PyExc_TypeError, "invalid argument(s)");
    return -1;
  }
  // Register the new object with the Interface0D base wrapper.
  // borrowed=false: presumably the wrapper owns and will delete the C++
  // object — TODO confirm against the Interface0D dealloc implementation.
  self->py_if0D.if0D = self->cp;
  self->py_if0D.borrowed = false;
  return 0;
}
/// bool operator== (const CurvePoint &b)
/*----------------------CurvePoint get/setters ----------------------------*/
PyDoc_STRVAR(CurvePoint_first_svertex_doc,
             "The first SVertex upon which the CurvePoint is built.\n"
             "\n"
             ":type: :class:`SVertex`");

// Getter for CurvePoint.first_svertex; returns None when unset.
static PyObject *CurvePoint_first_svertex_get(BPy_CurvePoint *self, void *UNUSED(closure))
{
  SVertex *first = self->cp->A();
  if (!first) {
    Py_RETURN_NONE;
  }
  return BPy_SVertex_from_SVertex(*first);
}
// Setter for CurvePoint.first_svertex; only SVertex values are accepted.
static int CurvePoint_first_svertex_set(BPy_CurvePoint *self,
                                        PyObject *value,
                                        void *UNUSED(closure))
{
  if (BPy_SVertex_Check(value)) {
    self->cp->setA(((BPy_SVertex *)value)->sv);
    return 0;
  }
  PyErr_SetString(PyExc_TypeError, "value must be an SVertex");
  return -1;
}
PyDoc_STRVAR(CurvePoint_second_svertex_doc,
             "The second SVertex upon which the CurvePoint is built.\n"
             "\n"
             ":type: :class:`SVertex`");

// Getter for CurvePoint.second_svertex; returns None when unset.
static PyObject *CurvePoint_second_svertex_get(BPy_CurvePoint *self, void *UNUSED(closure))
{
  SVertex *second = self->cp->B();
  if (!second) {
    Py_RETURN_NONE;
  }
  return BPy_SVertex_from_SVertex(*second);
}
// Setter for CurvePoint.second_svertex; only SVertex values are accepted.
static int CurvePoint_second_svertex_set(BPy_CurvePoint *self,
                                         PyObject *value,
                                         void *UNUSED(closure))
{
  if (BPy_SVertex_Check(value)) {
    self->cp->setB(((BPy_SVertex *)value)->sv);
    return 0;
  }
  PyErr_SetString(PyExc_TypeError, "value must be an SVertex");
  return -1;
}
PyDoc_STRVAR(CurvePoint_fedge_doc,
             "Gets the FEdge for the two SVertices that given CurvePoints consists out of.\n"
             "A shortcut for CurvePoint.first_svertex.get_fedge(CurvePoint.second_svertex).\n"
             "\n"
             ":type: :class:`FEdge`");

// Getter for CurvePoint.fedge.
// Fix: the original dereferenced A (and A->getFEdge(*B)) unconditionally;
// A can be NULL for a default-constructed CurvePoint, and the original's own
// comment noted B can be NULL. Guard all three and return None instead of
// crashing on a null dereference.
static PyObject *CurvePoint_fedge_get(BPy_CurvePoint *self, void *UNUSED(closure))
{
  SVertex *A = self->cp->A();
  Interface0D *B = (Interface0D *)self->cp->B();
  // A and/or B can be NULL under certain circumstances
  if (A && B) {
    FEdge *fe = A->getFEdge(*B);
    // getFEdge() may not find a shared FEdge — TODO confirm it can return
    // NULL; treated as "no edge" here.
    if (fe) {
      return Any_BPy_Interface1D_from_Interface1D(*fe);
    }
  }
  Py_RETURN_NONE;
}
PyDoc_STRVAR(CurvePoint_t2d_doc,
             "The 2D interpolation parameter.\n"
             "\n"
             ":type: float");

// Getter for CurvePoint.t2d.
static PyObject *CurvePoint_t2d_get(BPy_CurvePoint *self, void *UNUSED(closure))
{
  const double t = self->cp->t2d();
  return PyFloat_FromDouble(t);
}
// Setter for CurvePoint.t2d; any Python number is accepted.
static int CurvePoint_t2d_set(BPy_CurvePoint *self, PyObject *value, void *UNUSED(closure))
{
  const float scalar = PyFloat_AsDouble(value);
  // PyFloat_AsDouble() signals conversion failure by returning -1.0 with an
  // error set; a genuine -1.0 value leaves no error pending.
  if (scalar == -1.0f && PyErr_Occurred()) {
    PyErr_SetString(PyExc_TypeError, "value must be a number");
    return -1;
  }
  self->cp->setT2d(scalar);
  return 0;
}
// Attribute descriptors (getters/setters) exposed on CurvePoint instances.
static PyGetSetDef BPy_CurvePoint_getseters[] = {
    {"first_svertex",
     (getter)CurvePoint_first_svertex_get,
     (setter)CurvePoint_first_svertex_set,
     CurvePoint_first_svertex_doc,
     nullptr},
    {"second_svertex",
     (getter)CurvePoint_second_svertex_get,
     (setter)CurvePoint_second_svertex_set,
     CurvePoint_second_svertex_doc,
     nullptr},
    // "fedge" is read-only: no setter is registered.
    {"fedge", (getter)CurvePoint_fedge_get, nullptr, CurvePoint_fedge_doc, nullptr},
    {"t2d", (getter)CurvePoint_t2d_get, (setter)CurvePoint_t2d_set, CurvePoint_t2d_doc, nullptr},
    {nullptr, nullptr, nullptr, nullptr, nullptr} /* Sentinel */
};
/*-----------------------BPy_CurvePoint type definition ------------------------------*/
// CPython type object for CurvePoint. tp_base is Interface0D_Type, so
// unset slots (including tp_new, left as nullptr here) are inherited from
// the base type when PyType_Ready() runs.
PyTypeObject CurvePoint_Type = {
    PyVarObject_HEAD_INIT(nullptr, 0) "CurvePoint", /* tp_name */
    sizeof(BPy_CurvePoint),                         /* tp_basicsize */
    0,                                              /* tp_itemsize */
    nullptr,                                        /* tp_dealloc */
#if PY_VERSION_HEX >= 0x03080000
    0, /* tp_vectorcall_offset */
#else
    nullptr, /* tp_print */
#endif
    nullptr,                                  /* tp_getattr */
    nullptr,                                  /* tp_setattr */
    nullptr,                                  /* tp_reserved */
    nullptr,                                  /* tp_repr */
    nullptr,                                  /* tp_as_number */
    nullptr,                                  /* tp_as_sequence */
    nullptr,                                  /* tp_as_mapping */
    nullptr,                                  /* tp_hash */
    nullptr,                                  /* tp_call */
    nullptr,                                  /* tp_str */
    nullptr,                                  /* tp_getattro */
    nullptr,                                  /* tp_setattro */
    nullptr,                                  /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
    CurvePoint_doc,                           /* tp_doc */
    nullptr,                                  /* tp_traverse */
    nullptr,                                  /* tp_clear */
    nullptr,                                  /* tp_richcompare */
    0,                                        /* tp_weaklistoffset */
    nullptr,                                  /* tp_iter */
    nullptr,                                  /* tp_iternext */
    nullptr,                                  /* tp_methods */
    nullptr,                                  /* tp_members */
    BPy_CurvePoint_getseters,                 /* tp_getset */
    &Interface0D_Type,                        /* tp_base */
    nullptr,                                  /* tp_dict */
    nullptr,                                  /* tp_descr_get */
    nullptr,                                  /* tp_descr_set */
    0,                                        /* tp_dictoffset */
    (initproc)CurvePoint_init,                /* tp_init */
    nullptr,                                  /* tp_alloc */
    nullptr,                                  /* tp_new */
};
///////////////////////////////////////////////////////////////////////////////////////////
#ifdef __cplusplus
}
#endif
|
/*
* Copyright (C) 2005-2011 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
//#include "DatabaseEnv.h"
|
// Copyright (c) 2010-2019 The Regents of the University of Michigan
// This file is from the freud project, released under the BSD 3-Clause License.
#include <complex>
#include <stdexcept>
#include "HexOrderParameter.h"
using namespace std;
using namespace tbb;
/*! \file HexOrderParameter.cc
\brief Compute the hexatic order parameter for each particle.
*/
namespace freud { namespace order {
// Constructor. Only the symmetry order k is retained; rmax and n are
// accepted for interface compatibility but are not used in this
// translation unit (neighbors come from the NeighborList passed to
// compute()) — NOTE(review): confirm the unused parameters are intentional.
HexOrderParameter::HexOrderParameter(float rmax, unsigned int k, unsigned int n)
    : m_box(box::Box()), m_k(k), m_Np(0)
{}

HexOrderParameter::~HexOrderParameter() {}
// Computes the k-atic (hexatic for k=6) order parameter psi_k for each of
// the Np points, in parallel over particles via TBB. Results are written
// to m_psi_array (one complex<float> per particle).
void HexOrderParameter::compute(box::Box& box, const freud::locality::NeighborList* nlist,
                                const vec3<float>* points, unsigned int Np)
{
    // Compute the cell list
    m_box = box;
    nlist->validate(Np, Np);
    // Flat array of (i, j) index pairs, two entries per bond.
    const size_t* neighbor_list(nlist->getNeighbors());
    // Reallocate the output array if it is not the right size
    if (Np != m_Np)
    {
        m_psi_array = std::shared_ptr<complex<float>>(new complex<float>[Np],
                                                      std::default_delete<complex<float>[]>());
    }
    // Compute the order parameter
    parallel_for(blocked_range<size_t>(0, Np), [=](const blocked_range<size_t>& r) {
        // Bonds are sorted by first index, so start at the first bond of the
        // first particle in this range and walk forward as i advances.
        size_t bond(nlist->find_first_index(r.begin()));
        for (size_t i = r.begin(); i != r.end(); ++i)
        {
            m_psi_array.get()[i] = 0;
            vec3<float> ref = points[i];
            for (; bond < nlist->getNumBonds() && neighbor_list[2 * bond] == i; ++bond)
            {
                const size_t j(neighbor_list[2 * bond + 1]);
                // Compute r between the two particles
                vec3<float> delta = m_box.wrap(points[j] - ref);
                float rsq = dot(delta, delta);
                // Skip self/degenerate pairs at (near-)zero separation.
                if (rsq > 1e-6)
                {
                    // Compute psi for neighboring particle
                    // (only constructed for 2d)
                    float psi_ij = atan2f(delta.y, delta.x);
                    m_psi_array.get()[i] += exp(complex<float>(0, m_k * psi_ij));
                }
            }
            // Normalize by k (the expected neighbor count for this metric).
            m_psi_array.get()[i] /= complex<float>(m_k);
        }
    });
    // Save the last computed number of particles
    m_Np = Np;
}
}; }; // end namespace freud::order
|
#include "libmul/mul.h"
// Returns the product of a and b scaled by a constant factor of four.
extern int mul4( const int a, const int b ) noexcept {
  const int product = a * b;
  return product * 4;
}
|
#pragma once
#include "program.hpp"
#include "util.hpp"
// Minimizes/optimizes a Program with respect to the configured Settings.
class Minimizer
{
public:
  // Stores a copy of the settings used by all subsequent operations.
  Minimizer( const Settings &settings )
      :
      settings( settings )
  {
  }

  // Minimizes the program in place; num_terms bounds the sequence terms
  // considered — TODO confirm exact semantics against the implementation.
  void minimize( Program &p, size_t num_terms ) const;

  // Runs optimization followed by minimization in place.
  void optimizeAndMinimize( Program &p, size_t num_reserved_cells, size_t num_initialized_cells,
      size_t num_terms ) const;

private:
  Settings settings;
};
|
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "accessors.h"
#include "api.h"
#include "arguments.h"
#include "codegen.h"
#include "execution.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
#ifdef DEBUG
// Maps an inline-cache state to the one-character tag printed by
// --trace-ic transition logs.
static char TransitionMarkFromState(IC::State state) {
  switch (state) {
    case UNINITIALIZED:
      return '0';
    case PREMONOMORPHIC:
      return 'P';
    case MONOMORPHIC:
      return '1';
    case MONOMORPHIC_PROTOTYPE_FAILURE:
      return '^';
    case MEGAMORPHIC:
      return 'N';
    // We never see the debugger states here, because the state is
    // computed from the original code - not the patched code. Let
    // these cases fall through to the unreachable code below.
    case DEBUG_BREAK:
    case DEBUG_PREPARE_STEP_IN:
      break;
  }
  UNREACHABLE();
  return 0;
}
// Prints a one-line --trace-ic record for an IC transition: the IC type,
// the JS function (or builtin apply site) containing the call site, the
// old->new state characters, and the property name.
void IC::TraceIC(const char* type,
                 Handle<Object> name,
                 State old_state,
                 Code* new_target,
                 const char* extra_info) {
  if (FLAG_trace_ic) {
    State new_state = StateFrom(new_target,
                                HEAP->undefined_value(),
                                HEAP->undefined_value());
    PrintF("[%s in ", type);
    // Walk the stack to the frame whose fp matches this IC's call site.
    StackFrameIterator it;
    while (it.frame()->fp() != this->fp()) it.Advance();
    StackFrame* raw_frame = it.frame();
    // If the call went through Function.prototype.apply, skip past the
    // internal apply frame to report the real caller.
    if (raw_frame->is_internal()) {
      Isolate* isolate = new_target->GetIsolate();
      Code* apply_builtin = isolate->builtins()->builtin(
          Builtins::kFunctionApply);
      if (raw_frame->unchecked_code() == apply_builtin) {
        PrintF("apply from ");
        it.Advance();
        raw_frame = it.frame();
      }
    }
    if (raw_frame->is_java_script()) {
      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
      Code* js_code = frame->unchecked_code();
      // Find the function on the stack and both the active code for the
      // function and the original code.
      JSFunction* function = JSFunction::cast(frame->function());
      function->PrintName();
      int code_offset = address() - js_code->instruction_start();
      PrintF("+%d", code_offset);
    } else {
      PrintF("<unknown>");
    }
    PrintF(" (%c->%c)%s",
           TransitionMarkFromState(old_state),
           TransitionMarkFromState(new_state),
           extra_info);
    name->Print();
    PrintF("]\n");
  }
}
// Constructs an IC, capturing the frame pointer and return-address slot of
// the JavaScript frame that triggered the IC miss, |depth| frames below the
// C entry frame.
IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
  ASSERT(isolate == Isolate::Current());
  // To improve the performance of the (much used) IC code, we unfold
  // a few levels of the stack frame iteration code. This yields a
  // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
  const Address entry =
      Isolate::c_entry_fp(isolate->thread_local_top());
  Address* pc_address =
      reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
  Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
  // If there's another JavaScript frame on the stack, we need to look
  // one frame further down the stack to find the frame pointer and
  // the return address stack slot.
  if (depth == EXTRA_CALL_FRAME) {
    const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
    pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
    fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
  }
#ifdef DEBUG
  // Cross-check the hand-unrolled walk against the generic iterator.
  StackFrameIterator it;
  for (int i = 0; i < depth + 1; i++) it.Advance();
  StackFrame* frame = it.frame();
  ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
#endif
  fp_ = fp;
  pc_address_ = pc_address;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Returns the address in the original (un-patched) code that corresponds to
// this IC's call site in the active, debug-patched code. Used so the IC
// system can be consulted even while DebugBreak stubs are installed.
Address IC::OriginalCodeAddress() {
  HandleScope scope;
  // Compute the JavaScript frame for the frame pointer of this IC
  // structure. We need this to be able to find the function
  // corresponding to the frame.
  StackFrameIterator it;
  while (it.frame()->fp() != this->fp()) it.Advance();
  JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
  // Find the function on the stack and both the active code for the
  // function and the original code.
  JSFunction* function = JSFunction::cast(frame->function());
  Handle<SharedFunctionInfo> shared(function->shared());
  Code* code = shared->code();
  ASSERT(Debug::HasDebugInfo(shared));
  Code* original_code = Debug::GetDebugInfo(shared)->original_code();
  ASSERT(original_code->IsCode());
  // Get the address of the call site in the active code. This is the
  // place where the call to DebugBreakXXX is and where the IC
  // normally would be.
  Address addr = pc() - Assembler::kCallTargetAddressOffset;
  // Return the address in the original code. This is the place where
  // the call which has been overwritten by the DebugBreakXXX resides
  // and the place where the inline cache system should look.
  intptr_t delta =
      original_code->instruction_start() - code->instruction_start();
  return addr + delta;
}
#endif
// Returns true if the prototype chain from |receiver| up to (but not
// including) the lookup holder contains a slow-properties (dictionary-mode)
// JSObject other than a global object/proxy. Such chains are not safe to
// cache with map-based stubs.
static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
                                             LookupResult* lookup,
                                             Object* receiver) {
  // Walk to the holder when the property was found, otherwise to the end of
  // the chain (null).
  Object* end = lookup->IsProperty()
      ? lookup->holder() : isolate->heap()->null_value();
  for (Object* current = receiver;
       current != end;
       current = current->GetPrototype()) {
    if (current->IsJSObject() &&
        !JSObject::cast(current)->HasFastProperties() &&
        !current->IsJSGlobalProxy() &&
        !current->IsJSGlobalObject()) {
      return true;
    }
  }
  return false;
}
// If |target| is still present in the cache-holder map's code cache, the IC
// miss was caused by a prototype-chain change (not a receiver map change);
// evict the stub and return true. Returns false when the holder cannot be
// determined or the stub was not found in the cache.
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
                                                   Object* receiver,
                                                   Object* name) {
  InlineCacheHolderFlag cache_holder =
      Code::ExtractCacheHolderFromFlags(target->flags());
  if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
    // The stub was generated for JSObject but called for non-JSObject.
    // IC::GetCodeCacheHolder is not applicable.
    return false;
  } else if (cache_holder == PROTOTYPE_MAP &&
             receiver->GetPrototype()->IsNull()) {
    // IC::GetCodeCacheHolder is not applicable.
    return false;
  }
  Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
  // Decide whether the inline cache failed because of changes to the
  // receiver itself or changes to one of its prototypes.
  //
  // If there are changes to the receiver itself, the map of the
  // receiver will have changed and the current target will not be in
  // the receiver map's code cache. Therefore, if the current target
  // is in the receiver map's code cache, the inline cache failed due
  // to prototype check failure.
  int index = map->IndexInCodeCache(name, target);
  if (index >= 0) {
    map->RemoveFromCodeCache(String::cast(name), target, index);
    return true;
  }
  return false;
}
// Computes the effective IC state for a miss, downgrading MONOMORPHIC to
// MONOMORPHIC_PROTOTYPE_FAILURE or UNINITIALIZED where appropriate. Note
// the check order matters: keyed ICs are classified before the
// side-effecting stub-cache eviction below.
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
  IC::State state = target->ic_state();
  if (state != MONOMORPHIC || !name->IsString()) return state;
  if (receiver->IsUndefined() || receiver->IsNull()) return state;
  // For keyed load/store/call, the most likely cause of cache failure is
  // that the key has changed.  We do not distinguish between
  // prototype and non-prototype failures for keyed access.
  Code::Kind kind = target->kind();
  if (kind == Code::KEYED_LOAD_IC ||
      kind == Code::KEYED_STORE_IC ||
      kind == Code::KEYED_CALL_IC) {
    return MONOMORPHIC;
  }
  // Remove the target from the code cache if it became invalid
  // because of changes in the prototype chain to avoid hitting it
  // again.
  // Call stubs handle this later to allow extra IC state
  // transitions.
  if (kind != Code::CALL_IC &&
      TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
    return MONOMORPHIC_PROTOTYPE_FAILURE;
  }
  // The builtins object is special.  It only changes when JavaScript
  // builtins are loaded lazily.  It is important to keep inline
  // caches for the builtins object monomorphic.  Therefore, if we get
  // an inline cache miss for the builtins object after lazily loading
  // JavaScript builtins, we return uninitialized as the state to
  // force the inline cache back to monomorphic state.
  if (receiver->IsJSBuiltinsObject()) {
    return UNINITIALIZED;
  }
  return MONOMORPHIC;
}
// Finds the relocation mode of the code-target reloc entry at this IC's
// address by scanning the containing Code object's relocation info.
RelocInfo::Mode IC::ComputeMode() {
  Address addr = address();
  Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
  for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
       !it.done(); it.next()) {
    RelocInfo* info = it.rinfo();
    if (info->pc() == addr) return info->rmode();
  }
  // Every IC site must have a matching reloc entry.
  UNREACHABLE();
  return RelocInfo::NONE;
}
// Schedules a TypeError of kind |type| (formatted with |key| and |object|)
// on the isolate and returns the corresponding failure sentinel.
Failure* IC::TypeError(const char* type,
                       Handle<Object> object,
                       Handle<Object> key) {
  HandleScope scope(isolate());
  Handle<Object> message_args[2] = { key, object };
  Handle<Object> type_error =
      isolate()->factory()->NewTypeError(type, HandleVector(message_args, 2));
  return isolate()->Throw(*type_error);
}
// Schedules a ReferenceError of kind |type| for |name| on the isolate and
// returns the corresponding failure sentinel.
Failure* IC::ReferenceError(const char* type, Handle<String> name) {
  HandleScope scope(isolate());
  Handle<Object> reference_error =
      isolate()->factory()->NewReferenceError(type, HandleVector(&name, 1));
  return isolate()->Throw(*reference_error);
}
// Resets the inline cache at |address| back to its initial stub, dispatching
// on the kind of the currently installed target.
void IC::Clear(Address address) {
  Code* target = GetTargetAtAddress(address);
  // Don't clear debug break inline cache as it will remove the break point.
  if (target->ic_state() == DEBUG_BREAK) return;
  switch (target->kind()) {
    case Code::LOAD_IC:
      return LoadIC::Clear(address, target);
    case Code::KEYED_LOAD_IC:
      return KeyedLoadIC::Clear(address, target);
    case Code::STORE_IC:
      return StoreIC::Clear(address, target);
    case Code::KEYED_STORE_IC:
      return KeyedStoreIC::Clear(address, target);
    case Code::CALL_IC:
      return CallIC::Clear(address, target);
    case Code::KEYED_CALL_IC:
      return KeyedCallIC::Clear(address, target);
    case Code::UNARY_OP_IC:
    case Code::BINARY_OP_IC:
    case Code::COMPARE_IC:
      // Clearing these is tricky and does not
      // make any performance difference.
      return;
    default:
      UNREACHABLE();
  }
}
// Resets a call IC to its initial stub, preserving the contextual flag and
// argument count encoded in the current target.
void CallICBase::Clear(Address address, Code* target) {
  State state = target->ic_state();
  if (state == UNINITIALIZED) return;
  bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
  RelocInfo::Mode mode =
      contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET;
  Code* code = Isolate::Current()->stub_cache()->FindCallInitialize(
      target->arguments_count(),
      target->ic_in_loop(),
      mode,
      target->kind());
  SetTargetAtAddress(address, code);
}
// Resets a keyed load IC to its uninitialized stub.
void KeyedLoadIC::Clear(Address address, Code* target) {
  if (target->ic_state() != UNINITIALIZED) {
    // Resetting also drops the maps used in inline fast cases; leaving
    // them in place would let cached code keep objects alive through
    // the embedded maps.
    SetTargetAtAddress(address, initialize_stub());
  }
}
// Resets a load IC to its uninitialized stub.
void LoadIC::Clear(Address address, Code* target) {
  if (target->ic_state() != UNINITIALIZED) {
    SetTargetAtAddress(address, initialize_stub());
  }
}
// Resets a store IC to its uninitialized stub, preserving strict mode.
void StoreIC::Clear(Address address, Code* target) {
  if (target->ic_state() == UNINITIALIZED) return;
  Code* initial = (target->extra_ic_state() == kStrictMode)
      ? initialize_stub_strict()
      : initialize_stub();
  SetTargetAtAddress(address, initial);
}
// Resets a keyed store IC to its uninitialized stub, preserving strict mode.
void KeyedStoreIC::Clear(Address address, Code* target) {
  if (target->ic_state() == UNINITIALIZED) return;
  Code* initial = (target->extra_ic_state() == kStrictMode)
      ? initialize_stub_strict()
      : initialize_stub();
  SetTargetAtAddress(address, initial);
}
// Returns true if |object|'s named interceptor actually supplies a getter.
static bool HasInterceptorGetter(JSObject* object) {
  Object* getter = object->GetNamedInterceptor()->getter();
  return !getter->IsUndefined();
}
// Performs a property lookup for a read, transparently skipping objects
// whose named interceptor has no getter, so the IC can cache past them.
// The final result is left in |lookup| (possibly NotFound).
static void LookupForRead(Object* object,
                          String* name,
                          LookupResult* lookup) {
  AssertNoAllocation no_gc;  // pointers must stay valid
  // Skip all the objects with named interceptors, but
  // without actual getter.
  while (true) {
    object->Lookup(name, lookup);
    // Besides normal conditions (property not found or it's not
    // an interceptor), bail out if lookup is not cacheable: we won't
    // be able to IC it anyway and regular lookup should work fine.
    if (!lookup->IsFound()
        || (lookup->type() != INTERCEPTOR)
        || !lookup->IsCacheable()) {
      return;
    }
    JSObject* holder = lookup->holder();
    if (HasInterceptorGetter(holder)) {
      return;
    }
    // The interceptor has no getter: look for a real property directly on
    // the holder, then continue up the prototype chain.
    holder->LocalLookupRealNamedProperty(name, lookup);
    if (lookup->IsProperty()) {
      ASSERT(lookup->type() != INTERCEPTOR);
      return;
    }
    Object* proto = holder->GetPrototype();
    if (proto->IsNull()) {
      lookup->NotFound();
      return;
    }
    object = proto;
  }
}
// Attempts to treat a non-function |object| as callable by resolving its
// function delegate. If a JSFunction delegate exists, the receiver slot on
// the stack is patched to the original object. Returns the delegate in
// either case (callers re-check IsJSFunction on the result).
Object* CallICBase::TryCallAsFunction(Object* object) {
  HandleScope scope(isolate());
  Handle<Object> target(object, isolate());
  Handle<Object> delegate = Execution::GetFunctionDelegate(target);
  if (delegate->IsJSFunction()) {
    // Patch the receiver and use the delegate as the function to
    // invoke. This is used for invoking objects as if they were
    // functions.
    const int argc = this->target()->arguments_count();
    StackFrameLocator locator;
    JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
    int index = frame->ComputeExpressionsCount() - (argc + 1);
    frame->SetExpression(index, *target);
  }
  return *delegate;
}
// Replaces a primitive receiver (string/number/boolean) on the stack with
// its wrapper object, unless the callee is strict-mode or a builtin, which
// receive the primitive unchanged.
void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
                                            Handle<Object> object) {
  if (callee->IsJSFunction()) {
    Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
    if (function->shared()->strict_mode() || function->IsBuiltin()) {
      // Do not wrap receiver for strict mode functions or for builtins.
      return;
    }
  }
  // And only wrap string, number or boolean.
  if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
    // Change the receiver to the result of calling ToObject on it.
    const int argc = this->target()->arguments_count();
    StackFrameLocator locator;
    JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
    // The receiver sits argc+1 expression slots from the top of the frame.
    int index = frame->ComputeExpressionsCount() - (argc + 1);
    frame->SetExpression(index, *isolate()->factory()->ToObject(object));
  }
}
// Loads the callable for a property call: validates the receiver, looks up
// the named (or indexed) property, updates the IC/stub caches, wraps the
// receiver if the callee requires it, and falls back to function delegates.
// Returns the function to call, or a thrown-error failure.
MaybeObject* CallICBase::LoadFunction(State state,
                                      Code::ExtraICState extra_ic_state,
                                      Handle<Object> object,
                                      Handle<String> name) {
  // If the object is undefined or null it's illegal to try to get any
  // of its properties; throw a TypeError in that case.
  if (object->IsUndefined() || object->IsNull()) {
    return TypeError("non_object_property_call", object, name);
  }
  // Check if the name is trivially convertible to an index and get
  // the element if so.
  uint32_t index;
  if (name->AsArrayIndex(&index)) {
    Object* result;
    { MaybeObject* maybe_result = object->GetElement(index);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    if (result->IsJSFunction()) return result;
    // Try to find a suitable function delegate for the object at hand.
    result = TryCallAsFunction(result);
    if (result->IsJSFunction()) return result;
    // Otherwise, it will fail in the lookup step.
  }
  // Lookup the property in the object.
  LookupResult lookup;
  LookupForRead(*object, *name, &lookup);
  if (!lookup.IsProperty()) {
    // If the object does not have the requested property, check which
    // exception we need to throw.
    if (IsContextual(object)) {
      return ReferenceError("not_defined", name);
    }
    return TypeError("undefined_method", object, name);
  }
  // Lookup is valid: Update inline cache and stub cache.
  if (FLAG_use_ic) {
    UpdateCaches(&lookup, state, extra_ic_state, object, name);
  }
  // Get the property.
  PropertyAttributes attr;
  Object* result;
  { MaybeObject* maybe_result =
        object->GetProperty(*object, &lookup, *name, &attr);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  if (lookup.type() == INTERCEPTOR) {
    // If the object does not have the requested property, check which
    // exception we need to throw.
    if (attr == ABSENT) {
      if (IsContextual(object)) {
        return ReferenceError("not_defined", name);
      }
      return TypeError("undefined_method", object, name);
    }
  }
  ASSERT(!result->IsTheHole());
  HandleScope scope(isolate());
  // Wrap result in a handle because ReceiverToObjectIfRequired may allocate
  // new object and cause GC.
  Handle<Object> result_handle(result);
  // Make receiver an object if the callee requires it. Strict mode or builtin
  // functions do not wrap the receiver, non-strict functions and objects
  // called as functions do.
  ReceiverToObjectIfRequired(result_handle, object);
  if (result_handle->IsJSFunction()) {
#ifdef ENABLE_DEBUGGER_SUPPORT
    // Handle stepping into a function if step into is active.
    Debug* debug = isolate()->debug();
    if (debug->StepInActive()) {
      // Protect the result in a handle as the debugger can allocate and might
      // cause GC.
      Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
      debug->HandleStepIn(function, object, fp(), false);
      return *function;
    }
#endif
    return *result_handle;
  }
  // Try to find a suitable function delegate for the object at hand.
  result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
  if (result_handle->IsJSFunction()) return *result_handle;
  return TypeError("property_not_function", object, name);
}
// For calls to certain builtin String functions (charCodeAt/charAt), checks
// the actual argument on the stack and records an out-of-bounds index in
// the extra IC state. Returns true only when the state was updated.
bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
                                       Handle<Object> object,
                                       Code::ExtraICState* extra_ic_state) {
  ASSERT(kind_ == Code::CALL_IC);
  if (lookup->type() != CONSTANT_FUNCTION) return false;
  JSFunction* function = lookup->GetConstantFunction();
  if (!function->shared()->HasBuiltinFunctionId()) return false;
  // Fetch the arguments passed to the called function.
  const int argc = target()->arguments_count();
  Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
  Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
  Arguments args(argc + 1,
                 &Memory::Object_at(fp +
                                    StandardFrameConstants::kCallerSPOffset +
                                    argc * kPointerSize));
  switch (function->shared()->builtin_function_id()) {
    case kStringCharCodeAt:
    case kStringCharAt:
      if (object->IsString()) {
        String* string = String::cast(*object);
        // Check there's the right string value or wrapper in the receiver slot.
        ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
        // If we're in the default (fastest) state and the index is
        // out of bounds, update the state to record this fact.
        if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
            argc >= 1 && args[1]->IsNumber()) {
          double index = DoubleToInteger(args.number_at(1));
          if (index < 0 || index >= string->length()) {
            *extra_ic_state =
                StringStubState::update(*extra_ic_state,
                                        STRING_INDEX_OUT_OF_BOUNDS);
            return true;
          }
        }
      }
      break;
    default:
      return false;
  }
  return false;
}
// Computes (or finds in the stub cache) the monomorphic call stub matching
// the lookup result's property type: field load, constant function, normal
// (dictionary) property, global cell, or interceptor. Returns NULL when no
// monomorphic stub is applicable.
MaybeObject* CallICBase::ComputeMonomorphicStub(
    LookupResult* lookup,
    State state,
    Code::ExtraICState extra_ic_state,
    Handle<Object> object,
    Handle<String> name) {
  int argc = target()->arguments_count();
  InLoopFlag in_loop = target()->ic_in_loop();
  MaybeObject* maybe_code = NULL;
  switch (lookup->type()) {
    case FIELD: {
      // Property is stored in an in-object/backing-store field.
      int index = lookup->GetFieldIndex();
      maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
                                                             in_loop,
                                                             kind_,
                                                             extra_ic_state,
                                                             *name,
                                                             *object,
                                                             lookup->holder(),
                                                             index);
      break;
    }
    case CONSTANT_FUNCTION: {
      // Get the constant function and compute the code stub for this
      // call; used for rewriting to monomorphic state and making sure
      // that the code stub is in the stub cache.
      JSFunction* function = lookup->GetConstantFunction();
      maybe_code =
          isolate()->stub_cache()->ComputeCallConstant(argc,
                                                       in_loop,
                                                       kind_,
                                                       extra_ic_state,
                                                       *name,
                                                       *object,
                                                       lookup->holder(),
                                                       function);
      break;
    }
    case NORMAL: {
      if (!object->IsJSObject()) return NULL;
      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
      if (lookup->holder()->IsGlobalObject()) {
        // Global properties live in property cells; the stub checks the
        // cell still holds a function.
        GlobalObject* global = GlobalObject::cast(lookup->holder());
        JSGlobalPropertyCell* cell =
            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
        if (!cell->value()->IsJSFunction()) return NULL;
        JSFunction* function = JSFunction::cast(cell->value());
        maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
                                                                in_loop,
                                                                kind_,
                                                                extra_ic_state,
                                                                *name,
                                                                *receiver,
                                                                global,
                                                                cell,
                                                                function);
      } else {
        // There is only one shared stub for calling normalized
        // properties. It does not traverse the prototype chain, so the
        // property must be found in the receiver for the stub to be
        // applicable.
        if (lookup->holder() != *receiver) return NULL;
        maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
                                                                in_loop,
                                                                kind_,
                                                                extra_ic_state,
                                                                *name,
                                                                *receiver);
      }
      break;
    }
    case INTERCEPTOR: {
      ASSERT(HasInterceptorGetter(lookup->holder()));
      maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
          argc,
          kind_,
          extra_ic_state,
          *name,
          *object,
          lookup->holder());
      break;
    }
    default:
      // Other property types have no monomorphic call stub.
      maybe_code = NULL;
      break;
  }
  return maybe_code;
}
// Patches the call IC after a lookup: chooses the next stub based on
// the current |state| (UNINITIALIZED -> pre-monomorphic stub,
// MONOMORPHIC -> refined monomorphic or megamorphic stub, otherwise a
// monomorphic stub) and either rewrites the call site or, when already
// MEGAMORPHIC, inserts the stub into the stub cache.
void CallICBase::UpdateCaches(LookupResult* lookup,
                              State state,
                              Code::ExtraICState extra_ic_state,
                              Handle<Object> object,
                              Handle<String> name) {
  // Bail out if we didn't find a result.
  if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
  if (lookup->holder() != *object &&
      HasNormalObjectsInPrototypeChain(
          isolate(), lookup, object->GetPrototype())) {
    // Suppress optimization for prototype chains with slow properties objects
    // in the middle.
    return;
  }
  // Compute the number of arguments.
  int argc = target()->arguments_count();
  InLoopFlag in_loop = target()->ic_in_loop();
  MaybeObject* maybe_code = NULL;
  bool had_proto_failure = false;
  if (state == UNINITIALIZED) {
    // This is the first time we execute this inline cache.
    // Set the target to the pre monomorphic stub to delay
    // setting the monomorphic state.
    maybe_code =
        isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
                                                           in_loop,
                                                           kind_,
                                                           extra_ic_state);
  } else if (state == MONOMORPHIC) {
    if (kind_ == Code::CALL_IC &&
        TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
      // The extra IC state was refined; stay monomorphic with a stub
      // recomputed for the updated state.
      maybe_code = ComputeMonomorphicStub(lookup,
                                          state,
                                          extra_ic_state,
                                          object,
                                          name);
    } else if (kind_ == Code::CALL_IC &&
               TryRemoveInvalidPrototypeDependentStub(target(),
                                                      *object,
                                                      *name)) {
      // The previous stub depended on a prototype that is no longer
      // valid; recompute a monomorphic stub instead of going
      // megamorphic.
      had_proto_failure = true;
      maybe_code = ComputeMonomorphicStub(lookup,
                                          state,
                                          extra_ic_state,
                                          object,
                                          name);
    } else {
      // A different receiver shape was seen: transition to the
      // megamorphic stub.
      maybe_code =
          isolate()->stub_cache()->ComputeCallMegamorphic(argc,
                                                          in_loop,
                                                          kind_,
                                                          extra_ic_state);
    }
  } else {
    maybe_code = ComputeMonomorphicStub(lookup,
                                        state,
                                        extra_ic_state,
                                        object,
                                        name);
  }
  // If we're unable to compute the stub (not enough memory left), we
  // simply avoid updating the caches.
  Object* code;
  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
  // Patch the call site depending on the state of the cache.
  if (state == UNINITIALIZED ||
      state == PREMONOMORPHIC ||
      state == MONOMORPHIC ||
      state == MONOMORPHIC_PROTOTYPE_FAILURE) {
    set_target(Code::cast(code));
  } else if (state == MEGAMORPHIC) {
    // Cache code holding map should be consistent with
    // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
    Map* map = JSObject::cast(object->IsJSObject() ? *object :
                              object->GetPrototype())->map();
    // Update the stub cache.
    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
  }
  USE(had_proto_failure);
#ifdef DEBUG
  if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
  TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
          name, state, target(), in_loop ? " (in-loop)" : "");
#endif
}
// Resolves the callee for a keyed call site (receiver[key](...)).
// Symbol keys are delegated to the named CallICBase path.  Otherwise
// the IC target is tuned first (specialized arguments-object stub or
// the generic megamorphic stub), then the property is loaded and, if
// the result is not already a function, wrapped via TryCallAsFunction.
// Returns the callable, or throws a TypeError for null/undefined
// receivers and non-callable results.
MaybeObject* KeyedCallIC::LoadFunction(State state,
                                       Handle<Object> object,
                                       Handle<Object> key) {
  if (key->IsSymbol()) {
    return CallICBase::LoadFunction(state,
                                    Code::kNoExtraICState,
                                    object,
                                    Handle<String>::cast(key));
  }
  if (object->IsUndefined() || object->IsNull()) {
    return TypeError("non_object_property_call", object, key);
  }
  if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
    int argc = target()->arguments_count();
    InLoopFlag in_loop = target()->ic_in_loop();
    Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
    Map* map = heap->non_strict_arguments_elements_map();
    if (object->IsJSObject() &&
        Handle<JSObject>::cast(object)->elements()->map() == map) {
      // Receivers backed by non-strict arguments elements get a
      // specialized call stub.
      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
          argc, in_loop, Code::KEYED_CALL_IC);
      Object* code;
      // On allocation failure we simply skip the cache update.
      if (maybe_code->ToObject(&code)) {
        set_target(Code::cast(code));
#ifdef DEBUG
        TraceIC(
            "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
#endif
      }
    } else if (!object->IsAccessCheckNeeded()) {
      // FLAG_use_ic and state != MEGAMORPHIC are already guaranteed by
      // the enclosing condition; only the access-check test remains.
      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
          argc, in_loop, Code::KEYED_CALL_IC, Code::kNoExtraICState);
      Object* code;
      if (maybe_code->ToObject(&code)) {
        set_target(Code::cast(code));
#ifdef DEBUG
        TraceIC(
            "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
#endif
      }
    }
  }
  HandleScope scope(isolate());
  Handle<Object> result = GetProperty(object, key);
  RETURN_IF_EMPTY_HANDLE(isolate(), result);
  // Make receiver an object if the callee requires it. Strict mode or builtin
  // functions do not wrap the receiver, non-strict functions and objects
  // called as functions do.
  ReceiverToObjectIfRequired(result, object);
  if (result->IsJSFunction()) return *result;
  result = Handle<Object>(TryCallAsFunction(*result));
  if (result->IsJSFunction()) return *result;
  return TypeError("property_not_function", object, key);
}
// Debug-only tracing helper: prints |msg| (a printf-style format taking
// one string argument) with the flattened |name| when --trace-ic is
// set.  Expands to nothing in release builds.
#ifdef DEBUG
#define TRACE_IC_NAMED(msg, name) \
  if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
#else
#define TRACE_IC_NAMED(msg, name)
#endif
// Performs a named load through the load IC.  Special-cases the length
// of strings/string wrappers and arrays and the prototype of functions
// with dedicated builtins, handles names that are array indices, and
// otherwise does a full lookup, updating the IC caches before reading
// the property.
MaybeObject* LoadIC::Load(State state,
                          Handle<Object> object,
                          Handle<String> name) {
  // If the object is undefined or null it's illegal to try to get any
  // of its properties; throw a TypeError in that case.
  if (object->IsUndefined() || object->IsNull()) {
    return TypeError("non_object_property_load", object, name);
  }
  if (FLAG_use_ic) {
    // Use specialized code for getting the length of strings and
    // string wrapper objects. The length property of string wrapper
    // objects is read-only and therefore always returns the length of
    // the underlying string value. See ECMA-262 15.5.5.1.
    if ((object->IsString() || object->IsStringWrapper()) &&
        name->Equals(isolate()->heap()->length_symbol())) {
      AssertNoAllocation no_allocation;
      Code* stub = NULL;
      if (state == UNINITIALIZED) {
        stub = pre_monomorphic_stub();
      } else if (state == PREMONOMORPHIC) {
        // Pick the length builtin matching the receiver kind.
        if (object->IsString()) {
          stub = isolate()->builtins()->builtin(
              Builtins::kLoadIC_StringLength);
        } else {
          stub = isolate()->builtins()->builtin(
              Builtins::kLoadIC_StringWrapperLength);
        }
      } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
        stub = isolate()->builtins()->builtin(
            Builtins::kLoadIC_StringWrapperLength);
      } else if (state != MEGAMORPHIC) {
        stub = megamorphic_stub();
      }
      if (stub != NULL) {
        set_target(stub);
#ifdef DEBUG
        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
#endif
      }
      // Get the string if we have a string wrapper object.
      if (object->IsJSValue()) {
        return Smi::FromInt(
            String::cast(Handle<JSValue>::cast(object)->value())->length());
      }
      return Smi::FromInt(String::cast(*object)->length());
    }
    // Use specialized code for getting the length of arrays.
    if (object->IsJSArray() &&
        name->Equals(isolate()->heap()->length_symbol())) {
      AssertNoAllocation no_allocation;
      Code* stub = NULL;
      if (state == UNINITIALIZED) {
        stub = pre_monomorphic_stub();
      } else if (state == PREMONOMORPHIC) {
        stub = isolate()->builtins()->builtin(
            Builtins::kLoadIC_ArrayLength);
      } else if (state != MEGAMORPHIC) {
        stub = megamorphic_stub();
      }
      if (stub != NULL) {
        set_target(stub);
#ifdef DEBUG
        if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
      }
      return JSArray::cast(*object)->length();
    }
    // Use specialized code for getting prototype of functions.
    if (object->IsJSFunction() &&
        name->Equals(isolate()->heap()->prototype_symbol()) &&
        JSFunction::cast(*object)->should_have_prototype()) {
      { AssertNoAllocation no_allocation;
        Code* stub = NULL;
        if (state == UNINITIALIZED) {
          stub = pre_monomorphic_stub();
        } else if (state == PREMONOMORPHIC) {
          stub = isolate()->builtins()->builtin(
              Builtins::kLoadIC_FunctionPrototype);
        } else if (state != MEGAMORPHIC) {
          stub = megamorphic_stub();
        }
        if (stub != NULL) {
          set_target(stub);
#ifdef DEBUG
          if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
        }
      }
      return Accessors::FunctionGetPrototype(*object, 0);
    }
  }
  // Check if the name is trivially convertible to an index and get
  // the element if so.
  uint32_t index;
  if (name->AsArrayIndex(&index)) return object->GetElement(index);
  // Named lookup in the object.
  LookupResult lookup;
  LookupForRead(*object, *name, &lookup);
  // If we did not find a property, check if we need to throw an exception.
  if (!lookup.IsProperty()) {
    if (IsContextual(object)) {
      return ReferenceError("not_defined", name);
    }
    LOG(isolate(), SuspectReadEvent(*name, *object));
  }
  // Update inline cache and stub cache.
  if (FLAG_use_ic) {
    UpdateCaches(&lookup, state, object, name);
  }
  PropertyAttributes attr;
  if (lookup.IsProperty() &&
      (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
    // Get the property.
    Object* result;
    { MaybeObject* maybe_result =
          object->GetProperty(*object, &lookup, *name, &attr);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    // If the property is not present, check if we need to throw an
    // exception.
    if (attr == ABSENT && IsContextual(object)) {
      return ReferenceError("not_defined", name);
    }
    return result;
  }
  // Get the property.
  return object->GetProperty(*object, &lookup, *name, &attr);
}
// Patches the load IC after a lookup: computes the appropriate stub for
// the lookup result (field, constant function, normal/global, callback,
// interceptor, or nonexistent property) and rewrites the call site or,
// when already MEGAMORPHIC, fills the stub cache.
void LoadIC::UpdateCaches(LookupResult* lookup,
                          State state,
                          Handle<Object> object,
                          Handle<String> name) {
  // Bail out if the result is not cacheable.
  if (!lookup->IsCacheable()) return;
  // Loading properties from values is not common, so don't try to
  // deal with non-JS objects here.
  if (!object->IsJSObject()) return;
  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
  if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
  // Compute the code stub for this load.
  MaybeObject* maybe_code = NULL;
  Object* code;
  if (state == UNINITIALIZED) {
    // This is the first time we execute this inline cache.
    // Set the target to the pre monomorphic stub to delay
    // setting the monomorphic state.
    maybe_code = pre_monomorphic_stub();
  } else if (!lookup->IsProperty()) {
    // Nonexistent property. The result is undefined.
    maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
                                                                 *receiver);
  } else {
    // Compute monomorphic stub.
    switch (lookup->type()) {
      case FIELD: {
        maybe_code = isolate()->stub_cache()->ComputeLoadField(
            *name,
            *receiver,
            lookup->holder(),
            lookup->GetFieldIndex());
        break;
      }
      case CONSTANT_FUNCTION: {
        Object* constant = lookup->GetConstantFunction();
        maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
            *name, *receiver, lookup->holder(), constant);
        break;
      }
      case NORMAL: {
        if (lookup->holder()->IsGlobalObject()) {
          // Global properties are read through their property cell.
          GlobalObject* global = GlobalObject::cast(lookup->holder());
          JSGlobalPropertyCell* cell =
              JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
          maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
                                                                  *receiver,
                                                                  global,
                                                                  cell,
                                                                  lookup->IsDontDelete());
        } else {
          // There is only one shared stub for loading normalized
          // properties. It does not traverse the prototype chain, so the
          // property must be found in the receiver for the stub to be
          // applicable.
          if (lookup->holder() != *receiver) return;
          maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
        }
        break;
      }
      case CALLBACKS: {
        // Only AccessorInfo callbacks with a native getter get a stub.
        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
        AccessorInfo* callback =
            AccessorInfo::cast(lookup->GetCallbackObject());
        if (v8::ToCData<Address>(callback->getter()) == 0) return;
        maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
            *name, *receiver, lookup->holder(), callback);
        break;
      }
      case INTERCEPTOR: {
        ASSERT(HasInterceptorGetter(lookup->holder()));
        maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
            *name, *receiver, lookup->holder());
        break;
      }
      default:
        return;
    }
  }
  // If we're unable to compute the stub (not enough memory left), we
  // simply avoid updating the caches.
  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
  // Patch the call site depending on the state of the cache.
  if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
      state == MONOMORPHIC_PROTOTYPE_FAILURE) {
    set_target(Code::cast(code));
  } else if (state == MONOMORPHIC) {
    // A second shape was seen; go megamorphic.
    set_target(megamorphic_stub());
  } else if (state == MEGAMORPHIC) {
    // Cache code holding map should be consistent with
    // GenerateMonomorphicCacheProbe.
    Map* map = JSObject::cast(object->IsJSObject() ? *object :
                              object->GetPrototype())->map();
    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
  }
#ifdef DEBUG
  TraceIC("LoadIC", name, state, target());
#endif
}
// Returns (compiling on demand) the element-load stub for the given
// elements kind; any required map check is the caller's responsibility.
MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
    bool is_js_array,
    JSObject::ElementsKind elements_kind) {
  KeyedLoadElementStub element_stub(elements_kind);
  return element_stub.TryGetCode();
}
// Compiles a megamorphic keyed-load stub dispatching over
// |receiver_maps| to the matching entries in |targets|, bumps the
// polymorphic-stub counter and emits a profiler code-creation event.
MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
    MapList* receiver_maps,
    CodeList* targets,
    StrictModeFlag strict_mode) {
  KeyedLoadStubCompiler compiler;
  MaybeObject* maybe_code =
      compiler.CompileLoadMegamorphic(receiver_maps, targets);
  Object* compiled;
  if (!maybe_code->ToObject(&compiled)) return maybe_code;
  isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
  PROFILE(isolate(),
          CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
                          Code::cast(compiled), 0));
  return compiled;
}
// Performs a keyed load.  NaN and undefined keys are canonicalized to
// symbols; symbol keys take the named path (with its own length and
// prototype special cases), other keys tune the IC target (string,
// arguments, interceptor, or element stubs) and fall through to the
// generic runtime property read.
MaybeObject* KeyedLoadIC::Load(State state,
                               Handle<Object> object,
                               Handle<Object> key,
                               bool force_generic_stub) {
  // Check for values that can be converted into a symbol.
  // TODO(1295): Remove this code.
  HandleScope scope(isolate());
  if (key->IsHeapNumber() &&
      isnan(HeapNumber::cast(*key)->value())) {
    key = isolate()->factory()->nan_symbol();
  } else if (key->IsUndefined()) {
    key = isolate()->factory()->undefined_symbol();
  }
  if (key->IsSymbol()) {
    Handle<String> name = Handle<String>::cast(key);
    // If the object is undefined or null it's illegal to try to get any
    // of its properties; throw a TypeError in that case.
    if (object->IsUndefined() || object->IsNull()) {
      return TypeError("non_object_property_load", object, name);
    }
    if (FLAG_use_ic) {
      // TODO(1073): don't ignore the current stub state.
      // Use specialized code for getting the length of strings.
      if (object->IsString() &&
          name->Equals(isolate()->heap()->length_symbol())) {
        Handle<String> string = Handle<String>::cast(object);
        Object* code = NULL;
        { MaybeObject* maybe_code =
              isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
                                                                    *string);
          if (!maybe_code->ToObject(&code)) return maybe_code;
        }
        set_target(Code::cast(code));
#ifdef DEBUG
        TraceIC("KeyedLoadIC", name, state, target());
#endif  // DEBUG
        return Smi::FromInt(string->length());
      }
      // Use specialized code for getting the length of arrays.
      if (object->IsJSArray() &&
          name->Equals(isolate()->heap()->length_symbol())) {
        Handle<JSArray> array = Handle<JSArray>::cast(object);
        Object* code;
        { MaybeObject* maybe_code =
              isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
                                                                   *array);
          if (!maybe_code->ToObject(&code)) return maybe_code;
        }
        set_target(Code::cast(code));
#ifdef DEBUG
        TraceIC("KeyedLoadIC", name, state, target());
#endif  // DEBUG
        return JSArray::cast(*object)->length();
      }
      // Use specialized code for getting prototype of functions.
      if (object->IsJSFunction() &&
          name->Equals(isolate()->heap()->prototype_symbol()) &&
          JSFunction::cast(*object)->should_have_prototype()) {
        Handle<JSFunction> function = Handle<JSFunction>::cast(object);
        Object* code;
        { MaybeObject* maybe_code =
              isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
                  *name, *function);
          if (!maybe_code->ToObject(&code)) return maybe_code;
        }
        set_target(Code::cast(code));
#ifdef DEBUG
        TraceIC("KeyedLoadIC", name, state, target());
#endif  // DEBUG
        return Accessors::FunctionGetPrototype(*object, 0);
      }
    }
    // Check if the name is trivially convertible to an index and get
    // the element or char if so.
    uint32_t index = 0;
    if (name->AsArrayIndex(&index)) {
      HandleScope scope(isolate());
      // Rewrite to the generic keyed load stub.
      if (FLAG_use_ic) set_target(generic_stub());
      return Runtime::GetElementOrCharAt(isolate(), object, index);
    }
    // Named lookup.
    LookupResult lookup;
    LookupForRead(*object, *name, &lookup);
    // If we did not find a property, check if we need to throw an exception.
    if (!lookup.IsProperty() && IsContextual(object)) {
      return ReferenceError("not_defined", name);
    }
    if (FLAG_use_ic) {
      UpdateCaches(&lookup, state, object, name);
    }
    PropertyAttributes attr;
    if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
      // Get the property.
      Object* result;
      { MaybeObject* maybe_result =
            object->GetProperty(*object, &lookup, *name, &attr);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // If the property is not present, check if we need to throw an
      // exception.
      if (attr == ABSENT && IsContextual(object)) {
        return ReferenceError("not_defined", name);
      }
      return result;
    }
    return object->GetProperty(*object, &lookup, *name, &attr);
  }
  // Do not use ICs for objects that require access checks (including
  // the global object).
  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
  if (use_ic) {
    Code* stub = generic_stub();
    if (!force_generic_stub) {
      if (object->IsString() && key->IsNumber()) {
        if (state == UNINITIALIZED) {
          stub = string_stub();
        }
      } else if (object->IsJSObject()) {
        JSObject* receiver = JSObject::cast(*object);
        Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
        Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
        if (elements_map == heap->non_strict_arguments_elements_map()) {
          stub = non_strict_arguments_stub();
        } else if (receiver->HasIndexedInterceptor()) {
          stub = indexed_interceptor_stub();
        } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
          // Compute (or reuse) a monomorphic/megamorphic element stub;
          // NULL on failure leaves the target unchanged below.
          MaybeObject* maybe_stub = ComputeStub(receiver,
                                                false,
                                                kNonStrictMode,
                                                stub);
          stub = maybe_stub->IsFailure() ?
              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
        }
      }
    }
    if (stub != NULL) set_target(stub);
  }
#ifdef DEBUG
  TraceIC("KeyedLoadIC", key, state, target());
#endif  // DEBUG
  // Get the property.
  return Runtime::GetObjectProperty(isolate(), object, key);
}
// Patches the keyed-load IC for a named (symbol-keyed) lookup: computes
// a stub matching the lookup type and rewrites the call site, always
// moving from monomorphic to megamorphic on a second shape.
void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
                               Handle<Object> object, Handle<String> name) {
  // Bail out if we didn't find a result.
  if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
  if (!object->IsJSObject()) return;
  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
  if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
  // Compute the code stub for this load.
  MaybeObject* maybe_code = NULL;
  Object* code;
  if (state == UNINITIALIZED) {
    // This is the first time we execute this inline cache.
    // Set the target to the pre monomorphic stub to delay
    // setting the monomorphic state.
    maybe_code = pre_monomorphic_stub();
  } else {
    // Compute a monomorphic stub.
    switch (lookup->type()) {
      case FIELD: {
        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
            *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
        break;
      }
      case CONSTANT_FUNCTION: {
        Object* constant = lookup->GetConstantFunction();
        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
            *name, *receiver, lookup->holder(), constant);
        break;
      }
      case CALLBACKS: {
        // Only AccessorInfo callbacks with a native getter get a stub.
        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
        AccessorInfo* callback =
            AccessorInfo::cast(lookup->GetCallbackObject());
        if (v8::ToCData<Address>(callback->getter()) == 0) return;
        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
            *name, *receiver, lookup->holder(), callback);
        break;
      }
      case INTERCEPTOR: {
        ASSERT(HasInterceptorGetter(lookup->holder()));
        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
            *name, *receiver, lookup->holder());
        break;
      }
      default: {
        // Always rewrite to the generic case so that we do not
        // repeatedly try to rewrite.
        maybe_code = generic_stub();
        break;
      }
    }
  }
  // If we're unable to compute the stub (not enough memory left), we
  // simply avoid updating the caches.
  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
  // Patch the call site depending on the state of the cache. Make
  // sure to always rewrite from monomorphic to megamorphic.
  ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
  if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
    set_target(Code::cast(code));
  } else if (state == MONOMORPHIC) {
    set_target(megamorphic_stub());
  }
#ifdef DEBUG
  TraceIC("KeyedLoadIC", name, state, target());
#endif
}
// A lookup result can back a store IC only when it found a cacheable
// property (or map transition) that is not read-only.
static bool StoreICableLookup(LookupResult* lookup) {
  return lookup->IsPropertyOrTransition() &&
         lookup->IsCacheable() &&
         !lookup->IsReadOnly();
}
// Looks up |name| locally on |receiver| for a store.  For interceptor
// results without a setter, retries against the real named properties.
// Returns true when the final lookup is store-IC-able.
static bool LookupForWrite(JSReceiver* receiver,
                           String* name,
                           LookupResult* lookup) {
  receiver->LocalLookup(name, lookup);
  if (!StoreICableLookup(lookup)) return false;
  if (lookup->type() != INTERCEPTOR) return true;
  JSObject* object = JSObject::cast(receiver);
  if (!object->GetNamedInterceptor()->setter()->IsUndefined()) return true;
  object->LocalLookupRealNamedProperty(name, lookup);
  return StoreICableLookup(lookup);
}
// Performs a named store through the store IC.  Rejects null/undefined
// receivers, ignores stores to non-receivers (except strict-mode writes
// to string length), special-cases proxies, array-index names and the
// array length property, updates the IC caches, and finally performs
// the actual SetProperty.
MaybeObject* StoreIC::Store(State state,
                            StrictModeFlag strict_mode,
                            Handle<Object> object,
                            Handle<String> name,
                            Handle<Object> value) {
  // If the object is undefined or null it's illegal to try to set any
  // properties on it; throw a TypeError in that case.
  if (object->IsUndefined() || object->IsNull()) {
    return TypeError("non_object_property_store", object, name);
  }
  if (!object->IsJSReceiver()) {
    // The length property of string values is read-only. Throw in strict mode.
    if (strict_mode == kStrictMode && object->IsString() &&
        name->Equals(isolate()->heap()->length_symbol())) {
      return TypeError("strict_read_only_property", object, name);
    }
    // Ignore stores where the receiver is not a JSObject.
    return *value;
  }
  // Handle proxies.
  if (object->IsJSProxy()) {
    return JSReceiver::cast(*object)->
        SetProperty(*name, *value, NONE, strict_mode);
  }
  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
  // Check if the given name is an array index.
  uint32_t index;
  if (name->AsArrayIndex(&index)) {
    HandleScope scope(isolate());
    Handle<Object> result = SetElement(receiver, index, value, strict_mode);
    if (result.is_null()) return Failure::Exception();
    return *value;
  }
  // Use specialized code for setting the length of arrays.
  if (receiver->IsJSArray()
      && name->Equals(isolate()->heap()->length_symbol())
      && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
#ifdef DEBUG
    if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
    Builtins::Name target = (strict_mode == kStrictMode)
        ? Builtins::kStoreIC_ArrayLength_Strict
        : Builtins::kStoreIC_ArrayLength;
    set_target(isolate()->builtins()->builtin(target));
    return receiver->SetProperty(*name, *value, NONE, strict_mode);
  }
  // Lookup the property locally in the receiver.
  if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
    LookupResult lookup;
    if (LookupForWrite(*receiver, *name, &lookup)) {
      // Generate a stub for this store.
      UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
    } else {
      // Strict mode doesn't allow setting non-existent global property
      // or an assignment to a read only property.
      if (strict_mode == kStrictMode) {
        if (lookup.IsFound() && lookup.IsReadOnly()) {
          return TypeError("strict_read_only_property", object, name);
        } else if (IsContextual(object)) {
          return ReferenceError("not_defined", name);
        }
      }
    }
  }
  if (receiver->IsJSGlobalProxy()) {
    // Generate a generic stub that goes to the runtime when we see a global
    // proxy as receiver.
    Code* stub = (strict_mode == kStrictMode)
        ? global_proxy_stub_strict()
        : global_proxy_stub();
    if (target() != stub) {
      set_target(stub);
#ifdef DEBUG
      TraceIC("StoreIC", name, state, target());
#endif
    }
  }
  // Set the property.
  return receiver->SetProperty(*name, *value, NONE, strict_mode);
}
// Patches the store IC after a successful writable lookup: computes a
// stub for the property type (field, map transition, normal/global,
// callback, interceptor) and rewrites the call site or, when already
// MEGAMORPHIC, fills the stub cache.
void StoreIC::UpdateCaches(LookupResult* lookup,
                           State state,
                           StrictModeFlag strict_mode,
                           Handle<JSObject> receiver,
                           Handle<String> name,
                           Handle<Object> value) {
  // Skip JSGlobalProxy.
  ASSERT(!receiver->IsJSGlobalProxy());
  ASSERT(StoreICableLookup(lookup));
  // If the property has a non-field type allowing map transitions
  // where there is extra room in the object, we leave the IC in its
  // current state.
  PropertyType type = lookup->type();
  // Compute the code stub for this store; used for rewriting to
  // monomorphic state and making sure that the code stub is in the
  // stub cache.
  MaybeObject* maybe_code = NULL;
  Object* code = NULL;
  switch (type) {
    case FIELD: {
      maybe_code = isolate()->stub_cache()->ComputeStoreField(
          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
      break;
    }
    case MAP_TRANSITION: {
      // Only transitions that add a NONE-attribute property are cached.
      if (lookup->GetAttributes() != NONE) return;
      HandleScope scope(isolate());
      ASSERT(type == MAP_TRANSITION);
      Handle<Map> transition(lookup->GetTransitionMap());
      int index = transition->PropertyIndexFor(*name);
      maybe_code = isolate()->stub_cache()->ComputeStoreField(
          *name, *receiver, index, *transition, strict_mode);
      break;
    }
    case NORMAL: {
      if (receiver->IsGlobalObject()) {
        // The stub generated for the global object picks the value directly
        // from the property cell. So the property must be directly on the
        // global object.
        Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
        JSGlobalPropertyCell* cell =
            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
        maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
            *name, *global, cell, strict_mode);
      } else {
        // The shared normal-store stub requires the property on the
        // receiver itself.
        if (lookup->holder() != *receiver) return;
        maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
      }
      break;
    }
    case CALLBACKS: {
      // Only AccessorInfo callbacks with a native setter get a stub.
      if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
      if (v8::ToCData<Address>(callback->setter()) == 0) return;
      maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
          *name, *receiver, callback, strict_mode);
      break;
    }
    case INTERCEPTOR: {
      ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
      maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
          *name, *receiver, strict_mode);
      break;
    }
    default:
      return;
  }
  // If we're unable to compute the stub (not enough memory left), we
  // simply avoid updating the caches.
  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
  // Patch the call site depending on the state of the cache.
  if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
    set_target(Code::cast(code));
  } else if (state == MONOMORPHIC) {
    // Only move to megamorphic if the target changes.
    if (target() != Code::cast(code)) {
      set_target((strict_mode == kStrictMode)
                 ? megamorphic_stub_strict()
                 : megamorphic_stub());
    }
  } else if (state == MEGAMORPHIC) {
    // Update the stub cache.
    isolate()->stub_cache()->Set(*name,
                                 receiver->map(),
                                 Code::cast(code));
  }
#ifdef DEBUG
  TraceIC("StoreIC", name, state, target());
#endif
}
// Appends |new_receiver_map| to |receiver_maps| unless it is already
// present.  Returns true when the map was actually added.
static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
                                       Map* new_receiver_map) {
  const int count = receiver_maps->length();
  for (int i = 0; i < count; ++i) {
    if (receiver_maps->at(i) == new_receiver_map) return false;
  }
  receiver_maps->Add(new_receiver_map);
  return true;
}
// Collects into |result| the receiver maps that |stub| was built for:
// the string map for the string stub, the single embedded map for a
// MONOMORPHIC keyed stub, or every map embedded in the relocation info
// of a MEGAMORPHIC keyed stub.
void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
  ASSERT(stub->is_inline_cache_stub());
  if (stub == string_stub()) {
    result->Add(isolate()->heap()->string_map());
    return;
  }
  if (!stub->is_keyed_load_stub() && !stub->is_keyed_store_stub()) return;
  if (stub->ic_state() == MONOMORPHIC) {
    result->Add(Map::cast(stub->FindFirstMap()));
    return;
  }
  ASSERT(stub->ic_state() == MEGAMORPHIC);
  AssertNoAllocation no_allocation;
  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(stub, mask); !it.done(); it.next()) {
    Object* embedded = it.rinfo()->target_object();
    ASSERT(embedded->IsMap());
    result->Add(Map::cast(embedded));
  }
}
// Picks the next stub for a keyed element access on |receiver|.
// Uninitialized/premonomorphic call sites get a monomorphic stub;
// otherwise the set of receiver maps seen so far is extended with the
// current map and a MEGAMORPHIC dispatch stub is built (and cached in
// the polymorphic code cache), falling back to |generic_stub| when the
// miss was not map-related or too many maps have been seen.
MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
                                  bool is_store,
                                  StrictModeFlag strict_mode,
                                  Code* generic_stub) {
  State ic_state = target()->ic_state();
  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
    Code* monomorphic_stub;
    MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
                                                     is_store,
                                                     strict_mode,
                                                     generic_stub);
    if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
    return monomorphic_stub;
  }
  ASSERT(target() != generic_stub);
  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
  // via megamorphic stubs, since they don't have a map in their relocation info
  // and so the stubs can't be harvested for the object needed for a map check.
  if (target()->type() != NORMAL) {
    return generic_stub;
  }
  // Determine the list of receiver maps that this call site has seen,
  // adding the map that was just encountered.
  MapList target_receiver_maps;
  GetReceiverMapsForStub(target(), &target_receiver_maps);
  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
    // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
    // won't help, use the generic stub.
    return generic_stub;
  }
  // If the maximum number of receiver maps has been exceeded, use the generic
  // version of the IC.
  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
    return generic_stub;
  }
  PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
  Code::Flags flags = Code::ComputeFlags(this->kind(),
                                         NOT_IN_LOOP,
                                         MEGAMORPHIC,
                                         strict_mode);
  Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
  // If there is a cached stub, use it.
  if (!maybe_cached_stub->IsUndefined()) {
    ASSERT(maybe_cached_stub->IsCode());
    return Code::cast(maybe_cached_stub);
  }
  // Collect MONOMORPHIC stubs for all target_receiver_maps.
  CodeList handler_ics(target_receiver_maps.length());
  for (int i = 0; i < target_receiver_maps.length(); ++i) {
    Map* receiver_map(target_receiver_maps.at(i));
    MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
        receiver_map, strict_mode);
    Code* cached_stub;
    if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
    handler_ics.Add(cached_stub);
  }
  // Build the MEGAMORPHIC stub.
  Code* stub;
  MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
                                                     &handler_ics,
                                                     strict_mode);
  if (!maybe_stub->To(&stub)) return maybe_stub;
  MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
  if (maybe_update->IsFailure()) return maybe_update;
  return stub;
}
// Returns the per-map element handler: the shared string stub for
// string receivers, otherwise the element stub for the map's elements
// kind.  The caller is responsible for the map check.
MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
    Map* receiver_map,
    StrictModeFlag strict_mode) {
  const bool is_string = (receiver_map->instance_type() & kNotStringTag) == 0;
  if (is_string) {
    ASSERT(string_stub() != NULL);
    return string_stub();
  }
  ASSERT(receiver_map->has_dictionary_elements() ||
         receiver_map->has_fast_elements() ||
         receiver_map->has_fast_double_elements() ||
         receiver_map->has_external_array_elements());
  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
  return GetElementStubWithoutMapCheck(is_js_array,
                                       receiver_map->elements_kind());
}
// Computes a monomorphic keyed load/store element stub for |receiver|
// when its elements kind is supported; otherwise returns
// |generic_stub| unchanged.
MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
                                             bool is_store,
                                             StrictModeFlag strict_mode,
                                             Code* generic_stub) {
  bool supported_elements = receiver->HasFastElements() ||
                            receiver->HasExternalArrayElements() ||
                            receiver->HasFastDoubleElements() ||
                            receiver->HasDictionaryElements();
  if (!supported_elements) return generic_stub;
  Code* result = NULL;
  MaybeObject* maybe_stub =
      isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
          receiver, is_store, strict_mode);
  if (!maybe_stub->To(&result)) return maybe_stub;
  return result;
}
// Returns (compiling on demand) the element-store stub for the given
// array flag and elements kind; any map check is the caller's job.
MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
    bool is_js_array,
    JSObject::ElementsKind elements_kind) {
  KeyedStoreElementStub element_stub(is_js_array, elements_kind);
  return element_stub.TryGetCode();
}
// Compiles a megamorphic keyed-store stub dispatching over
// |receiver_maps| to the matching entries in |targets|, bumps the
// polymorphic-stub counter and emits a profiler code-creation event.
MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
    MapList* receiver_maps,
    CodeList* targets,
    StrictModeFlag strict_mode) {
  KeyedStoreStubCompiler compiler(strict_mode);
  MaybeObject* maybe_code =
      compiler.CompileStoreMegamorphic(receiver_maps, targets);
  Object* compiled;
  if (!maybe_code->ToObject(&compiled)) return maybe_code;
  isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
  PROFILE(isolate(),
          CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
                          Code::cast(compiled), 0));
  return compiled;
}
// Entry point for a keyed store (obj[key] = value) that missed in the IC.
// Performs the store and, when ICs are enabled, picks a better stub for the
// call site: a named-property path for symbol keys, an elements stub for
// Smi keys, or the generic stub otherwise.  |force_generic| suppresses
// specialization.  Returns the stored value or a Failure on exception/OOM.
MaybeObject* KeyedStoreIC::Store(State state,
                                 StrictModeFlag strict_mode,
                                 Handle<Object> object,
                                 Handle<Object> key,
                                 Handle<Object> value,
                                 bool force_generic) {
  if (key->IsSymbol()) {
    Handle<String> name = Handle<String>::cast(key);
    // If the object is undefined or null it's illegal to try to set any
    // properties on it; throw a TypeError in that case.
    if (object->IsUndefined() || object->IsNull()) {
      return TypeError("non_object_property_store", object, name);
    }
    // Ignore stores where the receiver is not a JSObject.
    if (!object->IsJSObject()) return *value;
    Handle<JSObject> receiver = Handle<JSObject>::cast(object);
    // Check if the given name is an array index.
    uint32_t index;
    if (name->AsArrayIndex(&index)) {
      HandleScope scope(isolate());
      Handle<Object> result = SetElement(receiver, index, value, strict_mode);
      if (result.is_null()) return Failure::Exception();
      return *value;
    }
    // Lookup the property locally in the receiver.
    LookupResult lookup;
    receiver->LocalLookup(*name, &lookup);
    // Update inline cache and stub cache.
    if (FLAG_use_ic) {
      UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
    }
    // Set the property.
    return receiver->SetProperty(*name, *value, NONE, strict_mode);
  }
  // Do not use ICs for objects that require access checks (including
  // the global object).
  bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
  ASSERT(!(use_ic && object->IsJSGlobalProxy()));
  if (use_ic) {
    Code* stub = (strict_mode == kStrictMode)
        ? generic_stub_strict()
        : generic_stub();
    if (object->IsJSObject()) {
      JSObject* receiver = JSObject::cast(*object);
      Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
      Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
      // Mapped (non-strict) arguments objects need their dedicated stub.
      if (elements_map == heap->non_strict_arguments_elements_map()) {
        stub = non_strict_arguments_stub();
      } else if (!force_generic) {
        if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
          HandleScope scope(isolate());
          MaybeObject* maybe_stub = ComputeStub(receiver,
                                                true,
                                                strict_mode,
                                                stub);
          // On allocation failure keep the current target (stub == NULL
          // means "do not retarget" below).
          stub = maybe_stub->IsFailure() ?
              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
        }
      }
    }
    if (stub != NULL) set_target(stub);
  }
#ifdef DEBUG
  TraceIC("KeyedStoreIC", key, state, target());
#endif
  // Set the property.
  return Runtime::SetObjectProperty(
      isolate(), object , key, value, NONE, strict_mode);
}
// Rewrites the keyed-store call site for a store to a named (symbol)
// property: compiles/fetches a field- or transition-specific stub, fills
// the stub cache, and advances the IC state machine
// (UNINITIALIZED/PREMONOMORPHIC -> monomorphic, MONOMORPHIC -> megamorphic).
// Silently does nothing when the lookup result is not cacheable.
void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
                                State state,
                                StrictModeFlag strict_mode,
                                Handle<JSObject> receiver,
                                Handle<String> name,
                                Handle<Object> value) {
  // Skip JSGlobalProxy.
  if (receiver->IsJSGlobalProxy()) return;
  // Bail out if we didn't find a result.
  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
  // If the property is read-only, we leave the IC in its current
  // state.
  if (lookup->IsReadOnly()) return;
  // If the property has a non-field type allowing map transitions
  // where there is extra room in the object, we leave the IC in its
  // current state.
  PropertyType type = lookup->type();
  // Compute the code stub for this store; used for rewriting to
  // monomorphic state and making sure that the code stub is in the
  // stub cache.
  MaybeObject* maybe_code = NULL;
  Object* code = NULL;
  switch (type) {
    case FIELD: {
      maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
      break;
    }
    case MAP_TRANSITION: {
      // Only plain (attribute-free) transitions get a specialized stub;
      // anything else falls through to the generic case.
      if (lookup->GetAttributes() == NONE) {
        HandleScope scope(isolate());
        ASSERT(type == MAP_TRANSITION);
        Handle<Map> transition(lookup->GetTransitionMap());
        int index = transition->PropertyIndexFor(*name);
        maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
            *name, *receiver, index, *transition, strict_mode);
        break;
      }
      // fall through.
    }
    default: {
      // Always rewrite to the generic case so that we do not
      // repeatedly try to rewrite.
      maybe_code = (strict_mode == kStrictMode)
          ? generic_stub_strict()
          : generic_stub();
      break;
    }
  }
  // If we're unable to compute the stub (not enough memory left), we
  // simply avoid updating the caches.
  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
  // Patch the call site depending on the state of the cache. Make
  // sure to always rewrite from monomorphic to megamorphic.
  ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
  if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
    set_target(Code::cast(code));
  } else if (state == MONOMORPHIC) {
    set_target((strict_mode == kStrictMode)
               ? megamorphic_stub_strict()
               : megamorphic_stub());
  }
#ifdef DEBUG
  TraceIC("KeyedStoreIC", name, state, target());
#endif
}
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
// Eagerly compiles |function| (full compile when the call site is inside a
// loop, lazy otherwise), clearing any pending exception from compilation.
// Returns the (possibly relocated) function; the HandleScope protects it
// across a potential GC during compilation.
static JSFunction* CompileFunction(Isolate* isolate,
                                   JSFunction* function,
                                   InLoopFlag in_loop) {
  // Compile now with optimization.
  HandleScope scope(isolate);
  Handle<JSFunction> function_handle(function, isolate);
  if (in_loop == IN_LOOP) {
    CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
  } else {
    CompileLazy(function_handle, CLEAR_EXCEPTION);
  }
  return *function_handle;
}
// Used from ic-<arch>.cc.
// Runtime entry for a CallIC miss: args are (receiver, name).  Loads the
// callee, updates the IC, and may eagerly compile the callee (see comment
// below).  Raw pointers only — note the deliberate NoHandleAllocation.
RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 2);
  CallIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  MaybeObject* maybe_result = ic.LoadFunction(state,
                                              extra_ic_state,
                                              args.at<Object>(0),
                                              args.at<String>(1));
  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;
  // The first time the inline cache is updated may be the first time the
  // function it references gets called. If the function was lazily compiled
  // then the first call will trigger a compilation. We check for this case
  // and we do the compilation immediately, instead of waiting for the stub
  // currently attached to the JSFunction object to trigger compilation. We
  // do this in the case where we know that the inline cache is inside a loop,
  // because then we know that we want to optimize the function.
  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
    return result;
  }
  return CompileFunction(isolate,
                         JSFunction::cast(result),
                         ic.target()->ic_in_loop());
}
// Used from ic-<arch>.cc.
// Runtime entry for a KeyedCallIC miss: args are (receiver, key).  Mirrors
// CallIC_Miss, including the eager-compilation fast path for callees that
// are not yet compiled.
RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 2);
  KeyedCallIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Object* result;
  { MaybeObject* maybe_result =
      ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Eagerly compile an uncompiled callee; see CallIC_Miss for rationale.
  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
    return result;
  }
  return CompileFunction(isolate,
                         JSFunction::cast(result),
                         ic.target()->ic_in_loop());
}
// Used from ic-<arch>.cc.
// Runtime entry for a LoadIC miss: args are (receiver, name).  Delegates
// the load and IC update to LoadIC::Load.
RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 2);
  LoadIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  return ic.Load(state, args.at<Object>(0), args.at<String>(1));
}
// Used from ic-<arch>.cc
// Runtime entry for a KeyedLoadIC miss: args are (receiver, key).  The
// final 'false' allows the IC to specialize (not forced generic).
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 2);
  KeyedLoadIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false);
}
// Like KeyedLoadIC_Miss, but the final 'true' forces the generic stub so
// the call site stops attempting further specialization.
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
  NoHandleAllocation na;
  ASSERT(args.length() == 2);
  KeyedLoadIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true);
}
// Used from ic-<arch>.cc.
// Runtime entry for a StoreIC miss: args are (receiver, name, value).
// The strict-mode bit is recovered from the stub's extra IC state.
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 3);
  StoreIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  return ic.Store(state,
                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                  args.at<Object>(0),
                  args.at<String>(1),
                  args.at<Object>(2));
}
// Runtime helper for the fast 'array.length = n' store path: args are
// (receiver, new length).  Returns the new length on success so the store
// expression evaluates to it.
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
  NoHandleAllocation nha;
  ASSERT(args.length() == 2);
  JSObject* receiver = JSObject::cast(args[0]);
  Object* len = args[1];
  // The generated code should filter out non-Smis before we get here.
  ASSERT(len->IsSmi());
  Object* result;
  { MaybeObject* maybe_result = receiver->SetElementsLength(len);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return len;
}
// Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a
// JSObject.
// Grows a JSObject's out-of-object properties backing store by one slot
// (plus the transition map's unused slack), stores |value| in the new slot,
// and installs the transition map.  Args are (object, transition, value).
RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
  NoHandleAllocation na;
  ASSERT(args.length() == 3);
  // Convert the parameters
  JSObject* object = JSObject::cast(args[0]);
  Map* transition = Map::cast(args[1]);
  Object* value = args[2];
  // Check the object has run out of property space.
  ASSERT(object->HasFastProperties());
  ASSERT(object->map()->unused_property_fields() == 0);
  // Expand the properties array.
  FixedArray* old_storage = object->properties();
  int new_unused = transition->unused_property_fields();
  int new_size = old_storage->length() + new_unused + 1;
  Object* result;
  { MaybeObject* maybe_result = old_storage->CopySize(new_size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  FixedArray* new_storage = FixedArray::cast(result);
  // The stored value goes in the first freshly-added slot.
  new_storage->set(old_storage->length(), value);
  // Set the new property value and do the map transition.
  object->set_properties(new_storage);
  object->set_map(transition);
  // Return the stored value.
  return value;
}
// Used from ic-<arch>.cc.
// Runtime entry for a KeyedStoreIC miss: args are (receiver, key, value).
// The final 'false' lets the IC attempt to specialize the call site.
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 3);
  KeyedStoreIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  return ic.Store(state,
                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                  args.at<Object>(0),
                  args.at<Object>(1),
                  args.at<Object>(2),
                  false);
}
// Slow-path keyed store that bypasses IC retargeting entirely: args are
// (receiver, key, value).  Used by stubs that only need the store done.
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
  NoHandleAllocation na;
  ASSERT(args.length() == 3);
  KeyedStoreIC ic(isolate);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  Handle<Object> object = args.at<Object>(0);
  Handle<Object> key = args.at<Object>(1);
  Handle<Object> value = args.at<Object>(2);
  StrictModeFlag strict_mode =
      static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
  return Runtime::SetObjectProperty(isolate,
                                    object,
                                    key,
                                    value,
                                    NONE,
                                    strict_mode);
}
// Like KeyedStoreIC_Miss, but the final 'true' forces the generic stub so
// the call site stops attempting further specialization.
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
  NoHandleAllocation na;
  ASSERT(args.length() == 3);
  KeyedStoreIC ic(isolate);
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  return ic.Store(state,
                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                  args.at<Object>(0),
                  args.at<Object>(1),
                  args.at<Object>(2),
                  true);
}
// Retargets the unary-op IC call site to the given stub.
void UnaryOpIC::patch(Code* code) {
  set_target(code);
}
// Human-readable name of a unary-op type record; used by --trace-ic output.
const char* UnaryOpIC::GetName(TypeInfo type_info) {
  if (type_info == UNINITIALIZED) return "Uninitialized";
  if (type_info == SMI) return "Smi";
  if (type_info == HEAP_NUMBER) return "HeapNumbers";
  if (type_info == GENERIC) return "Generic";
  return "Invalid";
}
// Maps a unary-op type record onto the generic IC state lattice:
// SMI and HEAP_NUMBER both count as monomorphic; GENERIC is megamorphic.
UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
  if (type_info == UNINITIALIZED) return ::v8::internal::UNINITIALIZED;
  if (type_info == SMI || type_info == HEAP_NUMBER) return MONOMORPHIC;
  if (type_info == GENERIC) return MEGAMORPHIC;
  // All enumerators are handled above; anything else is a corrupt value.
  UNREACHABLE();
  return ::v8::internal::UNINITIALIZED;
}
// Classifies |operand| for unary-op stub selection: Smi, other number
// (HEAP_NUMBER), or anything else (GENERIC).
UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
  ::v8::internal::TypeInfo operand_type =
      ::v8::internal::TypeInfo::TypeFromValue(operand);
  if (operand_type.IsSmi()) {
    return SMI;
  } else if (operand_type.IsNumber()) {
    return HEAP_NUMBER;
  } else {
    return GENERIC;
  }
}
// Given the previously recorded operand type and the type observed on this
// miss, pick the next state: first observation adopts the current type; a
// SMI site widens to HEAP_NUMBER (or jumps straight to GENERIC); a
// HEAP_NUMBER site always widens to GENERIC.  A GENERIC site must never
// miss again, so landing here from GENERIC is a bug.
UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
    UnaryOpIC::TypeInfo current_type,
    UnaryOpIC::TypeInfo previous_type) {
  if (previous_type == UnaryOpIC::UNINITIALIZED) {
    return current_type;
  }
  if (previous_type == UnaryOpIC::SMI) {
    return (current_type == UnaryOpIC::GENERIC)
        ? UnaryOpIC::GENERIC
        : UnaryOpIC::HEAP_NUMBER;
  }
  if (previous_type == UnaryOpIC::HEAP_NUMBER) {
    return UnaryOpIC::GENERIC;
  }
  if (previous_type == UnaryOpIC::GENERIC) {
    // We should never do patching if we are in GENERIC state.
    UNREACHABLE();
    return UnaryOpIC::GENERIC;
  }
  UNREACHABLE();
  return UnaryOpIC::GENERIC;
}
// Retargets the binary-op IC call site to the given stub.
void BinaryOpIC::patch(Code* code) {
  set_target(code);
}
// Human-readable name of a binary-op type record; used by --trace-ic output.
const char* BinaryOpIC::GetName(TypeInfo type_info) {
  if (type_info == UNINITIALIZED) return "Uninitialized";
  if (type_info == SMI) return "SMI";
  if (type_info == INT32) return "Int32s";
  if (type_info == HEAP_NUMBER) return "HeapNumbers";
  if (type_info == ODDBALL) return "Oddball";
  if (type_info == BOTH_STRING) return "BothStrings";
  if (type_info == STRING) return "Strings";
  if (type_info == GENERIC) return "Generic";
  return "Invalid";
}
// Maps a binary-op type record onto the generic IC state lattice: every
// specialized type counts as monomorphic; only GENERIC is megamorphic.
BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
  if (type_info == UNINITIALIZED) return ::v8::internal::UNINITIALIZED;
  if (type_info == GENERIC) return MEGAMORPHIC;
  if (type_info == SMI ||
      type_info == INT32 ||
      type_info == HEAP_NUMBER ||
      type_info == ODDBALL ||
      type_info == BOTH_STRING ||
      type_info == STRING) {
    return MONOMORPHIC;
  }
  // All enumerators are handled above; anything else is a corrupt value.
  UNREACHABLE();
  return ::v8::internal::UNINITIALIZED;
}
// Symmetric join of two type records.  UNINITIALIZED is the identity;
// equal inputs are fixed points; the two string kinds meet at STRING; a
// string kind mixed with anything else is GENERIC; otherwise the larger
// (more general) enumerator wins.
BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
                                           BinaryOpIC::TypeInfo y) {
  if (x == UNINITIALIZED) return y;
  if (y == UNINITIALIZED) return x;
  if (x == y) return x;
  bool x_is_stringish = (x == STRING || x == BOTH_STRING);
  bool y_is_stringish = (y == STRING || y == BOTH_STRING);
  if (x_is_stringish && y_is_stringish) {
    // Distinct string kinds (STRING vs. BOTH_STRING) meet at STRING.
    return STRING;
  }
  if (x_is_stringish || y_is_stringish) {
    // A string kind mixed with a non-string kind generalizes fully.
    return GENERIC;
  }
  return (x > y) ? x : y;
}
// Classifies an operand pair for binary-op stub selection, from most to
// least specific: SMI, INT32, HEAP_NUMBER, string kinds, ODDBALL, GENERIC.
BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
                                             Handle<Object> right) {
  ::v8::internal::TypeInfo left_type =
      ::v8::internal::TypeInfo::TypeFromValue(left);
  ::v8::internal::TypeInfo right_type =
      ::v8::internal::TypeInfo::TypeFromValue(right);
  if (left_type.IsSmi() && right_type.IsSmi()) {
    return SMI;
  }
  if (left_type.IsInteger32() && right_type.IsInteger32()) {
    // Platforms with 32-bit Smis have no distinct INT32 type.
    if (kSmiValueSize == 32) return SMI;
    return INT32;
  }
  if (left_type.IsNumber() && right_type.IsNumber()) {
    return HEAP_NUMBER;
  }
  // Patching for fast string ADD makes sense even if only one of the
  // arguments is a string.
  if (left_type.IsString()) {
    return right_type.IsString() ? BOTH_STRING : STRING;
  } else if (right_type.IsString()) {
    return STRING;
  }
  // Check for oddball objects.
  if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
  if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
  return GENERIC;
}
// Runtime entry for a unary-op stub miss: args are (operand, op, overwrite
// mode, previously recorded type).  Compiles a more specialized stub,
// patches the call site, then performs the operation itself by calling the
// corresponding JS builtin so this miss still produces a result.
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
  ASSERT(args.length() == 4);
  HandleScope scope(isolate);
  Handle<Object> operand = args.at<Object>(0);
  Token::Value op = static_cast<Token::Value>(args.smi_at(1));
  UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
  UnaryOpIC::TypeInfo previous_type =
      static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
  UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
  type = UnaryOpIC::ComputeNewType(type, previous_type);
  UnaryOpStub stub(op, mode, type);
  Handle<Code> code = stub.GetCode();
  // A null code handle means stub compilation failed (OOM); skip the patch
  // and just compute the result below.
  if (!code.is_null()) {
    if (FLAG_trace_ic) {
      PrintF("[UnaryOpIC (%s->%s)#%s]\n",
             UnaryOpIC::GetName(previous_type),
             UnaryOpIC::GetName(type),
             Token::Name(op));
    }
    UnaryOpIC ic(isolate);
    ic.patch(*code);
  }
  Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
      isolate->thread_local_top()->context_->builtins(), isolate);
  Object* builtin = NULL;  // Initialization calms down the compiler.
  switch (op) {
    case Token::SUB:
      builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
      break;
    case Token::BIT_NOT:
      builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
      break;
    default:
      UNREACHABLE();
  }
  Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
  bool caught_exception;
  Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
                                          &caught_exception);
  if (caught_exception) {
    return Failure::Exception();
  }
  return *result;
}
// Runtime entry for a binary-op stub miss: args are (left, right, stub key,
// op, previously recorded type).  Joins the observed operand types with the
// recorded ones, compiles a more specialized stub (also predicting the
// result type for overflowing Smi/Int32 sites), patches the call site, and
// finally computes the result by calling the corresponding JS builtin.
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
  ASSERT(args.length() == 5);
  HandleScope scope(isolate);
  Handle<Object> left = args.at<Object>(0);
  Handle<Object> right = args.at<Object>(1);
  int key = args.smi_at(2);
  Token::Value op = static_cast<Token::Value>(args.smi_at(3));
  BinaryOpIC::TypeInfo previous_type =
      static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
  type = BinaryOpIC::JoinTypes(type, previous_type);
  BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
  // Only ADD has a specialized string path; other ops on strings go generic.
  if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
      op != Token::ADD) {
    type = BinaryOpIC::GENERIC;
  }
  if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
    if (op == Token::DIV ||
        op == Token::MUL ||
        op == Token::SHR ||
        kSmiValueSize == 32) {
      // Arithmetic on two Smi inputs has yielded a heap number.
      // That is the only way to get here from the Smi stub.
      // With 32-bit Smis, all overflows give heap numbers, but with
      // 31-bit Smis, most operations overflow to int32 results.
      result_type = BinaryOpIC::HEAP_NUMBER;
    } else {
      // Other operations on SMIs that overflow yield int32s.
      result_type = BinaryOpIC::INT32;
    }
  }
  if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
    // We must be here because an operation on two INT32 types overflowed.
    result_type = BinaryOpIC::HEAP_NUMBER;
  }
  BinaryOpStub stub(key, type, result_type);
  Handle<Code> code = stub.GetCode();
  // A null code handle means stub compilation failed (OOM); skip the patch
  // and just compute the result below.
  if (!code.is_null()) {
    if (FLAG_trace_ic) {
      PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
             BinaryOpIC::GetName(previous_type),
             BinaryOpIC::GetName(type),
             BinaryOpIC::GetName(result_type),
             Token::Name(op));
    }
    BinaryOpIC ic(isolate);
    ic.patch(*code);
    // Activate inlined smi code.
    if (previous_type == BinaryOpIC::UNINITIALIZED) {
      PatchInlinedSmiCode(ic.address());
    }
  }
  Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
      isolate->thread_local_top()->context_->builtins(), isolate);
  Object* builtin = NULL;  // Initialization calms down the compiler.
  switch (op) {
    case Token::ADD:
      builtin = builtins->javascript_builtin(Builtins::ADD);
      break;
    case Token::SUB:
      builtin = builtins->javascript_builtin(Builtins::SUB);
      break;
    case Token::MUL:
      builtin = builtins->javascript_builtin(Builtins::MUL);
      break;
    case Token::DIV:
      builtin = builtins->javascript_builtin(Builtins::DIV);
      break;
    case Token::MOD:
      builtin = builtins->javascript_builtin(Builtins::MOD);
      break;
    case Token::BIT_AND:
      builtin = builtins->javascript_builtin(Builtins::BIT_AND);
      break;
    case Token::BIT_OR:
      builtin = builtins->javascript_builtin(Builtins::BIT_OR);
      break;
    case Token::BIT_XOR:
      builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
      break;
    case Token::SHR:
      builtin = builtins->javascript_builtin(Builtins::SHR);
      break;
    case Token::SAR:
      builtin = builtins->javascript_builtin(Builtins::SAR);
      break;
    case Token::SHL:
      builtin = builtins->javascript_builtin(Builtins::SHL);
      break;
    default:
      UNREACHABLE();
  }
  Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
  bool caught_exception;
  Object** builtin_args[] = { right.location() };
  Handle<Object> result = Execution::Call(builtin_function,
                                          left,
                                          ARRAY_SIZE(builtin_args),
                                          builtin_args,
                                          &caught_exception);
  if (caught_exception) {
    return Failure::Exception();
  }
  return *result;
}
// Returns the uninitialized compare stub for |op|, i.e. the initial target
// installed at compare call sites before any type feedback.
Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
  ICCompareStub stub(op, UNINITIALIZED);
  return stub.GetCode();
}
// Recovers the IC state from a compare call site's current target stub:
// the generic Compare stub means GENERIC; otherwise the state is encoded
// in the CompareIC stub itself.
CompareIC::State CompareIC::ComputeState(Code* target) {
  int key = target->major_key();
  if (key == CodeStub::Compare) return GENERIC;
  ASSERT(key == CodeStub::CompareIC);
  return static_cast<State>(target->compare_state());
}
// Human-readable name of a CompareIC state; used by tracing output.
const char* CompareIC::GetStateName(State state) {
  if (state == UNINITIALIZED) return "UNINITIALIZED";
  if (state == SMIS) return "SMIS";
  if (state == HEAP_NUMBERS) return "HEAP_NUMBERS";
  if (state == OBJECTS) return "OBJECTS";
  if (state == SYMBOLS) return "SYMBOLS";
  if (state == STRINGS) return "STRINGS";
  if (state == GENERIC) return "GENERIC";
  // All states are handled above; anything else is a corrupt value.
  UNREACHABLE();
  return NULL;
}
// Chooses the next CompareIC state given the current state, whether the
// call site still has its inlined smi fast path, and the operands observed
// on this miss.  Specialized states (SMIS, SYMBOLS, ...) are only entered
// from UNINITIALIZED (or via the allowed widenings below); everything else
// collapses to GENERIC.
CompareIC::State CompareIC::TargetState(State state,
                                        bool has_inlined_smi_code,
                                        Handle<Object> x,
                                        Handle<Object> y) {
  // Once the inlined smi code has been patched away, a specialized state
  // (other than SYMBOLS) can no longer be refined — go generic.
  if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
    return GENERIC;
  }
  if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
      x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
  // Identity-based states below are only sound for (strict) equality.
  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
  if (state == UNINITIALIZED &&
      x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
  if ((state == UNINITIALIZED || state == SYMBOLS) &&
      x->IsString() && y->IsString()) return STRINGS;
  if (state == UNINITIALIZED &&
      x->IsJSObject() && y->IsJSObject()) return OBJECTS;
  return GENERIC;
}
// Used from ic_<arch>.cc.
// Runtime entry for a CompareIC miss: args are (left, right, op).  Updates
// the call site and returns the new target stub, which generated code then
// tail-calls to actually perform the comparison.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
  NoHandleAllocation na;
  ASSERT(args.length() == 3);
  CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
  ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
  return ic.target();
}
// Table of IC utility entry points, indexed by IC::UtilityId.  Order is
// fixed by IC_UTIL_LIST; the trailing NULL terminates the table.
static const Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
    IC_UTIL_LIST(ADDR)
    NULL
#undef ADDR
};
// Resolves a utility id to the address of its runtime function.
Address IC::AddressFromUtilityId(IC::UtilityId id) {
  return IC_utilities[id];
}
} } // namespace v8::internal
|
/* Copyright (c) 2009, 2010 Stanford University
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "Segment.h"
#include "SegmentIterator.h"
#include "LogTypes.h"
namespace RAMCloud {
/**
 * Statically turn prefetching on or off. Prefetching attempts to prime
 * the cache for the next log entry each time next() is called.  Being a
 * compile-time constant, the disabled branch in next() is optimised away.
 */
const bool prefetching = true;
/**
 * Construct a new SegmentIterator for the given Segment object.  On
 * return the iterator is positioned on the Segment's header entry.
 * \param[in] segment
 *      The Segment object to be iterated over.
 * \throw SegmentIteratorException
 *      If the Segment's backing memory fails the sanity checks performed
 *      in #commonConstructor.
 */
SegmentIterator::SegmentIterator(const Segment *segment)
    : baseAddress(segment->getBaseAddress()),
      segmentCapacity(segment->getCapacity()),
      id(segment->getId()),
      type(LOG_ENTRY_TYPE_INVALID),
      length(0),
      blobPtr(NULL),
      sawFooter(false),
      firstEntry(NULL),
      currentEntry(NULL)
{
    // The Segment supplied its own id, so capacity/id mismatches are fatal.
    commonConstructor(false);
}
/**
 * Construct a new SegmentIterator for a piece of memory that was or is used
 * as the backing for a Segment object.
 * \param[in] buffer
 *      A pointer to the first byte of the Segment backing memory.
 * \param[in] capacity
 *      The total capacity of the Segment in bytes.
 * \param[in] ignoreCapacityMismatch
 *      If true, do not throw an exception if the capacity passed in to the
 *      constructor does not match what the SegmentHeader claims. This is
 *      useful, for instance, with filtered recovery segments, where the
 *      header indicates the full capacity of the unfiltered segment, but the
 *      actual buffer received by the new master is shorter. SegmentIterator
 *      will still ensure that bounds are not exceeded, but the warning is
 *      suppressed.
 * \throw SegmentIteratorException
 *      If the buffer fails the sanity checks performed in
 *      #commonConstructor.
 */
SegmentIterator::SegmentIterator(const void *buffer, uint64_t capacity,
                                 bool ignoreCapacityMismatch)
    : baseAddress(buffer),
      segmentCapacity(capacity),
      id(-1),
      type(LOG_ENTRY_TYPE_INVALID),
      length(0),
      blobPtr(NULL),
      sawFooter(false),
      firstEntry(NULL),
      currentEntry(NULL)
{
    // id is -1 here, so commonConstructor adopts the id from the header.
    commonConstructor(ignoreCapacityMismatch);
}
/**
 * Perform initialisation operations common to all constructors. This
 * includes sanity checking and setting up the first iteration's state:
 * the buffer must begin with a valid SegmentHeader entry whose claimed
 * capacity and id agree with what the constructor was told.
 * \param[in] ignoreCapacityMismatch
 *      See the #SegmentIterator constructor.
 * \throw SegmentIteratorException
 *      If any of the sanity checks described above fails.
 */
void
SegmentIterator::commonConstructor(bool ignoreCapacityMismatch)
{
    if (segmentCapacity < (sizeof(SegmentEntry) + sizeof(SegmentHeader))) {
        throw SegmentIteratorException(HERE,
                                       "impossibly small Segment provided");
    }
    // Every Segment begins with a SegmentEntry describing a SegmentHeader.
    const SegmentEntry *entry = (const SegmentEntry *)baseAddress;
    if (entry->type != LOG_ENTRY_TYPE_SEGHEADER ||
        entry->length != sizeof(SegmentHeader) ||
        !isEntryValid(entry)) {
        throw SegmentIteratorException(HERE,
                                       "no valid SegmentHeader entry found");
    }
    const SegmentHeader *header = reinterpret_cast<const SegmentHeader *>(
        reinterpret_cast<const char *>(baseAddress) + sizeof(SegmentEntry));
    if (header->segmentCapacity != segmentCapacity && !ignoreCapacityMismatch) {
        throw SegmentIteratorException(HERE,
                                       "SegmentHeader disagrees with claimed "
                                       "Segment capacity");
    }
    // id == -1 means "unknown": adopt the header's id instead of checking.
    if (id != (uint64_t)-1 && header->segmentId != id)
        throw SegmentIteratorException(HERE, "id mismatch");
    id = header->segmentId;
    // Prime the iteration state with the header entry itself.
    SegmentEntryHandle handle = reinterpret_cast<SegmentEntryHandle>(entry);
    type = handle->type();
    length = handle->length();
    blobPtr = handle->userData<char*>();
    currentEntry = firstEntry = entry;
}
/**
 * Determine if the SegmentEntry provided is valid, i.e. that the SegmentEntry
 * does not overrun or underrun the buffer.
 * \param[in] entry
 *      The entry to validate.
 * \return
 *      true if the entry is valid, false otherwise.
 */
bool
SegmentIterator::isEntryValid(const SegmentEntry *entry) const
{
    uintptr_t pastEnd = (uintptr_t)baseAddress + segmentCapacity;
    uintptr_t entryStart = (uintptr_t)entry;
    // this is an internal error
    assert(entryStart >= (uintptr_t)baseAddress);
    // Bounds-check by subtraction rather than by adding entry->length to
    // entryStart: the additive form (entryStart + sizeof + length) can wrap
    // around uintptr_t on a corrupt/adversarial length and falsely validate
    // an entry that actually overruns the buffer.
    if (entryStart > pastEnd)
        return false;
    uintptr_t remaining = pastEnd - entryStart;
    if (remaining < sizeof(*entry))
        return false;
    if (entry->length > remaining - sizeof(*entry))
        return false;
    return true;
}
/**
 * Test if the SegmentIterator has exhausted all entries.  Iteration ends
 * either cleanly (a footer entry was seen) or because the next entry was
 * missing or failed validation.
 * \return
 *      true if there are no more entries left to iterate, else false.
 */
bool
SegmentIterator::isDone() const
{
    return (sawFooter || currentEntry == NULL || !isEntryValid(currentEntry));
}
/**
 * Progress the iterator to the next entry in the Segment, if there is one.
 * Future calls to #getType, #getLength, #getPointer, and #getOffset will
 * reflect the next SegmentEntry's parameters.  Stepping past a footer sets
 * #sawFooter; an invalid next entry terminates iteration (currentEntry is
 * cleared).
 */
void
SegmentIterator::next()
{
    // Reset the cached per-entry state; it is repopulated below on success.
    type = LOG_ENTRY_TYPE_INVALID;
    length = 0;
    blobPtr = NULL;
    if (currentEntry == NULL)
        return;
    // The footer is the last entry; do not walk past it.
    if (currentEntry->type == LOG_ENTRY_TYPE_SEGFOOTER) {
        sawFooter = true;
        return;
    }
    // The next entry's header immediately follows this entry's data.
    uintptr_t nextEntry = (uintptr_t)currentEntry + sizeof(*currentEntry) +
                          currentEntry->length;
    const SegmentEntry *entry = (const SegmentEntry *)nextEntry;
    if (!isEntryValid(entry)) {
        currentEntry = NULL;
        return;
    }
    SegmentEntryHandle handle = reinterpret_cast<SegmentEntryHandle>(entry);
    type = handle->type();
    length = handle->length();
    blobPtr = handle->userData<char*>();
    currentEntry = entry;
    if (prefetching) {
        // Warm the cache with the start of the entry after this one; 128
        // bytes covers the header plus the beginning of its data.
        nextEntry = (uintptr_t)currentEntry + sizeof(*currentEntry) +
                    currentEntry->length;
        entry = (const SegmentEntry *)nextEntry;
        prefetch(entry, 128);
    }
}
/**
* Obtain the type of the SegmentEntry currently being iterated over.
* \return
* The type of the current entry.
* \throw SegmentIteratorException
* An exception is thrown if the iterator has no more entries.
*/
// Return the type of the entry the iterator is positioned on; throws a
// SegmentIteratorException once iteration has run off the end.
LogEntryType
SegmentIterator::getType() const
{
    if (currentEntry != NULL)
        return type;
    throw SegmentIteratorException(HERE,
                                   "getType after iteration complete");
}
/**
* Obtain the length of the SegmentEntry currently being iterated over.
* \return
* The length of the current entry in bytes.
* \throw SegmentIteratorException
* An exception is thrown if the iterator has no more entries.
*/
// Return the data length (in bytes) of the entry the iterator is positioned
// on; throws a SegmentIteratorException once iteration has run off the end.
uint32_t
SegmentIterator::getLength() const
{
    if (currentEntry != NULL)
        return length;
    throw SegmentIteratorException(HERE,
                                   "getLength after iteration complete");
}
/**
 * Obtain the total space the current SegmentEntry consumes in the log:
 * the entry's data length plus the size of the SegmentEntry header that
 * precedes it.
 * \return
 *      The number of bytes this entry occupies in the log.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
uint32_t
SegmentIterator::getLengthInLog() const
{
    return getLength() + downCast<uint32_t>(sizeof(SegmentEntry));
}
/**
 * Obtain the LogTime corresponding to the append of this entry, i.e. the
 * (segment id, offset of the entry's SegmentEntry header) pair.
 * \return
 *      The LogTime corresponding to this entry's append.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
LogTime
SegmentIterator::getLogTime() const
{
    if (currentEntry == NULL)
        throw SegmentIteratorException(HERE,
                                       "getLogTime after iteration complete");
    assert(getOffset() >= sizeof(SegmentEntry));
    // getOffset() points at the entry's data; back up over the header so
    // the LogTime names the entry itself.
    return LogTime(id, getOffset() - sizeof(SegmentEntry));
}
/**
 * Obtain a SegmentEntryHandle for this iterator.
 * \return
 *      The SegmentEntryHandle corresponding to the current entry in the
 *      iteration.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
SegmentEntryHandle
SegmentIterator::getHandle() const
{
    if (currentEntry == NULL)
        throw SegmentIteratorException(HERE,
                                       "getHandle after iteration complete");
    return reinterpret_cast<SegmentEntryHandle>(currentEntry);
}
/**
 * Obtain a const void* to the data associated with the current SegmentEntry.
 * \return
 *      A const void* to the current data.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
const void *
SegmentIterator::getPointer() const
{
    // Delegates to the typed accessor; get<T>() performs the end-of-
    // iteration check.
    return get<void>();
}
/**
 * Obtain the byte offset of the current SegmentEntry's data within the Segment
 * being iterated over. Note that the data offset is not the SegmentEntry
 * structure, but the typed data immediately following it.
 * \return
 *      The byte offset of the current SegmentEntry's data.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
uint64_t
SegmentIterator::getOffset() const
{
    if (currentEntry == NULL)
        throw SegmentIteratorException(HERE,
                                       "getOffset after iteration complete");
    // blobPtr already points past the SegmentEntry header at the data.
    return (uintptr_t)blobPtr - (uintptr_t)baseAddress;
}
/**
 * Generate the checksum for the current entry.
 * \return
 *      The current checksum for the current entry. If the entry
 *      is corrupt, this may differ from what is stored.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
SegmentChecksum::ResultType
SegmentIterator::generateChecksum() const
{
    // getHandle() performs the end-of-iteration check.
    return getHandle()->generateChecksum();
}
/**
 * Determine whether the current entry's checksum is valid or not.
 * \return
 *      true if the checksum is valid, else false.
 * \throw SegmentIteratorException
 *      An exception is thrown if the iterator has no more entries.
 */
bool
SegmentIterator::isChecksumValid() const
{
    // getHandle() performs the end-of-iteration check.
    return getHandle()->isChecksumValid();
}
/**
 * Determine whether the checksum appended to the Segment this iterator
 * is associated with is correct. If a checksum does not exist, an
 * exception is thrown.
 *
 * TODO(Rumble): This probably belongs in Segment.cc, not here.
 *
 * \return
 *      true if the check is valid, else false.
 * \throw SegmentIteratorException
 *      An exception is thrown if no checksum is present in the Segment.
 */
bool
SegmentIterator::isSegmentChecksumValid() const
{
    // find the stored checksum and calculate what it should be as we go.
    // Note: a fresh iterator is used so this method does not disturb the
    // position of *this.
    SegmentIterator i(baseAddress, segmentCapacity);
    SegmentChecksum checksum;
    while (!i.isDone()) {
        if (i.getType() == LOG_ENTRY_TYPE_SEGFOOTER)
            break;
        // Fold each entry's own checksum into the running segment checksum.
        SegmentChecksum::ResultType entryChecksum = i.generateChecksum();
        checksum.update(&entryChecksum, sizeof(entryChecksum));
        i.next();
    }
    // isDone() here means iteration ended without reaching a footer, i.e.
    // the Segment carries no stored checksum to compare against.
    if (i.isDone()) {
        throw SegmentIteratorException(HERE,
                                       "no checksum exists in the Segment");
    }
    const SegmentFooter *f =
        reinterpret_cast<const SegmentFooter *>(i.getPointer());
    return (f->checksum == checksum.getResult());
}
} // namespace
|
// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <qt/patentcoinamountfield.h>
#include <qt/patentcoinunits.h>
#include <qt/guiconstants.h>
#include <qt/guiutil.h>
#include <qt/qvaluecombobox.h>
#include <QApplication>
#include <QAbstractSpinBox>
#include <QHBoxLayout>
#include <QKeyEvent>
#include <QLineEdit>
/** QSpinBox that uses fixed-point numbers internally and uses our own
 * formatting/parsing functions.
 */
class AmountSpinBox: public QAbstractSpinBox
{
    Q_OBJECT
public:
    explicit AmountSpinBox(QWidget *parent):
        QAbstractSpinBox(parent)
    {
        setAlignment(Qt::AlignRight);
        // Forward edits of the embedded line edit as our valueChanged() signal.
        connect(lineEdit(), &QLineEdit::textEdited, this, &AmountSpinBox::valueChanged);
    }
    // Validator hook: accept empty text as "in progress", reject text that
    // cannot parse as an amount.
    QValidator::State validate(QString &text, int &pos) const override
    {
        if(text.isEmpty())
            return QValidator::Intermediate;
        bool valid = false;
        parse(text, &valid);
        /* Make sure we return Intermediate so that fixup() is called on defocus */
        return valid ? QValidator::Intermediate : QValidator::Invalid;
    }
    // Normalize the text on defocus: clamp into [min,max] and reformat.
    void fixup(QString &input) const override
    {
        bool valid;
        CAmount val;
        if (input.isEmpty() && !m_allow_empty) {
            // Empty field is coerced to the minimum when empties are disallowed.
            valid = true;
            val = m_min_amount;
        } else {
            valid = false;
            val = parse(input, &valid);
        }
        if (valid) {
            val = qBound(m_min_amount, val, m_max_amount);
            input = PatentcoinUnits::format(currentUnit, val, false, PatentcoinUnits::separatorAlways);
            lineEdit()->setText(input);
        }
    }
    // Current amount in base units; *valid_out (if given) reports parse success.
    CAmount value(bool *valid_out=nullptr) const
    {
        return parse(text(), valid_out);
    }
    void setValue(const CAmount& value)
    {
        lineEdit()->setText(PatentcoinUnits::format(currentUnit, value, false, PatentcoinUnits::separatorAlways));
        Q_EMIT valueChanged();
    }
    void SetAllowEmpty(bool allow)
    {
        m_allow_empty = allow;
    }
    void SetMinValue(const CAmount& value)
    {
        m_min_amount = value;
    }
    void SetMaxValue(const CAmount& value)
    {
        m_max_amount = value;
    }
    // Spin-arrow steps: move by singleStep, clamped to the allowed range.
    // Note: an unparsable current value is treated as 0 (parse() contract).
    void stepBy(int steps) override
    {
        bool valid = false;
        CAmount val = value(&valid);
        val = val + steps * singleStep;
        val = qBound(m_min_amount, val, m_max_amount);
        setValue(val);
    }
    // Switch the display unit, reformatting the current value if it parses,
    // clearing the field otherwise.
    void setDisplayUnit(int unit)
    {
        bool valid = false;
        CAmount val = value(&valid);
        currentUnit = unit;
        lineEdit()->setPlaceholderText(PatentcoinUnits::format(currentUnit, m_min_amount, false, PatentcoinUnits::separatorAlways));
        if(valid)
            setValue(val);
        else
            clear();
    }
    void setSingleStep(const CAmount& step)
    {
        singleStep = step;
    }
    // Compute (and cache) a minimum size wide enough to display the largest
    // possible amount; invalidated only by construction, so font/style changes
    // after first use are not picked up.
    QSize minimumSizeHint() const override
    {
        if(cachedMinimumSizeHint.isEmpty())
        {
            ensurePolished();
            const QFontMetrics fm(fontMetrics());
            int h = lineEdit()->minimumSizeHint().height();
            int w = GUIUtil::TextWidth(fm, PatentcoinUnits::format(PatentcoinUnits::PC, PatentcoinUnits::maxMoney(), false, PatentcoinUnits::separatorAlways));
            w += 2; // cursor blinking space
            QStyleOptionSpinBox opt;
            initStyleOption(&opt);
            QSize hint(w, h);
            QSize extra(35, 6);
            opt.rect.setSize(hint + extra);
            extra += hint - style()->subControlRect(QStyle::CC_SpinBox, &opt,
                QStyle::SC_SpinBoxEditField, this).size();
            // get closer to final result by repeating the calculation
            opt.rect.setSize(hint + extra);
            extra += hint - style()->subControlRect(QStyle::CC_SpinBox, &opt,
                QStyle::SC_SpinBoxEditField, this).size();
            hint += extra;
            hint.setHeight(h);
            opt.rect = rect();
            cachedMinimumSizeHint = style()->sizeFromContents(QStyle::CT_SpinBox, &opt, hint, this)
                .expandedTo(QApplication::globalStrut());
        }
        return cachedMinimumSizeHint;
    }
private:
    int currentUnit{PatentcoinUnits::PC};  // unit used for formatting/parsing
    CAmount singleStep{CAmount(100000)}; // satoshis
    mutable QSize cachedMinimumSizeHint;   // lazily filled by minimumSizeHint()
    bool m_allow_empty{true};              // whether an empty field is acceptable
    CAmount m_min_amount{CAmount(0)};
    CAmount m_max_amount{PatentcoinUnits::maxMoney()};
    /**
     * Parse a string into a number of base monetary units and
     * return validity.
     * @note Must return 0 if !valid.
     */
    CAmount parse(const QString &text, bool *valid_out=nullptr) const
    {
        CAmount val = 0;
        bool valid = PatentcoinUnits::parse(currentUnit, text, &val);
        if(valid)
        {
            // Reject out-of-range amounts even when the text itself parses.
            if(val < 0 || val > PatentcoinUnits::maxMoney())
                valid = false;
        }
        if(valid_out)
            *valid_out = valid;
        return valid ? val : 0;
    }
protected:
    // Map comma key presses to periods so both decimal separators work.
    bool event(QEvent *event) override
    {
        if (event->type() == QEvent::KeyPress || event->type() == QEvent::KeyRelease)
        {
            QKeyEvent *keyEvent = static_cast<QKeyEvent *>(event);
            if (keyEvent->key() == Qt::Key_Comma)
            {
                // Translate a comma into a period
                QKeyEvent periodKeyEvent(event->type(), Qt::Key_Period, keyEvent->modifiers(), ".", keyEvent->isAutoRepeat(), keyEvent->count());
                return QAbstractSpinBox::event(&periodKeyEvent);
            }
        }
        return QAbstractSpinBox::event(event);
    }
    // Enable/disable the up/down arrows depending on the value's position
    // within [m_min_amount, m_max_amount].
    StepEnabled stepEnabled() const override
    {
        if (isReadOnly()) // Disable steps when AmountSpinBox is read-only
            return StepNone;
        if (text().isEmpty()) // Allow step-up with empty field
            return StepUpEnabled;
        StepEnabled rv = StepNone;
        bool valid = false;
        CAmount val = value(&valid);
        if (valid) {
            if (val > m_min_amount)
                rv |= StepDownEnabled;
            if (val < m_max_amount)
                rv |= StepUpEnabled;
        }
        return rv;
    }
Q_SIGNALS:
    //! Emitted whenever the amount text is edited or set programmatically.
    void valueChanged();
};
#include <qt/patentcoinamountfield.moc>
// Compound widget: an AmountSpinBox plus a unit-selection combo box.
PatentcoinAmountField::PatentcoinAmountField(QWidget *parent) :
    QWidget(parent),
    amount(nullptr)
{
    amount = new AmountSpinBox(this);
    // Force the C locale so the decimal separator is always '.'.
    amount->setLocale(QLocale::c());
    amount->installEventFilter(this);
    amount->setMaximumWidth(240);
    QHBoxLayout *layout = new QHBoxLayout(this);
    layout->addWidget(amount);
    unit = new QValueComboBox(this);
    unit->setModel(new PatentcoinUnits(this));
    layout->addWidget(unit);
    layout->addStretch(1);
    layout->setContentsMargins(0,0,0,0);
    setLayout(layout);
    setFocusPolicy(Qt::TabFocus);
    setFocusProxy(amount);
    // If one of the widgets changes, the combined content changes as well
    connect(amount, &AmountSpinBox::valueChanged, this, &PatentcoinAmountField::valueChanged);
    connect(unit, static_cast<void (QComboBox::*)(int)>(&QComboBox::currentIndexChanged), this, &PatentcoinAmountField::unitChanged);
    // Set default based on configuration
    unitChanged(unit->currentIndex());
}
// Reset the field: empty amount, unit combo back to its first entry.
void PatentcoinAmountField::clear()
{
    amount->clear();
    unit->setCurrentIndex(0);
}
// Enable/disable both child widgets together.
void PatentcoinAmountField::setEnabled(bool fEnabled)
{
    amount->setEnabled(fEnabled);
    unit->setEnabled(fEnabled);
}
// Validate the current text, update the visual valid/invalid marker, and
// report the outcome to the caller.
bool PatentcoinAmountField::validate()
{
    bool ok = false;
    value(&ok);          // parse only for its validity side-channel
    setValid(ok);
    return ok;
}
// Mark the amount box visually invalid (or restore the default style).
void PatentcoinAmountField::setValid(bool valid)
{
    if (!valid)
        amount->setStyleSheet(STYLE_INVALID);
    else
        amount->setStyleSheet("");
}
// Watch the spin box for focus so the invalid highlight is cleared as soon
// as the user starts editing again.
bool PatentcoinAmountField::eventFilter(QObject *object, QEvent *event)
{
    if (event->type() == QEvent::FocusIn)
    {
        // Clear invalid flag on focus
        setValid(true);
    }
    return QWidget::eventFilter(object, event);
}
// Insert amount and unit into the dialog's tab order after 'prev';
// returns the last widget of the chain so callers can keep chaining.
QWidget *PatentcoinAmountField::setupTabChain(QWidget *prev)
{
    QWidget::setTabOrder(prev, amount);
    QWidget::setTabOrder(amount, unit);
    return unit;
}
// Current amount in base units; *valid_out reports parse success.
CAmount PatentcoinAmountField::value(bool *valid_out) const
{
    return amount->value(valid_out);
}
// Set the displayed amount (in base units).
void PatentcoinAmountField::setValue(const CAmount& value)
{
    amount->setValue(value);
}
// Whether an empty field is acceptable (otherwise fixup() fills in the minimum).
void PatentcoinAmountField::SetAllowEmpty(bool allow)
{
    amount->SetAllowEmpty(allow);
}
// Lower clamp bound for entered amounts.
void PatentcoinAmountField::SetMinValue(const CAmount& value)
{
    amount->SetMinValue(value);
}
// Upper clamp bound for entered amounts.
void PatentcoinAmountField::SetMaxValue(const CAmount& value)
{
    amount->SetMaxValue(value);
}
// Make the amount box read-only (also disables stepping, see stepEnabled()).
void PatentcoinAmountField::setReadOnly(bool fReadOnly)
{
    amount->setReadOnly(fReadOnly);
}
// Combo-box index changed: propagate the newly selected unit to the spin box.
void PatentcoinAmountField::unitChanged(int idx)
{
    // Use description tooltip for current unit for the combobox
    unit->setToolTip(unit->itemData(idx, Qt::ToolTipRole).toString());
    // Determine new unit ID
    int newUnit = unit->itemData(idx, PatentcoinUnits::UnitRole).toInt();
    amount->setDisplayUnit(newUnit);
}
// Programmatically select a unit; unitChanged() then updates the spin box.
void PatentcoinAmountField::setDisplayUnit(int newUnit)
{
    unit->setValue(newUnit);
}
// Amount added/subtracted per spin-arrow step (in base units).
void PatentcoinAmountField::setSingleStep(const CAmount& step)
{
    amount->setSingleStep(step);
}
|
// Copyright (c) 2011-2013 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "governancedialog.h"
#include "ui_governancedialog.h"
#include "masternode.h"
#include "masternode-sync.h"
#include "masternodeconfig.h"
#include "masternodeman.h"
#include "governance.h"
#include "governance-vote.h"
#include "governance-classes.h"
#include "governance-validators.h"
#include "bitcoinunits.h"
#include "guiconstants.h"
#include "guiutil.h"
#include "messagesigner.h"
#include "optionsmodel.h"
#include "walletmodel.h"
#include "../governance.h"
#include "validation.h"
#include <QClipboard>
#include <QDrag>
#include <QMenu>
#include <QMimeData>
#include <QMouseEvent>
#include <QPixmap>
#if QT_VERSION < 0x050000
#include <QUrl>
#endif
#if defined(HAVE_CONFIG_H)
#include "config/securetag-config.h" /* for USE_QRCODE */
#endif
#ifdef USE_QRCODE
#include <qrencode.h>
#endif
// Build the dialog from its .ui description; models are attached later via
// setModel()/setWalletModel().
GovernanceDialog::GovernanceDialog(QWidget *parent) :
    QDialog(parent),
    walletModel(0),
    ui(new Ui::GovernanceDialog),
    model(0)
{
    ui->setupUi(this);
}
// The ui object is the only resource owned directly by this dialog.
GovernanceDialog::~GovernanceDialog()
{
    delete ui;
}
// Attach the options model and refresh the dialog whenever the user's
// display-unit preference changes.
void GovernanceDialog::setModel(OptionsModel *model)
{
    this->model = model;
    if (model)
        connect(model, SIGNAL(displayUnitChanged(int)), this, SLOT(update()));
    // update the display unit if necessary
    update();
}
// Store the texts to display (window title, QR payload, info text, QR title)
// and refresh the widgets immediately.
void GovernanceDialog::setInfo(QString strWindowtitle, QString strQRCode, QString strTextInfo, QString strQRCodeTitle)
{
    this->strWindowtitle = strWindowtitle;
    this->strQRCode = strQRCode;
    this->strTextInfo = strTextInfo;
    this->strQRCodeTitle = strQRCodeTitle;
    update();
}
// Attach the wallet model (stored only; not read anywhere in this file).
void GovernanceDialog::setWalletModel(WalletModel *model)
{
    this->walletModel = model;
}
// Refresh title and info text from the stored strings; no-op until an
// options model is attached.
// NOTE(review): this non-virtual update() hides QWidget::update() -- confirm
// that shadowing the repaint-scheduling slot is intentional.
void GovernanceDialog::update()
{
    if(!model)
        return;
    setWindowTitle(strWindowtitle);
    ui->outUri->setText(strTextInfo);
}
|
/*********************************************************************************
* File Name : /home/yuewu/work/sol/src/sol/pario/csv_reader.cc
* Created By : yuewu
* Creation Date : [2015-11-13 19:39]
* Last Modified : [2016-02-12 21:23]
* Description :
**********************************************************************************/
#include "sol/pario/csv_reader.h"
#include <cstdlib>
#include "sol/pario/numeric_parser.h"
namespace sol {
namespace pario {
// Construct a CSV reader; the feature dimension stays 0 until LoadFeatDim().
CSVReader::CSVReader() : DataFileReader() { this->feat_dim_ = 0; }
/// Open a CSV data file and read its header line to learn the feature
/// dimension.
/// \param path  path of the file to open
/// \param mode  open mode; NOTE(review): accepted for interface
///              compatibility but not forwarded to DataFileReader::Open --
///              confirm this is intentional.
/// \return Status_OK on success, an error status otherwise
int CSVReader::Open(const std::string& path, const char* mode) {
  int ret = DataFileReader::Open(path);
  if (ret == Status_OK) {
    ret = this->LoadFeatDim();
  }
  // Assign the comparison directly instead of the redundant '? true : false'.
  this->is_good_ = (ret == Status_OK);
  return ret;
}
// Seek back to the beginning of the data, skipping the CSV header line so
// the next call to Next() returns the first data point again.
void CSVReader::Rewind() {
  DataFileReader::Rewind();
  // read the first line for csv
  this->file_reader_.ReadLine(this->read_buf_, this->read_buf_size_);
}
/// Read the next data point from the file.
/// Line format: "<label>,<feat1>,<feat2>,...". Features are assigned
/// sequential indices starting at 1; zero values are dropped (sparse
/// storage).
/// \param dst_data  destination data point (cleared first)
/// \return Status_OK on success, an error status otherwise
int CSVReader::Next(DataPoint& dst_data) {
  int ret = this->file_reader_.ReadLine(this->read_buf_, this->read_buf_size_);
  if (ret != Status_OK) return ret;
  char* iter = this->read_buf_, *endptr = nullptr;
  if (*iter == '\0') {
    fprintf(stderr, "incorrect line\n");
    return Status_Invalid_Format;
  }
  dst_data.Clear();
  // 1. parse label
  dst_data.set_label(label_t(NumericParser::ParseInt(iter, endptr)));
  if (endptr == iter) {
    // ParseInt consumed nothing: the line does not start with a number.
    fprintf(stderr, "parse label failed.\n");
    this->is_good_ = false;
    return Status_Invalid_Format;
  }
  iter = endptr;
  // 2. parse features
  dst_data.Reserve(this->feat_dim_);
  index_t index = 1;
  while (*iter != '\0') {
    // Every feature value must be preceded by a comma separator.
    if (*iter != ',') {
      fprintf(stderr, "incorrect input file (%s)!\n", iter);
      this->is_good_ = false;
      return Status_Invalid_Format;
    }
    ++iter;
    real_t feat = NumericParser::ParseFloat(iter, endptr);
    if (endptr == iter) {
      fprintf(stderr, "parse feature value (%s) failed!\n", iter);
      this->is_good_ = false;
      return Status_Invalid_Format;
    }
    iter = endptr;
    // Only keep non-zero feature values.
    if (feat != 0) {
      dst_data.AddNewFeat(index, feat);
    }
    ++index;
  }
  return ret;
}
int CSVReader::LoadFeatDim() {
int ret = this->file_reader_.ReadLine(this->read_buf_, this->read_buf_size_);
if (ret != Status_OK) return ret;
char* p = this->read_buf_;
this->feat_dim_ = 0;
while (*p != '\0') {
if (*p++ == ',') ++this->feat_dim_;
}
++this->feat_dim_;
return ret;
}
RegisterDataReader(CSVReader, "csv", "csv format data reader");
} // namespace pario
} // namespace sol
|
/*-
* Copyright (c) 2016 Landon Fuller <landonf@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <string>
#include "ast_match.hh"
using namespace std;
using namespace clang;
/*
 * Produce a human-readable "<file>:<line>:<column>" description of @p loc,
 * or a bracketed placeholder when the location cannot be resolved
 * (invalid location, macro expansion, invalid file id, missing file entry).
 */
string
ASTMatchUtil::describe (const SourceLocation &loc) {
	if (loc.isInvalid())
		return ("<loc-invalid>");
	else if (loc.isMacroID()) {
		if (_srcManager.isMacroArgExpansion(loc))
			return ("<macro-arg>");
		else
			return ("<macro-body>");
	}
	auto locInfo = _srcManager.getDecomposedLoc(loc);
	auto fid = locInfo.first;
	auto fileOffset = locInfo.second;
	if (fid.isInvalid())
		return ("<fid-invalid>");
	auto file = _srcManager.getFileEntryForID(fid);
	if (file == NULL)
		return ("<null-file>");
	/* Declare line/column at first use; the old dead zero-initializations
	 * at the top of the function have been removed. */
	unsigned line = _srcManager.getLineNumber(fid, fileOffset);
	unsigned column = _srcManager.getColumnNumber(fid, fileOffset);
	string desc = file->getName();
	desc += ":" + to_string(line) + ":" + to_string(column);
	return (desc);
}
/*
 * Debug helper: print the location description followed by its
 * classification to stderr.
 */
void
ASTMatchUtil::dump (const SourceLocation &loc)
{
	auto &os = llvm::errs();
	os << describe(loc) << " (";
	switch (getLocationType(loc)) {
	case LOC_INVALID:
		os << "INVALID";
		break;
	case LOC_MACRO:
		os << "MACRO";
		break;
	case LOC_EXTERNAL:
		os << "EXTERNAL";
		break;
	case LOC_SOURCE:
		os << "SOURCE";
		break;
	case LOC_HOST:
		/* Print "HOST" for consistency with the other cases, which all
		 * drop the LOC_ prefix (was "LOC_HOST"). */
		os << "HOST";
		break;
	}
	os << ")\n";
}
/*
 * Debug helper: print @p loc and then recursively walk its macro expansion
 * chain, indenting two spaces per nesting level.
 */
void
ASTMatchUtil::dumpTree (const SourceLocation &loc, std::string::size_type indent)
{
	auto &os = llvm::errs();
	auto istr = string(indent*2, ' ');
	os << istr << describe(loc) << "\n";
	/* Removed an unused local (`nistr`) that was computed as
	 * string(indent+1*2, ' ') -- due to operator precedence that is
	 * indent+2 spaces, not (indent+1)*2 -- and then never read. The
	 * per-level indent is handled by the recursive call below. */
	if (loc.isMacroID()) {
		SourceLocation next;
		if (_srcManager.isMacroArgExpansion(loc)) {
			next = _srcManager.getImmediateSpellingLoc(loc);
		} else if (_srcManager.isMacroBodyExpansion(loc)) {
			next = _srcManager.getImmediateExpansionRange(loc).first;
		} else {
			return;
		}
		dumpTree(next, indent+1);
	}
}
/*
 * Return true iff @p loc resolves to a file whose name matches the path
 * pattern @p p. Invalid locations, file ids, or missing file entries never
 * match.
 */
bool
ASTMatchUtil::locMatches (const clang::SourceLocation &loc, const PathPattern &p)
{
	if (!loc.isValid())
		return (false);
	const auto fid = _srcManager.getFileID(loc);
	if (fid.isInvalid())
		return (false);
	const auto *entry = _srcManager.getFileEntryForID(fid);
	return (entry != NULL && p.match(entry->getName()));
}
/*
 * Classify @p loc: invalid, macro expansion, project definition path
 * (LOC_HOST), project reference path (LOC_SOURCE), or anything else
 * (LOC_EXTERNAL).
 */
ASTMatchUtil::loc_type
ASTMatchUtil::getLocationType (clang::SourceLocation loc)
{
	if (loc.isInvalid())
		return (LOC_INVALID);
	else if (loc.isMacroID())
		return (LOC_MACRO);
	auto fid = _srcManager.getFileID(loc);
	if (fid.isInvalid())
		return (LOC_EXTERNAL);
	auto fentry = _srcManager.getFileEntryForID(fid);
	if (fentry == NULL)
		return (LOC_EXTERNAL);
	/* Check project path matching */
	Path path(fentry->getName());
	if (_project->isDefinitionPath(path))
		return (LOC_HOST);
	if (_project->isReferencePath(path))
		return (LOC_SOURCE);
	return (LOC_EXTERNAL);
}
/**
 * Return true if @p usedAt falls within defined source paths, and @p definedAt
 * falls within defined host paths.
 */
bool
ASTMatchUtil::isHostRef (SourceLocation usedAt, SourceLocation definedAt)
{
	/* Short-circuit keeps the original evaluation order: usedAt is
	 * classified first, definedAt only if usedAt qualifies. */
	return (getLocationType(usedAt) == LOC_SOURCE &&
	    getLocationType(definedAt) == LOC_HOST);
}
|
/// \page gepetto_viewer_corba_introduction Gepetto Viewer server
///
/// This package implements a CORBA server embedding the gepetto-viewer library.
///
/// The IDL interface of the server is defined by gepetto::corbaserver::GraphicalInterface.
///
/// To use the graphical interface via python,
/// \li start executable
/// \code{bash} gepetto-gui \endcode.
/// \li in a python terminal, create a client
/// \code{py}
/// from gepetto.corbaserver import Client
/// client = Client ()
/// # open a new window
/// client.gui.createWindow ("w")
/// # refer to the idl interface to control gepetto-viewer.
/// \endcode
|
// Copyright 1998-2015 Epic Games, Inc. All Rights Reserved.
#include "UnrealEd.h"
#include "FoliageEditModule.h"
#include "Runtime/AssetRegistry/Public/AssetRegistryModule.h"
const FName FoliageEditAppIdentifier = FName(TEXT("FoliageEdApp"));
#include "FoliageEdMode.h"
#include "PropertyEditing.h"
#include "FoliageTypeDetails.h"
#include "ProceduralFoliageComponent.h"
#include "ProceduralFoliageComponentVisualizer.h"
#include "ProceduralFoliageComponentDetails.h"
#include "ActorFactoryProceduralFoliage.h"
#include "ComponentVisualizer.h"
#include "ProceduralFoliageVolume.h"
#include "ProceduralFoliageBlockingVolume.h"
#include "ProceduralFoliageComponent.h"
#include "FoliageTypeObjectCustomization.h"
#include "FoliageType_InstancedStaticMesh.h"
#include "FoliageType_ISMThumbnailRenderer.h"
/**
 * Foliage Edit Mode module.
 *
 * Registers the foliage editor mode, property customizations, component
 * visualizer, actor factory, and thumbnail renderer on startup, and tears
 * them down (partially -- see ShutdownModule) on shutdown.
 */
class FFoliageEditModule : public IFoliageEditModule
{
public:
	/**
	 * Called right after the module DLL has been loaded and the module object has been created
	 */
	virtual void StartupModule() override
	{
		FEditorModeRegistry::Get().RegisterMode<FEdModeFoliage>(
			FBuiltinEditorModes::EM_Foliage,
			NSLOCTEXT("EditorModes", "FoliageMode", "Foliage"),
			FSlateIcon(FEditorStyle::GetStyleSetName(), "LevelEditor.FoliageMode", "LevelEditor.FoliageMode.Small"),
			true, 400
		);
		// Register the details customizer
		FPropertyEditorModule& PropertyModule = FModuleManager::LoadModuleChecked<FPropertyEditorModule>("PropertyEditor");
		PropertyModule.RegisterCustomClassLayout("FoliageType", FOnGetDetailCustomizationInstance::CreateStatic(&FFoliageTypeDetails::MakeInstance));
		PropertyModule.RegisterCustomPropertyTypeLayout("FoliageTypeObject", FOnGetPropertyTypeCustomizationInstance::CreateStatic(&FFoliageTypeObjectCustomization::MakeInstance));
		GUnrealEd->RegisterComponentVisualizer(UProceduralFoliageComponent::StaticClass()->GetFName(), MakeShareable(new FProceduralFoliageComponentVisualizer));
		// NOTE(review): "PropertyEditor" is loaded a second time here; the
		// PropertyModule reference above could be reused instead.
		FPropertyEditorModule& PropertyEditor = FModuleManager::LoadModuleChecked<FPropertyEditorModule>("PropertyEditor");
		PropertyEditor.RegisterCustomClassLayout("ProceduralFoliageComponent", FOnGetDetailCustomizationInstance::CreateStatic(&FProceduralFoliageComponentDetails::MakeInstance));
		// Actor Factories
		auto ProceduralFoliageVolumeFactory = NewObject<UActorFactoryProceduralFoliage>();
		GEditor->ActorFactories.Add(ProceduralFoliageVolumeFactory);
#if WITH_EDITOR
		// Volume placeability
		if (!GetDefault<UEditorExperimentalSettings>()->bProceduralFoliage)
		{
			AProceduralFoliageVolume::StaticClass()->ClassFlags |= CLASS_NotPlaceable;
			AProceduralFoliageBlockingVolume::StaticClass()->ClassFlags |= CLASS_NotPlaceable;
		}
		SubscribeEvents();
#endif
		// Register thumbnail renderer
		UThumbnailManager::Get().RegisterCustomRenderer(UFoliageType_InstancedStaticMesh::StaticClass(), UFoliageType_ISMThumbnailRenderer::StaticClass());
	}
	/**
	 * Called before the module is unloaded, right before the module object is destroyed.
	 */
	virtual void ShutdownModule() override
	{
		FEditorModeRegistry::Get().UnregisterMode(FBuiltinEditorModes::EM_Foliage);
		if (!UObjectInitialized())
		{
			return;
		}
#if WITH_EDITOR
		UnsubscribeEvents();
#endif
		// Unregister the details customization
		// NOTE(review): only "FoliageType" is unregistered here; the
		// "FoliageTypeObject" property type layout and the
		// "ProceduralFoliageComponent" class layout registered in
		// StartupModule are not -- confirm whether that is intentional.
		if (FModuleManager::Get().IsModuleLoaded("PropertyEditor"))
		{
			FPropertyEditorModule& PropertyModule = FModuleManager::LoadModuleChecked<FPropertyEditorModule>("PropertyEditor");
			PropertyModule.UnregisterCustomClassLayout("FoliageType");
			PropertyModule.NotifyCustomizationModuleChanged();
		}
	}
#if WITH_EDITOR
	// Remove procedurally spawned content when its owning volume is deleted.
	void OnLevelActorDeleted(AActor* Actor)
	{
		if (AProceduralFoliageVolume* ProceduralFoliageVolume = Cast<AProceduralFoliageVolume>(Actor))
		{
			if (UProceduralFoliageComponent* ProceduralComponent = ProceduralFoliageVolume->ProceduralComponent)
			{
				ProceduralComponent->RemoveProceduralContent();
			}
		}
	}
	// Purge foliage instances referring to an asset that was just removed.
	void NotifyAssetRemoved(const FAssetData& AssetInfo)
	{
		// Go through all FoliageActors in the world and delete
		for(TObjectIterator<AInstancedFoliageActor> It; It; ++It)
		{
			AInstancedFoliageActor* IFA = *It;
			IFA->CleanupDeletedFoliageType();
		}
	}
	// Hook level-actor deletion, asset removal, and experimental-settings
	// change notifications. Existing handles are removed first so repeated
	// calls do not double-subscribe.
	void SubscribeEvents()
	{
		GEngine->OnLevelActorDeleted().Remove(OnLevelActorDeletedDelegateHandle);
		OnLevelActorDeletedDelegateHandle = GEngine->OnLevelActorDeleted().AddRaw(this, &FFoliageEditModule::OnLevelActorDeleted);
		FAssetRegistryModule& AssetRegistryModule = FModuleManager::GetModuleChecked<FAssetRegistryModule>(TEXT("AssetRegistry"));
		AssetRegistryModule.Get().OnAssetRemoved().AddRaw(this, &FFoliageEditModule::NotifyAssetRemoved);
		auto ExperimentalSettings = GetMutableDefault<UEditorExperimentalSettings>();
		ExperimentalSettings->OnSettingChanged().Remove(OnExperimentalSettingChangedDelegateHandle);
		OnExperimentalSettingChangedDelegateHandle = ExperimentalSettings->OnSettingChanged().AddRaw(this, &FFoliageEditModule::HandleExperimentalSettingChanged);
	}
	// Reverse of SubscribeEvents(); the AssetRegistry module may already be
	// unloaded during editor shutdown, hence the IsModuleLoaded guard.
	void UnsubscribeEvents()
	{
		GEngine->OnLevelActorDeleted().Remove(OnLevelActorDeletedDelegateHandle);
		GetMutableDefault<UEditorExperimentalSettings>()->OnSettingChanged().Remove(OnExperimentalSettingChangedDelegateHandle);
		if (FModuleManager::Get().IsModuleLoaded(TEXT("AssetRegistry")))
		{
			FAssetRegistryModule& AssetRegistryModule = FModuleManager::GetModuleChecked<FAssetRegistryModule>(TEXT("AssetRegistry"));
			AssetRegistryModule.Get().OnAssetRemoved().RemoveAll(this);
		}
	}
	// Toggle placeability of procedural foliage volumes when the
	// experimental bProceduralFoliage setting changes.
	void HandleExperimentalSettingChanged(FName PropertyName)
	{
		if (GetDefault<UEditorExperimentalSettings>()->bProceduralFoliage)
		{
			AProceduralFoliageVolume::StaticClass()->ClassFlags &= ~CLASS_NotPlaceable;
			AProceduralFoliageBlockingVolume::StaticClass()->ClassFlags &= ~CLASS_NotPlaceable;
		}
		else
		{
			AProceduralFoliageVolume::StaticClass()->ClassFlags |= CLASS_NotPlaceable;
			AProceduralFoliageBlockingVolume::StaticClass()->ClassFlags |= CLASS_NotPlaceable;
		}
	}
	FDelegateHandle OnLevelActorDeletedDelegateHandle;
	FDelegateHandle OnExperimentalSettingChangedDelegateHandle;
#endif
};
IMPLEMENT_MODULE( FFoliageEditModule, FoliageEdit );
|
#include <stdint.h>
#ifndef WEBUI_HPP
#define WEBUI_HPP
// Identifiers of web UI widgets, assigned by webInitCore()/webStart() in the
// implementation file and referenced by other modules to update the UI.
extern uint16_t webLabelLoad;
extern uint16_t webLabelPgnStatus;
extern uint16_t webButtonReboot;
extern uint16_t webTabHardware;
extern uint16_t webTabGPS;
extern uint16_t webTabIMU;
extern uint16_t webTabSteeringAngle;
// NOTE(review): this is int16_t while every other widget id here is
// uint16_t -- confirm the asymmetry is intentional (the definition must
// match, so it cannot be changed from this header alone).
extern int16_t webTabSteeringActuator;
extern uint16_t webTabUturn;
extern uint16_t webTabWorkSteerSwitch;
// One-time setup of the web UI core/widgets.
void webInitCore();
// Start serving the web UI.
void webStart();
// Mark the UI as requiring a reboot for pending changes to apply.
void webChangeNeedsReboot();
#endif
|
#include <iostream>
using namespace std;
// Demo: a function defined inside a named namespace.
namespace my_space {
void helloInSpace() {
    cout << "Hello in space!" << endl;
}
}
// Demo: namespaces can be re-opened; this adds a second function to my_space.
namespace my_space {
void helloInSpace2() {
    cout << "Hello in space!" << endl;
}
}
int main() {
my_space::helloInSpace();
my_space::helloInSpace2();
return 0;
}
|
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <vector>
#include "inference_engine.hpp"
namespace FuncTestUtils {
namespace TestModel {
/**
 * @brief generates IR files (XML and BIN files) with the test model.
 * Passed reference vector is filled with CNN layers to validate after the network reading.
 * @param modelPath used to serialize the generated network
 * @param weightsPath used to serialize the generated weights
 * @param netPrc precision of the generated network
 * @param inputDims dims on the input layer of the generated network
 */
void generateTestModel(const std::string &modelPath,
                       const std::string &weightsPath,
                       const InferenceEngine::Precision &netPrc = InferenceEngine::Precision::FP32,
                       const InferenceEngine::SizeVector &inputDims = {1, 3, 227, 227});
// A name guaranteed not to match any input of the generated model; used by
// negative tests. (Namespace-scope const has internal linkage, so each
// including translation unit gets its own copy.)
const char incorrect_input_name[] = "incorrect_input_name";
} // namespace TestModel
} // namespace FuncTestUtils
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2015 Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/alias-class.h"
#include <limits>
#include <algorithm>
#include <bitset>
#include <folly/Hash.h>
#include <folly/Format.h>
#include "hphp/util/safe-cast.h"
#include "hphp/runtime/base/string-data.h"
#include "hphp/runtime/vm/jit/ir-instruction.h"
#include "hphp/runtime/vm/jit/ssa-tmp.h"
#include "hphp/runtime/vm/jit/analysis.h"
namespace HPHP { namespace jit {
namespace {
//////////////////////////////////////////////////////////////////////
// Helper for returning the lowest index of an AStack range, non inclusive.
// I.e. a AStack class affects stack slots in [sp+offset,lowest_offset).
int32_t lowest_offset(AStack stk) {
  // Compute in 64 bits, then clamp at INT32_MIN before narrowing so the
  // subtraction cannot underflow the 32-bit result.
  auto const kMin32 = int64_t{std::numeric_limits<int32_t>::min()};
  auto const low = int64_t{stk.offset} - int64_t{stk.size};
  return safe_cast<int32_t>(std::max(low, kMin32));
}
/*
 * Render an AliasClass bit pattern as a short string. Compound patterns
 * (Empty/Heap/UnkTV/Unk/Elem) have fixed spellings; otherwise each set bit
 * gets a two/three-letter abbreviation. Bits present in `skip` are omitted
 * because the caller conveys them via the specialized-tag portion instead.
 */
std::string bit_str(AliasClass::rep bits, AliasClass::rep skip) {
  using A = AliasClass;
  // Exact matches for the named compound patterns.
  switch (bits) {
  case A::BEmpty: return "Empty";
  case A::BHeap: return "Heap";
  case A::BUnknownTV: return "UnkTV";
  case A::BUnknown: return "Unk";
  case A::BElem: return "Elem";
  case A::BFrame: break;
  case A::BIterPos: break;
  case A::BIterBase: break;
  case A::BProp: break;
  case A::BElemI: break;
  case A::BElemS: break;
  case A::BStack: break;
  case A::BMIState: break;
  case A::BRef: break;
  }
  // Otherwise build the name bit by bit.
  auto ret = std::string{};
  auto const bset = std::bitset<32>{bits};
  for (auto i = 0; i < 32; ++i) {
    if (!bset.test(i)) continue;
    if ((1ul << i) & skip) continue;
    switch (1ul << i) {
    case A::BEmpty:
    case A::BHeap:
    case A::BUnknown:
    case A::BUnknownTV:
    case A::BElem:
      // Compound patterns are not single bits; they were handled above.
      always_assert(0);
    case A::BFrame: ret += "Fr"; break;
    case A::BIterPos: ret += "ItP"; break;
    case A::BIterBase: ret += "ItB"; break;
    case A::BProp: ret += "Pr"; break;
    case A::BElemI: ret += "Ei"; break;
    case A::BElemS: ret += "Es"; break;
    case A::BStack: ret += "St"; break;
    case A::BMIState: ret += "Mis"; break;
    case A::BRef: ret += "Ref"; break;
    }
  }
  return ret;
}
//////////////////////////////////////////////////////////////////////
// Fold a frame-relative specialization (fp + id pair) into `hash`.
template<class T>
size_t framelike_hash(size_t hash, T t) {
  return folly::hash::hash_combine(hash, t.fp, t.id);
}
// Shared invariant for frame-relative specializations: fp must be a FramePtr.
template<class T>
void framelike_checkInvariants(T t) {
  assertx(t.fp->type() <= TFramePtr);
}
// Two frame-relative specializations are equal iff both fp and id match.
template<class T, class U>
bool framelike_equal(T a, U b) {
  return a.fp == b.fp && a.id == b.id;
}
//////////////////////////////////////////////////////////////////////
}
/*
 * Build an AStack from a base pointer plus (offset, size), rebasing the
 * offset so it is always relative to the outermost frame's DefSP.
 */
AStack::AStack(SSATmp* base, int32_t o, int32_t s)
  : offset(o), size(s)
{
  // Always canonicalize to the outermost frame pointer.
  if (base->isA(TStkPtr)) {
    // Stack pointers must come straight from DefSP; translate to its offset.
    auto const defSP = base->inst();
    always_assert_flog(defSP->is(DefSP),
                       "unexpected StkPtr: {}\n", base->toString());
    offset -= defSP->extra<DefSP>()->offset.offset;
    return;
  }
  assertx(base->isA(TFramePtr));
  auto const defInlineFP = base->inst();
  if (defInlineFP->is(DefInlineFP)) {
    // Inlined frame: rebase through the inlined frame's spOffset onto the
    // enclosing DefSP. Only a single level of inlining is handled here
    // (asserted below: the SP's source must be the outermost DefFP).
    auto const sp = defInlineFP->src(0)->inst();
    offset += defInlineFP->extra<DefInlineFP>()->spOffset.offset;
    offset -= sp->extra<DefSP>()->offset.offset;
    always_assert_flog(sp->src(0)->inst()->is(DefFP),
                       "failed to canonicalize to outermost FramePtr: {}\n",
                       sp->src(0)->toString());
  }
}
//////////////////////////////////////////////////////////////////////
/*
 * Hash an AliasClass: mix the bit pattern with the specialized tag, then
 * fold in whichever specialization payload the tag selects.
 */
size_t AliasClass::Hash::operator()(AliasClass acls) const {
  auto const hash = folly::hash::twang_mix64(
    acls.m_bits | static_cast<uint32_t>(acls.m_stag)
  );
  switch (acls.m_stag) {
  case STag::None:
    return hash;
  case STag::Frame:    return framelike_hash(hash, acls.m_frame);
  case STag::IterPos:  return framelike_hash(hash, acls.m_iterPos);
  case STag::IterBase: return framelike_hash(hash, acls.m_iterBase);
  case STag::IterBoth: return framelike_hash(hash, acls.m_iterBoth);
  case STag::Prop:
    return folly::hash::hash_combine(hash,
                                     acls.m_prop.obj,
                                     acls.m_prop.offset);
  case STag::ElemI:
    return folly::hash::hash_combine(hash,
                                     acls.m_elemI.arr,
                                     acls.m_elemI.idx);
  case STag::ElemS:
    // Static string keys hash by value, not identity.
    return folly::hash::hash_combine(hash,
                                     acls.m_elemS.arr,
                                     acls.m_elemS.key->hash());
  case STag::Stack:
    return folly::hash::hash_combine(hash,
                                     acls.m_stack.offset,
                                     acls.m_stack.size);
  case STag::MIState:
    return folly::hash::hash_combine(hash, acls.m_mis.offset);
  case STag::Ref:
    return folly::hash::hash_combine(hash, acls.m_ref.boxed);
  }
  not_reached();
}
//////////////////////////////////////////////////////////////////////
/*
 * For each specialization: generate the AliasClass constructor from the
 * corresponding A* struct, and is_*(), which yields the payload only when
 * this class is contained in the matching "Any" class.
 */
#define X(What, what)                                       \
  AliasClass::AliasClass(A##What x)                         \
    : m_bits(B##What)                                       \
    , m_stag(STag::What)                                    \
    , m_##what(x)                                           \
  {                                                         \
    assertx(checkInvariants());                             \
  }                                                         \
                                                            \
  folly::Optional<A##What> AliasClass::is_##what() const {  \
    if (*this <= A##What##Any) return what();               \
    return folly::none;                                     \
  }
X(Frame, frame)
X(IterPos, iterPos)
X(IterBase, iterBase)
X(Prop, prop)
X(ElemI, elemI)
X(ElemS, elemS)
X(Stack, stack)
X(MIState, mis)
X(Ref, ref)
#undef X
/*
 * Plain accessors: return the payload iff the specialized tag matches
 * exactly (no IterBoth splitting -- those are generated separately below).
 */
#define X(What, what)                                       \
  folly::Optional<A##What> AliasClass::what() const {       \
    if (m_stag == STag::What) return m_##what;              \
    return folly::none;                                     \
  }
X(Frame, frame)
X(Prop, prop)
X(ElemI, elemI)
X(ElemS, elemS)
X(Stack, stack)
X(MIState, mis)
X(Ref, ref)
#undef X
/*
 * Iterator accessors: like the plain ones, but an IterBoth tag also
 * satisfies either individual iterator specialization, synthesized from the
 * combined payload.
 */
#define X(What, what)                                       \
  folly::Optional<A##What> AliasClass::what() const {       \
    if (m_stag == STag::What) return m_##what;              \
    if (m_stag == STag::IterBoth) {                         \
      auto const ui = asUIter();                            \
      assertx(ui.hasValue());                               \
      return A##What { ui->fp, ui->id };                    \
    }                                                       \
    return folly::none;                                     \
  }
X(IterPos, iterPos)
X(IterBase, iterBase)
#undef X
// Map a specialized tag to the bit(s) it refines; IterBoth covers both
// iterator bits at once.
AliasClass::rep AliasClass::stagBits(STag tag) {
  switch (tag) {
  case STag::None:     return BEmpty;
  case STag::Frame:    return BFrame;
  case STag::IterPos:  return BIterPos;
  case STag::IterBase: return BIterBase;
  case STag::Prop:     return BProp;
  case STag::ElemI:    return BElemI;
  case STag::ElemS:    return BElemS;
  case STag::Stack:    return BStack;
  case STag::MIState:  return BMIState;
  case STag::Ref:      return BRef;
  case STag::IterBoth: return static_cast<rep>(BIterPos | BIterBase);
  }
  always_assert(0);
}
// Validate per-specialization invariants; returns true so it can be used
// inside assertx().
bool AliasClass::checkInvariants() const {
  switch (m_stag) {
  case STag::None:     break;
  case STag::Frame:    framelike_checkInvariants(m_frame);    break;
  case STag::IterPos:  framelike_checkInvariants(m_iterPos);  break;
  case STag::IterBase: framelike_checkInvariants(m_iterBase); break;
  case STag::IterBoth: framelike_checkInvariants(m_iterBoth); break;
  case STag::Prop:     break;
  case STag::ElemI:    break;
  case STag::Stack:
    // Empty stack ranges are not representable.
    assertx(m_stack.size > 0);
    break;
  case STag::ElemS:
    // String keys must be static so identity comparison is meaningful.
    assertx(m_elemS.key->isStatic());
    break;
  case STag::MIState:
    break;
  case STag::Ref:
    assertx(m_ref.boxed->isA(TBoxedCell));
    break;
  }
  // The specialized tag must refine bits that are actually set.
  assertx(m_bits & stagBits(m_stag));
  return true;
}
// Compare the specialization payloads of two classes that already have the
// same tag (precondition asserted below).
bool AliasClass::equivData(AliasClass o) const {
  assertx(m_stag == o.m_stag);
  switch (m_stag) {
  case STag::None:     return true;
  case STag::Frame:    return framelike_equal(m_frame, o.m_frame);
  case STag::IterPos:  return framelike_equal(m_iterPos, o.m_iterPos);
  case STag::IterBase: return framelike_equal(m_iterBase, o.m_iterBase);
  case STag::IterBoth: return framelike_equal(m_iterBoth, o.m_iterBoth);
  case STag::Prop:     return m_prop.obj == o.m_prop.obj &&
                              m_prop.offset == o.m_prop.offset;
  case STag::ElemI:    return m_elemI.arr == o.m_elemI.arr &&
                              m_elemI.idx == o.m_elemI.idx;
  // Static string keys compare by pointer identity.
  case STag::ElemS:    return m_elemS.arr == o.m_elemS.arr &&
                              m_elemS.key == o.m_elemS.key;
  case STag::Stack:    return m_stack.offset == o.m_stack.offset &&
                              m_stack.size == o.m_stack.size;
  case STag::MIState:  return m_mis.offset == o.m_mis.offset;
  case STag::Ref:      return m_ref.boxed == o.m_ref.boxed;
  }
  not_reached();
}
// Two classes are equal when the coarse bits, the specialization tag, and
// the tag-specific payload all agree.
bool AliasClass::operator==(AliasClass o) const {
  if (m_bits != o.m_bits) return false;
  if (m_stag != o.m_stag) return false;
  return equivData(o);
}
/*
 * Merge two classes with the same stag under the already-unioned coarse
 * bits `newBits'.  For most tags the payloads must differ here (an equal
 * payload would have produced a precise union earlier), so the specialized
 * data is simply dropped.  Stack ranges are instead widened to a range
 * covering both operands.
 */
AliasClass AliasClass::unionData(rep newBits, AliasClass a, AliasClass b) {
  assertx(a.m_stag == b.m_stag);
  switch (a.m_stag) {
  case STag::None:
    break;
  case STag::Frame:
  case STag::IterPos:
  case STag::IterBase:
  case STag::Prop:
  case STag::ElemI:
  case STag::ElemS:
  case STag::MIState:
  case STag::Ref:
  case STag::IterBoth:
    assertx(!a.equivData(b));
    break;
  case STag::Stack:
    {
      auto const stkA = a.m_stack;
      auto const stkB = b.m_stack;
      // Make a stack range big enough to contain both of them.
      auto const highest = std::max(stkA.offset, stkB.offset);
      auto const lowest = std::min(lowest_offset(stkA), lowest_offset(stkB));
      auto const newStack = AStack { highest, highest - lowest };
      auto ret = AliasClass{newBits};
      // Placement-new into the union member, then set the tag to match.
      new (&ret.m_stack) AStack(newStack);
      ret.m_stag = STag::Stack;
      assertx(ret.checkInvariants());
      assertx(a <= ret && b <= ret);
      return ret;
    }
  }
  // Specialized data discarded: return just the coarse bits.
  return AliasClass{newBits};
}
/*
 * Try to form a precise union of two classes whose stags differ (and are
 * both non-None).  Returns folly::none if no precise combination exists.
 */
folly::Optional<AliasClass>
AliasClass::precise_diffSTag_unionData(rep newBits,
                                       AliasClass a,
                                       AliasClass b) {
  assertx(a.m_stag != b.m_stag &&
          a.m_stag != STag::None &&
          b.m_stag != STag::None);

  // The only precise union with different stags we support so far is iterator
  // stuff. If that works, return it, otherwise none.
  auto const u1 = a.asUIter();
  auto const u2 = b.asUIter();
  if (u1 && u2 && framelike_equal(*u1, *u2)) {
    auto ret = AliasClass{newBits};
    // Both sides name the same iterator; represent it with the union tag.
    new (&ret.m_iterBoth) UIterBoth(*u1);
    ret.m_stag = STag::IterBoth;
    return ret;
  }

  return folly::none;
}
/*
 * Return the exact union of *this and `o' if it is representable as a
 * single AliasClass, otherwise folly::none (the caller will fall back to a
 * widening union).
 */
folly::Optional<AliasClass> AliasClass::precise_union(AliasClass o) const {
  // Subset relationships make the union trivially one of the operands.
  if (o <= *this) return *this;
  if (*this <= o) return o;

  auto const unioned = static_cast<rep>(m_bits | o.m_bits);

  // For a precise union, we need to make sure the returned class is not any
  // bigger than it should be.  This means we can't deal with situations where
  // we have different stags, and right now we also don't try to deal with
  // situations that have the same stag in a combinable way.  (E.g. two
  // adjacent AStack ranges.)
  auto const stag1 = m_stag;
  auto const stag2 = o.m_stag;
  if (stag1 == STag::None && stag2 == STag::None) {
    return AliasClass{unioned};
  }
  if (stag1 == STag::None && stag2 != STag::None) {
    return o.precise_union(*this); // flip args
  }

  assertx(stag1 != STag::None);
  if (stag2 != STag::None) {
    if (stag1 == stag2) {
      // We would've had o <= *this or vice versa if there was an easy precise
      // union.
      return folly::none;
    }
    return precise_diffSTag_unionData(unioned, *this, o);
  }
  // If `o' has generic coverage of our specialized bits, keeping our
  // specialization would make the result too small.
  if (o.m_bits & stagBits(stag1)) return folly::none;

  // Keep the data and stag from this, but change its bits.
  auto ret = *this;
  ret.m_bits = unioned;
  assertx(ret.m_stag == stag1);
  return ret;
}
/*
 * General (possibly widening) union.  Tries the precise paths first, then
 * falls back to unioning the coarse bits while keeping at most one side's
 * specialized payload.
 */
AliasClass AliasClass::operator|(AliasClass o) const {
  if (auto const c = precise_union(o)) return *c;

  auto const unioned = static_cast<rep>(m_bits | o.m_bits);

  // If they have the same stag, try to merge them with unionData.
  auto stag1 = m_stag;
  auto stag2 = o.m_stag;
  if (stag1 == stag2) return unionData(unioned, *this, o);

  // If one of the alias classes have a non-None stag, we can only keep it if
  // the other doesn't have any of the corresponding bits set.
  if (stag1 != STag::None && (o.m_bits & stagBits(stag1))) stag1 = STag::None;
  if (stag2 != STag::None && (m_bits & stagBits(stag2))) stag2 = STag::None;

  auto ret = AliasClass{unioned};
  if (stag1 == stag2) return ret;      // both None.

  /*
   * Union operations are guaranteed to be commutative, so if there are two
   * non-None stags, we have to consistently choose between them if we're going
   * to keep one.  For now we keep the one with a smaller `rep' value, instead
   * of discarding both.
   *
   * We can also assume we're not in any of the situations that
   * precise_diffSTag_unionData supported, and that neither *this nor `o' are
   * subtypes of each other, because we already tried both of those things.
   *
   * Note also that we might be in a situation where one of the STags is
   * representing a union of more primitive STags.  For example we could have
   * an IterPos and an IterBoth.  But for this case, we've already thrown away
   * the overlap by setting stags to None above.
   */
  const AliasClass* chosen = &o;
  auto const stag = [&] () -> STag {
    if (stag1 != STag::None) {
      if (stag2 == STag::None || stagBits(stag1) < stagBits(stag2)) {
        chosen = this;
        return stag1;
      }
    }
    return stag2;
  }();

  // Copy the chosen side's payload into the union member matching `stag'.
  switch (stag) {
  case STag::None:
    break;
  case STag::IterPos: new (&ret.m_iterPos) AIterPos(chosen->m_iterPos); break;
  case STag::IterBase: new (&ret.m_iterBase) AIterBase(chosen->m_iterBase);
    break;
  case STag::IterBoth: new (&ret.m_iterBoth) UIterBoth(chosen->m_iterBoth);
    break;
  case STag::Frame:   new (&ret.m_frame) AFrame(chosen->m_frame); break;
  case STag::Prop:    new (&ret.m_prop) AProp(chosen->m_prop); break;
  case STag::ElemI:   new (&ret.m_elemI) AElemI(chosen->m_elemI); break;
  case STag::ElemS:   new (&ret.m_elemS) AElemS(chosen->m_elemS); break;
  case STag::Stack:   new (&ret.m_stack) AStack(chosen->m_stack); break;
  case STag::MIState: new (&ret.m_mis) AMIState(chosen->m_mis); break;
  case STag::Ref:     new (&ret.m_ref) ARef(chosen->m_ref); break;
  }
  ret.m_stag = stag;
  return ret;
}
/*
 * Subclass check on the tag-specific payloads, given the same stag on both
 * sides.  Only Stack ranges have a non-trivial ordering (containment); every
 * other payload is a subclass only when it's equal.
 */
bool AliasClass::subclassData(AliasClass o) const {
  assertx(m_stag == o.m_stag);
  switch (m_stag) {
  case STag::None:
  case STag::Frame:
  case STag::IterPos:
  case STag::IterBase:
  case STag::IterBoth:
  case STag::Prop:
  case STag::ElemI:
  case STag::ElemS:
  case STag::MIState:
  case STag::Ref:
    return equivData(o);
  case STag::Stack:
    // Contained iff our interval lies within o's interval.
    return m_stack.offset <= o.m_stack.offset &&
           lowest_offset(m_stack) >= lowest_offset(o.m_stack);
  }
  not_reached();
}
/*
 * View this class's iterator information, if any, in the generic UIterBoth
 * shape.  Returns folly::none for tags that carry no iterator payload.
 *
 * (The IterBoth case previously had an unreachable `break' after its
 * return; removed as dead code.)
 */
folly::Optional<AliasClass::UIterBoth> AliasClass::asUIter() const {
  switch (m_stag) {
  case STag::None:
  case STag::Frame:
  case STag::Prop:
  case STag::ElemI:
  case STag::ElemS:
  case STag::MIState:
  case STag::Ref:
  case STag::Stack:
    return folly::none;
  case STag::IterPos:  return UIterBoth { m_iterPos.fp, m_iterPos.id };
  case STag::IterBase: return UIterBoth { m_iterBase.fp, m_iterBase.id };
  case STag::IterBoth: return m_iterBoth;
  }
  not_reached();
}
/*
 * Both classes are known to carry iterator data with overlapping bits (at
 * least one must be the IterBoth union tag); they alias iff they name the
 * same frame-local iterator.
 */
bool AliasClass::refersToSameIterHelper(AliasClass o) const {
  assertx(stagBits(m_stag) & stagBits(o.m_stag));
  assertx(m_stag == STag::IterBoth || o.m_stag == STag::IterBoth);
  return framelike_equal(*asUIter(), *o.asUIter());
}
/*
* Should return true if this's specialized data is entirely contained in o's
* specialized data, for only the portion of the data relevant for
* "relevant_bits", with the precondition that m_stag != o.m_stag.
*
* We know the only case this can happen is if at least one of them is an
* iterator, and they are only going to have a subclass relationship inside of
* relevant_bits only if they refer to the same iterator.
*/
// relevant_bits is currently unused: the only supported case is iterators,
// where same-iterator identity fully decides the relationship.
bool AliasClass::diffSTagSubclassData(rep relevant_bits, AliasClass o) const {
  return refersToSameIterHelper(o);
}
/*
* This function conceptually should check that the portions of the specialized
* information on the intersection of the stagBits for the two classes may
* overlap.
*
* Again since we only deal with iterators, the intersection must be iterator
* information, and we know it only intersects if they are the same iterator
* id.
*/
// As with diffSTagSubclassData, relevant_bits is unused: iterator identity
// is the only overlap criterion supported for differing stags.
bool AliasClass::diffSTagMaybeData(rep relevant_bits, AliasClass o) const {
  return refersToSameIterHelper(o);
}
/*
 * Subclass (subset) test: is every location in *this also in `o'?
 */
bool AliasClass::operator<=(AliasClass o) const {
  if (m_bits == BEmpty) return true;

  auto const isect = static_cast<rep>(m_bits & o.m_bits);
  if (isect != m_bits) return false;

  // If they have the same specialized tag, then since isect is equal to
  // m_bits, the stagBits must be part of the intersection or be BEmpty.  This
  // means they can only be in a subclass relationship if that specialized data
  // is.
  if (m_stag == o.m_stag) return subclassData(o);

  auto const sbits  = stagBits(m_stag);
  auto const osbits = stagBits(o.m_stag);

  /*
   * If the stag bits for the two classes overlap, but the stags were
   * different, we're dealing with a union-style STag (like IterBoth).
   */
  if (auto const inner_bits = (sbits & osbits /*& isect is redundant*/)) {
    return diffSTagSubclassData(static_cast<rep>(inner_bits), o);
  }

  /*
   * Different stags, with non-overlapping bits.  The sbits must be part of the
   * intersection (or be BEmpty), since isect == m_bits above, but osbits may
   * or may not be.  So this breaks down into the following cases:
   *
   * If the osbits are part of the intersection, then this can't be a subclass
   * of `o', because this has only generic information for that bit but it is
   * set in isect.  If the osbits was BEmpty, osbits & isect is zero, which
   * avoids this case.
   *
   * The remaining situations are that m_stag is STag::None, in which case it
   * is a subclass since osbits wasn't in the intersection.  Or that m_stag has
   * a bit that is in the isect (since m_bits == isect), and that bit is set in
   * o.m_bits.  In either case this is a subclass, so we can just return true.
   */
  if (osbits & isect) return false;
  return true;
}
/*
 * May-alias test on tag-specific payloads, given both sides have the same
 * (non-None) stag.  Conservative: returns true unless the payloads prove
 * the locations are distinct.
 */
bool AliasClass::maybeData(AliasClass o) const {
  assertx(m_stag == o.m_stag);
  switch (m_stag) {
  case STag::None:
    not_reached();  // handled outside
  case STag::Frame:    return framelike_equal(m_frame, o.m_frame);
  case STag::IterPos:  return framelike_equal(m_iterPos, o.m_iterPos);
  case STag::IterBase: return framelike_equal(m_iterBase, o.m_iterBase);
  case STag::IterBoth: return framelike_equal(m_iterBoth, o.m_iterBoth);
  case STag::Prop:
    /*
     * We can't tell if two objects could be the same from here in general, but
     * we can rule out simple cases based on type.  The props can't be the same
     * if they are at different offsets, though.
     *
     * For now we're ignoring the type information, and only using offset.
     * TODO(#2939547) TODO(#2884927)
     */
    if (m_prop.offset != o.m_prop.offset) return false;
    return true;

  /*
   * Two arrays can generally be the same even if they aren't the same SSATmp,
   * because we might have loaded it from more than one place, and we have
   * linear chains in array modification instructions.
   */
  case STag::ElemI:
    if (m_elemI.idx != o.m_elemI.idx) return false;
    return true;
  case STag::ElemS:
    if (m_elemS.key != o.m_elemS.key) return false;
    return true;

  case STag::Stack:
    {
      // True if there's a non-empty intersection of the two stack slot
      // intervals.
      auto const lowest_upper = std::min(m_stack.offset, o.m_stack.offset);
      auto const highest_lower = std::max(
        lowest_offset(m_stack),
        lowest_offset(o.m_stack)
      );
      return lowest_upper > highest_lower;
    }

  case STag::MIState:
    return m_mis.offset == o.m_mis.offset;

  /*
   * Two boxed cells can generally refer to the same RefData.
   */
  case STag::Ref:
    return true;
  }
  not_reached();
}
/*
* TODO(#2884927): we probably need to be aware of possibly-integer-like string
* keys here before we can start using ElemS for anything. (Or else ensure
* that we never use ElemS with an integer-like string.)
*/
/*
 * May-alias test: could *this and `o' refer to a common location?
 */
bool AliasClass::maybe(AliasClass o) const {
  auto const isect = static_cast<rep>(m_bits & o.m_bits);
  if (isect == 0) return false;

  auto const sbits = stagBits(m_stag);
  auto const osbits = stagBits(o.m_stag);

  /*
   * If a shared portion of the specialized information is in the intersection,
   * and the intersection is otherwise empty, we can just check if the data
   * associated with the intersecting bits is in a maybe relationship.
   */
  if (auto const inner_bits = (sbits & osbits & isect)) {
    if ((inner_bits & isect) == isect) {
      if (m_stag == o.m_stag) return maybeData(o);
      return diffSTagMaybeData(static_cast<rep>(inner_bits), o);
    }
  }

  // Otherwise, the intersection is non-empty and has no specialized
  // information inside the intersecting portion to consult.  So we know they
  // overlap.
  return true;
}
//////////////////////////////////////////////////////////////////////
/*
 * Replace any SSATmp pointers in the specialized payload with their
 * canonical representatives.  Tags that carry no SSATmp are returned
 * unchanged.
 */
AliasClass canonicalize(AliasClass a) {
  using T = AliasClass::STag;
  switch (a.m_stag) {
  case T::Prop:  a.m_prop.obj  = canonical(a.m_prop.obj);  return a;
  case T::ElemI: a.m_elemI.arr = canonical(a.m_elemI.arr); return a;
  case T::ElemS: a.m_elemS.arr = canonical(a.m_elemS.arr); return a;
  case T::None:
  case T::Frame:
  case T::IterPos:
  case T::IterBase:
  case T::IterBoth:
  case T::Stack:
  case T::MIState:
  case T::Ref:
    return a;
  }
  not_reached();
}
//////////////////////////////////////////////////////////////////////
/*
 * Debug-printable representation: the coarse bits (minus those implied by
 * the stag), followed by the specialized payload if any.
 */
std::string show(AliasClass acls) {
  using A  = AliasClass;
  auto ret = bit_str(acls.m_bits, A::stagBits(acls.m_stag));
  if (!ret.empty() && acls.m_stag != A::STag::None) {
    ret += ' ';
  }

  switch (acls.m_stag) {
  case A::STag::None:
    break;
  case A::STag::Frame:
    folly::format(&ret, "Fr t{}:{}", acls.m_frame.fp->id(), acls.m_frame.id);
    break;
  case A::STag::IterPos:
    folly::format(&ret, "ItP t{}:{}", acls.m_iterPos.fp->id(),
      acls.m_iterPos.id);
    break;
  case A::STag::IterBase:
    folly::format(&ret, "ItB t{}:{}", acls.m_iterBase.fp->id(),
      acls.m_iterBase.id);
    break;
  case A::STag::IterBoth:
    folly::format(&ret, "It* t{}:{}", acls.m_iterBoth.fp->id(),
      acls.m_iterBoth.id);
    break;
  case A::STag::Prop:
    folly::format(&ret, "Pr t{}:{}", acls.m_prop.obj->id(), acls.m_prop.offset);
    break;
  case A::STag::ElemI:
    folly::format(&ret, "Ei t{}:{}", acls.m_elemI.arr->id(), acls.m_elemI.idx);
    break;
  case A::STag::ElemS:
    // Static-string key truncated to 10 chars for readability.
    folly::format(&ret, "Es t{}:{.10}", acls.m_elemS.arr->id(),
      acls.m_elemS.key);
    break;
  case A::STag::Stack:
    // A size of INT32_MAX means "everything at or below offset"; shown as '<'.
    folly::format(&ret, "St {}{}",
      acls.m_stack.offset,
      acls.m_stack.size == std::numeric_limits<int32_t>::max()
        ? "<"
        : folly::sformat(";{}", acls.m_stack.size)
    );
    break;
  case A::STag::MIState:
    folly::format(&ret, "Mis {}", acls.m_mis.offset);
    break;
  case A::STag::Ref:
    folly::format(&ret, "Ref {}", acls.m_ref.boxed->id());
    break;
  }

  return ret;
}
//////////////////////////////////////////////////////////////////////
}}
// -------- (boundary between concatenated source files; stray '|' artifact removed)
/******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2016-2019 Baldur Karlsson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
#include "d3d9_device.h"
#include "core/core.h"
#include "driver/dxgi/dxgi_common.h"
#include "serialise/serialiser.h"
#include "d3d9_debug.h"
// Wrap a real IDirect3DDevice9.  When running inside a captured application
// (not the replay UI), register this device and its window with RenderDoc so
// the overlay/capture machinery can find it.
WrappedD3DDevice9::WrappedD3DDevice9(IDirect3DDevice9 *device, HWND wnd)
    : m_RefCounter(device, false),
      m_SoftRefCounter(NULL, false),
      m_device(device),
      m_DebugManager(NULL)
{
  m_FrameCounter = 0;

  // refcounters implicitly construct with one reference, but we don't start with any soft
  // references.
  m_SoftRefCounter.Release();
  m_InternalRefcount = 0;
  m_Alive = true;

  if(!RenderDoc::Inst().IsReplayApp())
  {
    RenderDoc::Inst().AddDeviceFrameCapturer((IDirect3DDevice9 *)this, this);

    m_Wnd = wnd;

    if(wnd != NULL)
      RenderDoc::Inst().AddFrameCapturer((IDirect3DDevice9 *)this, wnd, this);
  }
  else
  {
    m_Wnd = NULL;
  }
}
// Self-destruct once all external references are gone and any remaining soft
// references are accounted for by our own internal refcount.
void WrappedD3DDevice9::CheckForDeath()
{
  if(!m_Alive)
    return;

  if(m_RefCounter.GetRefCount() != 0)
    return;

  RDCASSERT(m_SoftRefCounter.GetRefCount() >= m_InternalRefcount);

  if(m_SoftRefCounter.GetRefCount() > m_InternalRefcount)
    return;

  m_Alive = false;
  delete this;
}
// Deregister from RenderDoc, then drop our owned resources: the debug
// manager and the single reference we hold on the real device.
WrappedD3DDevice9::~WrappedD3DDevice9()
{
  RenderDoc::Inst().RemoveDeviceFrameCapturer((IDirect3DDevice9 *)this);

  if(m_Wnd != NULL)
    RenderDoc::Inst().RemoveFrameCapturer((IDirect3DDevice9 *)this, m_Wnd);

  SAFE_DELETE(m_DebugManager);
  SAFE_RELEASE(m_device);
}
// Answer RenderDoc's private interface query ourselves; any other IID is
// logged and forwarded to the real device.
HRESULT WrappedD3DDevice9::QueryInterface(REFIID riid, void **ppvObject)
{
  // RenderDoc UUID {A7AA6116-9C8D-4BBA-9083-B4D816B71B78}
  static const GUID IRenderDoc_uuid = {
      0xa7aa6116, 0x9c8d, 0x4bba, {0x90, 0x83, 0xb4, 0xd8, 0x16, 0xb7, 0x1b, 0x78}};

  if(riid == IRenderDoc_uuid)
  {
    AddRef();
    *ppvObject = (IUnknown *)this;
    return S_OK;
  }

  WarnUnknownGUID("IDirect3DDevice9", riid);

  return m_device->QueryInterface(riid, ppvObject);
}
void WrappedD3DDevice9::LazyInit()
{
m_DebugManager = new D3D9DebugManager(this);
}
// Frame capture is not implemented for D3D9; these are stubs that satisfy
// the IFrameCapturer interface and log an error if invoked.
void WrappedD3DDevice9::StartFrameCapture(void *dev, void *wnd)
{
  RDCERR("Capture not supported on D3D9");
}

bool WrappedD3DDevice9::EndFrameCapture(void *dev, void *wnd)
{
  RDCERR("Capture not supported on D3D9");
  return false;
}

bool WrappedD3DDevice9::DiscardFrameCapture(void *dev, void *wnd)
{
  RDCERR("Capture not supported on D3D9");
  return false;
}
// The methods below are thin pass-throughs: RenderDoc does not need to
// intercept any of this state on D3D9, so each call is forwarded unchanged
// to the wrapped device.
HRESULT __stdcall WrappedD3DDevice9::TestCooperativeLevel()
{
  return m_device->TestCooperativeLevel();
}

UINT __stdcall WrappedD3DDevice9::GetAvailableTextureMem()
{
  return m_device->GetAvailableTextureMem();
}

HRESULT __stdcall WrappedD3DDevice9::EvictManagedResources()
{
  return m_device->EvictManagedResources();
}

HRESULT __stdcall WrappedD3DDevice9::GetDirect3D(IDirect3D9 **ppD3D9)
{
  return m_device->GetDirect3D(ppD3D9);
}

HRESULT __stdcall WrappedD3DDevice9::GetDeviceCaps(D3DCAPS9 *pCaps)
{
  return m_device->GetDeviceCaps(pCaps);
}

HRESULT __stdcall WrappedD3DDevice9::GetDisplayMode(UINT iSwapChain, D3DDISPLAYMODE *pMode)
{
  return m_device->GetDisplayMode(iSwapChain, pMode);
}

HRESULT __stdcall WrappedD3DDevice9::GetCreationParameters(D3DDEVICE_CREATION_PARAMETERS *pParameters)
{
  return m_device->GetCreationParameters(pParameters);
}

HRESULT __stdcall WrappedD3DDevice9::SetCursorProperties(UINT XHotSpot, UINT YHotSpot,
                                                         IDirect3DSurface9 *pCursorBitmap)
{
  return m_device->SetCursorProperties(XHotSpot, YHotSpot, pCursorBitmap);
}

void __stdcall WrappedD3DDevice9::SetCursorPosition(int X, int Y, DWORD Flags)
{
  m_device->SetCursorPosition(X, Y, Flags);
}

BOOL __stdcall WrappedD3DDevice9::ShowCursor(BOOL bShow)
{
  return m_device->ShowCursor(bShow);
}

// NOTE(review): additional swapchains are returned unwrapped, so Presents on
// them bypass the overlay.
HRESULT __stdcall WrappedD3DDevice9::CreateAdditionalSwapChain(
    D3DPRESENT_PARAMETERS *pPresentationParameters, IDirect3DSwapChain9 **pSwapChain)
{
  return m_device->CreateAdditionalSwapChain(pPresentationParameters, pSwapChain);
}

HRESULT __stdcall WrappedD3DDevice9::GetSwapChain(UINT iSwapChain, IDirect3DSwapChain9 **pSwapChain)
{
  return m_device->GetSwapChain(iSwapChain, pSwapChain);
}

UINT __stdcall WrappedD3DDevice9::GetNumberOfSwapChains()
{
  return m_device->GetNumberOfSwapChains();
}

HRESULT __stdcall WrappedD3DDevice9::Reset(D3DPRESENT_PARAMETERS *pPresentationParameters)
{
  return m_device->Reset(pPresentationParameters);
}
// Intercept Present to tick RenderDoc and draw the in-app overlay before
// forwarding to the real device.
//
// Fixes over the previous version: the swapchain from GetSwapChain, the
// backbuffer from GetBackBuffer and the state block from CreateStateBlock
// were never Released, leaking one COM reference to each per presented
// frame; the stateBlock/backBuffer pointers were also used without NULL
// initialization on failure paths.
HRESULT __stdcall WrappedD3DDevice9::Present(CONST RECT *pSourceRect, CONST RECT *pDestRect,
                                             HWND hDestWindowOverride, CONST RGNDATA *pDirtyRegion)
{
  // if(m_State == WRITING_IDLE)
  RenderDoc::Inst().Tick();

  IDirect3DSwapChain9 *swapChain = NULL;
  m_device->GetSwapChain(0, &swapChain);
  D3DPRESENT_PARAMETERS presentParams = {};
  if(swapChain)
  {
    swapChain->GetPresentParameters(&presentParams);
    // GetSwapChain AddRef'd the swapchain; release it or we leak a
    // reference every frame.
    SAFE_RELEASE(swapChain);
  }

  HWND wnd = presentParams.hDeviceWindow;
  if(hDestWindowOverride != NULL)
    wnd = hDestWindowOverride;

  bool activeWindow = RenderDoc::Inst().IsActiveWindow((IDirect3DDevice9 *)this, wnd);

  m_FrameCounter++;

  // if (m_State == WRITING_IDLE)
  if(wnd != NULL)
  {
    uint32_t overlay = RenderDoc::Inst().GetOverlayBits();

    if(overlay & eRENDERDOC_Overlay_Enabled)
    {
      HRESULT res = S_OK;
      res = m_device->BeginScene();

      // Snapshot all device state so the application never observes the
      // changes we make while rendering the overlay.
      IDirect3DStateBlock9 *stateBlock = NULL;
      HRESULT stateBlockRes = m_device->CreateStateBlock(D3DSBT_ALL, &stateBlock);

      IDirect3DSurface9 *backBuffer = NULL;
      res |= m_device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &backBuffer);

      if(backBuffer)
      {
        res |= m_device->SetRenderTarget(0, backBuffer);

        D3DSURFACE_DESC bbDesc;
        backBuffer->GetDesc(&bbDesc);

        D3DVIEWPORT9 viewport = {0, 0, bbDesc.Width, bbDesc.Height, 0.f, 1.f};
        res |= m_device->SetViewport(&viewport);

        GetDebugManager()->SetOutputDimensions(bbDesc.Width, bbDesc.Height);
        GetDebugManager()->SetOutputWindow(presentParams.hDeviceWindow);

        int flags = activeWindow ? RenderDoc::eOverlay_ActiveWindow : 0;
        flags |= RenderDoc::eOverlay_CaptureDisabled;

        std::string overlayText =
            RenderDoc::Inst().GetOverlayText(RDCDriver::D3D9, m_FrameCounter, flags);

        overlayText += "Captures not supported with D3D9\n";

        if(!overlayText.empty())
          GetDebugManager()->RenderText(0.0f, 0.0f, overlayText.c_str());
      }

      // Restore the application's state and release the COM objects
      // acquired above (previously leaked every frame).
      if(SUCCEEDED(stateBlockRes) && stateBlock)
        stateBlockRes = stateBlock->Apply();
      SAFE_RELEASE(stateBlock);
      SAFE_RELEASE(backBuffer);

      res |= m_device->EndScene();
    }
  }

  RenderDoc::Inst().AddActiveDriver(RDCDriver::D3D9, true);

  return m_device->Present(pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion);
}
// All remaining methods are thin pass-throughs to the wrapped device: D3D9
// capture is unsupported, so no resource tracking or serialisation happens
// here — each call is forwarded unchanged.
HRESULT __stdcall WrappedD3DDevice9::GetBackBuffer(UINT iSwapChain, UINT iBackBuffer,
                                                   D3DBACKBUFFER_TYPE Type,
                                                   IDirect3DSurface9 **ppBackBuffer)
{
  return m_device->GetBackBuffer(iSwapChain, iBackBuffer, Type, ppBackBuffer);
}

HRESULT __stdcall WrappedD3DDevice9::GetRasterStatus(UINT iSwapChain, D3DRASTER_STATUS *pRasterStatus)
{
  return m_device->GetRasterStatus(iSwapChain, pRasterStatus);
}

HRESULT __stdcall WrappedD3DDevice9::SetDialogBoxMode(BOOL bEnableDialogs)
{
  return m_device->SetDialogBoxMode(bEnableDialogs);
}

void __stdcall WrappedD3DDevice9::SetGammaRamp(UINT iSwapChain, DWORD Flags, CONST D3DGAMMARAMP *pRamp)
{
  m_device->SetGammaRamp(iSwapChain, Flags, pRamp);
}

void __stdcall WrappedD3DDevice9::GetGammaRamp(UINT iSwapChain, D3DGAMMARAMP *pRamp)
{
  m_device->GetGammaRamp(iSwapChain, pRamp);
}

// Resource creation: returned objects are NOT wrapped; the application gets
// the real D3D9 interfaces back.
HRESULT __stdcall WrappedD3DDevice9::CreateTexture(UINT Width, UINT Height, UINT Levels,
                                                   DWORD Usage, D3DFORMAT Format, D3DPOOL Pool,
                                                   IDirect3DTexture9 **ppTexture,
                                                   HANDLE *pSharedHandle)
{
  return m_device->CreateTexture(Width, Height, Levels, Usage, Format, Pool, ppTexture,
                                 pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::CreateVolumeTexture(UINT Width, UINT Height, UINT Depth,
                                                         UINT Levels, DWORD Usage, D3DFORMAT Format,
                                                         D3DPOOL Pool,
                                                         IDirect3DVolumeTexture9 **ppVolumeTexture,
                                                         HANDLE *pSharedHandle)
{
  return m_device->CreateVolumeTexture(Width, Height, Depth, Levels, Usage, Format, Pool,
                                       ppVolumeTexture, pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::CreateCubeTexture(UINT EdgeLength, UINT Levels, DWORD Usage,
                                                       D3DFORMAT Format, D3DPOOL Pool,
                                                       IDirect3DCubeTexture9 **ppCubeTexture,
                                                       HANDLE *pSharedHandle)
{
  return m_device->CreateCubeTexture(EdgeLength, Levels, Usage, Format, Pool, ppCubeTexture,
                                     pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::CreateVertexBuffer(UINT Length, DWORD Usage, DWORD FVF,
                                                        D3DPOOL Pool,
                                                        IDirect3DVertexBuffer9 **ppVertexBuffer,
                                                        HANDLE *pSharedHandle)
{
  return m_device->CreateVertexBuffer(Length, Usage, FVF, Pool, ppVertexBuffer, pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::CreateIndexBuffer(UINT Length, DWORD Usage, D3DFORMAT Format,
                                                       D3DPOOL Pool,
                                                       IDirect3DIndexBuffer9 **ppIndexBuffer,
                                                       HANDLE *pSharedHandle)
{
  return m_device->CreateIndexBuffer(Length, Usage, Format, Pool, ppIndexBuffer, pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::CreateRenderTarget(UINT Width, UINT Height, D3DFORMAT Format,
                                                        D3DMULTISAMPLE_TYPE MultiSample,
                                                        DWORD MultisampleQuality, BOOL Lockable,
                                                        IDirect3DSurface9 **ppSurface,
                                                        HANDLE *pSharedHandle)
{
  return m_device->CreateRenderTarget(Width, Height, Format, MultiSample, MultisampleQuality,
                                      Lockable, ppSurface, pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::CreateDepthStencilSurface(
    UINT Width, UINT Height, D3DFORMAT Format, D3DMULTISAMPLE_TYPE MultiSample,
    DWORD MultisampleQuality, BOOL Discard, IDirect3DSurface9 **ppSurface, HANDLE *pSharedHandle)
{
  return m_device->CreateDepthStencilSurface(Width, Height, Format, MultiSample, MultisampleQuality,
                                             Discard, ppSurface, pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::UpdateSurface(IDirect3DSurface9 *pSourceSurface,
                                                   CONST RECT *pSourceRect,
                                                   IDirect3DSurface9 *pDestinationSurface,
                                                   CONST POINT *pDestPoint)
{
  return m_device->UpdateSurface(pSourceSurface, pSourceRect, pDestinationSurface, pDestPoint);
}

HRESULT __stdcall WrappedD3DDevice9::UpdateTexture(IDirect3DBaseTexture9 *pSourceTexture,
                                                   IDirect3DBaseTexture9 *pDestinationTexture)
{
  return m_device->UpdateTexture(pSourceTexture, pDestinationTexture);
}

HRESULT __stdcall WrappedD3DDevice9::GetRenderTargetData(IDirect3DSurface9 *pRenderTarget,
                                                         IDirect3DSurface9 *pDestSurface)
{
  return m_device->GetRenderTargetData(pRenderTarget, pDestSurface);
}

HRESULT __stdcall WrappedD3DDevice9::GetFrontBufferData(UINT iSwapChain,
                                                        IDirect3DSurface9 *pDestSurface)
{
  return m_device->GetFrontBufferData(iSwapChain, pDestSurface);
}

HRESULT __stdcall WrappedD3DDevice9::StretchRect(IDirect3DSurface9 *pSourceSurface,
                                                 CONST RECT *pSourceRect,
                                                 IDirect3DSurface9 *pDestSurface,
                                                 CONST RECT *pDestRect, D3DTEXTUREFILTERTYPE Filter)
{
  return m_device->StretchRect(pSourceSurface, pSourceRect, pDestSurface, pDestRect, Filter);
}

HRESULT __stdcall WrappedD3DDevice9::ColorFill(IDirect3DSurface9 *pSurface, CONST RECT *pRect,
                                               D3DCOLOR color)
{
  return m_device->ColorFill(pSurface, pRect, color);
}

HRESULT __stdcall WrappedD3DDevice9::CreateOffscreenPlainSurface(UINT Width, UINT Height,
                                                                 D3DFORMAT Format, D3DPOOL Pool,
                                                                 IDirect3DSurface9 **ppSurface,
                                                                 HANDLE *pSharedHandle)
{
  return m_device->CreateOffscreenPlainSurface(Width, Height, Format, Pool, ppSurface, pSharedHandle);
}

HRESULT __stdcall WrappedD3DDevice9::SetRenderTarget(DWORD RenderTargetIndex,
                                                     IDirect3DSurface9 *pRenderTarget)
{
  return m_device->SetRenderTarget(RenderTargetIndex, pRenderTarget);
}

HRESULT __stdcall WrappedD3DDevice9::GetRenderTarget(DWORD RenderTargetIndex,
                                                     IDirect3DSurface9 **ppRenderTarget)
{
  return m_device->GetRenderTarget(RenderTargetIndex, ppRenderTarget);
}

HRESULT __stdcall WrappedD3DDevice9::SetDepthStencilSurface(IDirect3DSurface9 *pNewZStencil)
{
  return m_device->SetDepthStencilSurface(pNewZStencil);
}

HRESULT __stdcall WrappedD3DDevice9::GetDepthStencilSurface(IDirect3DSurface9 **ppZStencilSurface)
{
  return m_device->GetDepthStencilSurface(ppZStencilSurface);
}

HRESULT __stdcall WrappedD3DDevice9::BeginScene()
{
  return m_device->BeginScene();
}

HRESULT __stdcall WrappedD3DDevice9::EndScene()
{
  return m_device->EndScene();
}

HRESULT __stdcall WrappedD3DDevice9::Clear(DWORD Count, CONST D3DRECT *pRects, DWORD Flags,
                                           D3DCOLOR Color, float Z, DWORD Stencil)
{
  return m_device->Clear(Count, pRects, Flags, Color, Z, Stencil);
}

HRESULT __stdcall WrappedD3DDevice9::SetTransform(D3DTRANSFORMSTATETYPE State,
                                                  CONST D3DMATRIX *pMatrix)
{
  return m_device->SetTransform(State, pMatrix);
}

HRESULT __stdcall WrappedD3DDevice9::GetTransform(D3DTRANSFORMSTATETYPE State, D3DMATRIX *pMatrix)
{
  return m_device->GetTransform(State, pMatrix);
}

HRESULT __stdcall WrappedD3DDevice9::MultiplyTransform(D3DTRANSFORMSTATETYPE _arg1,
                                                       CONST D3DMATRIX *_arg2)
{
  return m_device->MultiplyTransform(_arg1, _arg2);
}

HRESULT __stdcall WrappedD3DDevice9::SetViewport(CONST D3DVIEWPORT9 *pViewport)
{
  return m_device->SetViewport(pViewport);
}

HRESULT __stdcall WrappedD3DDevice9::GetViewport(D3DVIEWPORT9 *pViewport)
{
  return m_device->GetViewport(pViewport);
}

HRESULT __stdcall WrappedD3DDevice9::SetMaterial(CONST D3DMATERIAL9 *pMaterial)
{
  return m_device->SetMaterial(pMaterial);
}

HRESULT __stdcall WrappedD3DDevice9::GetMaterial(D3DMATERIAL9 *pMaterial)
{
  return m_device->GetMaterial(pMaterial);
}

HRESULT __stdcall WrappedD3DDevice9::SetLight(DWORD Index, CONST D3DLIGHT9 *_arg2)
{
  return m_device->SetLight(Index, _arg2);
}

HRESULT __stdcall WrappedD3DDevice9::GetLight(DWORD Index, D3DLIGHT9 *_arg2)
{
  return m_device->GetLight(Index, _arg2);
}

HRESULT __stdcall WrappedD3DDevice9::LightEnable(DWORD Index, BOOL Enable)
{
  return m_device->LightEnable(Index, Enable);
}

HRESULT __stdcall WrappedD3DDevice9::GetLightEnable(DWORD Index, BOOL *pEnable)
{
  return m_device->GetLightEnable(Index, pEnable);
}

HRESULT __stdcall WrappedD3DDevice9::SetClipPlane(DWORD Index, CONST float *pPlane)
{
  return m_device->SetClipPlane(Index, pPlane);
}

HRESULT __stdcall WrappedD3DDevice9::GetClipPlane(DWORD Index, float *pPlane)
{
  return m_device->GetClipPlane(Index, pPlane);
}

HRESULT __stdcall WrappedD3DDevice9::SetRenderState(D3DRENDERSTATETYPE State, DWORD Value)
{
  return m_device->SetRenderState(State, Value);
}

HRESULT __stdcall WrappedD3DDevice9::GetRenderState(D3DRENDERSTATETYPE State, DWORD *pValue)
{
  return m_device->GetRenderState(State, pValue);
}

HRESULT __stdcall WrappedD3DDevice9::CreateStateBlock(D3DSTATEBLOCKTYPE Type,
                                                      IDirect3DStateBlock9 **ppSB)
{
  return m_device->CreateStateBlock(Type, ppSB);
}

HRESULT __stdcall WrappedD3DDevice9::BeginStateBlock()
{
  return m_device->BeginStateBlock();
}

HRESULT __stdcall WrappedD3DDevice9::EndStateBlock(IDirect3DStateBlock9 **ppSB)
{
  return m_device->EndStateBlock(ppSB);
}

HRESULT __stdcall WrappedD3DDevice9::SetClipStatus(CONST D3DCLIPSTATUS9 *pClipStatus)
{
  return m_device->SetClipStatus(pClipStatus);
}

HRESULT __stdcall WrappedD3DDevice9::GetClipStatus(D3DCLIPSTATUS9 *pClipStatus)
{
  return m_device->GetClipStatus(pClipStatus);
}

HRESULT __stdcall WrappedD3DDevice9::GetTexture(DWORD Stage, IDirect3DBaseTexture9 **ppTexture)
{
  return m_device->GetTexture(Stage, ppTexture);
}

HRESULT __stdcall WrappedD3DDevice9::SetTexture(DWORD Stage, IDirect3DBaseTexture9 *pTexture)
{
  return m_device->SetTexture(Stage, pTexture);
}

HRESULT __stdcall WrappedD3DDevice9::GetTextureStageState(DWORD Stage, D3DTEXTURESTAGESTATETYPE Type,
                                                          DWORD *pValue)
{
  return m_device->GetTextureStageState(Stage, Type, pValue);
}

HRESULT __stdcall WrappedD3DDevice9::SetTextureStageState(DWORD Stage,
                                                          D3DTEXTURESTAGESTATETYPE Type, DWORD Value)
{
  return m_device->SetTextureStageState(Stage, Type, Value);
}

HRESULT __stdcall WrappedD3DDevice9::GetSamplerState(DWORD Sampler, D3DSAMPLERSTATETYPE Type,
                                                     DWORD *pValue)
{
  return m_device->GetSamplerState(Sampler, Type, pValue);
}

HRESULT __stdcall WrappedD3DDevice9::SetSamplerState(DWORD Sampler, D3DSAMPLERSTATETYPE Type,
                                                     DWORD Value)
{
  return m_device->SetSamplerState(Sampler, Type, Value);
}

HRESULT __stdcall WrappedD3DDevice9::ValidateDevice(DWORD *pNumPasses)
{
  return m_device->ValidateDevice(pNumPasses);
}

HRESULT __stdcall WrappedD3DDevice9::SetPaletteEntries(UINT PaletteNumber,
                                                       CONST PALETTEENTRY *pEntries)
{
  return m_device->SetPaletteEntries(PaletteNumber, pEntries);
}

HRESULT __stdcall WrappedD3DDevice9::GetPaletteEntries(UINT PaletteNumber, PALETTEENTRY *pEntries)
{
  return m_device->GetPaletteEntries(PaletteNumber, pEntries);
}

HRESULT __stdcall WrappedD3DDevice9::SetCurrentTexturePalette(UINT PaletteNumber)
{
  return m_device->SetCurrentTexturePalette(PaletteNumber);
}

HRESULT __stdcall WrappedD3DDevice9::GetCurrentTexturePalette(UINT *PaletteNumber)
{
  return m_device->GetCurrentTexturePalette(PaletteNumber);
}

HRESULT __stdcall WrappedD3DDevice9::SetScissorRect(CONST RECT *pRect)
{
  return m_device->SetScissorRect(pRect);
}

HRESULT __stdcall WrappedD3DDevice9::GetScissorRect(RECT *pRect)
{
  return m_device->GetScissorRect(pRect);
}

HRESULT __stdcall WrappedD3DDevice9::SetSoftwareVertexProcessing(BOOL bSoftware)
{
  return m_device->SetSoftwareVertexProcessing(bSoftware);
}

BOOL __stdcall WrappedD3DDevice9::GetSoftwareVertexProcessing()
{
  return m_device->GetSoftwareVertexProcessing();
}

HRESULT __stdcall WrappedD3DDevice9::SetNPatchMode(float nSegments)
{
  return m_device->SetNPatchMode(nSegments);
}

float __stdcall WrappedD3DDevice9::GetNPatchMode()
{
  return m_device->GetNPatchMode();
}

// Draw calls: forwarded without any recording, since D3D9 capture is
// unsupported.
HRESULT __stdcall WrappedD3DDevice9::DrawPrimitive(D3DPRIMITIVETYPE PrimitiveType, UINT StartVertex,
                                                   UINT PrimitiveCount)
{
  return m_device->DrawPrimitive(PrimitiveType, StartVertex, PrimitiveCount);
}

HRESULT __stdcall WrappedD3DDevice9::DrawIndexedPrimitive(D3DPRIMITIVETYPE _arg1, INT BaseVertexIndex,
                                                          UINT MinVertexIndex, UINT NumVertices,
                                                          UINT startIndex, UINT primCount)
{
  return m_device->DrawIndexedPrimitive(_arg1, BaseVertexIndex, MinVertexIndex, NumVertices,
                                        startIndex, primCount);
}

HRESULT __stdcall WrappedD3DDevice9::DrawPrimitiveUP(D3DPRIMITIVETYPE PrimitiveType,
                                                     UINT PrimitiveCount,
                                                     CONST void *pVertexStreamZeroData,
                                                     UINT VertexStreamZeroStride)
{
  return m_device->DrawPrimitiveUP(PrimitiveType, PrimitiveCount, pVertexStreamZeroData,
                                   VertexStreamZeroStride);
}

HRESULT __stdcall WrappedD3DDevice9::DrawIndexedPrimitiveUP(
    D3DPRIMITIVETYPE PrimitiveType, UINT MinVertexIndex, UINT NumVertices, UINT PrimitiveCount,
    CONST void *pIndexData, D3DFORMAT IndexDataFormat, CONST void *pVertexStreamZeroData,
    UINT VertexStreamZeroStride)
{
  return m_device->DrawIndexedPrimitiveUP(PrimitiveType, MinVertexIndex, NumVertices,
                                          PrimitiveCount, pIndexData, IndexDataFormat,
                                          pVertexStreamZeroData, VertexStreamZeroStride);
}

HRESULT __stdcall WrappedD3DDevice9::ProcessVertices(UINT SrcStartIndex, UINT DestIndex,
                                                     UINT VertexCount,
                                                     IDirect3DVertexBuffer9 *pDestBuffer,
                                                     IDirect3DVertexDeclaration9 *pVertexDecl,
                                                     DWORD Flags)
{
  return m_device->ProcessVertices(SrcStartIndex, DestIndex, VertexCount, pDestBuffer, pVertexDecl,
                                   Flags);
}

HRESULT __stdcall WrappedD3DDevice9::CreateVertexDeclaration(CONST D3DVERTEXELEMENT9 *pVertexElements,
                                                             IDirect3DVertexDeclaration9 **ppDecl)
{
  return m_device->CreateVertexDeclaration(pVertexElements, ppDecl);
}

HRESULT __stdcall WrappedD3DDevice9::SetVertexDeclaration(IDirect3DVertexDeclaration9 *pDecl)
{
  return m_device->SetVertexDeclaration(pDecl);
}
HRESULT __stdcall WrappedD3DDevice9::GetVertexDeclaration(IDirect3DVertexDeclaration9 **ppDecl)
{
return m_device->GetVertexDeclaration(ppDecl);
}
HRESULT __stdcall WrappedD3DDevice9::SetFVF(DWORD FVF)
{
return m_device->SetFVF(FVF);
}
HRESULT __stdcall WrappedD3DDevice9::GetFVF(DWORD *pFVF)
{
return m_device->GetFVF(pFVF);
}
HRESULT __stdcall WrappedD3DDevice9::CreateVertexShader(CONST DWORD *pFunction,
IDirect3DVertexShader9 **ppShader)
{
return m_device->CreateVertexShader(pFunction, ppShader);
}
HRESULT __stdcall WrappedD3DDevice9::SetVertexShader(IDirect3DVertexShader9 *pShader)
{
return m_device->SetVertexShader(pShader);
}
HRESULT __stdcall WrappedD3DDevice9::GetVertexShader(IDirect3DVertexShader9 **ppShader)
{
return m_device->GetVertexShader(ppShader);
}
HRESULT __stdcall WrappedD3DDevice9::SetVertexShaderConstantF(UINT StartRegister,
CONST float *pConstantData,
UINT Vector4fCount)
{
return m_device->SetVertexShaderConstantF(StartRegister, pConstantData, Vector4fCount);
}
HRESULT __stdcall WrappedD3DDevice9::GetVertexShaderConstantF(UINT StartRegister,
float *pConstantData,
UINT Vector4fCount)
{
return m_device->GetVertexShaderConstantF(StartRegister, pConstantData, Vector4fCount);
}
HRESULT __stdcall WrappedD3DDevice9::SetVertexShaderConstantI(UINT StartRegister,
CONST int *pConstantData,
UINT Vector4iCount)
{
return m_device->SetVertexShaderConstantI(StartRegister, pConstantData, Vector4iCount);
}
HRESULT __stdcall WrappedD3DDevice9::GetVertexShaderConstantI(UINT StartRegister,
int *pConstantData, UINT Vector4iCount)
{
return m_device->GetVertexShaderConstantI(StartRegister, pConstantData, Vector4iCount);
}
HRESULT __stdcall WrappedD3DDevice9::SetVertexShaderConstantB(UINT StartRegister,
CONST BOOL *pConstantData,
UINT BoolCount)
{
return m_device->SetVertexShaderConstantB(StartRegister, pConstantData, BoolCount);
}
HRESULT __stdcall WrappedD3DDevice9::GetVertexShaderConstantB(UINT StartRegister,
BOOL *pConstantData, UINT BoolCount)
{
return m_device->GetVertexShaderConstantB(StartRegister, pConstantData, BoolCount);
}
HRESULT __stdcall WrappedD3DDevice9::SetStreamSource(UINT StreamNumber,
IDirect3DVertexBuffer9 *pStreamData,
UINT OffsetInBytes, UINT Stride)
{
return m_device->SetStreamSource(StreamNumber, pStreamData, OffsetInBytes, Stride);
}
HRESULT __stdcall WrappedD3DDevice9::GetStreamSource(UINT StreamNumber,
IDirect3DVertexBuffer9 **ppStreamData,
UINT *pOffsetInBytes, UINT *pStride)
{
return m_device->GetStreamSource(StreamNumber, ppStreamData, pOffsetInBytes, pStride);
}
HRESULT __stdcall WrappedD3DDevice9::SetStreamSourceFreq(UINT StreamNumber, UINT Setting)
{
return m_device->SetStreamSourceFreq(StreamNumber, Setting);
}
HRESULT __stdcall WrappedD3DDevice9::GetStreamSourceFreq(UINT StreamNumber, UINT *pSetting)
{
return m_device->GetStreamSourceFreq(StreamNumber, pSetting);
}
HRESULT __stdcall WrappedD3DDevice9::SetIndices(IDirect3DIndexBuffer9 *pIndexData)
{
return m_device->SetIndices(pIndexData);
}
HRESULT __stdcall WrappedD3DDevice9::GetIndices(IDirect3DIndexBuffer9 **ppIndexData)
{
return m_device->GetIndices(ppIndexData);
}
HRESULT __stdcall WrappedD3DDevice9::CreatePixelShader(CONST DWORD *pFunction,
IDirect3DPixelShader9 **ppShader)
{
return m_device->CreatePixelShader(pFunction, ppShader);
}
HRESULT __stdcall WrappedD3DDevice9::SetPixelShader(IDirect3DPixelShader9 *pShader)
{
return m_device->SetPixelShader(pShader);
}
HRESULT __stdcall WrappedD3DDevice9::GetPixelShader(IDirect3DPixelShader9 **ppShader)
{
return m_device->GetPixelShader(ppShader);
}
HRESULT __stdcall WrappedD3DDevice9::SetPixelShaderConstantF(UINT StartRegister,
CONST float *pConstantData,
UINT Vector4fCount)
{
return m_device->SetPixelShaderConstantF(StartRegister, pConstantData, Vector4fCount);
}
HRESULT __stdcall WrappedD3DDevice9::GetPixelShaderConstantF(UINT StartRegister,
float *pConstantData, UINT Vector4fCount)
{
return m_device->GetPixelShaderConstantF(StartRegister, pConstantData, Vector4fCount);
}
HRESULT __stdcall WrappedD3DDevice9::SetPixelShaderConstantI(UINT StartRegister,
CONST int *pConstantData,
UINT Vector4iCount)
{
return m_device->SetPixelShaderConstantI(StartRegister, pConstantData, Vector4iCount);
}
HRESULT __stdcall WrappedD3DDevice9::GetPixelShaderConstantI(UINT StartRegister, int *pConstantData,
UINT Vector4iCount)
{
return m_device->GetPixelShaderConstantI(StartRegister, pConstantData, Vector4iCount);
}
HRESULT __stdcall WrappedD3DDevice9::SetPixelShaderConstantB(UINT StartRegister,
CONST BOOL *pConstantData,
UINT BoolCount)
{
return m_device->SetPixelShaderConstantB(StartRegister, pConstantData, BoolCount);
}
HRESULT __stdcall WrappedD3DDevice9::GetPixelShaderConstantB(UINT StartRegister,
BOOL *pConstantData, UINT BoolCount)
{
return m_device->GetPixelShaderConstantB(StartRegister, pConstantData, BoolCount);
}
HRESULT __stdcall WrappedD3DDevice9::DrawRectPatch(UINT Handle, CONST float *pNumSegs,
CONST D3DRECTPATCH_INFO *pRectPatchInfo)
{
return m_device->DrawRectPatch(Handle, pNumSegs, pRectPatchInfo);
}
HRESULT __stdcall WrappedD3DDevice9::DrawTriPatch(UINT Handle, CONST float *pNumSegs,
CONST D3DTRIPATCH_INFO *pTriPatchInfo)
{
return m_device->DrawTriPatch(Handle, pNumSegs, pTriPatchInfo);
}
HRESULT __stdcall WrappedD3DDevice9::DeletePatch(UINT Handle)
{
return m_device->DeletePatch(Handle);
}
HRESULT __stdcall WrappedD3DDevice9::CreateQuery(D3DQUERYTYPE Type, IDirect3DQuery9 **ppQuery)
{
return m_device->CreateQuery(Type, ppQuery);
}
// ---------------------------------------------------------------------------
// WrappedD3D9: IDirect3D9 wrapper. COM reference counting is mirrored onto
// the real interface (m_direct3D); the adapter/caps queries below forward
// unchanged.
// ---------------------------------------------------------------------------
HRESULT __stdcall WrappedD3D9::QueryInterface(REFIID riid, void **ppvObj)
{
return m_direct3D->QueryInterface(riid, ppvObj);
}
ULONG __stdcall WrappedD3D9::AddRef()
{
ULONG refCount;
refCount = m_direct3D->AddRef();
return refCount;
}
// The wrapper shares the real object's reference count and destroys itself
// once that count reaches zero.
ULONG __stdcall WrappedD3D9::Release()
{
ULONG refCount = m_direct3D->Release();
if(refCount == 0)
{
delete this;
}
return refCount;
}
HRESULT __stdcall WrappedD3D9::RegisterSoftwareDevice(void *pInitializeFunction)
{
return m_direct3D->RegisterSoftwareDevice(pInitializeFunction);
}
// --- adapter enumeration / capability queries: pure pass-through ---
UINT __stdcall WrappedD3D9::GetAdapterCount()
{
return m_direct3D->GetAdapterCount();
}
HRESULT __stdcall WrappedD3D9::GetAdapterIdentifier(UINT Adapter, DWORD Flags,
D3DADAPTER_IDENTIFIER9 *pIdentifier)
{
return m_direct3D->GetAdapterIdentifier(Adapter, Flags, pIdentifier);
}
UINT __stdcall WrappedD3D9::GetAdapterModeCount(UINT Adapter, D3DFORMAT Format)
{
return m_direct3D->GetAdapterModeCount(Adapter, Format);
}
HRESULT __stdcall WrappedD3D9::EnumAdapterModes(UINT Adapter, D3DFORMAT Format, UINT Mode,
D3DDISPLAYMODE *pMode)
{
return m_direct3D->EnumAdapterModes(Adapter, Format, Mode, pMode);
}
HRESULT __stdcall WrappedD3D9::GetAdapterDisplayMode(UINT Adapter, D3DDISPLAYMODE *pMode)
{
return m_direct3D->GetAdapterDisplayMode(Adapter, pMode);
}
HRESULT __stdcall WrappedD3D9::CheckDeviceType(UINT Adapter, D3DDEVTYPE DevType,
D3DFORMAT AdapterFormat, D3DFORMAT BackBufferFormat,
BOOL bWindowed)
{
return m_direct3D->CheckDeviceType(Adapter, DevType, AdapterFormat, BackBufferFormat, bWindowed);
}
HRESULT __stdcall WrappedD3D9::CheckDeviceFormat(UINT Adapter, D3DDEVTYPE DeviceType,
D3DFORMAT AdapterFormat, DWORD Usage,
D3DRESOURCETYPE RType, D3DFORMAT CheckFormat)
{
return m_direct3D->CheckDeviceFormat(Adapter, DeviceType, AdapterFormat, Usage, RType, CheckFormat);
}
HRESULT __stdcall WrappedD3D9::CheckDeviceMultiSampleType(UINT Adapter, D3DDEVTYPE DeviceType,
D3DFORMAT SurfaceFormat, BOOL Windowed,
D3DMULTISAMPLE_TYPE MultiSampleType,
DWORD *pQualityLevels)
{
return m_direct3D->CheckDeviceMultiSampleType(Adapter, DeviceType, SurfaceFormat, Windowed,
MultiSampleType, pQualityLevels);
}
HRESULT __stdcall WrappedD3D9::CheckDepthStencilMatch(UINT Adapter, D3DDEVTYPE DeviceType,
D3DFORMAT AdapterFormat,
D3DFORMAT RenderTargetFormat,
D3DFORMAT DepthStencilFormat)
{
return m_direct3D->CheckDepthStencilMatch(Adapter, DeviceType, AdapterFormat, RenderTargetFormat,
DepthStencilFormat);
}
HRESULT __stdcall WrappedD3D9::CheckDeviceFormatConversion(UINT Adapter, D3DDEVTYPE DeviceType,
D3DFORMAT SourceFormat,
D3DFORMAT TargetFormat)
{
return m_direct3D->CheckDeviceFormatConversion(Adapter, DeviceType, SourceFormat, TargetFormat);
}
HRESULT __stdcall WrappedD3D9::GetDeviceCaps(UINT Adapter, D3DDEVTYPE DeviceType, D3DCAPS9 *pCaps)
{
return m_direct3D->GetDeviceCaps(Adapter, DeviceType, pCaps);
}
HMONITOR __stdcall WrappedD3D9::GetAdapterMonitor(UINT Adapter)
{
return m_direct3D->GetAdapterMonitor(Adapter);
}
// Creates the real device, then hands the application a WrappedD3DDevice9 so
// all further device calls can be intercepted.
// The out-pointer is guarded: the real runtime fails a NULL
// ppReturnedDeviceInterface with an error HRESULT instead of dereferencing
// it, so the wrapper must not crash on that path either.
HRESULT __stdcall WrappedD3D9::CreateDevice(UINT Adapter, D3DDEVTYPE DeviceType, HWND hFocusWindow,
                                            DWORD BehaviorFlags,
                                            D3DPRESENT_PARAMETERS *pPresentationParameters,
                                            IDirect3DDevice9 **ppReturnedDeviceInterface)
{
  IDirect3DDevice9 *device = NULL;
  HRESULT res = m_direct3D->CreateDevice(Adapter, DeviceType, hFocusWindow, BehaviorFlags,
                                         pPresentationParameters, &device);
  if(res == S_OK)
  {
    RDCLOG("App creating d3d9 device");
    // Prefer the device window from the present parameters, falling back to
    // the focus window if none was given.
    HWND wnd = pPresentationParameters->hDeviceWindow;
    if(wnd == NULL)
      wnd = hFocusWindow;
    if(!wnd)
      RDCWARN("Couldn't find valid non-NULL window at CreateDevice time");
    WrappedD3DDevice9 *wrappedDevice = new WrappedD3DDevice9(device, wnd);
    wrappedDevice->LazyInit();    // TODO this can be moved later probably
    if(ppReturnedDeviceInterface)
      *ppReturnedDeviceInterface = wrappedDevice;
  }
  else if(ppReturnedDeviceInterface)
  {
    *ppReturnedDeviceInterface = NULL;
  }
  return res;
}
|
/*
* Copyright (C) 2002-2011 The DOSBox Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "dosbox.h"
#include "pic.h"
#include "regs.h"
#include "cpu.h"
#include "lazyflags.h"
#include "paging.h"
#include "fpu.h"
#include "debug.h"
#include "inout.h"
#include "callback.h"
// Effective-address pointers are plain physical addresses in this core.
typedef PhysPt EAPoint;
#define SegBase(c) SegPhys(c)
// Guest-memory accessors used by the included interpreter headers.
#define LoadMb(off) mem_readb_inline(off)
#define LoadMw(off) mem_readw_inline(off)
#define LoadMd(off) mem_readd_inline(off)
// Sign-extending variants of the loads above.
#define LoadMbs(off) (Bit8s)(LoadMb(off))
#define LoadMws(off) (Bit16s)(LoadMw(off))
#define LoadMds(off) (Bit32s)(LoadMd(off))
#define SaveMb(off,val) mem_writeb_inline(off,val)
#define SaveMw(off,val) mem_writew_inline(off,val)
#define SaveMd(off,val) mem_writed_inline(off,val)
// Register accessors: the full core reads/writes registers directly.
#define LoadD(reg) reg
#define SaveD(reg,val) reg=val
#include "core_full/loadwrite.h"
#include "core_full/support.h"
#include "core_full/optable.h"
#include "instructions.h"
// Raises CPU exception `blah` (error code 0) and abandons the current
// instruction; `continue` targets the interpreter loop this macro is
// expanded inside of.
#define EXCEPTION(blah) \
{ \
Bit8u new_num=blah; \
CPU_Exception(new_num,0); \
continue; \
}
// Main interpreter loop of the "full" CPU core: fetches, decodes and
// executes one instruction per cycle until CPU_Cycles is exhausted, or a
// debugger breakpoint returns control to the caller.
Bits CPU_Core_Full_Run(void) {
FullData inst;
while (CPU_Cycles-->0) {
#if C_DEBUG
cycle_count++;
#if C_HEAVY_DEBUG
if (DEBUG_HeavyIsBreakpoint()) {
FillFlags();
return debugCallback;
};
#endif
#endif
LoadIP();
// Opcode-table index: bit 9 selects the 32-bit-code variants of the table.
inst.entry=cpu.code.big*0x200;
inst.prefix=cpu.code.big;
restartopcode:
// Prefix bytes re-enter here; only the low byte is replaced by the fetch.
inst.entry=(inst.entry & 0xffffff00) | Fetchb();
inst.code=OpCodeTable[inst.entry];
// The decode/execute/writeback stages live in these headers; they jump to
// the labels restartopcode / nextopcode / illegalopcode defined here.
#include "core_full/load.h"
#include "core_full/op.h"
#include "core_full/save.h"
nextopcode:;
SaveIP();
continue;
illegalopcode:
LOG(LOG_CPU,LOG_NORMAL)("Illegal opcode");
// #UD (invalid opcode) exception, vector 6.
CPU_Exception(0x6,0);
}
FillFlags();
return CBRET_NONE;
}
// The full core keeps no cached decoder state, so there is nothing to set up.
void CPU_Core_Full_Init(void) {
}
|
#pragma once
#include <stddef.h>
#include <bitset>
#include <cmath>
#include <exception>
#include <stdexcept>
#include <functional>
#include <map>
#include <memory>
#include <vector>
#include <limits>
#include "dynamixel_sdk/dynamixel_sdk.h"
#include "iostream"
#include "servo_register_definitions.h"
namespace dynamixel {
// Driver for a set of Dynamixel actuators sharing one serial bus, built on
// protocol-2.0 group sync reads/writes (see class-level doc comment below).
class Driver {
public:
// Callback signature used to report a hardware error for a named joint.
typedef std::function<void(const std::string& joint_name)> hw_error_callback;
private:
class Motor;  // forward declaration; defined below
// Bundles one GroupSyncRead (covering a single control-table field) with the
// motors registered on it and the per-motor decode callbacks that run after
// each bus transaction.
struct GroupSyncReader {
GroupSyncRead syncRead;
// One decode function per registered motor; each extracts that motor's
// bytes from the completed syncRead.
std::vector<std::function<void(GroupSyncRead&)>> read_functions;
std::vector<Motor*> motors;  // non-owning; the Driver owns the motors
GroupSyncReader(
std::unique_ptr<PortHandler>& port,
std::unique_ptr<PacketHandler>& ph,
uint16_t& start_address,
uint16_t& data_length) : syncRead(port.get(), ph.get(), start_address, data_length){};
// Convenience constructor taking the whole control-table field descriptor.
GroupSyncReader(
std::unique_ptr<PortHandler>& port,
std::unique_ptr<PacketHandler>& ph,
field& f) : GroupSyncReader(port, ph, f.address, f.data_length){};
};
// Internal representation of a single actuator: identity, state cached from
// the bus, and pending command values.
class Motor {
public:
const motor_id id;
const std::string name;
const model_info* model;  // entry from model_infos; resolved in the ctor
bool led;
bool rebooting;  // true while the motor is (re)starting / reporting errors
bool rebooted;   // set once a reboot completed; triggers after_reboot_functions
double goal_position;  // commanded position [rad]; NaN = no goal set
double position;  // last position read from the bus [rad]
//double goal_velocity;
double velocity;  // last velocity read from the bus (see velocity_from_raw)
double effort;    // last load/current reading, scaled by 0.1 (set_effort_from_raw)
bool torque;      // last torque-enable state read from the bus
uint8_t hw_error; // raw Hardware_Error_Status byte from the motor
double p_gain, i_gain, d_gain;  // position PID gains as read from the motor
double p_gain_target, i_gain_target, d_gain_target;  // gains to restore after reboot
std::vector<std::function<void()>> after_reboot_functions;  // run by Driver::read() once rebooted
typedef std::function<void(dynamixel::hardware_status)> hw_error_callback;
//hw_error_callback error_callback;
// Looks up the control-table field descriptor for `command` on this motor's
// model and stores it in `f`.
// map::at throws std::out_of_range for unknown commands, which is caught and
// rethrown as command_type_not_implemented.
void get_command(const dynamixel::field_name& command, field& f) const {
try {
f = model->controll_table->at(command);
} catch (std::exception& e) {
throw command_type_not_implemented(command);
}
};
// Converts a raw velocity register value to rad/s using the model's
// RPM-per-tick factor.
double velocity_from_raw(uint32_t raw_position) const {
const double RPM2RADPERSEC = 0.104719755f;  // 2*pi/60
//FIXME this calculation is not always correct
//In the official Dynamixel-Toolbox there is a function which handles all the edge cases
//in which negative directions are marked by the 10th bit.
return double(static_cast<int32_t>(raw_position)) * model->rpm_factor * RPM2RADPERSEC;
}
// Linearly maps the raw encoder tick range [min_value, max_value] onto the
// model's calibrated radian range [min_radian, max_radian].
double position_from_raw(uint32_t raw_position) const {
    const double raw_span = double(model->max_value - model->min_value);
    const double rad_span = double(model->max_radian - model->min_radian);
    const double normalized = (double(raw_position) - double(model->min_value)) / raw_span;
    return normalized * rad_span + double(model->min_radian);
};
// Inverse of position_from_raw: maps a position in radians back to raw
// encoder ticks.
// NOTE(review): the uint64_t truncation happens BEFORE min_value is added,
// and the resulting double is implicitly narrowed to the uint32_t return
// type — presumably fine for positions inside [min_radian, max_radian], but
// verify behavior for out-of-range inputs.
uint32_t raw_from_position(double position) const {
return uint64_t((position - model->min_radian) / double(model->max_radian - model->min_radian) * double(model->max_value - model->min_value)) + double(model->min_value);
};
// Cache-update helpers used by the sync-read decode callbacks.
void set_position_from_raw(uint32_t raw) { position = position_from_raw(raw); }
void set_velocity_from_raw(uint32_t raw) { velocity = velocity_from_raw(raw); }
// Effort registers are signed 16-bit values in 0.1 units.
void set_effort_from_raw(uint32_t raw) { effort = double(static_cast<int16_t>(raw))*0.1; }
// Constructs a motor entry: resolves the model-specific info table and
// initializes every cached-state member to a defined value. Previously
// led/velocity/effort/torque/hw_error and the *_target gains were left
// uninitialized, yet hw_error and torque are read (set_torque / write)
// before the first bus read — undefined behavior. State values start as
// NaN/false/0 until the first successful read.
// @throws motor_type_not_implemented if `type` has no model_infos entry
Motor(const motor_id p_id, std::string p_name, const model_t type) :
    id(p_id),
    name(p_name),
    led(false),
    rebooting(true),
    rebooted(false),
    goal_position(std::numeric_limits<double>::quiet_NaN()),
    position(std::numeric_limits<double>::quiet_NaN()),
    velocity(std::numeric_limits<double>::quiet_NaN()),
    effort(std::numeric_limits<double>::quiet_NaN()),
    torque(false),
    hw_error(0),
    p_gain(std::numeric_limits<double>::quiet_NaN()),
    i_gain(std::numeric_limits<double>::quiet_NaN()),
    d_gain(std::numeric_limits<double>::quiet_NaN()),
    p_gain_target(std::numeric_limits<double>::quiet_NaN()),
    i_gain_target(std::numeric_limits<double>::quiet_NaN()),
    d_gain_target(std::numeric_limits<double>::quiet_NaN())
{
    try {
        model = &model_infos.at(type);
    } catch (const std::exception &e) {
        throw motor_type_not_implemented(type);
    }
};
// Returns the cached position converted back to raw encoder ticks.
uint32_t get_position_raw() const {
return raw_from_position(position);
}
// Registers this motor's state fields (torque, position, velocity, effort,
// hardware error status) on the shared per-field GroupSyncReaders so they
// are refreshed by every Driver::read() cycle.
// @param error_callback invoked once per set Hardware_Error_Status bit;
//        defaults to a no-op (which also disables the status reader).
void setup_sync_readers(
std::map<field, std::unique_ptr<GroupSyncReader>>& syncReaders,
std::unique_ptr<PortHandler>& portHandler,
std::unique_ptr<PacketHandler>& packetHandler,
hw_error_callback error_callback = [](auto){}) {
//Torque enabled
setup_sync_reader(
syncReaders, portHandler, packetHandler,
command::Torque_Enable,
[this](uint32_t data) {
this->torque = bool(data);
// A torque-disabled motor holds no goal; clearing it lets write()
// re-seed the goal from the current position when torque returns.
if (this->torque == false) {
goal_position = std::numeric_limits<double>::quiet_NaN();
}
});
//Present Position
setup_sync_reader(
syncReaders, portHandler, packetHandler,
command::Present_Position,
[this](uint32_t data) {
this->set_position_from_raw(data);
});
//Present Velocity
setup_sync_reader(
syncReaders, portHandler, packetHandler,
command::Present_Velocity,
[this](uint32_t data) {
this->set_velocity_from_raw(data);
});
//Present Effort (Load if available else Current)
try {
setup_sync_reader(
syncReaders, portHandler, packetHandler,
command::Present_Load,
[this](uint32_t data) {
this->set_effort_from_raw(data);
});
} catch (command_type_not_implemented& e) {
// Model has no Present_Load register; fall back to Present_Current.
setup_sync_reader(
syncReaders, portHandler, packetHandler,
command::Present_Current,
[this](uint32_t data) {
this->set_effort_from_raw(data);
});
}
//Hardware Error Status
if(error_callback) {
setup_sync_reader(
syncReaders, portHandler, packetHandler,
command::Hardware_Error_Status,
[this, error_callback](uint32_t data) {
uint8_t error = uint8_t(data);
this->hw_error = error;
if (error != 0) {
//std::bitset<8> error(data);
// Report each error bit separately to the callback.
if (error & hardware_status::Input_Voltage_Error) {
error_callback(hardware_status::Input_Voltage_Error);
}
if (error & hardware_status::OverHeating_Error) {
error_callback(hardware_status::OverHeating_Error);
}
if (error & hardware_status::Motor_Encoder_Error) {
error_callback(hardware_status::Motor_Encoder_Error);
}
if (error & hardware_status::Electrical_Shock_Error) {
error_callback(hardware_status::Electrical_Shock_Error);
}
if (error & hardware_status::Overload_Error) {
error_callback(hardware_status::Overload_Error);
}
//https://emanual.robotis.com/docs/en/dxl/x/xl430-w250/#hardware-error-status
} else {
// No error bits set: any reboot in progress has completed.
this->rebooting = false;
this->rebooted = true;
}
});
}
}
// Registers one-shot readers that fetch the motor's position PID gains
// during the first read cycle; models without PID registers are skipped.
// NOTE(review): if Position_P_Gain exists but a later gain field throws,
// the earlier readers stay registered — presumably intended best-effort.
void setup_init_sync_readers(
std::map<field, std::unique_ptr<GroupSyncReader>>& initSyncReaders,
std::unique_ptr<PortHandler>& portHandler,
std::unique_ptr<PacketHandler>& packetHandler)
{
//Load PID gains if available
try {
setup_sync_reader(
initSyncReaders, portHandler, packetHandler,
command::Position_P_Gain,
[this](uint32_t raw) {
this->p_gain = double(static_cast<int16_t>(raw));
});
setup_sync_reader(
initSyncReaders, portHandler, packetHandler,
command::Position_I_Gain,
[this](uint32_t raw) {
this->i_gain = double(static_cast<int16_t>(raw));
});
setup_sync_reader(
initSyncReaders, portHandler, packetHandler,
command::Position_D_Gain,
[this](uint32_t raw) {
this->d_gain = double(static_cast<int16_t>(raw));
});
} catch (command_type_not_implemented& e) {}
}
// Registers `callback` to receive this motor's raw value for `field_name`,
// lazily creating the shared per-field GroupSyncReader on first use.
// @throws command_type_not_implemented if the model lacks the field
void setup_sync_reader(
std::map<field, std::unique_ptr<GroupSyncReader>>& syncReaders,
std::unique_ptr<PortHandler>& portHandler,
std::unique_ptr<PacketHandler>& packetHandler,
const dynamixel::field_name& field_name,
std::function<void(uint32_t)> callback) {
field sr_field;
this->get_command(field_name, sr_field);
auto sync_reader_it = syncReaders.find(sr_field);
//if there is no reader for the command field create it
if (sync_reader_it == syncReaders.end()) {
sync_reader_it = syncReaders.insert(
sync_reader_it,
{sr_field,
std::make_unique<GroupSyncReader>(portHandler, packetHandler, sr_field)});
}
sync_reader_it->second->syncRead.addParam(this->id);
sync_reader_it->second->motors.push_back(this);
// Decode closure: only forwards data when the read actually delivered
// bytes for this id/field.
sync_reader_it->second->read_functions.push_back(
[this, sr_field, callback](GroupSyncRead& read) {
if (read.isAvailable(this->id, sr_field.address, sr_field.data_length)) {
uint32_t data = read.getData(this->id, sr_field.address, sr_field.data_length);
callback(data);
} else {
// No data available
}
});
}
// Motors are shared between the Driver map and callback captures.
typedef std::shared_ptr<Motor> Ptr;
};
std::map<std::string, Motor::Ptr> motors;  // keyed by joint name
std::unique_ptr<PortHandler> portHandler;
std::unique_ptr<PacketHandler> packetHandler;
// One-shot readers used until the first complete read (PID gains etc.).
std::map<field, std::unique_ptr<GroupSyncReader>> initSyncReaders;
// Persistent readers/writers, one per control-table field.
std::map<field, std::unique_ptr<GroupSyncReader>> syncReaders;
std::map<field, std::unique_ptr<GroupSyncWrite>> syncWrites;
// Pings a motor and returns its reported model number as a model_t.
// @throws dynamixel_bus_error if the ping fails at the bus level
// NOTE(review): dxl_error (the motor's own status byte) is not inspected on
// COMM_SUCCESS — confirm that is intended.
model_t get_motor_type(motor_id id) const {
uint16_t model_number = 0;
uint8_t dxl_error = 0;
int dxl_comm_result = packetHandler->ping(portHandler.get(), id, &model_number, &dxl_error);
if (dxl_comm_result != COMM_SUCCESS) {
//error<<"Error pinging "<<m.joint_name<<" on id "<<std::to_string(id)<<":\n"<<packetHandler_->getRxPacketError(dxl_error);
//std::cerr << "Error pinging motor (id: " << std::to_string(id) << ") :\n"
//          << packetHandler->getRxPacketError(dxl_error) << std::endl;
throw dynamixel_bus_error(id, std::string(packetHandler->getRxPacketError(dxl_error)));
}
return model_t(model_number);
}
/**
 * Adds the data to a syncwrite (and creates one if there is none for the specific command field)
 * @param command_field the field (i.e. address and data length of the controll table)
 * @param data the data to write to the Dynamixel
 * @return true if successfull, false otherwise (e.g. the id was already
 *         queued for this field)
 */
bool add_syncwrite_data(motor_id id, field& command_field, uint8_t* data) {
    auto find = syncWrites.find(command_field);
    if (find == syncWrites.end()) {
        // Lazily create one GroupSyncWrite per control-table field.
        find = syncWrites.insert(
            find,
            {command_field,
             std::make_unique<GroupSyncWrite>(
                 portHandler.get(),
                 packetHandler.get(),
                 command_field.address,
                 command_field.data_length)});
    }
    // Propagate addParam's result so the documented return value is
    // meaningful (it previously always returned true).
    return find->second->addParam(id, data);
}
/**
 * Looks up the joint name registered for a given motor id.
 * @param id the id to get the name for
 * @return the name of the motor
 * @throws std::invalid_argument if no motor with that id was added
 */
std::string get_motor_name_by_id(const motor_id& id) const {
    for (auto it = motors.cbegin(); it != motors.cend(); ++it) {
        if (it->second->id == id)
            return it->first;
    }
    throw std::invalid_argument(
        "Joint with id \"" + std::to_string(id) +
        "\" not added to driver!");
}
/**
 * adds the specified motors torque state into the specific GroupSyncWriter
 * to prepare it for the writeing over the bus.
 * @throws hardware_error if torque should be enabled while the motor
 *         reports a hardware error and is not currently rebooting
 */
void set_torque(const dynamixel::Driver::Motor::Ptr& motor, bool torque) {
if(torque && motor->hw_error) {
// While a reboot is in progress, silently skip enabling torque;
// otherwise refuse with an exception.
if(motor->rebooting)
return;
throw hardware_error(motor, "Can't enable torque!");
}
field command_field;
motor->get_command(command::Torque_Enable, command_field);
// NOTE(review): the value is handed over as a uint32_t buffer; assumes the
// sync write only consumes command_field.data_length bytes (little-endian).
uint32_t p = torque;
add_syncwrite_data(motor->id, command_field, reinterpret_cast<uint8_t*>(&p));
}
// Reboots the motor and schedules restoring the target PID gains once the
// reboot has completed (the queued function runs from read() via
// after_reboot_functions).
void reboot(dynamixel::Driver::Motor::Ptr& motor) {
motor->after_reboot_functions.clear();
motor->after_reboot_functions.push_back([this, motor](){
this->pid_gains(motor->name, motor->p_gain_target, motor->i_gain_target, motor->d_gain_target);
});
packetHandler->reboot(portHandler.get(),motor->id);
}
// Per-error-type callbacks invoked with the joint name when a motor reports
// the corresponding Hardware_Error_Status bit (see add_motor).
hw_error_callback overload_error_callback;
hw_error_callback input_voltage_error_callback;
hw_error_callback overheating_error_callback;
hw_error_callback electrical_shock_error_callback;
hw_error_callback motor_encoder_error_callback;
public:
// Thrown when a model's control table has no entry for a requested command.
struct command_type_not_implemented : public std::logic_error
{
command_type_not_implemented (const dynamixel::field_name type) : std::logic_error{"Command type \""+type+"\" not implemented."} {}
};
// Thrown when a pinged model number has no model_infos entry.
struct motor_type_not_implemented : public std::logic_error
{
motor_type_not_implemented (const model_t type) : std::logic_error{"Motor type \""+std::to_string(type)+"\" not implemented."} {}
};
// Thrown when an operation requires torque to be enabled but it is not.
struct torque_not_enabled : public std::runtime_error {
torque_not_enabled(const dynamixel::Driver::Motor::Ptr& motor) : std::runtime_error("torque of motor " + motor->name + "(id: " + std::to_string(motor->id) + ") is not enabled!") {}
};
// Thrown on communication failures reported by the Dynamixel SDK.
struct dynamixel_bus_error : public std::runtime_error {
dynamixel_bus_error(const motor_id& id, std::string dynamixel_error) : std::runtime_error("dynamixel bus error while communicating with motor (id: " + std::to_string(id) + ") " + dynamixel_error) {}
};
// Thrown when a motor with a pending hardware error blocks an operation.
struct hardware_error : public std::runtime_error {
hardware_error(const dynamixel::Driver::Motor::Ptr& motor, std::string consequence) : std::runtime_error("motor " + motor->name + "(id: " + std::to_string(motor->id) + ") has hardware error! " + consequence) {}
};
/**
 * Dynamixel Driver class.
 * Only works with dynamixel-protocol version 2.0 as syncwrites and reads are used.
 * @param port the name of the serial port the dynamixel actuators are connected to
 * @param baudrate the baudrate of the dynamixels
 */
// NOTE(review): the result of setBaudRate() is ignored and no explicit
// openPort() call is visible here — confirm the SDK's setBaudRate opens the
// port, or that it is opened elsewhere.
Driver(std::string port, uint baudrate = 57600) : portHandler(PortHandler::getPortHandler(port.c_str())),
packetHandler(PacketHandler::getPacketHandler(2.0f)) {
portHandler->setBaudRate(baudrate);
//syncRead = std::make_shared<GroupSyncRead>(portHandler_.get(),packetHandler_.get(),0,);
}
// Installs the callbacks invoked (with the joint name) when a motor reports
// the corresponding hardware error bit. Each defaults to a no-op.
void set_hardware_error_callbacks(
hw_error_callback overload_error_callback = [](auto){},
hw_error_callback input_voltage_error_callback = [](auto){},
hw_error_callback overheating_error_callback = [](auto){},
hw_error_callback electrical_shock_error_callback = [](auto){},
hw_error_callback motor_encoder_error_callback = [](auto){}) {
this->overload_error_callback = overload_error_callback;
this->input_voltage_error_callback = input_voltage_error_callback;
this->overheating_error_callback = overheating_error_callback;
this->electrical_shock_error_callback = electrical_shock_error_callback;
this->motor_encoder_error_callback = motor_encoder_error_callback;
}
/**
 * Tries to add a motor to this driver by pinging the id over the bus.
 * @param joint_name the name of the actuator
 * @param id the id the dynamixel-actuator has
 * @throws std::invalid_argument if the id or name is already registered, or
 *         the motor answers with an unknown model type
 * @throws dynamixel_bus_error if the ping fails
 */
void add_motor(const std::string& joint_name, motor_id id) {
    // Reject duplicate ids and conflicting re-registration of a name.
    for (const auto& [joint_name_check, motor_check] : motors) {
        if (motor_check->id == id)
            throw std::invalid_argument(
                "Dynamixel-ID \"" + std::to_string(id) +
                "\" already in use for joint \"" + joint_name_check + "\"!");
        if (joint_name_check == joint_name)
            if (id != motor_check->id)
                // BUG FIX: the message previously printed the joint name in
                // the place where it claims to print the conflicting id.
                throw std::invalid_argument(
                    "Can not add Dynamixel \"" + joint_name + "\" with id " + std::to_string(id) +
                    ".\nIt is already added under different id (id:\"" + std::to_string(motor_check->id) + "\")!");
    }
    auto type = get_motor_type(id);
    if (type == model_t::None)
        throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
    auto motor = std::make_shared<Motor>(id, joint_name, type);
    motors[joint_name] = motor;
    std::weak_ptr<Motor> weak_motor(motors[joint_name]);
    // Register the one-shot (PID gain) and periodic state readers; hardware
    // error bits are routed to the per-error callbacks by joint name.
    motor->setup_init_sync_readers(initSyncReaders, portHandler, packetHandler);
    motor->setup_sync_readers(syncReaders, portHandler, packetHandler,
        [this, weak_motor](dynamixel::hardware_status s)
        {
            switch (s) {
            case dynamixel::hardware_status::Overload_Error:
                overload_error_callback(weak_motor.lock()->name);
                break;
            case dynamixel::hardware_status::Input_Voltage_Error:
                input_voltage_error_callback(weak_motor.lock()->name);
                break;
            case dynamixel::hardware_status::OverHeating_Error:
                overheating_error_callback(weak_motor.lock()->name);
                break;
            case dynamixel::hardware_status::Motor_Encoder_Error:
                motor_encoder_error_callback(weak_motor.lock()->name);
                break;
            case dynamixel::hardware_status::Electrical_Shock_Error:
                electrical_shock_error_callback(weak_motor.lock()->name);
                break;
            default:
                break;
            }
        });
}
/**
 * Pushes the queued commands over the bus as group sync writes.
 * For every motor with torque enabled (or when enable_torque is set) the
 * goal position is queued first; a NaN goal is seeded from the last read
 * position so the motor holds still.
 * @param enable_torque also command motors whose torque is currently off
 */
void write(bool enable_torque = false) {
for (const auto& [joint_name, motor]: motors) {
if(motor->torque || enable_torque) {
if(std::isnan(motor->goal_position))
motor->goal_position = motor->position;
set_position(joint_name, motor->goal_position, enable_torque);
}
}
// One packet per control-table field, then drop all queued data.
for (const auto& [field, syncWrite] : syncWrites) {
//(void)field;
/*auto result = */ syncWrite->txPacket();
//syncWrite->clearParam();
//std::cout<<"SyncWrite"<<std::endl;
//std::cout<<packetHandler->getTxRxResult(result)<<std::endl;
}
syncWrites.clear();
}
// Reads all registered state fields back from the bus: first the one-shot
// init readers (PID gains), then the periodic state readers. Once no motor
// is still rebooting, the init readers are dropped and pending after-reboot
// functions are executed.
void read() {
bool init_done = true;
for (auto it = initSyncReaders.begin(); it != initSyncReaders.end(); std::advance(it,1)) {
const auto& [field, syncReader] = *it;
(void)field;
/*auto result = */ syncReader->syncRead.txRxPacket();
//std::cout<<packetHandler->getTxRxResult(result)<<std::endl;
for (auto read_f : syncReader->read_functions) {
read_f(syncReader->syncRead);
}
// If no motor is rebooting then the initial read was succesfull.
if (!std::all_of(syncReader->motors.cbegin(), syncReader->motors.cend(),
[](const auto& m){ return !m->rebooting; })) {
init_done = false;
}
}
for (const auto& [field, syncReader] : syncReaders) {
(void)field;
/*auto result = */ syncReader->syncRead.txRxPacket();
//std::cout<<packetHandler->getTxRxResult(result)<<std::endl;
for (auto read_f : syncReader->read_functions) {
read_f(syncReader->syncRead);
}
}
if(init_done) {
// Initial values have arrived: retire the init readers and run any
// callbacks queued for completed reboots (e.g. restoring PID gains).
initSyncReaders.clear();
for (const auto& [joint_name, motor]: motors) {
if(motor->rebooted) {
motor->rebooted = false;
for(auto after_reboot_f:motor->after_reboot_functions) {
after_reboot_f();
}
}
}
}
}
/**
* Tries to ping every Dynamixel to check it they are reachable.
*/
void ping_all() const {
for (const auto& [joint_name, motor] : motors) {
get_motor_type(motor->id);
}
}
void for_each_joint(std::function<void(const std::string&)> f) {
for (const auto& [joint_name, motor]: motors) {
f(joint_name);
}
}
/**
 * Garanteed to have the same order as
 * get_positions, get_velocities and get_efforts.
 *
 * Use to construct the JointStates message.
 */
void get_joint_names(std::vector<std::string>& names) const {
    names.clear();
    names.reserve(motors.size());
    for (const auto& entry : motors)
        names.push_back(entry.first);
}
// Returns a pointer to the motor's goal-position field for direct use by a
// controller; the pointer stays valid while the motor remains registered.
// @throws std::invalid_argument if the joint is unknown
double* get_goal_position_ptr(const std::string& joint_name) const {
auto find = motors.find(joint_name);
if (find == motors.end())
throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
return &find->second->goal_position;
}
// Pointer to the cached position state (updated by read()).
// @throws std::invalid_argument if the joint is unknown
double* get_position_ptr(const std::string& joint_name) const {
auto find = motors.find(joint_name);
if (find == motors.end())
throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
return &find->second->position;
}
// Pointer to the cached velocity state (updated by read()).
// @throws std::invalid_argument if the joint is unknown
double* get_velocity_ptr(const std::string& joint_name) const {
auto find = motors.find(joint_name);
if (find == motors.end())
throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
return &find->second->velocity;
}
// Pointer to the cached effort state (updated by read()).
// @throws std::invalid_argument if the joint is unknown
double* get_effort_ptr(const std::string& joint_name) const {
auto find = motors.find(joint_name);
if (find == motors.end())
throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
return &find->second->effort;
}
/**
 * Garanteed to have the same order as
 * get_joint_names, get_velocities and get_efforts.
 *
 * Use to construct the JointStates message.
 * @param positions the array that gets filled with the data
 */
void get_positions(std::vector<double>& positions) const {
    positions.clear();
    positions.reserve(motors.size());
    for (const auto& entry : motors)
        positions.push_back(entry.second->position);
}
/**
* Garanteed to have the same order as
* get_joint_names, get_positions and get_efforts.
*
* Use to construct the JointStates message.
* @param velocities the array that gets filled with the data
*/
void get_velocities(std::vector<double>& velocities) const {
    velocities.clear();
    velocities.reserve(motors.size());
    // Map iteration order is deterministic, so indices match get_joint_names.
    for (const auto& entry : motors) {
        velocities.push_back(entry.second->velocity);
    }
}
/**
 * Guaranteed to have the same order as
 * get_joint_names, get_positions and get_velocities.
 *
 * Use to construct the JointStates message.
 * @param efforts the array that gets filled with the data
 */
void get_efforts(std::vector<double>& efforts) const {
    efforts.resize(motors.size());
    int i = 0;
    for (const auto& [joint_name, motor] : motors) {
        (void)joint_name;
        // Bug fix: previously this wrote a constant 0 instead of the motor's
        // tracked effort value, contradicting the documented contract and
        // get_effort_ptr(), which exposes the same motor->effort field.
        efforts[i++] = motor->effort;
    }
}
/**
* returns the number of actuators this controller handles.
* add_motor(...) to add more.
* @return returns the number of actuators
*/
std::size_t get_motor_count() const {
    // One map entry per joint added via add_motor(...).
    return motors.size();
}
/**
 * adds the specified motor's torque state into the specific GroupSyncWriter
 * to prepare it for writing over the bus.
 * @param joint_name the name of the joint
 * @param torque true -> enable torque, false -> no torque
 */
void set_torque(const std::string& joint_name, bool torque) {
    const auto it = motors.find(joint_name);
    if (it == motors.end())
        throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
    // Delegate to the Motor::Ptr overload that queues the sync-write entry.
    set_torque(it->second, torque);
}
/**
 * adds all motors' torque states into the GroupSyncWriter
 * to prepare them for writing over the bus.
 * @param torque true -> enable torque, false -> no torque
 */
void set_torque_all(bool torque) {
    // Remember each motor's previous torque state so we can roll back if
    // any single write fails, keeping the group in a consistent state.
    std::map<Motor::Ptr, bool> old_torques;
    try {
        for (const auto& [joint_name, motor] : motors) {
            old_torques[motor] = motor->torque;
            set_torque(motor, torque);
        }
    } catch (hardware_error&) {
        // Best-effort rollback of the motors already switched.
        for (const auto& [motor, old_torque] : old_torques) {
            set_torque(motor, old_torque);
        }
        // Bug fix: `throw e;` copied the caught object, slicing any derived
        // exception type; a bare `throw;` rethrows the original exception
        // with its dynamic type intact.
        throw;
    }
}
/**
 * adds the specified motor's position-data into the specific GroupSyncWriter
 * to prepare it for writing over the bus.
 * @param joint_name the name of the joint
 * @param position the position in radians the actuator should move to
 * @param enable_torque enable the torque if not enabled already
 */
void set_position(const std::string& joint_name, double position, bool enable_torque = false) {
    // Reject NaN early: it would silently corrupt the raw register value below.
    if (std::isnan(position))
        throw std::invalid_argument("Can't set position to NaN for Joint \"" + joint_name + "\"!");
    auto it_motor = motors.find(joint_name);
    if (it_motor == motors.end())
        throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
    auto motor = it_motor->second; //motors[joint_name];
    // A goal position only takes effect while torque is enabled on the servo.
    if (!motor->torque) {
        if (enable_torque)
            set_torque(motor, true);
        else
            throw torque_not_enabled(motor);
    }
    field command_field;
    motor->get_command(command::Goal_Position, command_field);
    // Convert radians to the motor's raw register encoding and queue the
    // write; the actual bus transfer happens with the next sync write.
    uint32_t p = motor->raw_from_position(position);
    motor->goal_position = position;
    add_syncwrite_data(motor->id, command_field, reinterpret_cast<uint8_t*>(&p));
}
/**
* Sets the pid gains for a motor. This is only possible after the first read
* as the initial values need to be read from the motor.
* @param joint_name the name of the joint
*/
void pid_gains(const std::string& joint_name, double& p, double& i, double& d) {
auto it_motor = motors.find(joint_name);
if (it_motor == motors.end())
throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
auto motor = it_motor->second;
//std::cout <<joint_name<<" target p:"<< p<<"\ti:"<< i<<"\td:" << d << std::endl;
//std::cout <<joint_name<<" current p:"<< motor->p_gain<<"\ti:"<< motor->i_gain<<"\td:" << motor->d_gain << std::endl;
if(std::isnan( motor->p_gain) || std::isnan( motor->i_gain) || std::isnan( motor->d_gain)) {
throw std::invalid_argument("Cant set pid gains for \"" + joint_name + "\" because motor doesn't have pid control!");
}
if(std::isnan(p)) {
p = motor->p_gain;
}
if(std::isnan(i)) {
i = motor->i_gain;
}
if(std::isnan(d)) {
d = motor->d_gain;
}
if( p != motor->p_gain) {
field command_field;
motor->get_command(command::Position_P_Gain, command_field);
uint32_t data = static_cast<uint32_t>(p);
motor->p_gain_target = p;
add_syncwrite_data(motor->id, command_field, reinterpret_cast<uint8_t*>(&data));
}
if( i != motor->i_gain) {
field command_field;
motor->get_command(command::Position_I_Gain, command_field);
uint32_t data = static_cast<uint32_t>(i);
motor->i_gain_target = i;
add_syncwrite_data(motor->id, command_field, reinterpret_cast<uint8_t*>(&data));
}
if( d != motor->d_gain) {
field command_field;
motor->get_command(command::Position_D_Gain, command_field);
uint32_t data = static_cast<uint32_t>(d);
motor->d_gain_target = d;
add_syncwrite_data(motor->id, command_field, reinterpret_cast<uint8_t*>(&data));
}
}
/**
* reboots a joint.
* @param joint_name the name of the joint
*/
void reboot(const std::string& joint_name) {
    const auto it = motors.find(joint_name);
    if (it == motors.end())
        throw std::invalid_argument("Joint \"" + joint_name + "\" not added to driver!");
    // Mark the motor as mid-reboot before issuing the command so readers
    // can skip it until it comes back.
    it->second->rebooting = true;
    reboot(it->second);
}
};
} // namespace dynamixel
|
//=================================================================================================
/*!
// \file src/mathtest/smatsmatkron/HCbUCa.cpp
// \brief Source file for the HCbUCa sparse matrix/sparse matrix Kronecker product math test
//
// Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedMatrix.h>
#include <blaze/math/HermitianMatrix.h>
#include <blaze/math/UpperMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/smatsmatkron/OperationTest.h>
#include <blazetest/system/MathTest.h>
#ifdef BLAZE_USE_HPX_THREADS
# include <hpx/hpx_main.hpp>
#endif
//=================================================================================================
//
// MAIN FUNCTION
//
//=================================================================================================
//*************************************************************************************************
int main()
{
    std::cout << " Running 'HCbUCa'..." << std::endl;

    using blazetest::mathtest::TypeA;
    using blazetest::mathtest::TypeB;

    try
    {
        // Matrix type definitions
        using HCb = blaze::HermitianMatrix< blaze::CompressedMatrix<TypeB> >;
        using UCa = blaze::UpperMatrix< blaze::CompressedMatrix<TypeA> >;

        // Creator type definitions
        using CHCb = blazetest::Creator<HCb>;
        using CUCa = blazetest::Creator<UCa>;

        // Systematic tests with small matrices: every size up to 4 crossed
        // with every feasible number of non-zero elements.
        for( size_t lhsSize=0UL; lhsSize<=4UL; ++lhsSize ) {
            for( size_t lhsNonZeros=0UL; lhsNonZeros<=lhsSize*lhsSize; ++lhsNonZeros ) {
                for( size_t rhsSize=0UL; rhsSize<=4UL; ++rhsSize ) {
                    for( size_t rhsNonZeros=0UL; rhsNonZeros<=UCa::maxNonZeros( rhsSize ); ++rhsNonZeros ) {
                        RUN_SMATSMATKRON_OPERATION_TEST( CHCb( lhsSize, lhsNonZeros ), CUCa( rhsSize, rhsNonZeros ) );
                    }
                }
            }
        }

        // Spot checks with large matrices
        RUN_SMATSMATKRON_OPERATION_TEST( CHCb( 9UL, 7UL ), CUCa( 8UL, 7UL ) );
        RUN_SMATSMATKRON_OPERATION_TEST( CHCb( 16UL, 7UL ), CUCa( 15UL, 7UL ) );
    }
    catch( std::exception& ex ) {
        std::cerr << "\n\n ERROR DETECTED during sparse matrix/sparse matrix Kronecker product:\n"
                  << ex.what() << "\n";
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
//*************************************************************************************************
|
#ifndef BOOST_MPL_LIMITS_UNROLLING_HPP_INCLUDED
#define BOOST_MPL_LIMITS_UNROLLING_HPP_INCLUDED
// Copyright Aleksey Gurtovoy 2000-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Source: /physbam_repository/External_Libraries/Archives/boost/boost/mpl/limits/unrolling.hpp,v $
// $Date: 2007/02/12 18:25:36 $
// $Revision: 1.1 $
// Default depth for MPL's loop-unrolled sequence implementations; users may
// override it by defining BOOST_MPL_LIMIT_UNROLLING before including this header.
#if !defined(BOOST_MPL_LIMIT_UNROLLING)
#   define BOOST_MPL_LIMIT_UNROLLING 4
#endif
#endif // BOOST_MPL_LIMITS_UNROLLING_HPP_INCLUDED
|
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/webui/chromeos/login/wrong_hwid_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/oobe_screen.h"
#include "chrome/grit/generated_resources.h"
#include "components/login/localized_values_builder.h"
namespace {
const char kJsScreenPath[] = "login.WrongHWIDScreen";
} // namespace
namespace chromeos {
// Binds the handler to its JS screen path; the delegate is attached later
// through SetDelegate().
WrongHWIDScreenHandler::WrongHWIDScreenHandler()
    : BaseScreenHandler(kJsScreenPath),
      delegate_(NULL),
      show_on_init_(false) {
}
WrongHWIDScreenHandler::~WrongHWIDScreenHandler() {
  // Notify the delegate so it drops its pointer to this destroyed actor.
  if (delegate_)
    delegate_->OnActorDestroyed(this);
}
// No preparation is needed before showing this screen.
void WrongHWIDScreenHandler::PrepareToShow() {
}
void WrongHWIDScreenHandler::Show() {
  if (page_is_ready()) {
    ShowScreen(OobeScreen::SCREEN_WRONG_HWID);
    return;
  }
  // Page not loaded yet: defer until Initialize() runs.
  show_on_init_ = true;
}
// Nothing to tear down; showing another screen replaces this one.
void WrongHWIDScreenHandler::Hide() {
}
void WrongHWIDScreenHandler::SetDelegate(Delegate* delegate) {
  delegate_ = delegate;
  // If the WebUI page already finished loading, complete any deferred
  // initialization (e.g. a pending Show()) right away.
  if (page_is_ready())
    Initialize();
}
// Registers the translated strings this screen's HTML references by key.
void WrongHWIDScreenHandler::DeclareLocalizedValues(
    ::login::LocalizedValuesBuilder* builder) {
  builder->Add("wrongHWIDScreenHeader", IDS_WRONG_HWID_SCREEN_HEADER);
  builder->Add("wrongHWIDMessageFirstPart",
               IDS_WRONG_HWID_SCREEN_MESSAGE_FIRST_PART);
  builder->Add("wrongHWIDMessageSecondPart",
               IDS_WRONG_HWID_SCREEN_MESSAGE_SECOND_PART);
  builder->Add("wrongHWIDScreenSkipLink",
               IDS_WRONG_HWID_SCREEN_SKIP_LINK);
}
void WrongHWIDScreenHandler::Initialize() {
  // Wait until both the page and the delegate are available.
  if (!page_is_ready() || !delegate_)
    return;
  if (!show_on_init_)
    return;
  // Replay the Show() that arrived before the page was ready.
  Show();
  show_on_init_ = false;
}
void WrongHWIDScreenHandler::RegisterMessages() {
  // JS -> C++: fired when the user clicks the skip link on the screen.
  AddCallback("wrongHWIDOnSkip", &WrongHWIDScreenHandler::HandleOnSkip);
}
// Forwards the user's "skip" action to the screen logic.
void WrongHWIDScreenHandler::HandleOnSkip() {
  if (delegate_)
    delegate_->OnExit();
}
} // namespace chromeos
|
#pragma once
#include "solid_box/all.hpp"
#include "solid_uv_sphere/solid_uv_sphere.hpp"
#include "wire_axes/all.hpp"
|
/*
* Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tencentcloud/dbbrain/v20191016/model/ModifyDiagDBInstanceConfResponse.h>
#include <tencentcloud/core/utils/rapidjson/document.h>
#include <tencentcloud/core/utils/rapidjson/writer.h>
#include <tencentcloud/core/utils/rapidjson/stringbuffer.h>
using TencentCloud::CoreInternalOutcome;
using namespace TencentCloud::Dbbrain::V20191016::Model;
using namespace std;
// Default constructor; this response carries no fields beyond the request id.
ModifyDiagDBInstanceConfResponse::ModifyDiagDBInstanceConfResponse()
{
}
/**
 * Parses the raw JSON payload of a ModifyDiagDBInstanceConf API call.
 * Validates the envelope (`Response`, `Response.RequestId`), records the
 * request id, and converts a server-side `Response.Error` object into an
 * error outcome. Returns a success outcome otherwise.
 */
CoreInternalOutcome ModifyDiagDBInstanceConfResponse::Deserialize(const string &payload)
{
    rapidjson::Document document;
    document.Parse(payload.c_str());

    if (document.HasParseError() || !document.IsObject())
    {
        return CoreInternalOutcome(Error("response not json format"));
    }
    if (!document.HasMember("Response") || !document["Response"].IsObject())
    {
        return CoreInternalOutcome(Error("response `Response` is null or not object"));
    }

    rapidjson::Value &response = document["Response"];
    if (!response.HasMember("RequestId") || !response["RequestId"].IsString())
    {
        return CoreInternalOutcome(Error("response `Response.RequestId` is null or not string"));
    }

    const string requestId(response["RequestId"].GetString());
    SetRequestId(requestId);

    // A present `Error` member means the API call failed server-side.
    if (response.HasMember("Error"))
    {
        const rapidjson::Value &error = response["Error"];
        if (!error.IsObject() ||
            !error.HasMember("Code") || !error["Code"].IsString() ||
            !error.HasMember("Message") || !error["Message"].IsString())
        {
            return CoreInternalOutcome(Error("response `Response.Error` format error").SetRequestId(requestId));
        }
        const string errorCode(error["Code"].GetString());
        const string errorMsg(error["Message"].GetString());
        return CoreInternalOutcome(Error(errorCode, errorMsg).SetRequestId(requestId));
    }

    return CoreInternalOutcome(true);
}
|
/****************************************************************************
*
* Copyright (c) 2013-2020 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#include "TERARANGER.hpp"
#include "TERARANGER.cpp"
#include <px4_platform_common/getopt.h>
#include <px4_platform_common/module.h>
// Prints the module description and command-line usage, following the PX4
// module convention (consumed by `teraranger -h` and the docs generator).
void
TERARANGER::print_usage()
{
	PRINT_MODULE_DESCRIPTION(
		R"DESCR_STR(
### Description
I2C bus driver for TeraRanger rangefinders.
The sensor/driver must be enabled using the parameter SENS_EN_TRANGER.
Setup/usage information: https://docs.px4.io/master/en/sensor/rangefinders.html#teraranger-rangefinders
)DESCR_STR");

	PRINT_MODULE_USAGE_NAME("teraranger", "driver");
	PRINT_MODULE_USAGE_SUBCATEGORY("distance_sensor");
	PRINT_MODULE_USAGE_COMMAND("start");
	PRINT_MODULE_USAGE_PARAMS_I2C_SPI_DRIVER(true, false);
	// 25 == ROTATION_DOWNWARD_FACING is the default orientation.
	PRINT_MODULE_USAGE_PARAM_INT('R', 25, 0, 25, "Sensor rotation - downward facing by default", true);
	PRINT_MODULE_USAGE_DEFAULT_COMMANDS();
}
// Factory used by the I2CSPIDriver framework: allocates, initializes and
// starts one driver instance for the bus described by {iterator}.
I2CSPIDriverBase *TERARANGER::instantiate(const BusCLIArguments &cli, const BusInstanceIterator &iterator,
					  int runtime_instance)
{
	TERARANGER *dev = new TERARANGER(iterator.configuredBusOption(), iterator.bus(), cli.orientation, cli.bus_frequency);

	if (dev == nullptr) {
		PX4_ERR("alloc failed");
		return nullptr;
	}

	if (dev->init() != PX4_OK) {
		delete dev;
		return nullptr;
	}

	dev->start();
	return dev;
}
// Entry point for the `teraranger` command: parses CLI options and dispatches
// start/stop/status to the I2CSPIDriver framework.
extern "C" __EXPORT int teraranger_main(int argc, char *argv[])
{
	using ThisDriver = TERARANGER;
	BusCLIArguments cli{true, false};
	cli.orientation = distance_sensor_s::ROTATION_DOWNWARD_FACING;
	// Bug fix: this was `00000` (an octal literal with value 0), which
	// configured an invalid 0 Hz I2C bus. TeraRanger sensors use the
	// standard 100 kHz I2C bus frequency.
	cli.default_i2c_frequency = 100000;

	int ch;
	while ((ch = cli.getopt(argc, argv, "R:")) != EOF) {
		switch (ch) {
		case 'R':
			// Sensor rotation/orientation override.
			cli.orientation = atoi(cli.optarg());
			break;
		}
	}

	const char *verb = cli.optarg();
	if (!verb) {
		ThisDriver::print_usage();
		return -1;
	}

	BusInstanceIterator iterator(MODULE_NAME, cli, DRV_DIST_DEVTYPE_TERARANGER);

	if (!strcmp(verb, "start")) {
		// Leftover debug state (static result + printf) removed; the
		// framework reports start failures itself.
		return ThisDriver::module_start(cli, iterator);
	}
	if (!strcmp(verb, "stop")) {
		return ThisDriver::module_stop(iterator);
	}
	if (!strcmp(verb, "status")) {
		return ThisDriver::module_status(iterator);
	}

	ThisDriver::print_usage();
	return -1;
}
|
class Solution {
public:
    // Returns the single value in [0, n] that is missing from nums, where
    // n == nums.size().
    //
    // Uses XOR folding instead of the Gauss sum n*(n+1)/2: every index and
    // every present value cancels pairwise, leaving the missing number.
    // This avoids the signed-integer overflow the sum formula hits for
    // large n, while staying O(n) time and O(1) space.
    int missingNumber(std::vector<int>& nums) {
        int acc = static_cast<int>(nums.size()); // seed with the index n itself
        for (int idx = 0; idx < static_cast<int>(nums.size()); ++idx) {
            acc ^= idx ^ nums[idx];
        }
        return acc;
    }
};
|
/// \file
// Range v3 library
//
// Copyright Eric Niebler 2014-present
//
// Use, modification and distribution is subject to the
// Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Project home: https://github.com/ericniebler/range-v3
//
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef RANGES_V3_ALGORITHM_NTH_ELEMENT_HPP
#define RANGES_V3_ALGORITHM_NTH_ELEMENT_HPP
#include <utility>
#include <range/v3/range_fwd.hpp>
#include <range/v3/algorithm/min_element.hpp>
#include <range/v3/functional/comparisons.hpp>
#include <range/v3/functional/identity.hpp>
#include <range/v3/functional/invoke.hpp>
#include <range/v3/iterator/concepts.hpp>
#include <range/v3/iterator/operations.hpp>
#include <range/v3/iterator/traits.hpp>
#include <range/v3/range/access.hpp>
#include <range/v3/range/concepts.hpp>
#include <range/v3/range/dangling.hpp>
#include <range/v3/range/traits.hpp>
#include <range/v3/utility/static_const.hpp>
#include <range/v3/utility/swap.hpp>
#include <range/v3/detail/prologue.hpp>
namespace ranges
{
/// \cond
namespace detail
{
// stable, 2-3 compares, 0-2 swaps
// Sorts the three elements *x, *y, *z in place under {pred}/{proj} and
// returns the number of swaps performed (0, 1 or 2). The swap count is used
// by nth_element to detect already-partitioned input.
template(typename I, typename C, typename P)(
    /// \pre
    requires forward_iterator<I> AND indirect_relation<C, projected<I, P>>)
unsigned sort3(I x, I y, I z, C & pred, P & proj)
{
    unsigned r = 0;
    if(!invoke(pred, invoke(proj, *y), invoke(proj, *x))) // if x <= y
    {
        if(!invoke(pred, invoke(proj, *z), invoke(proj, *y))) // if y <= z
            return r; // x <= y && y <= z
        // x <= y && y > z
        ranges::iter_swap(y, z); // x <= z && y < z
        r = 1;
        if(invoke(pred, invoke(proj, *y), invoke(proj, *x))) // if x > y
        {
            ranges::iter_swap(x, y); // x < y && y <= z
            r = 2;
        }
        return r; // x <= y && y < z
    }
    if(invoke(pred, invoke(proj, *z), invoke(proj, *y))) // x > y, if y > z
    {
        ranges::iter_swap(x, z); // x < y && y < z
        r = 1;
        return r;
    }
    ranges::iter_swap(x, y); // x > y && y <= z
    r = 1; // x < y && x <= z
    if(invoke(pred, invoke(proj, *z), invoke(proj, *y))) // if y > z
    {
        ranges::iter_swap(y, z); // x <= y && y < z
        r = 2;
    }
    return r;
} // x <= y && y <= z
// Simple O(n^2) selection sort used by nth_element for short subranges
// (length <= limit), where it beats further partitioning.
// Precondition: the range [first, last) is non-empty.
template(typename I, typename C, typename P)(
    /// \pre
    requires bidirectional_iterator<I> AND indirect_relation<C, projected<I, P>>)
void selection_sort(I first, I last, C & pred, P & proj)
{
    RANGES_EXPECT(first != last);
    for(I lm1 = ranges::prev(last); first != lm1; ++first)
    {
        // Move the minimum of the unsorted tail to the front position.
        I i = ranges::min_element(first, last, std::ref(pred), std::ref(proj));
        if(i != first)
            ranges::iter_swap(first, i);
    }
}
} // namespace detail
/// \endcond
/// \addtogroup group-algorithms
/// @{
RANGES_FUNC_BEGIN(nth_element)
/// \brief function template \c nth_element
// Introselect-style nth_element: iteratively partitions around a
// median-of-three pivot, recursing (via loop/goto) only into the side that
// contains {nth}. Small subranges fall back to selection_sort; special-cased
// handling avoids quadratic behavior on ranges of equal elements.
template(typename I, typename S, typename C = less, typename P = identity)(
    /// \pre
    requires random_access_iterator<I> AND sortable<I, C, P>)
I RANGES_FUNC(nth_element)(
    I first, I nth, S end_, C pred = C{}, P proj = P{}) //
{
    I last = ranges::next(nth, end_), end_orig = last;
    // C is known to be a reference type
    using difference_type = iter_difference_t<I>;
    // Below this length, selection sort is cheaper than more partitioning.
    difference_type const limit = 7;
    while(true)
    {
    restart:
        if(nth == last)
            return end_orig;
        difference_type len = last - first;
        switch(len)
        {
        case 0:
        case 1:
            return end_orig;
        case 2:
            if(invoke(pred, invoke(proj, *--last), invoke(proj, *first)))
                ranges::iter_swap(first, last);
            return end_orig;
        case 3:
        {
            I m = first;
            detail::sort3(first, ++m, --last, pred, proj);
            return end_orig;
        }
        }
        if(len <= limit)
        {
            detail::selection_sort(first, last, pred, proj);
            return end_orig;
        }
        // len > limit >= 3
        // Median-of-three pivot selection: first, middle, last-1.
        I m = first + len / 2;
        I lm1 = last;
        unsigned n_swaps = detail::sort3(first, m, --lm1, pred, proj);
        // *m is median
        // partition [first, m) < *m and *m <= [m, last)
        //(this inhibits tossing elements equivalent to m around unnecessarily)
        I i = first;
        I j = lm1;
        // j points beyond range to be tested, *lm1 is known to be <= *m
        // The search going up is known to be guarded but the search coming down
        // isn't. Prime the downward search with a guard.
        if(!invoke(pred, invoke(proj, *i), invoke(proj, *m))) // if *first == *m
        {
            // *first == *m, *first doesn't go in first part
            // manually guard downward moving j against i
            while(true)
            {
                if(i == --j)
                {
                    // *first == *m, *m <= all other elements
                    // Parition instead into [first, i) == *first and *first < [i,
                    // last)
                    ++i; // first + 1
                    j = last;
                    if(!invoke(
                           pred,
                           invoke(proj, *first),
                           invoke(
                               proj,
                               *--j))) // we need a guard if *first == *(last-1)
                    {
                        while(true)
                        {
                            if(i == j)
                                return end_orig; // [first, last) all equivalent
                                                 // elements
                            if(invoke(
                                   pred, invoke(proj, *first), invoke(proj, *i)))
                            {
                                ranges::iter_swap(i, j);
                                ++n_swaps;
                                ++i;
                                break;
                            }
                            ++i;
                        }
                    }
                    // [first, i) == *first and *first < [j, last) and j == last -
                    // 1
                    if(i == j)
                        return end_orig;
                    while(true)
                    {
                        while(
                            !invoke(pred, invoke(proj, *first), invoke(proj, *i)))
                            ++i;
                        while(invoke(
                            pred, invoke(proj, *first), invoke(proj, *--j)))
                            ;
                        if(i >= j)
                            break;
                        ranges::iter_swap(i, j);
                        ++n_swaps;
                        ++i;
                    }
                    // [first, i) == *first and *first < [i, last)
                    // The first part is sorted,
                    if(nth < i)
                        return end_orig;
                    // nth_element the second part
                    // nth_element<C>(i, nth, last, pred);
                    first = i;
                    goto restart;
                }
                if(invoke(pred, invoke(proj, *j), invoke(proj, *m)))
                {
                    ranges::iter_swap(i, j);
                    ++n_swaps;
                    break; // found guard for downward moving j, now use unguarded
                           // partition
                }
            }
        }
        ++i;
        // j points beyond range to be tested, *lm1 is known to be <= *m
        // if not yet partitioned...
        if(i < j)
        {
            // known that *(i - 1) < *m
            while(true)
            {
                // m still guards upward moving i
                while(invoke(pred, invoke(proj, *i), invoke(proj, *m)))
                    ++i;
                // It is now known that a guard exists for downward moving j
                while(!invoke(pred, invoke(proj, *--j), invoke(proj, *m)))
                    ;
                if(i >= j)
                    break;
                ranges::iter_swap(i, j);
                ++n_swaps;
                // It is known that m != j
                // If m just moved, follow it
                if(m == i)
                    m = j;
                ++i;
            }
        }
        // [first, i) < *m and *m <= [i, last)
        if(i != m && invoke(pred, invoke(proj, *m), invoke(proj, *i)))
        {
            ranges::iter_swap(i, m);
            ++n_swaps;
        }
        // [first, i) < *i and *i <= [i+1, last)
        if(nth == i)
            return end_orig;
        if(n_swaps == 0)
        {
            // We were given a perfectly partitioned sequence. Coincidence?
            if(nth < i)
            {
                // Check for [first, i) already sorted
                j = m = first;
                while(++j != i)
                {
                    if(invoke(pred, invoke(proj, *j), invoke(proj, *m)))
                        // not yet sorted, so sort
                        goto not_sorted;
                    m = j;
                }
                // [first, i) sorted
                return end_orig;
            }
            else
            {
                // Check for [i, last) already sorted
                j = m = i;
                while(++j != last)
                {
                    if(invoke(pred, invoke(proj, *j), invoke(proj, *m)))
                        // not yet sorted, so sort
                        goto not_sorted;
                    m = j;
                }
                // [i, last) sorted
                return end_orig;
            }
        }
    not_sorted:
        // nth_element on range containing nth
        if(nth < i)
        {
            // nth_element<C>(first, nth, i, pred);
            last = i;
        }
        else
        {
            // nth_element<C>(i+1, nth, last, pred);
            first = ++i;
        }
    }
    return end_orig;
}
/// \overload
// Range overload: forwards to the iterator/sentinel overload above,
// returning a borrowed iterator so dangling ranges are diagnosed.
template(typename Rng, typename C = less, typename P = identity)(
    /// \pre
    requires random_access_range<Rng> AND sortable<iterator_t<Rng>, C, P>)
borrowed_iterator_t<Rng> RANGES_FUNC(nth_element)(
    Rng && rng, iterator_t<Rng> nth, C pred = C{}, P proj = P{}) //
{
    return (*this)(
        begin(rng), std::move(nth), end(rng), std::move(pred), std::move(proj));
}
RANGES_FUNC_END(nth_element)
namespace cpp20
{
using ranges::nth_element;
}
/// @}
} // namespace ranges
#include <range/v3/detail/epilogue.hpp>
#endif
|
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/wasm-code-manager.h"
#include <iomanip>
#include "src/base/build_config.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/diagnostics/disassembler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
#include "src/utils/vector.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
#if defined(V8_OS_WIN64)
#include "src/base/platform/wrappers.h"
#include "src/diagnostics/unwinding-info-win64.h"
#endif // V8_OS_WIN64
#define TRACE_HEAP(...) \
do { \
if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
} while (false)
namespace v8 {
namespace internal {
namespace wasm {
using trap_handler::ProtectedInstructionData;
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
#endif
// Returns {new_region} back to the pool, coalescing it with the adjacent
// region above and/or below so the pool always holds maximal disjoint regions.
base::AddressRegion DisjointAllocationPool::Merge(
    base::AddressRegion new_region) {
  // Find the possible insertion position by identifying the first region whose
  // start address is not less than that of {new_region}. Since there cannot be
  // any overlap between regions, this also means that the start of {above} is
  // bigger or equal than the *end* of {new_region}.
  auto above = regions_.lower_bound(new_region);
  DCHECK(above == regions_.end() || above->begin() >= new_region.end());
  // Check whether to merge with {above}.
  if (above != regions_.end() && new_region.end() == above->begin()) {
    base::AddressRegion merged_region{new_region.begin(),
                                      new_region.size() + above->size()};
    DCHECK_EQ(merged_region.end(), above->end());
    // Check whether to also merge with the region below.
    if (above != regions_.begin()) {
      auto below = above;
      --below;
      if (below->end() == new_region.begin()) {
        merged_region = {below->begin(), below->size() + merged_region.size()};
        regions_.erase(below);
      }
    }
    auto insert_pos = regions_.erase(above);
    regions_.insert(insert_pos, merged_region);
    return merged_region;
  }
  // No element below, and not adjacent to {above}: insert and done.
  if (above == regions_.begin()) {
    regions_.insert(above, new_region);
    return new_region;
  }
  auto below = above;
  --below;
  // Consistency check:
  DCHECK(above == regions_.end() || below->end() < above->begin());
  // Adjacent to {below}: merge and done.
  if (below->end() == new_region.begin()) {
    base::AddressRegion merged_region{below->begin(),
                                      below->size() + new_region.size()};
    DCHECK_EQ(merged_region.end(), new_region.end());
    regions_.erase(below);
    regions_.insert(above, merged_region);
    return merged_region;
  }
  // Not adjacent to any existing region: insert between {below} and {above}.
  DCHECK_LT(below->end(), new_region.begin());
  regions_.insert(above, new_region);
  return new_region;
}
// Convenience wrapper: allocate {size} bytes anywhere in the pool by using
// the unrestricted address range.
base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
  return AllocateInRegion(size,
                          {kNullAddress, std::numeric_limits<size_t>::max()});
}
// Carves a {size}-byte region out of the first pool region that overlaps
// {region} by at least {size} bytes; returns an empty region on failure.
base::AddressRegion DisjointAllocationPool::AllocateInRegion(
    size_t size, base::AddressRegion region) {
  // Get an iterator to the first contained region whose start address is not
  // smaller than the start address of {region}. Start the search from the
  // region one before that (the last one whose start address is smaller).
  auto it = regions_.lower_bound(region);
  if (it != regions_.begin()) --it;
  for (auto end = regions_.end(); it != end; ++it) {
    base::AddressRegion overlap = it->GetOverlap(region);
    if (size > overlap.size()) continue;
    // Allocate from the front of the overlap.
    base::AddressRegion ret{overlap.begin(), size};
    base::AddressRegion old = *it;
    auto insert_pos = regions_.erase(it);
    if (size == old.size()) {
      // We use the full region --> nothing to add back.
    } else if (ret.begin() == old.begin()) {
      // We return a region at the start --> shrink old region from front.
      regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
    } else if (ret.end() == old.end()) {
      // We return a region at the end --> shrink remaining region.
      regions_.insert(insert_pos, {old.begin(), old.size() - size});
    } else {
      // We return something in the middle --> split the remaining region
      // (insert the region with smaller address first).
      regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
      regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
    }
    return ret;
  }
  // No pool region large enough within {region}.
  return {};
}
Address WasmCode::constant_pool() const {
  // A constant pool exists only when the flag is on and the pool section is
  // non-empty (its offset lies strictly before the code comments section).
  if (FLAG_enable_embedded_constant_pool &&
      constant_pool_offset_ < code_comments_offset_) {
    return instruction_start() + constant_pool_offset_;
  }
  return kNullAddress;
}
// Start address of the exception handler table within this code object.
Address WasmCode::handler_table() const {
  return instruction_start() + handler_table_offset_;
}
// Size in bytes of the handler table: it extends up to the constant pool.
int WasmCode::handler_table_size() const {
  DCHECK_GE(constant_pool_offset_, handler_table_offset_);
  return static_cast<int>(constant_pool_offset_ - handler_table_offset_);
}
// Start address of the code comments section within this code object.
Address WasmCode::code_comments() const {
  return instruction_start() + code_comments_offset_;
}
// Size in bytes of the code comments: they extend to the unpadded end of the
// code object.
int WasmCode::code_comments_size() const {
  DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
  return static_cast<int>(unpadded_binary_size_ - code_comments_offset_);
}
// Copies the given byte vectors back-to-back into one freshly allocated
// buffer and returns ownership of it.
std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
    std::initializer_list<Vector<const byte>> vectors) {
  // Compute the combined length up front so we allocate exactly once.
  size_t total_size = 0;
  for (const auto& vec : vectors) total_size += vec.size();
  // Default-initialization (no zeroing): every byte is overwritten below.
  std::unique_ptr<byte[]> result{new byte[total_size]};
  byte* dest = result.get();
  for (const auto& vec : vectors) {
    if (vec.empty()) continue;  // Avoid nullptr in {memcpy}.
    base::Memcpy(dest, vec.begin(), vec.size());
    dest += vec.size();
  }
  return result;
}
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!has_trap_handler_index());
if (kind() != WasmCode::kFunction) return;
if (protected_instructions_size_ == 0) return;
Address base = instruction_start();
size_t size = instructions().size();
auto protected_instruction_data = this->protected_instructions();
const int index =
RegisterHandlerData(base, size, protected_instruction_data.size(),
protected_instruction_data.begin());
// TODO(eholk): if index is negative, fail.
CHECK_LE(0, index);
set_trap_handler_index(index);
DCHECK(has_trap_handler_index());
}
// Whether code events should be emitted for code of this isolate.
// The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
// to call {WasmEngine::EnableCodeLogging} if this return value would change
// for any isolate. Otherwise we might lose code events.
bool WasmCode::ShouldBeLogged(Isolate* isolate) {
  if (isolate->logger()->is_listening_to_code_events()) return true;
  if (isolate->code_event_dispatcher()->IsListeningToCodeEvents()) return true;
  return isolate->is_profiling();
}
// Emits code-creation (and line-info) events for this code object so that
// profilers can attribute samples to it. Only call when {ShouldBeLogged}
// returned true for {isolate}.
void WasmCode::LogCode(Isolate* isolate, const char* source_url,
                       int script_id) const {
  DCHECK(ShouldBeLogged(isolate));
  // Anonymous code has no function index; nothing meaningful to log.
  if (IsAnonymous()) return;
  ModuleWireBytes wire_bytes(native_module_->wire_bytes());
  const WasmModule* module = native_module_->module();
  // Resolve a human-readable name from the name section / export table.
  WireBytesRef name_ref = module->lazily_generated_names.LookupFunctionName(
      wire_bytes, index(), VectorOf(module->export_table));
  WasmName name = wire_bytes.GetNameOrNull(name_ref);
  const WasmDebugSymbols& debug_symbols = module->debug_symbols;
  auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
  auto source_map = native_module_->GetWasmSourceMap();
  // Lazily load an external source map if the module's debug symbols
  // reference one and the embedder installed a loader callback.
  if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
      !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
    WasmName external_url =
        wire_bytes.GetNameOrNull(debug_symbols.external_url);
    std::string external_url_string(external_url.data(), external_url.size());
    HandleScope scope(isolate);
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
    Local<v8::String> source_map_str =
        load_wasm_source_map(v8_isolate, external_url_string.c_str());
    native_module_->SetWasmSourceMap(
        std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
  }
  // Synthesize a name if needed: wasm-to-js wrappers get a signature-based
  // name, and unnamed functions get "wasm-function[index]".
  std::string name_buffer;
  if (kind() == kWasmToJsWrapper) {
    name_buffer = "wasm-to-js:";
    size_t prefix_len = name_buffer.size();
    constexpr size_t kMaxSigLength = 128;
    name_buffer.resize(prefix_len + kMaxSigLength);
    const FunctionSig* sig = module->functions[index_].sig;
    size_t sig_length =
        PrintSignature(VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
    name_buffer.resize(prefix_len + sig_length);
    // If the import has a name, also append that (separated by "-").
    if (!name.empty()) {
      name_buffer += '-';
      name_buffer.append(name.begin(), name.size());
    }
    name = VectorOf(name_buffer);
  } else if (name.empty()) {
    name_buffer.resize(32);
    name_buffer.resize(
        SNPrintF(VectorOf(&name_buffer.front(), name_buffer.size()),
                 "wasm-function[%d]", index()));
    name = VectorOf(name_buffer);
  }
  // TODO(clemensb): Remove this #if once this compilation unit is excluded in
  // no-wasm builds.
#if V8_ENABLE_WEBASSEMBLY
  int code_offset = module->functions[index_].code.offset();
  PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
                                   source_url, code_offset, script_id));
#endif  // V8_ENABLE_WEBASSEMBLY
  // Also emit source-position info (if present) for line-level attribution.
  if (!source_positions().empty()) {
    LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
                                                       source_positions()));
  }
}
// Debug-only consistency check over all relocation entries of this code:
// verifies that only expected reloc modes occur and that call / reference
// targets point where they must (jump tables, or into this code object).
void WasmCode::Validate() const {
#ifdef DEBUG
  // Scope for foreign WasmCode pointers.
  WasmCodeRefScope code_ref_scope;
  // We expect certain relocation info modes to never appear in {WasmCode}
  // objects or to be restricted to a small set of valid values. Hence the
  // iteration below does not use a mask, but visits all relocation data.
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    switch (mode) {
      case RelocInfo::WASM_CALL: {
        // Direct wasm-to-wasm calls must target a jump table slot.
        Address target = it.rinfo()->wasm_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::WASM_STUB_CALL: {
        // Runtime stub calls must also be routed via a jump table.
        Address target = it.rinfo()->wasm_stub_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::INTERNAL_REFERENCE:
      case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
        // Internal references (e.g. switch table entries) must point into
        // this very code object.
        Address target = it.rinfo()->target_internal_reference();
        CHECK(contains(target));
        break;
      }
      case RelocInfo::EXTERNAL_REFERENCE:
      case RelocInfo::CONST_POOL:
      case RelocInfo::VENEER_POOL:
        // These are OK to appear.
        break;
      default:
        FATAL("Unexpected mode: %d", mode);
    }
  }
#endif
}
// Prints this code object iff the relevant tracing flags request it.
void WasmCode::MaybePrint(const char* name) const {
  // --print-wasm-code-function-index selects a single function by index.
  const bool index_matches =
      !IsAnonymous() &&
      FLAG_print_wasm_code_function_index == static_cast<int>(index());
  bool should_print;
  if (FLAG_print_code) {
    should_print = true;
  } else if (kind() == kFunction) {
    should_print = FLAG_print_wasm_code || index_matches;
  } else {
    should_print = FLAG_print_wasm_stub_code;
  }
  if (should_print) Print(name);
}
// Unconditionally prints a disassembly of this code object to stdout,
// including the Liftoff debug side table when one already exists.
void WasmCode::Print(const char* name) const {
  StdoutStream os;
  os << "--- WebAssembly code ---\n";
  Disassemble(name, os);
  if (native_module_->HasDebugInfo()) {
    auto* side_table =
        native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this);
    if (side_table != nullptr) side_table->Print(os);
  }
  os << "--- End code ---\n";
}
// Writes a full human-readable dump of this code object to {os}: header
// info, disassembled instructions, handler table, protected instructions,
// source positions, safepoints, and relocation info. {current_pc} (if any)
// marks the current position in the disassembly listing.
void WasmCode::Disassemble(const char* name, std::ostream& os,
                           Address current_pc) const {
  if (name) os << "name: " << name << "\n";
  if (!IsAnonymous()) os << "index: " << index() << "\n";
  os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
  os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
  size_t padding = instructions().size() - unpadded_binary_size_;
  os << "Body (size = " << instructions().size() << " = "
     << unpadded_binary_size_ << " + " << padding << " padding)\n";
#ifdef ENABLE_DISASSEMBLER
  // The instruction stream proper ends at the first of the metadata tables;
  // clamp {instruction_size} to whichever table starts first.
  int instruction_size = unpadded_binary_size_;
  if (constant_pool_offset_ < instruction_size) {
    instruction_size = constant_pool_offset_;
  }
  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
    instruction_size = safepoint_table_offset_;
  }
  if (handler_table_offset_ < instruction_size) {
    instruction_size = handler_table_offset_;
  }
  DCHECK_LT(0, instruction_size);
  os << "Instructions (size = " << instruction_size << ")\n";
  Disassembler::Decode(nullptr, &os, instructions().begin(),
                       instructions().begin() + instruction_size,
                       CodeReference(this), current_pc);
  os << "\n";
  if (handler_table_size() > 0) {
    HandlerTable table(this);
    os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
       << "):\n";
    table.HandlerTableReturnPrint(os);
    os << "\n";
  }
  if (protected_instructions_size_ > 0) {
    os << "Protected instructions:\n pc offset land pad\n";
    for (auto& data : protected_instructions()) {
      os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
         << std::hex << data.landing_offset << "\n";
    }
    os << "\n";
  }
  if (!source_positions().empty()) {
    os << "Source positions:\n pc offset position\n";
    for (SourcePositionTableIterator it(source_positions()); !it.done();
         it.Advance()) {
      os << std::setw(10) << std::hex << it.code_offset() << std::dec
         << std::setw(10) << it.source_position().ScriptOffset()
         << (it.is_statement() ? "  statement" : "") << "\n";
    }
    os << "\n";
  }
  if (safepoint_table_offset_ > 0) {
    SafepointTable table(this);
    os << "Safepoints (size = " << table.size() << ")\n";
    for (uint32_t i = 0; i < table.length(); i++) {
      uintptr_t pc_offset = table.GetPcOffset(i);
      os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
      os << std::setw(6) << std::hex << pc_offset << "  " << std::dec;
      table.PrintEntry(i, os);
      os << " (sp -> fp)";
      SafepointEntry entry = table.GetEntry(i);
      if (entry.trampoline_pc() != SafepointEntry::kNoTrampolinePC) {
        os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
      }
      if (entry.has_register_bits()) {
        os << " registers: ";
        uint32_t register_bits = entry.register_bits();
        // Print only the significant bits, most-significant first.
        int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
        for (int i = bits - 1; i >= 0; --i) {
          os << ((register_bits >> i) & 1);
        }
      }
      os << "\n";
    }
    os << "\n";
  }
  os << "RelocInfo (size = " << reloc_info().size() << ")\n";
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    it.rinfo()->Print(nullptr, os);
  }
  os << "\n";
#endif  // ENABLE_DISASSEMBLER
}
// Maps a {WasmCode::Kind} to a human-readable name for logs and dumps.
const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
  if (kind == WasmCode::kFunction) return "wasm function";
  if (kind == WasmCode::kWasmToCapiWrapper) return "wasm-to-capi";
  if (kind == WasmCode::kWasmToJsWrapper) return "wasm-to-js";
  if (kind == WasmCode::kJumpTable) return "jump table";
  return "unknown kind";
}
// Unregisters this code from the trap handler, if it had been registered.
WasmCode::~WasmCode() {
  if (!has_trap_handler_index()) return;
  trap_handler::ReleaseHandlerData(trap_handler_index());
}
// Decrements the ref count of code that may or may not already be known to
// the engine as potentially dead. Returns true iff the count dropped to zero
// and the caller must free this code.
V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
  const bool newly_dead = native_module_->engine()->AddPotentiallyDeadCode(this);
  if (newly_dead) {
    // The code just became potentially dead. The ref count we wanted to
    // decrement is now transferred to the set of potentially dead code, and
    // will be decremented when the next GC is run.
    return false;
  }
  // If we reach here, the code was already potentially dead. Decrement the ref
  // count, and return true if it drops to zero.
  return DecRefOnDeadCode();
}
// static
// Decrements the ref counter of all given code objects; any that drop to
// zero are collected per native module and handed to the engine for freeing.
void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
  WasmEngine::DeadCodeMap dead_code;
  WasmEngine* engine = nullptr;
  for (WasmCode* dead_candidate : code_vec) {
    // Skip code that still has remaining references.
    if (!dead_candidate->DecRef()) continue;
    NativeModule* owner = dead_candidate->native_module();
    dead_code[owner].push_back(dead_candidate);
    // All code objects must belong to the same engine.
    if (engine == nullptr) engine = owner->engine();
    DCHECK_EQ(engine, owner->engine());
  }
  DCHECK_EQ(dead_code.empty(), engine == nullptr);
  if (engine != nullptr) engine->FreeDeadCode(dead_code);
}
int WasmCode::GetSourcePositionBefore(int offset) {
int position = kNoSourcePosition;
for (SourcePositionTableIterator iterator(source_positions());
!iterator.done() && iterator.code_offset() < offset;
iterator.Advance()) {
position = iterator.source_position().ScriptOffset();
}
return position;
}
// Releases the allocator mutex, but only if {Lock} was actually called.
WasmCodeAllocator::OptionalLock::~OptionalLock() {
  if (allocator_ == nullptr) return;
  allocator_->mutex_.Unlock();
}
// Acquires {allocator}'s mutex and remembers it for release in the dtor.
void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
  DCHECK(!is_locked());
  // Store the allocator first; {is_locked} keys off {allocator_}.
  allocator_ = allocator;
  allocator_->mutex_.Lock();
}
// static
// Out-of-line definition; required for ODR-uses of the in-class constant
// before C++17 inline variables.
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
// Takes ownership of the initial {code_space} reservation. Nothing is
// committed yet; pages are committed on demand in {AllocateForCodeInRegion}.
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
                                     VirtualMemory code_space,
                                     std::shared_ptr<Counters> async_counters)
    : code_manager_(code_manager),
      free_code_space_(code_space.region()),
      async_counters_(std::move(async_counters)) {
  // Most modules need only a few code spaces; avoid early reallocation.
  owned_code_space_.reserve(4);
  owned_code_space_.emplace_back(std::move(code_space));
  // Record that this module starts with a single code space.
  async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
// Returns all reserved code space to the code manager, reporting how much of
// it was actually committed.
WasmCodeAllocator::~WasmCodeAllocator() {
  const size_t committed = committed_code_space();
  code_manager_->FreeNativeModule(VectorOf(owned_code_space_), committed);
}
// Registers the initial (and at this point only) reservation as the first
// code space of {native_module}.
void WasmCodeAllocator::Init(NativeModule* native_module) {
  DCHECK_EQ(1, owned_code_space_.size());
  const base::AddressRegion initial_region = owned_code_space_.front().region();
  native_module->AddCodeSpace(initial_region, {});
}
namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
// memory, we append that memory at the end of the owned_code_space_ list, we
// traverse that list in reverse order to find the reservation(s) that guide how
// to chunk the region to commit.
#if V8_OS_WIN
constexpr bool kNeedsToSplitRangeByReservations = true;
#else
constexpr bool kNeedsToSplitRangeByReservations = false;
#endif
// Splits {range} into sub-ranges such that each sub-range lies entirely
// within a single reservation of {owned_code_space}. On platforms that do not
// need splitting, returns {range} unchanged.
base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
    base::AddressRegion range,
    const std::vector<VirtualMemory>& owned_code_space) {
  if (!kNeedsToSplitRangeByReservations) return {range};
  base::SmallVector<base::AddressRegion, 1> split_ranges;
  // Track the part of {range} not yet attributed to a reservation.
  size_t missing_begin = range.begin();
  size_t missing_end = range.end();
  for (auto& vmem : base::Reversed(owned_code_space)) {
    // Intersect the still-missing part with this reservation.
    Address overlap_begin = std::max(missing_begin, vmem.address());
    Address overlap_end = std::min(missing_end, vmem.end());
    if (overlap_begin >= overlap_end) continue;
    split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
    // Opportunistically reduce the missing range. This might terminate the loop
    // early.
    if (missing_begin == overlap_begin) missing_begin = overlap_end;
    if (missing_end == overlap_end) missing_end = overlap_begin;
    if (missing_begin >= missing_end) break;
  }
#ifdef ENABLE_SLOW_DCHECKS
  // The returned vector should cover the full range.
  size_t total_split_size = 0;
  for (auto split : split_ranges) total_split_size += split.size();
  DCHECK_EQ(range.size(), total_split_size);
#endif
  return split_ranges;
}
// Number of wasm-function slots needed in a far jump table; zero on
// platforms where all code spaces are within near-call distance.
int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
  return NativeModule::kNeedsFarJumpsBetweenCodeSpaces
             ? static_cast<int>(num_declared_functions)
             : 0;
}
// Returns an overapproximation of the code size overhead per new code space
// created by the jump tables.
size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
  // Overhead for the jump table.
  size_t overhead = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
#if defined(V8_OS_WIN64)
  // On Win64, we need to reserve some pages at the beginning of an executable
  // space. See {AddCodeSpace}.
  overhead += Heap::GetCodeRangeReservedAreaSize();
#endif  // V8_OS_WIN64
  // Overhead for the far jump table.
  overhead +=
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          WasmCode::kRuntimeStubCount,
          NumWasmFunctionsInFarJumpTable(num_declared_functions)));
  return overhead;
}
// Computes how much address space to reserve for the next code space, given
// the size needed now, the per-space overhead, and the total reserved so far.
size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
                       size_t total_reserved) {
  size_t overhead = OverheadPerCodeSpace(num_declared_functions);
  // Reserve a power of two at least as big as any of
  //   a) needed size + overhead (this is the minimum needed)
  //   b) 2 * overhead (to not waste too much space by overhead)
  //   c) 1/4 of current total reservation size (to grow exponentially)
  size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
      std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
                        2 * overhead),
               total_reserved / 4));
  // Limit by the maximum supported code space size.
  return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
}
}  // namespace
// Allocates {size} bytes of code space anywhere in the module's code regions.
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
                                                size_t size) {
  // Delegate to the region-restricted variant with no restriction and
  // without a pre-acquired lock.
  WasmCodeAllocator::OptionalLock no_lock;
  return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion,
                                 no_lock);
}
// Allocates {size} bytes of code space within {region} (or anywhere, if
// {region} is {kUnrestrictedRegion}), growing the reservation and committing
// pages as needed. {optional_lock} may already hold the allocator mutex;
// otherwise it is acquired here.
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
    NativeModule* native_module, size_t size, base::AddressRegion region,
    const WasmCodeAllocator::OptionalLock& optional_lock) {
  // Ensure the mutex is held for the rest of this function, either via the
  // caller's lock or a freshly acquired one.
  OptionalLock new_lock;
  if (!optional_lock.is_locked()) new_lock.Lock(this);
  const auto& locked_lock =
      optional_lock.is_locked() ? optional_lock : new_lock;
  DCHECK(locked_lock.is_locked());
  DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
  DCHECK_LT(0, size);
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size = RoundUp<kCodeAlignment>(size);
  base::AddressRegion code_space =
      free_code_space_.AllocateInRegion(size, region);
  if (V8_UNLIKELY(code_space.is_empty())) {
    // Only allocations without a specific region are allowed to fail. Otherwise
    // the region must have been allocated big enough to hold all initial
    // allocations (jump tables etc).
    CHECK_EQ(kUnrestrictedRegion, region);
    // Grow: reserve a new code space, preferably adjacent to the last one.
    Address hint = owned_code_space_.empty() ? kNullAddress
                                             : owned_code_space_.back().end();
    size_t total_reserved = 0;
    for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
    size_t reserve_size = ReservationSize(
        size, native_module->module()->num_declared_functions, total_reserved);
    VirtualMemory new_mem =
        code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
    if (!new_mem.IsReserved()) {
      V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
      UNREACHABLE();
    }
    base::AddressRegion new_region = new_mem.region();
    code_manager_->AssignRange(new_region, native_module);
    free_code_space_.Merge(new_region);
    owned_code_space_.emplace_back(std::move(new_mem));
    // The new code space needs its own jump tables; pass the held lock so
    // {AddCodeSpace} can allocate within it without re-locking.
    native_module->AddCodeSpace(new_region, locked_lock);
    // This retry must succeed: the new reservation is at least {size} big.
    code_space = free_code_space_.Allocate(size);
    DCHECK(!code_space.is_empty());
    async_counters_->wasm_module_num_code_spaces()->AddSample(
        static_cast<int>(owned_code_space_.size()));
  }
  const Address commit_page_size = page_allocator->CommitPageSize();
  Address commit_start = RoundUp(code_space.begin(), commit_page_size);
  Address commit_end = RoundUp(code_space.end(), commit_page_size);
  // {commit_start} will be either code_space.start or the start of the next
  // page. {commit_end} will be the start of the page after the one in which
  // the allocation ends.
  // We start from an aligned start, and we know we allocated vmem in
  // page multiples.
  // We just need to commit what's not committed. The page in which we
  // start is already committed (or we start at the beginning of a page).
  // The end needs to be committed all through the end of the page.
  if (commit_start < commit_end) {
    // On Windows the commit may need to be chunked per reservation.
    for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
             {commit_start, commit_end - commit_start}, owned_code_space_)) {
      code_manager_->Commit(split_range);
    }
    committed_code_space_.fetch_add(commit_end - commit_start);
    // Committed code cannot grow bigger than maximum code space size.
    DCHECK_LE(committed_code_space_.load(), FLAG_wasm_max_code_space * MB);
  }
  DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
  allocated_code_space_.Merge(code_space);
  generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
  TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
             code_space.begin(), size);
  return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
// Switches all code memory of this allocator between writable and executable
// (W^X). Returns false if changing page permissions failed. No-op (returning
// true) if already in the requested state or write protection is disabled.
bool WasmCodeAllocator::SetExecutable(bool executable) {
  base::MutexGuard lock(&mutex_);
  if (is_executable_ == executable) return true;
  TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  if (FLAG_wasm_write_protect_code_memory) {
    PageAllocator::Permission permission =
        executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
#if V8_OS_WIN
    // On windows, we need to switch permissions per separate virtual memory
    // reservation.
    // For now, in that case, we commit at reserved memory granularity.
    // Technically, that may be a waste, because we may reserve more than we
    // use. On 32-bit though, the scarce resource is the address space -
    // committed or not.
    for (auto& vmem : owned_code_space_) {
      if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
                          permission)) {
        return false;
      }
      TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
                 executable);
    }
#else   // V8_OS_WIN
    size_t commit_page_size = page_allocator->CommitPageSize();
    for (auto& region : allocated_code_space_.regions()) {
      // allocated_code_space_ is fine-grained, so we need to
      // page-align it.
      size_t region_size = RoundUp(region.size(), commit_page_size);
      if (!SetPermissions(page_allocator, region.begin(), region_size,
                          permission)) {
        return false;
      }
      TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
                 region.begin(), region.end(), executable);
    }
#endif  // V8_OS_WIN
  }
  // NOTE(review): if a SetPermissions call above failed midway, some regions
  // may already have been switched; {is_executable_} is only updated on full
  // success.
  is_executable_ = executable;
  return true;
}
// Frees the memory of the given code objects: zaps the instructions, returns
// the regions to the free list, and decommits any fully-freed pages.
void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
  // Zap code area and collect freed code regions.
  DisjointAllocationPool freed_regions;
  size_t code_size = 0;
  CODE_SPACE_WRITE_SCOPE
  for (WasmCode* code : codes) {
    ZapCode(code->instruction_start(), code->instructions().size());
    // Flush so no stale (zapped) instructions remain in the i-cache.
    FlushInstructionCache(code->instruction_start(),
                          code->instructions().size());
    code_size += code->instructions().size();
    freed_regions.Merge(base::AddressRegion{code->instruction_start(),
                                            code->instructions().size()});
  }
  freed_code_size_.fetch_add(code_size);
  // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
  // pages to decommit into {regions_to_decommit} (decommitting is expensive,
  // so try to merge regions before decommitting).
  DisjointAllocationPool regions_to_decommit;
  PageAllocator* allocator = GetPlatformPageAllocator();
  size_t commit_page_size = allocator->CommitPageSize();
  {
    base::MutexGuard guard(&mutex_);
    for (auto region : freed_regions.regions()) {
      auto merged_region = freed_code_space_.Merge(region);
      // Decommit only pages that lie fully inside the merged free region and
      // overlap the newly freed region.
      Address discard_start =
          std::max(RoundUp(merged_region.begin(), commit_page_size),
                   RoundDown(region.begin(), commit_page_size));
      Address discard_end =
          std::min(RoundDown(merged_region.end(), commit_page_size),
                   RoundUp(region.end(), commit_page_size));
      if (discard_start >= discard_end) continue;
      regions_to_decommit.Merge({discard_start, discard_end - discard_start});
    }
  }
  for (auto region : regions_to_decommit.regions()) {
    size_t old_committed = committed_code_space_.fetch_sub(region.size());
    DCHECK_GE(old_committed, region.size());
    USE(old_committed);
    // Decommit per reservation chunk (required on Windows).
    for (base::AddressRegion split_range :
         SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
      code_manager_->Decommit(split_range);
    }
  }
}
// Number of virtual-memory reservations owned by this allocator.
size_t WasmCodeAllocator::GetNumCodeSpaces() const {
  // {owned_code_space_} is guarded by {mutex_}.
  base::MutexGuard guard(&mutex_);
  const size_t num_spaces = owned_code_space_.size();
  return num_spaces;
}
// static
// Out-of-line definition; required for ODR-uses of the in-class constant
// before C++17 inline variables.
constexpr base::AddressRegion WasmCodeAllocator::kUnrestrictedRegion;
// Constructs a NativeModule over the given initial {code_space} and installs
// itself into {shared_this} (which must arrive empty), so that the
// compilation state can hold a shared reference to this module.
NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
                           VirtualMemory code_space,
                           std::shared_ptr<const WasmModule> module,
                           std::shared_ptr<Counters> async_counters,
                           std::shared_ptr<NativeModule>* shared_this)
    : engine_(engine),
      engine_scope_(engine->GetBarrierForBackgroundCompile()->TryLock()),
      code_allocator_(engine->code_manager(), std::move(code_space),
                      async_counters),
      enabled_features_(enabled),
      module_(std::move(module)),
      import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
          new WasmImportWrapperCache())),
      use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
                                                             : kNoTrapHandler) {
  DCHECK(engine_scope_);
  // We receive a pointer to an empty {std::shared_ptr}, and install ourselves
  // there.
  DCHECK_NOT_NULL(shared_this);
  DCHECK_NULL(*shared_this);
  shared_this->reset(this);
  compilation_state_ =
      CompilationState::New(*shared_this, std::move(async_counters));
  compilation_state_->InitCompileJob(engine);
  DCHECK_NOT_NULL(module_);
  if (module_->num_declared_functions > 0) {
    // One code-table slot and one tiering counter per declared function.
    code_table_ =
        std::make_unique<WasmCode*[]>(module_->num_declared_functions);
    num_liftoff_function_calls_ =
        std::make_unique<uint32_t[]>(module_->num_declared_functions);
    // Start counter at 4 to avoid runtime calls for smaller numbers.
    constexpr int kCounterStart = 4;
    std::fill_n(num_liftoff_function_calls_.get(),
                module_->num_declared_functions, kCounterStart);
  }
  // Register the initial code space (allocates the first jump tables).
  code_allocator_.Init(this);
}
// Testing only: grows the code table to {max_functions} entries (copying the
// existing ones) and re-allocates a correspondingly sized main jump table in
// the module's single code space.
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
  WasmCodeRefScope code_ref_scope;
  DCHECK_LE(module_->num_declared_functions, max_functions);
  auto new_table = std::make_unique<WasmCode*[]>(max_functions);
  if (module_->num_declared_functions > 0) {
    base::Memcpy(new_table.get(), code_table_.get(),
                 module_->num_declared_functions * sizeof(WasmCode*));
  }
  code_table_ = std::move(new_table);
  base::AddressRegion single_code_space_region;
  {
    // Read the region under the lock, but release it before allocating the
    // jump table (allocation takes its own locks).
    base::MutexGuard guard(&allocation_mutex_);
    CHECK_EQ(1, code_space_data_.size());
    single_code_space_region = code_space_data_[0].region;
  }
  // Re-allocate jump table.
  main_jump_table_ = CreateEmptyJumpTableInRegion(
      JumpTableAssembler::SizeForNumberOfSlots(max_functions),
      single_code_space_region, WasmCodeAllocator::OptionalLock{});
  base::MutexGuard guard(&allocation_mutex_);
  code_space_data_[0].jump_table = main_jump_table_;
}
// Emits code events for every code object owned by this module (including
// import wrappers), attributing them to {script}.
void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
  DisallowGarbageCollection no_gc;
  if (!WasmCode::ShouldBeLogged(isolate)) return;
  TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
               module_->num_declared_functions);
  Object url_obj = script.name();
  DCHECK(url_obj.IsString() || url_obj.IsUndefined());
  std::unique_ptr<char[]> source_url;
  if (url_obj.IsString()) source_url = String::cast(url_obj).ToCString();
  // Log all owned code, not just the current entries in the code table. This
  // will also include import wrappers.
  base::MutexGuard lock(&allocation_mutex_);
  const int script_id = script.id();
  for (auto& entry : owned_code_) {
    entry.second->LogCode(isolate, source_url.get(), script_id);
  }
  for (auto& code : new_owned_code_) {
    code->LogCode(isolate, source_url.get(), script_id);
  }
}
// Bundles the parameters that function compilation needs from this module.
CompilationEnv NativeModule::CreateCompilationEnv() const {
  CompilationEnv env{module(), use_trap_handler_, kRuntimeExceptionSupport,
                     enabled_features_, kNoLowerSimd};
  return env;
}
// Testing only: copies an on-heap {Code} object into this module's code
// space, relocating it and translating its metadata offsets into the
// WasmCode representation, then publishes it.
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
  CODE_SPACE_WRITE_SCOPE
  const size_t relocation_size = code->relocation_size();
  OwnedVector<byte> reloc_info;
  if (relocation_size > 0) {
    reloc_info = OwnedVector<byte>::Of(
        Vector<byte>{code->relocation_start(), relocation_size});
  }
  // Copy the source position table out of the heap object, since the
  // WasmCode must not reference GC'ed memory.
  Handle<ByteArray> source_pos_table(code->source_position_table(),
                                     code->GetIsolate());
  OwnedVector<byte> source_pos =
      OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
  if (source_pos_table->length() > 0) {
    source_pos_table->copy_out(0, source_pos.start(),
                               source_pos_table->length());
  }
  CHECK(!code->is_off_heap_trampoline());
  STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
  Vector<const byte> instructions(
      reinterpret_cast<byte*>(code->raw_body_start()),
      static_cast<size_t>(code->raw_body_size()));
  const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
  // Metadata offsets in Code objects are relative to the start of the metadata
  // section, whereas WasmCode expects offsets relative to InstructionStart.
  const int base_offset = code->raw_instruction_size();
  // TODO(jgruber,v8:8758): Remove this translation. It exists only because
  // Code objects contains real offsets but WasmCode expects an offset of 0 to
  // mean 'empty'.
  const int safepoint_table_offset =
      code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
                                  : 0;
  const int handler_table_offset = base_offset + code->handler_table_offset();
  const int constant_pool_offset = base_offset + code->constant_pool_offset();
  const int code_comments_offset = base_offset + code->code_comments_offset();
  Vector<uint8_t> dst_code_bytes =
      code_allocator_.AllocateForCode(this, instructions.size());
  base::Memcpy(dst_code_bytes.begin(), instructions.begin(),
               instructions.size());
  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
                   code->raw_instruction_start();
  int mode_mask =
      RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  auto jump_tables_ref =
      FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
  Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
  Address constant_pool_start = dst_code_addr + constant_pool_offset;
  // Iterate the original code in lockstep to read the stub-call tags, since
  // they are only valid in the unrelocated copy.
  RelocIterator orig_it(*code, mode_mask);
  for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
                        constant_pool_start, mode_mask);
       !it.done(); it.next(), orig_it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      // Redirect the stub call through the near (far) jump table.
      Address entry = GetNearRuntimeStubEntry(
          static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }
  // Flush the i-cache after relocation.
  FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
  std::unique_ptr<WasmCode> new_code{
      new WasmCode{this,                    // native_module
                   kAnonymousFuncIndex,     // index
                   dst_code_bytes,          // instructions
                   stack_slots,             // stack_slots
                   0,                       // tagged_parameter_slots
                   safepoint_table_offset,  // safepoint_table_offset
                   handler_table_offset,    // handler_table_offset
                   constant_pool_offset,    // constant_pool_offset
                   code_comments_offset,    // code_comments_offset
                   instructions.length(),   // unpadded_binary_size
                   {},                      // protected_instructions
                   reloc_info.as_vector(),  // reloc_info
                   source_pos.as_vector(),  // source positions
                   WasmCode::kFunction,     // kind
                   ExecutionTier::kNone,    // tier
                   kNoDebugging}};          // for_debugging
  new_code->MaybePrint();
  new_code->Validate();
  return PublishCode(std::move(new_code));
}
// Makes calls to function {func_index} go through the lazy-compile stub:
// creates the lazy compile table on first use, then patches the function's
// jump table slot to jump into it.
void NativeModule::UseLazyStub(uint32_t func_index) {
  DCHECK_LE(module_->num_imported_functions, func_index);
  DCHECK_LT(func_index,
            module_->num_imported_functions + module_->num_declared_functions);
  if (!lazy_compile_table_) {
    // Lazily create the shared lazy-compile table (one slot per declared
    // function) in the module's single code space.
    uint32_t num_slots = module_->num_declared_functions;
    WasmCodeRefScope code_ref_scope;
    CODE_SPACE_WRITE_SCOPE
    base::AddressRegion single_code_space_region;
    {
      base::MutexGuard guard(&allocation_mutex_);
      DCHECK_EQ(1, code_space_data_.size());
      single_code_space_region = code_space_data_[0].region;
    }
    lazy_compile_table_ = CreateEmptyJumpTableInRegion(
        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
        single_code_space_region, WasmCodeAllocator::OptionalLock{});
    JumpTableAssembler::GenerateLazyCompileTable(
        lazy_compile_table_->instruction_start(), num_slots,
        module_->num_imported_functions,
        GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
                                FindJumpTablesForRegion(base::AddressRegionOf(
                                    lazy_compile_table_->instructions()))));
  }
  // Add jump table entry for jump to the lazy compile stub.
  uint32_t slot_index = declared_function_index(module(), func_index);
  DCHECK_NULL(code_table_[slot_index]);
  Address lazy_compile_target =
      lazy_compile_table_->instruction_start() +
      JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
  base::MutexGuard guard(&allocation_mutex_);
  PatchJumpTablesLocked(slot_index, lazy_compile_target);
}
std::unique_ptr<WasmCode> NativeModule::AddCode(
int index, const CodeDesc& desc, int stack_slots,
int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging) {
Vector<byte> code_space =
code_allocator_.AllocateForCode(this, desc.instr_size);
auto jump_table_ref =
FindJumpTablesForRegion(base::AddressRegionOf(code_space));
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
source_position_table, kind, tier, for_debugging,
code_space, jump_table_ref);
}
// Copies the code described by {desc} into the pre-allocated
// {dst_code_bytes}, patches wasm calls / stub calls to go through the given
// jump tables, applies the relocation delta for everything else, and wraps
// the result in a (not yet published) WasmCode object.
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
    int index, const CodeDesc& desc, int stack_slots,
    int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
    Vector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging,
    Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
  // Reloc info lives at the end of the CodeDesc buffer.
  Vector<byte> reloc_info{desc.buffer + desc.buffer_size - desc.reloc_size,
                          static_cast<size_t>(desc.reloc_size)};
  UpdateCodeSize(desc.instr_size, tier, for_debugging);
  // TODO(jgruber,v8:8758): Remove this translation. It exists only because
  // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
  // 'empty'.
  const int safepoint_table_offset =
      desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
  const int handler_table_offset = desc.handler_table_offset;
  const int constant_pool_offset = desc.constant_pool_offset;
  const int code_comments_offset = desc.code_comments_offset;
  const int instr_size = desc.instr_size;
  CODE_SPACE_WRITE_SCOPE
  base::Memcpy(dst_code_bytes.begin(), desc.buffer,
               static_cast<size_t>(desc.instr_size));
  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = dst_code_bytes.begin() - desc.buffer;
  int mode_mask = RelocInfo::kApplyMask |
                  RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
                  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
  Address constant_pool_start = code_start + constant_pool_offset;
  for (RelocIterator it(dst_code_bytes, reloc_info, constant_pool_start,
                        mode_mask);
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmCall(mode)) {
      // Direct wasm calls go through the (near) jump table.
      uint32_t call_tag = it.rinfo()->wasm_call_tag();
      Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
      it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsWasmStubCall(mode)) {
      // Runtime stub calls go through the far jump table.
      uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      Address entry = GetNearRuntimeStubEntry(
          static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables);
      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }
  // Flush the i-cache after relocation.
  FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
  // Liftoff code will not be relocated or serialized, thus do not store any
  // relocation information.
  if (tier == ExecutionTier::kLiftoff) reloc_info = {};
  std::unique_ptr<WasmCode> code{new WasmCode{
      this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, instr_size, protected_instructions_data, reloc_info,
      source_position_table, kind, tier, for_debugging}};
  code->MaybePrint();
  code->Validate();
  return code;
}
// Publishes a single code object; takes {allocation_mutex_} and delegates to
// the locked variant.
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.PublishCode");
  base::MutexGuard guard(&allocation_mutex_);
  WasmCode* published = PublishCodeLocked(std::move(code));
  return published;
}
// Publishes a batch of code objects under a single lock acquisition; returns
// the published raw pointers in the same order as {codes}.
std::vector<WasmCode*> NativeModule::PublishCode(
    Vector<std::unique_ptr<WasmCode>> codes) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.PublishCode", "number", codes.size());
  std::vector<WasmCode*> published_code;
  published_code.reserve(codes.size());
  base::MutexGuard lock(&allocation_mutex_);
  // The published code is put into the top-most surrounding {WasmCodeRefScope}.
  for (auto& code : codes) {
    published_code.push_back(PublishCodeLocked(std::move(code)));
  }
  return published_code;
}
// Translates a compilation-result kind into the corresponding WasmCode kind.
// Only wrappers and regular functions are expected here.
WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
  if (result.kind == WasmCompilationResult::kWasmToJsWrapper) {
    return WasmCode::Kind::kWasmToJsWrapper;
  }
  if (result.kind == WasmCompilationResult::kFunction) {
    return WasmCode::Kind::kFunction;
  }
  UNREACHABLE();
}
// Publishes {owned_code}: transfers ownership to the module, installs the code
// in the code table and jump tables if it improves on the existing entry
// (according to tiering / debugging state), and returns the raw pointer, kept
// alive by the current {WasmCodeRefScope}.
WasmCode* NativeModule::PublishCodeLocked(
    std::unique_ptr<WasmCode> owned_code) {
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());
  WasmCode* code = owned_code.get();
  new_owned_code_.emplace_back(std::move(owned_code));
  // Add the code to the surrounding code ref scope, so the returned pointer is
  // guaranteed to be valid.
  WasmCodeRefScope::AddRef(code);
  // Anonymous code (e.g. jump tables) and imports never go into the code
  // table.
  if (code->IsAnonymous() || code->index() < module_->num_imported_functions) {
    return code;
  }
  DCHECK_LT(code->index(), num_functions());
  code->RegisterTrapHandlerData();
  // Put the code in the debugging cache, if needed.
  if (V8_UNLIKELY(cached_code_)) InsertToCodeCache(code);
  // Assume an order of execution tiers that represents the quality of their
  // generated code.
  static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
                    ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
                "Assume an order on execution tiers");
  uint32_t slot_idx = declared_function_index(module(), code->index());
  WasmCode* prior_code = code_table_[slot_idx];
  // If we are tiered down, install all debugging code (except for stepping
  // code, which is only used for a single frame and never installed in the
  // code table of jump table). Otherwise, install code if it was compiled
  // with a higher tier.
  static_assert(
      kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
      "for_debugging is ordered");
  const bool update_code_table =
      // Never install stepping code.
      code->for_debugging() != kForStepping &&
      (!prior_code ||
       (tiering_state_ == kTieredDown
            // Tiered down: Install breakpoints over normal debug code.
            ? prior_code->for_debugging() <= code->for_debugging()
            // Tiered up: Install if the tier is higher than before.
            : prior_code->tier() < code->tier()));
  if (update_code_table) {
    code_table_[slot_idx] = code;
    if (prior_code) {
      WasmCodeRefScope::AddRef(prior_code);
      // The code is added to the current {WasmCodeRefScope}, hence the ref
      // count cannot drop to zero here.
      prior_code->DecRefOnLiveCode();
    }
    // Redirect the jump-table slot so future calls enter the new code.
    PatchJumpTablesLocked(slot_idx, code->instruction_start());
  } else {
    // The code tables does not hold a reference to the code, hence decrement
    // the initial ref count of 1. The code was added to the
    // {WasmCodeRefScope} though, so it cannot die here.
    code->DecRefOnLiveCode();
  }
  // NOTE(review): TurboFan code published while tiered down bumps the
  // liftoff-bailout counter — presumably tracking functions that could not use
  // Liftoff; confirm the intended metric.
  if (!code->for_debugging() && tiering_state_ == kTieredDown &&
      code->tier() == ExecutionTier::kTurbofan) {
    liftoff_bailout_count_.fetch_add(1);
  }
  return code;
}
// Re-installs previously compiled debug code (with breakpoints) into the code
// table and jump tables, replacing whatever is currently installed for that
// function. Only valid while the module is tiered down.
void NativeModule::ReinstallDebugCode(WasmCode* code) {
  base::MutexGuard lock(&allocation_mutex_);
  DCHECK_EQ(this, code->native_module());
  DCHECK_EQ(kWithBreakpoints, code->for_debugging());
  DCHECK(!code->IsAnonymous());
  DCHECK_LE(module_->num_imported_functions, code->index());
  DCHECK_LT(code->index(), num_functions());
  DCHECK_EQ(kTieredDown, tiering_state_);
  uint32_t slot_idx = declared_function_index(module(), code->index());
  if (WasmCode* prior_code = code_table_[slot_idx]) {
    WasmCodeRefScope::AddRef(prior_code);
    // The code is added to the current {WasmCodeRefScope}, hence the ref
    // count cannot drop to zero here.
    prior_code->DecRefOnLiveCode();
  }
  code_table_[slot_idx] = code;
  // The code table now holds an additional reference to {code}.
  code->IncRef();
  PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
// Reserves {total_code_size} bytes of code space for deserialized code.
Vector<uint8_t> NativeModule::AllocateForDeserializedCode(
    size_t total_code_size) {
  auto code_space = code_allocator_.AllocateForCode(this, total_code_size);
  return code_space;
}
// Wraps already-copied, already-relocated deserialized instructions in a new
// (unpublished) WasmCode object. Deserialized code is never debug code.
std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
    int index, Vector<byte> instructions, int stack_slots,
    int tagged_parameter_slots, int safepoint_table_offset,
    int handler_table_offset, int constant_pool_offset,
    int code_comments_offset, int unpadded_binary_size,
    Vector<const byte> protected_instructions_data,
    Vector<const byte> reloc_info, Vector<const byte> source_position_table,
    WasmCode::Kind kind, ExecutionTier tier) {
  UpdateCodeSize(instructions.size(), tier, kNoDebugging);
  return std::unique_ptr<WasmCode>{new WasmCode{
      this, index, instructions, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, unpadded_binary_size, protected_instructions_data,
      reloc_info, source_position_table, kind, tier, kNoDebugging}};
}
// Returns a copy of the current code table (one entry per declared function;
// entries may be null). Non-null entries are added to the current
// {WasmCodeRefScope} so the returned pointers stay valid.
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
  base::MutexGuard lock(&allocation_mutex_);
  WasmCode** start = code_table_.get();
  WasmCode** end = start + module_->num_declared_functions;
  for (WasmCode* code : VectorOf(start, end - start)) {
    if (code) WasmCodeRefScope::AddRef(code);
  }
  return std::vector<WasmCode*>{start, end};
}
// Returns the code currently installed for function {index} (or null), with a
// reference added to the current {WasmCodeRefScope}.
WasmCode* NativeModule::GetCode(uint32_t index) const {
  base::MutexGuard guard(&allocation_mutex_);
  WasmCode* code = code_table_[declared_function_index(module(), index)];
  if (code) WasmCodeRefScope::AddRef(code);
  return code;
}
// Returns whether any code is installed for the function at {index}.
bool NativeModule::HasCode(uint32_t index) const {
  base::MutexGuard guard(&allocation_mutex_);
  const auto slot = declared_function_index(module(), index);
  return code_table_[slot] != nullptr;
}
// Returns whether the code installed for function {index} was compiled at
// exactly {tier}. Thread-safe: takes {allocation_mutex_}.
bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
  base::MutexGuard guard(&allocation_mutex_);
  // Look up the code-table slot once instead of repeating the
  // declared-function-index computation and table load for each condition.
  WasmCode* code = code_table_[declared_function_index(module(), index)];
  return code != nullptr && code->tier() == tier;
}
// Takes ownership of {source_map}; any previously set map is released when
// the old pointer goes out of scope.
void NativeModule::SetWasmSourceMap(
    std::unique_ptr<WasmModuleSourceMap> source_map) {
  source_map_.swap(source_map);
}
// Returns the module's source map (may be null); ownership stays with the
// module.
WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
  WasmModuleSourceMap* map = source_map_.get();
  return map;
}
// Allocates {jump_table_size} bytes inside {region}, zaps the bytes, and
// publishes them as an anonymous {kJumpTable} WasmCode object. All metadata
// offsets are set to the end of the instructions, i.e. the respective tables
// are empty.
WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
    int jump_table_size, base::AddressRegion region,
    const WasmCodeAllocator::OptionalLock& allocator_lock) {
  // Only call this if we really need a jump table.
  DCHECK_LT(0, jump_table_size);
  Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
      this, jump_table_size, region, allocator_lock);
  DCHECK(!code_space.empty());
  UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
  CODE_SPACE_WRITE_SCOPE
  ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
  std::unique_ptr<WasmCode> code{
      new WasmCode{this,                  // native_module
                   kAnonymousFuncIndex,   // index
                   code_space,            // instructions
                   0,                     // stack_slots
                   0,                     // tagged_parameter_slots
                   0,                     // safepoint_table_offset
                   jump_table_size,       // handler_table_offset
                   jump_table_size,       // constant_pool_offset
                   jump_table_size,       // code_comments_offset
                   jump_table_size,       // unpadded_binary_size
                   {},                    // protected_instructions
                   {},                    // reloc_info
                   {},                    // source_pos
                   WasmCode::kJumpTable,  // kind
                   ExecutionTier::kNone,  // tier
                   kNoDebugging}};        // for_debugging
  return PublishCode(std::move(code));
}
// Adds {size} to the per-tier generated-code-size counters. Debug code is not
// counted at all.
void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
                                  ForDebugging for_debugging) {
  if (for_debugging != kNoDebugging) return;
  // Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
  // this is shared code.
  if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
  if (tier != ExecutionTier::kLiftoff) turbofan_code_size_.fetch_add(size);
}
// Patches the slot for {slot_index} in every code space's jump table to point
// at {target}.
void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());
  CODE_SPACE_WRITE_SCOPE
  for (auto& code_space_data : code_space_data_) {
    DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
    if (!code_space_data.jump_table) continue;
    PatchJumpTableLocked(code_space_data, slot_index, target);
  }
}
// Patches a single code space's jump table slot for {slot_index} to jump to
// {target}, passing along the matching far-jump-table slot (if one exists for
// this function index) so far-away targets can be reached.
void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
                                        uint32_t slot_index, Address target) {
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());
  DCHECK_NOT_NULL(code_space_data.jump_table);
  DCHECK_NOT_NULL(code_space_data.far_jump_table);
  DCHECK_LT(slot_index, module_->num_declared_functions);
  Address jump_table_slot =
      code_space_data.jump_table->instruction_start() +
      JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
  // Function slots in the far jump table start after the runtime-stub slots.
  uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
      WasmCode::kRuntimeStubCount + slot_index);
  // Only pass the far jump table start if the far jump table actually has a
  // slot for this function index (i.e. does not only contain runtime stubs).
  bool has_far_jump_slot =
      far_jump_table_offset <
      code_space_data.far_jump_table->instructions().size();
  Address far_jump_table_start =
      code_space_data.far_jump_table->instruction_start();
  Address far_jump_table_slot =
      has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
                        : kNullAddress;
  JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
                                         target);
}
// Registers a newly reserved code {region} with this module: reserves Win64
// unwind-info padding if required, creates the (far) jump tables for the
// region if no existing ones are reachable, and patches the new jump table
// with all already-published code (or lazy-compile stubs).
void NativeModule::AddCodeSpace(
    base::AddressRegion region,
    const WasmCodeAllocator::OptionalLock& allocator_lock) {
  // Each code space must be at least twice as large as the overhead per code
  // space. Otherwise, we are wasting too much memory.
  DCHECK_GE(region.size(),
            2 * OverheadPerCodeSpace(module()->num_declared_functions));
#if defined(V8_OS_WIN64)
  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
  // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (engine_->code_manager()
          ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
    size_t size = Heap::GetCodeRangeReservedAreaSize();
    DCHECK_LT(0, size);
    Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
        this, size, region, allocator_lock);
    CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
    win64_unwindinfo::RegisterNonABICompliantCodeRange(
        reinterpret_cast<void*>(region.begin()), region.size());
  }
#endif // V8_OS_WIN64
  WasmCodeRefScope code_ref_scope;
  CODE_SPACE_WRITE_SCOPE
  WasmCode* jump_table = nullptr;
  WasmCode* far_jump_table = nullptr;
  const uint32_t num_wasm_functions = module_->num_declared_functions;
  const bool is_first_code_space = code_space_data_.empty();
  // We always need a far jump table, because it contains the runtime stubs.
  const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
  const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
  if (needs_jump_table) {
    jump_table = CreateEmptyJumpTableInRegion(
        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
        allocator_lock);
    CHECK(region.contains(jump_table->instruction_start()));
  }
  if (needs_far_jump_table) {
    int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
    // Bug fix: pass {num_function_slots} directly. The previous code applied
    // {NumWasmFunctionsInFarJumpTable} a second time here, so the allocated
    // table size could disagree with the {num_function_slots} passed to
    // {GenerateFarJumpTable} below.
    far_jump_table = CreateEmptyJumpTableInRegion(
        JumpTableAssembler::SizeForNumberOfFarJumpSlots(
            WasmCode::kRuntimeStubCount, num_function_slots),
        region, allocator_lock);
    CHECK(region.contains(far_jump_table->instruction_start()));
    EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
    Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
        WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
    STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
    Address builtin_addresses[WasmCode::kRuntimeStubCount];
    for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
      Builtins::Name builtin = stub_names[i];
      builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
    }
    JumpTableAssembler::GenerateFarJumpTable(
        far_jump_table->instruction_start(), builtin_addresses,
        WasmCode::kRuntimeStubCount, num_function_slots);
  }
  if (is_first_code_space) {
    // This can be updated and accessed without locks, since the addition of the
    // first code space happens during initialization of the {NativeModule},
    // where no concurrent accesses are possible.
    main_jump_table_ = jump_table;
    main_far_jump_table_ = far_jump_table;
  }
  base::MutexGuard guard(&allocation_mutex_);
  code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
  if (jump_table && !is_first_code_space) {
    // Patch the new jump table(s) with existing functions. If this is the first
    // code space, there cannot be any functions that have been compiled yet.
    const CodeSpaceData& new_code_space_data = code_space_data_.back();
    for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
         ++slot_index) {
      if (code_table_[slot_index]) {
        PatchJumpTableLocked(new_code_space_data, slot_index,
                             code_table_[slot_index]->instruction_start());
      } else if (lazy_compile_table_) {
        // No code yet: route the slot through the lazy-compile table.
        Address lazy_compile_target =
            lazy_compile_table_->instruction_start() +
            JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
        PatchJumpTableLocked(new_code_space_data, slot_index,
                             lazy_compile_target);
      }
    }
  }
}
namespace {
// Adapter that exposes a NativeModule's wire bytes through the
// {WireBytesStorage} interface used by the compilation pipeline.
class NativeModuleWireBytesStorage final : public WireBytesStorage {
 public:
  explicit NativeModuleWireBytesStorage(
      std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
      : wire_bytes_(std::move(wire_bytes)) {}
  // Returns the sub-range of the wire bytes referenced by {ref}.
  Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
    return std::atomic_load(&wire_bytes_)
        ->as_vector()
        .SubVector(ref.offset(), ref.end_offset());
  }
 private:
  const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
};
} // namespace
// Atomically installs the module's wire bytes and, if non-empty, hands a
// shared view of them to the compilation state.
void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
  auto shared_wire_bytes =
      std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
  std::atomic_store(&wire_bytes_, shared_wire_bytes);
  if (!shared_wire_bytes->empty()) {
    compilation_state_->SetWireBytesStorage(
        std::make_shared<NativeModuleWireBytesStorage>(
            std::move(shared_wire_bytes)));
  }
}
// Moves all entries from {new_owned_code_} into the {owned_code_} map (keyed
// by instruction start), using an insertion hint for amortized constant-time
// insertion of adjacent elements.
void NativeModule::TransferNewOwnedCodeLocked() const {
  // The caller holds the allocation mutex.
  DCHECK(!allocation_mutex_.TryLock());
  DCHECK(!new_owned_code_.empty());
  // Sort the {new_owned_code_} vector reversed, such that the position of the
  // previously inserted element can be used as a hint for the next element. If
  // elements in {new_owned_code_} are adjacent, this will guarantee
  // constant-time insertion into the map.
  std::sort(new_owned_code_.begin(), new_owned_code_.end(),
            [](const std::unique_ptr<WasmCode>& a,
               const std::unique_ptr<WasmCode>& b) {
              return a->instruction_start() > b->instruction_start();
            });
  auto insertion_hint = owned_code_.end();
  for (auto& code : new_owned_code_) {
    DCHECK_EQ(0, owned_code_.count(code->instruction_start()));
    // Check plausibility of the insertion hint.
    DCHECK(insertion_hint == owned_code_.end() ||
           insertion_hint->first > code->instruction_start());
    insertion_hint = owned_code_.emplace_hint(
        insertion_hint, code->instruction_start(), std::move(code));
  }
  new_owned_code_.clear();
}
// Adds {code} to the debugging code cache (keyed by tier and function index),
// taking an extra reference on successful insertion.
void NativeModule::InsertToCodeCache(WasmCode* code) {
  // The caller holds {allocation_mutex_}.
  DCHECK(!allocation_mutex_.TryLock());
  DCHECK_NOT_NULL(cached_code_);
  if (code->IsAnonymous()) return;
  // Only cache Liftoff debugging code or TurboFan code (no breakpoints or
  // stepping).
  if (code->tier() == ExecutionTier::kLiftoff &&
      code->for_debugging() != kForDebugging) {
    return;
  }
  auto key = std::make_pair(code->tier(), code->index());
  if (cached_code_->insert(std::make_pair(key, code)).second) {
    // The cache holds its own reference.
    code->IncRef();
  }
}
// Finds the WasmCode object whose instructions contain {pc}, or returns null.
// On success, adds a reference to the current {WasmCodeRefScope}.
WasmCode* NativeModule::Lookup(Address pc) const {
  base::MutexGuard lock(&allocation_mutex_);
  if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
  // {owned_code_} is keyed by instruction start; find the last entry whose
  // start is at or below {pc}.
  auto iter = owned_code_.upper_bound(pc);
  if (iter == owned_code_.begin()) return nullptr;
  --iter;
  WasmCode* candidate = iter->second.get();
  DCHECK_EQ(candidate->instruction_start(), iter->first);
  if (!candidate->contains(pc)) return nullptr;
  WasmCodeRefScope::AddRef(candidate);
  return candidate;
}
// Computes the byte offset of {func_index}'s slot within the jump table.
uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
  const uint32_t declared_idx = declared_function_index(module(), func_index);
  return JumpTableAssembler::JumpSlotIndexToOffset(declared_idx);
}
// Returns the address of {func_index}'s slot in the main jump table, which is
// the canonical call target for the function.
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
  // Return the jump table slot for that function index.
  DCHECK_NOT_NULL(main_jump_table_);
  uint32_t slot_offset = GetJumpTableOffset(func_index);
  DCHECK_LT(slot_offset, main_jump_table_->instructions().size());
  return main_jump_table_->instruction_start() + slot_offset;
}
// Finds a jump table and far jump table that are both reachable (within
// near-call distance) from anywhere in {code_region}. Returns an invalid
// {JumpTablesRef} if none qualify.
NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
    base::AddressRegion code_region) const {
  auto jump_table_usable = [code_region](const WasmCode* jump_table) {
    Address table_start = jump_table->instruction_start();
    Address table_end = table_start + jump_table->instructions().size();
    // Compute the maximum distance from anywhere in the code region to anywhere
    // in the jump table, avoiding any underflow.
    size_t max_distance = std::max(
        code_region.end() > table_start ? code_region.end() - table_start : 0,
        table_end > code_region.begin() ? table_end - code_region.begin() : 0);
    // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
    // every call or jump will target an address *within* the region, but never
    // exactly the end of the region. So all occurring offsets are actually
    // smaller than max_distance.
    return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
  };
  // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
  // Access to these fields is possible without locking, since these fields are
  // initialized on construction of the {NativeModule}.
  if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
      (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
    return {
        main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
        main_far_jump_table_->instruction_start()};
  }
  // Otherwise, take the mutex and look for another suitable jump table.
  base::MutexGuard guard(&allocation_mutex_);
  for (auto& code_space_data : code_space_data_) {
    DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
    if (!code_space_data.far_jump_table) continue;
    // Only return these jump tables if they are reachable from the whole
    // {code_region}.
    if (kNeedsFarJumpsBetweenCodeSpaces &&
        (!jump_table_usable(code_space_data.far_jump_table) ||
         (code_space_data.jump_table &&
          !jump_table_usable(code_space_data.jump_table)))) {
      continue;
    }
    return {code_space_data.jump_table
                ? code_space_data.jump_table->instruction_start()
                : kNullAddress,
            code_space_data.far_jump_table->instruction_start()};
  }
  // No reachable tables found.
  return {};
}
// Returns the near-callable jump-table slot address for {func_index} within
// the given (valid) jump tables.
Address NativeModule::GetNearCallTargetForFunction(
    uint32_t func_index, const JumpTablesRef& jump_tables) const {
  DCHECK(jump_tables.is_valid());
  return jump_tables.jump_table_start + GetJumpTableOffset(func_index);
}
// Returns the near-callable far-jump-table slot address for runtime stub
// {index} within the given (valid) jump tables.
Address NativeModule::GetNearRuntimeStubEntry(
    WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
  DCHECK(jump_tables.is_valid());
  const auto slot_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
  return jump_tables.far_jump_table_start + slot_offset;
}
// Maps a jump-table slot address back to the (module-level) function index it
// dispatches for. {slot_address} must point into a jump table of this module.
uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
    Address slot_address) const {
  WasmCodeRefScope code_refs;
  WasmCode* code = Lookup(slot_address);
  DCHECK_NOT_NULL(code);
  DCHECK_EQ(WasmCode::kJumpTable, code->kind());
  uint32_t slot_offset =
      static_cast<uint32_t>(slot_address - code->instruction_start());
  uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
  DCHECK_LT(slot_idx, module_->num_declared_functions);
  DCHECK_EQ(slot_address,
            code->instruction_start() +
                JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
  // Slot indices are relative to declared functions; add back the imports.
  return module_->num_imported_functions + slot_idx;
}
// Maps {target} back to a runtime stub id if it is the address of a
// runtime-stub slot in any far jump table; returns kRuntimeStubCount for any
// other address.
WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
  base::MutexGuard guard(&allocation_mutex_);
  for (auto& code_space_data : code_space_data_) {
    if (code_space_data.far_jump_table != nullptr &&
        code_space_data.far_jump_table->contains(target)) {
      uint32_t offset = static_cast<uint32_t>(
          target - code_space_data.far_jump_table->instruction_start());
      uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
      // Function slots (after the runtime stubs) do not map to a stub id.
      if (index >= WasmCode::kRuntimeStubCount) continue;
      // Reject addresses that are not exactly at a slot start.
      if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
        continue;
      }
      return static_cast<WasmCode::RuntimeStubId>(index);
    }
  }
  // Invalid address.
  return WasmCode::kRuntimeStubCount;
}
// Tears down the module. Order matters: cancel background compilation first,
// unregister from the engine, then free caches that still reference owned
// code.
NativeModule::~NativeModule() {
  TRACE_HEAP("Deleting native module: %p\n", this);
  // Cancel all background compilation before resetting any field of the
  // NativeModule or freeing anything.
  compilation_state_->CancelCompilation();
  engine_->FreeNativeModule(this);
  // Free the import wrapper cache before releasing the {WasmCode} objects in
  // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
  // decrease reference counts on the {WasmCode} objects.
  import_wrapper_cache_.reset();
}
// Initializes the code manager with a total committed-code-space budget; the
// critical threshold (at which memory pressure is signaled) starts at half of
// the budget.
WasmCodeManager::WasmCodeManager(size_t max_committed)
    : max_committed_code_space_(max_committed),
      critical_committed_code_space_(max_committed / 2) {
  DCHECK_LE(max_committed, FLAG_wasm_max_code_space * MB);
}
#if defined(V8_OS_WIN64)
// Returns whether unwind info can (platform support) and should (flag) be
// registered for wasm code ranges that are not laid out per the Win64 ABI.
bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
  return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
         FLAG_win64_unwinding_info;
}
#endif // V8_OS_WIN64
// Commits {region} as writable (or writable+executable, depending on the
// write-protect flag) code memory, charging it against the global committed
// code-space budget. Fatally OOMs if the budget would be exceeded or the
// permission change fails.
void WasmCodeManager::Commit(base::AddressRegion region) {
  // TODO(v8:8462): Remove eager commit once perf supports remapping.
  if (V8_UNLIKELY(FLAG_perf_prof)) return;
  DCHECK(IsAligned(region.begin(), CommitPageSize()));
  DCHECK(IsAligned(region.size(), CommitPageSize()));
  // Reserve the size. Use CAS loop to avoid overflow on
  // {total_committed_code_space_}.
  size_t old_value = total_committed_code_space_.load();
  while (true) {
    DCHECK_GE(max_committed_code_space_, old_value);
    if (region.size() > max_committed_code_space_ - old_value) {
      V8::FatalProcessOutOfMemory(
          nullptr,
          "WasmCodeManager::Commit: Exceeding maximum wasm code space");
      UNREACHABLE();
    }
    // On CAS failure {old_value} is reloaded and the loop retries.
    if (total_committed_code_space_.compare_exchange_weak(
            old_value, old_value + region.size())) {
      break;
    }
  }
  PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
                                             ? PageAllocator::kReadWrite
                                             : PageAllocator::kReadWriteExecute;
  TRACE_HEAP("Setting rw permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
             region.begin(), region.end());
  if (!SetPermissions(GetPlatformPageAllocator(), region.begin(), region.size(),
                      permission)) {
    // Highly unlikely.
    V8::FatalProcessOutOfMemory(
        nullptr,
        "WasmCodeManager::Commit: Cannot make pre-reserved region writable");
    UNREACHABLE();
  }
}
// Returns {region} to the no-access state and credits its size back to the
// global committed code-space budget.
void WasmCodeManager::Decommit(base::AddressRegion region) {
  // TODO(v8:8462): Remove this once perf supports remapping.
  if (V8_UNLIKELY(FLAG_perf_prof)) return;
  PageAllocator* allocator = GetPlatformPageAllocator();
  DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
  DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
  size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
  DCHECK_LE(region.size(), old_committed);
  USE(old_committed);
  TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
             region.begin(), region.end());
  CHECK(allocator->SetPermissions(reinterpret_cast<void*>(region.begin()),
                                  region.size(), PageAllocator::kNoAccess));
}
// Records that {region} belongs to {native_module} in the global lookup map.
void WasmCodeManager::AssignRange(base::AddressRegion region,
                                  NativeModule* native_module) {
  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.emplace(region.begin(),
                      std::make_pair(region.end(), native_module));
}
// Tries to reserve {size} bytes (rounded up to the allocation page size) of
// jittable virtual memory near {hint}. Returns an unreserved VirtualMemory on
// failure. Also accounts the reservation against the backing-store address
// space budget.
VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  DCHECK_GT(size, 0);
  size_t allocate_page_size = page_allocator->AllocatePageSize();
  size = RoundUp(size, allocate_page_size);
  if (!BackingStore::ReserveAddressSpace(size)) return {};
  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
  // When we start exposing Wasm in jitless mode, then the jitless flag
  // will have to determine whether we set kMapAsJittable or not.
  DCHECK(!FLAG_jitless);
  VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
                    VirtualMemory::kMapAsJittable);
  if (!mem.IsReserved()) {
    // Release the address-space accounting taken above.
    BackingStore::ReleaseReservation(size);
    return {};
  }
  TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
             mem.end(), mem.size());
  // TODO(v8:8462): Remove eager commit once perf supports remapping.
  if (FLAG_perf_prof) {
    SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
                   PageAllocator::kReadWriteExecute);
  }
  return mem;
}
namespace {
// The numbers here are rough estimates, used to calculate the size of the
// initial code reservation and for estimating the amount of external memory
// reported to the GC.
// They do not need to be accurate. Choosing them too small will result in
// separate code spaces being allocated (compile time and runtime overhead),
// choosing them too large results in over-reservation (virtual address space
// only).
// The current numbers have been determined on 2019-11-11 by clemensb@, based
// on one small and one large module compiled from C++ by Emscripten. If in
// doubt, they were chosen slightly larger than required, as over-reservation
// is not a big issue currently.
// Numbers will change when Liftoff or TurboFan evolve, other toolchains are
// used to produce the wasm code, or characteristics of wasm modules on the
// web change. They might require occasional tuning.
// This patch might help to find reasonable numbers for any future adaptation:
// https://crrev.com/c/1910945
#if V8_TARGET_ARCH_X64
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 60;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 350;
#elif V8_TARGET_ARCH_IA32
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 60;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
constexpr size_t kImportSize = 480;
#elif V8_TARGET_ARCH_ARM
constexpr size_t kTurbofanFunctionOverhead = 40;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 108;
constexpr size_t kLiftoffCodeSizeMultiplier = 7;
constexpr size_t kImportSize = 750;
#elif V8_TARGET_ARCH_ARM64
constexpr size_t kTurbofanFunctionOverhead = 60;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 80;
constexpr size_t kLiftoffCodeSizeMultiplier = 7;
constexpr size_t kImportSize = 750;
#else
// Other platforms should add their own estimates if needed. Numbers below are
// the minimum of other architectures.
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 60;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 350;
#endif
} // namespace
// static
// Rough upper-bound estimate of the Liftoff code size generated for a wasm
// function body of {body_size} bytes.
size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {
  const size_t alignment_slack = kCodeAlignment / 2;
  const size_t scaled_body_size = body_size * kLiftoffCodeSizeMultiplier;
  return kLiftoffFunctionOverhead + alignment_slack + scaled_body_size;
}
// static
// Estimates the total generated-code size for {module} by measuring the code
// section length and delegating to the integer-based overload.
size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module,
                                                     bool include_liftoff) {
  int num_functions = static_cast<int>(module->num_declared_functions);
  int num_imported_functions = static_cast<int>(module->num_imported_functions);
  int code_section_length = 0;
  if (num_functions > 0) {
    DCHECK_EQ(module->functions.size(), num_imported_functions + num_functions);
    // The code section spans from the first declared (non-imported) function
    // to the end of the last function.
    auto* first_fn = &module->functions[module->num_imported_functions];
    auto* last_fn = &module->functions.back();
    code_section_length =
        static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
  }
  return EstimateNativeModuleCodeSize(num_functions, num_imported_functions,
                                      code_section_length, include_liftoff);
}
// static
// Estimates the total generated-code size from function counts and code
// section length, using the per-architecture tuning constants above. Includes
// jump tables and (optionally) Liftoff code.
size_t WasmCodeManager::EstimateNativeModuleCodeSize(int num_functions,
                                                     int num_imported_functions,
                                                     int code_section_length,
                                                     bool include_liftoff) {
  const size_t overhead_per_function =
      kTurbofanFunctionOverhead + kCodeAlignment / 2 +
      (include_liftoff ? kLiftoffFunctionOverhead + kCodeAlignment / 2 : 0);
  const size_t overhead_per_code_byte =
      kTurbofanCodeSizeMultiplier +
      (include_liftoff ? kLiftoffCodeSizeMultiplier : 0);
  const size_t jump_table_size = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_functions));
  const size_t far_jump_table_size =
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          WasmCode::kRuntimeStubCount,
          NumWasmFunctionsInFarJumpTable(num_functions)));
  return jump_table_size                                 // jump table
         + far_jump_table_size                           // far jump table
         + overhead_per_function * num_functions         // per function
         + overhead_per_code_byte * code_section_length  // per code byte
         + kImportSize * num_imported_functions;         // per import
}
// static
// Estimates the non-code (metadata) memory footprint of a NativeModule for
// {module}: the stored module plus the NativeModule object, code table, and
// WasmCode objects.
size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
    const WasmModule* module) {
  size_t wasm_module_estimate = EstimateStoredSize(module);
  uint32_t num_wasm_functions = module->num_declared_functions;
  // TODO(wasm): Include wire bytes size.
  size_t native_module_estimate =
      sizeof(NativeModule) +                     /* NativeModule struct */
      (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
      (sizeof(WasmCode) * num_wasm_functions);   /* code object size */
  return wasm_module_estimate + native_module_estimate;
}
// Creates a new NativeModule for {module}: signals memory pressure if the
// committed-code budget is critical, reserves the initial code space (with GC
// retries on failure), constructs the module, and registers its address range
// in the lookup map.
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
    size_t code_size_estimate, std::shared_ptr<const WasmModule> module) {
  DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
  if (total_committed_code_space_.load() >
      critical_committed_code_space_.load()) {
    // Trigger a critical memory pressure notification and raise the critical
    // threshold to halfway between current usage and the maximum.
    (reinterpret_cast<v8::Isolate*>(isolate))
        ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
    size_t committed = total_committed_code_space_.load();
    DCHECK_GE(max_committed_code_space_, committed);
    critical_committed_code_space_.store(
        committed + (max_committed_code_space_ - committed) / 2);
  }
  // If we cannot add code space later, reserve enough address space up front.
  size_t code_vmem_size =
      ReservationSize(code_size_estimate, module->num_declared_functions, 0);
  // The '--wasm-max-code-space-reservation' testing flag can be used to reduce
  // the maximum size of the initial code space reservation (in MB).
  if (FLAG_wasm_max_initial_code_space_reservation > 0) {
    size_t flag_max_bytes =
        static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
    if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
  }
  // Try up to two times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC maybe incremental and may have
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  VirtualMemory code_space;
  for (int retries = 0;; ++retries) {
    code_space = TryAllocate(code_vmem_size);
    if (code_space.IsReserved()) break;
    if (retries == kAllocationRetries) {
      V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
      UNREACHABLE();
    }
    // Run one GC, then try the allocation again.
    isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                                true);
  }
  // Capture the range before {code_space} is moved into the NativeModule.
  Address start = code_space.address();
  size_t size = code_space.size();
  Address end = code_space.end();
  std::shared_ptr<NativeModule> ret;
  new NativeModule(engine, enabled, std::move(code_space), std::move(module),
                   isolate->async_counters(), &ret);
  // The constructor initialized the shared_ptr.
  DCHECK_NOT_NULL(ret);
  TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
             size);
  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
  return ret;
}
void NativeModule::SampleCodeSize(
    Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
  // Periodic sampling reports committed space; milestone sampling (after
  // baseline / after top tier) reports the generated code size.
  size_t sampled_bytes = sampling_time == kSampling
                             ? code_allocator_.committed_code_space()
                             : code_allocator_.generated_code_size();
  int sampled_mb = static_cast<int>(sampled_bytes / MB);
  Histogram* histogram = nullptr;
  switch (sampling_time) {
    case kAfterBaseline:
      histogram = counters->wasm_module_code_size_mb_after_baseline();
      break;
    case kAfterTopTier:
      histogram = counters->wasm_module_code_size_mb_after_top_tier();
      break;
    case kSampling: {
      histogram = counters->wasm_module_code_size_mb();
      // If this is a wasm module of >= 2MB, also sample the freed code size,
      // absolute and relative. Code GC does not happen on asm.js modules, and
      // small modules will never trigger GC anyway.
      size_t generated_bytes = code_allocator_.generated_code_size();
      if (generated_bytes >= 2 * MB && module()->origin == kWasmOrigin) {
        size_t freed_bytes = code_allocator_.freed_code_size();
        DCHECK_LE(freed_bytes, generated_bytes);
        int freed_percent =
            static_cast<int>(100 * freed_bytes / generated_bytes);
        counters->wasm_module_freed_code_size_percent()->AddSample(
            freed_percent);
      }
      break;
    }
  }
  histogram->AddSample(sampled_mb);
}
std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
    WasmCompilationResult result) {
  // Delegate to the batch overload with a single-element vector and return
  // its only entry.
  std::vector<std::unique_ptr<WasmCode>> batch = AddCompiledCode({&result, 1});
  return std::move(batch[0]);
}
std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
    Vector<WasmCompilationResult> results) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.AddCompiledCode", "num", results.size());
  DCHECK(!results.empty());
  // First, allocate code space for all the results.
  size_t total_code_space = 0;
  for (auto& result : results) {
    DCHECK(result.succeeded());
    total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
  }
  Vector<byte> code_space =
      code_allocator_.AllocateForCode(this, total_code_space);
  // Lookup the jump tables to use once, then use for all code objects.
  auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
  // If we happen to have a {total_code_space} which is bigger than
  // {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
  // region. If this ever happens, we need to handle this case (by splitting the
  // {results} vector in smaller chunks).
  CHECK(jump_tables.is_valid());
  std::vector<std::unique_ptr<WasmCode>> generated_code;
  generated_code.reserve(results.size());

  // Now copy the generated code into the code space and relocate it.
  CODE_SPACE_WRITE_SCOPE
  for (auto& result : results) {
    DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
    size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
    // Carve this result's aligned slice off the front of the region.
    Vector<byte> this_code_space = code_space.SubVector(0, code_size);
    code_space += code_size;
    generated_code.emplace_back(AddCodeWithCodeSpace(
        result.func_index, result.code_desc, result.frame_slot_count,
        result.tagged_parameter_slots,
        result.protected_instructions_data.as_vector(),
        result.source_positions.as_vector(), GetCodeKind(result),
        result.result_tier, result.for_debugging, this_code_space,
        jump_tables));
  }
  // The allocated region must have been consumed exactly.
  DCHECK_EQ(0, code_space.size());
  return generated_code;
}
void NativeModule::SetTieringState(TieringState new_tiering_state) {
  // Do not tier down asm.js (just never change the tiering state).
  if (module()->origin == kWasmOrigin) {
    base::MutexGuard guard(&allocation_mutex_);
    tiering_state_ = new_tiering_state;
  }
}
bool NativeModule::IsTieredDown() {
  // Read the tiering state under the allocation lock.
  base::MutexGuard guard(&allocation_mutex_);
  const bool tiered_down = tiering_state_ == kTieredDown;
  return tiered_down;
}
void NativeModule::RecompileForTiering() {
  // Read the tiering state under the lock, then trigger recompilation after
  // releasing the lock. If the tiering state was changed when the triggered
  // compilation units finish, code installation will handle that correctly.
  TieringState current_state;
  {
    base::MutexGuard lock(&allocation_mutex_);
    current_state = tiering_state_;

    // Initialize {cached_code_} to signal that this cache should get filled
    // from now on.
    if (!cached_code_) {
      cached_code_ = std::make_unique<
          std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
      // Fill with existing code.
      for (auto& code_entry : owned_code_) {
        InsertToCodeCache(code_entry.second.get());
      }
    }
  }
  // Must be called without holding {allocation_mutex_}.
  RecompileNativeModule(this, current_state);
}
std::vector<int> NativeModule::FindFunctionsToRecompile(
    TieringState new_tiering_state) {
  // Keep every code object touched here alive for the duration of the call.
  WasmCodeRefScope code_ref_scope;
  base::MutexGuard guard(&allocation_mutex_);
  std::vector<int> function_indexes;
  int imported = module()->num_imported_functions;
  int declared = module()->num_declared_functions;
  const bool tier_down = new_tiering_state == kTieredDown;
  for (int slot_index = 0; slot_index < declared; ++slot_index) {
    int function_index = imported + slot_index;
    WasmCode* old_code = code_table_[slot_index];
    // Code already at the target tier needs no recompilation: debug code
    // when tiering down, Turbofan code when tiering up.
    bool code_is_good =
        tier_down ? old_code && old_code->for_debugging()
                  : old_code && old_code->tier() == ExecutionTier::kTurbofan;
    if (code_is_good) continue;
    DCHECK_NOT_NULL(cached_code_);
    // Prefer re-installing previously cached code of the target tier over
    // recompiling the function.
    auto cache_it = cached_code_->find(std::make_pair(
        tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
        function_index));
    if (cache_it != cached_code_->end()) {
      WasmCode* cached_code = cache_it->second;
      if (old_code) {
        WasmCodeRefScope::AddRef(old_code);
        // The code is added to the current {WasmCodeRefScope}, hence the ref
        // count cannot drop to zero here.
        old_code->DecRefOnLiveCode();
      }
      code_table_[slot_index] = cached_code;
      PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
      cached_code->IncRef();
      continue;
    }
    // Otherwise add the function to the set of functions to recompile.
    function_indexes.push_back(function_index);
  }
  return function_indexes;
}
// Releases the code space and the {WasmCode} objects for dead code found by
// code GC; debug side tables are removed afterwards, outside the lock.
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
  // Free the code space.
  code_allocator_.FreeCode(codes);

  DebugInfo* debug_info = nullptr;
  {
    base::MutexGuard guard(&allocation_mutex_);
    if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
    debug_info = debug_info_.get();
    // Free the {WasmCode} objects. This will also unregister trap handler data.
    for (WasmCode* code : codes) {
      DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
      owned_code_.erase(code->instruction_start());
    }
  }
  // Remove debug side tables for all removed code objects, after releasing our
  // lock. This is to avoid lock order inversion.
  if (debug_info) debug_info->RemoveDebugSideTables(codes);
}
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
  // Testing-only accessor; simply forwards to the code allocator.
  const size_t num_code_spaces = code_allocator_.GetNumCodeSpaces();
  return num_code_spaces;
}
bool NativeModule::HasDebugInfo() const {
  // True once {GetDebugInfo} has lazily created the DebugInfo object.
  base::MutexGuard guard(&allocation_mutex_);
  return static_cast<bool>(debug_info_);
}
DebugInfo* NativeModule::GetDebugInfo() {
  // Lazily create the DebugInfo on first request, under the allocation lock.
  base::MutexGuard guard(&allocation_mutex_);
  if (debug_info_ == nullptr) {
    debug_info_ = std::make_unique<DebugInfo>(this);
  }
  return debug_info_.get();
}
// Releases all virtual memory owned by a dying NativeModule and updates the
// global committed-code-space accounting.
void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
                                       size_t committed_size) {
  base::MutexGuard lock(&native_modules_mutex_);
  for (auto& code_space : owned_code_space) {
    DCHECK(code_space.IsReserved());
    TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
               code_space.address(), code_space.end(), code_space.size());

#if defined(V8_OS_WIN64)
    // Undo the unwind-info registration that was done when this code space
    // was created.
    if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
      win64_unwindinfo::UnregisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(code_space.address()));
    }
#endif  // V8_OS_WIN64

    // Remove the region from PC-based lookup before freeing it.
    lookup_map_.erase(code_space.address());
    BackingStore::ReleaseReservation(code_space.size());
    code_space.Free();
    DCHECK(!code_space.IsReserved());
  }

  DCHECK(IsAligned(committed_size, CommitPageSize()));
  // TODO(v8:8462): Remove this once perf supports remapping.
  if (!FLAG_perf_prof) {
    size_t old_committed =
        total_committed_code_space_.fetch_sub(committed_size);
    DCHECK_LE(committed_size, old_committed);
    USE(old_committed);
  }
}
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
  base::MutexGuard lock(&native_modules_mutex_);
  if (lookup_map_.empty()) return nullptr;
  // Find the last registered region whose start address is <= pc.
  auto it = lookup_map_.upper_bound(pc);
  if (it == lookup_map_.begin()) return nullptr;
  --it;
  const Address region_start = it->first;
  const Address region_end = it->second.first;
  NativeModule* native_module = it->second.second;
  DCHECK_NOT_NULL(native_module);
  // Only a hit if {pc} actually lies inside the region.
  if (pc < region_start || pc >= region_end) return nullptr;
  return native_module;
}
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
  // Resolve the owning module first, then look up the code object within it.
  if (NativeModule* native_module = LookupNativeModule(pc)) {
    return native_module->Lookup(pc);
  }
  return nullptr;
}
// TODO(v8:7424): Code protection scopes are not yet supported with shared code
// enabled and need to be revisited.
NativeModuleModificationScope::NativeModuleModificationScope(
    NativeModule* native_module)
    : native_module_(native_module) {
  // Scopes may nest; only the outermost scope (depth transition 0 -> 1)
  // actually flips the code space to writable. Note the post-increment
  // inside the condition.
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_++) == 0) {
    bool success = native_module_->SetExecutable(false);
    CHECK(success);
  }
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
  // Mirror of the constructor: only the outermost scope (depth transition
  // 1 -> 0) re-protects the code space as executable.
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_--) == 1) {
    bool success = native_module_->SetExecutable(true);
    CHECK(success);
  }
}
namespace {
// Head of the per-thread stack of currently open {WasmCodeRefScope}s; pushed
// and popped by the WasmCodeRefScope constructor/destructor.
thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
}  // namespace
WasmCodeRefScope::WasmCodeRefScope()
    : previous_scope_(current_code_refs_scope) {
  // Push this scope onto the thread-local stack of open scopes.
  current_code_refs_scope = this;
}
WasmCodeRefScope::~WasmCodeRefScope() {
  // Scopes must be destroyed strictly LIFO on their owning thread.
  DCHECK_EQ(this, current_code_refs_scope);
  current_code_refs_scope = previous_scope_;
  // Drop all references collected via {AddRef} during this scope's lifetime.
  WasmCode::DecrementRefCount(VectorOf(code_ptrs_));
}
// static
void WasmCodeRefScope::AddRef(WasmCode* code) {
  DCHECK_NOT_NULL(code);
  // Requires an open scope on this thread; the scope's destructor releases
  // the reference taken here.
  WasmCodeRefScope* current_scope = current_code_refs_scope;
  DCHECK_NOT_NULL(current_scope);
  current_scope->code_ptrs_.push_back(code);
  code->IncRef();
}
// Maps a runtime stub id to a human-readable name for tracing/debugging.
const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
#define RUNTIME_STUB_NAME(Name) #Name,
#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
  // Expand the stub list into a name table; the trailing "<unknown>" entry
  // corresponds to {kRuntimeStubCount} itself.
  constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
      RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
#undef RUNTIME_STUB_NAME
#undef RUNTIME_STUB_NAME_TRAP
  STATIC_ASSERT(arraysize(runtime_stub_names) ==
                WasmCode::kRuntimeStubCount + 1);
  DCHECK_GT(arraysize(runtime_stub_names), stub_id);
  return runtime_stub_names[stub_id];
}
} // namespace wasm
} // namespace internal
} // namespace v8
#undef TRACE_HEAP
|
#ifndef DECODER_RSC_BCJR_INTRA_HPP_
#define DECODER_RSC_BCJR_INTRA_HPP_
#include <vector>
#include <mipp.h>
#include "../Decoder_RSC_BCJR.hpp"
namespace aff3ct
{
namespace module
{
// Base class for the "intra" SIMD variants of the RSC BCJR decoder; holds
// the forward (alpha) and branch (gamma) metric buffers and defines the
// metric-computation hooks implemented by derived classes.
// B: type of the bits, R: type of the metrics/LLR values.
template <typename B = int, typename R = float>
class Decoder_RSC_BCJR_intra : public Decoder_RSC_BCJR<B,R>
{
protected:
	mipp::vector<R> alpha; // node metric (left to right)
	mipp::vector<R> gamma; // edge metric

	// K: number of information bits per frame; trellis: trellis description
	// tables; buffered_encoding / n_frames forwarded to the base decoder.
	Decoder_RSC_BCJR_intra(const int &K,
	                       const std::vector<std::vector<int>> &trellis,
	                       const bool buffered_encoding = true,
	                       const int n_frames = 1);
	virtual ~Decoder_RSC_BCJR_intra();

protected:
	// Soft-in/soft-out decoding of one frame: consumes systematic (sys) and
	// parity (par) LLRs, produces extrinsic values in ext.
	void _decode_siso(const R *sys, const R *par, R *ext, const int frame_id);

	// Metric computation steps, specialized by the derived implementations.
	virtual void compute_gamma   (const R *sys, const R *par) = 0;
	virtual void compute_alpha   (                          ) = 0;
	virtual void compute_beta_ext(const R *sys,       R *ext) = 0;
};
}
}
#include "Decoder_RSC_BCJR_intra.hxx"
#endif /* DECODER_RSC_BCJR_INTRA_HPP_ */
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#define FBGEMM_EXPORTS
#include "fbgemm/FbgemmI8DepthwiseAvx2.h"
#include <stdexcept> // for logic_error
#include <string>
#include "./FbgemmI8DepthwiseAvx2-inl.h"
#include "./MaskAvx2.h"
#include "fbgemm/Utils.h"
#include "fbgemm/UtilsAvx2.h"
using namespace std;
namespace fbgemm {
// Computes, for one output position, the inner product of the 27-tap 3x3x3
// depthwise window anchored at (t_in, h_in, w_in) with the packed filter
// {Bp}, accumulating int32 results into {C} (32 channels per call, 8 per
// SIMD register). Out-of-bounds taps are replaced by {A_zero_point}. The 27
// taps are processed in four register groups of 8 + 8 + 8 + 3.
// SUM_A: also accumulate per-channel sums of A, multiplied by the B zero
//        point, into {row_offsets} (for zero-point correction).
// REMAINDER: {remainder} (< 32) channels are live; loads are masked.
// PER_CHANNEL_QUANTIZATION: {B_zero_point} is a per-channel array instead of
//        a broadcast scalar.
template <
    bool SUM_A,
    bool REMAINDER = false,
    bool PER_CHANNEL_QUANTIZATION = false>
static ALWAYS_INLINE void inner_prod_3x3x3_packed_(
    int T,
    int H,
    int W,
    int K,
    int t_in,
    int h_in,
    int w_in,
    const uint8_t* A,
    int32_t A_zero_point,
    const int8_t* Bp,
    const int32_t* B_zero_point,
    int32_t* C,
    int remainder,
    int32_t* row_offsets) {
  __m256i A_zero_point_v = _mm256_set1_epi8(static_cast<uint8_t>(A_zero_point));
  __m256i mask_v = _mm256_setzero_si256();
  if (REMAINDER) {
    // Mask with one 32-bit lane per 4 remaining channels.
    mask_v = _mm256_load_si256(reinterpret_cast<const __m256i*>(
        internal::avx2_ps_or_epi32_masks[remainder / 4]));
  }

  // The code below can be written as a simple R*S loop but the compiler
  // doesn't unroll so we're manually unrolling it.
  // constexpr int R = 3, S = 3;
  // array<__m256i, R * S> a_v;
  // for (int r = 0; r < R; ++r) {
  //   for (int s = 0; s < S; ++s) {
  //     if (h_in + r >= 0 && h_in + r < H && w_in + s >= 0 && w_in + s < W) {
  //       if (REMAINDER) {
  //         a_v[r * S + s] =
  //             _mm256_maskload_epi32((const int *)(A + (r * W + s) * K),
  //             mask_v);
  //       } else {
  //         a_v[r * S + s] =
  //             _mm256_lddqu_si256((const __m256i *)(A + (r * W + s) * K));
  //       }
  //     } else {
  //       a_v[r * S + s] = A_zero_point_v;
  //     }
  //   }
  // }

  // Group 1 (taps 0-7): plane t_in, rows h_in..h_in+2; the last tap of this
  // plane (row +2, col +2) spills into group 2. Default every register to
  // the zero point so padded taps contribute the correct value.
  __m256i a_v[8];
  a_v[0] = A_zero_point_v;
  a_v[1] = A_zero_point_v;
  a_v[2] = A_zero_point_v;
  a_v[3] = A_zero_point_v;
  a_v[4] = A_zero_point_v;
  a_v[5] = A_zero_point_v;
  a_v[6] = A_zero_point_v;
  a_v[7] = A_zero_point_v;

  if (t_in >= 0 && t_in < T) {
    if (h_in >= 0 && h_in < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[0] = load_a<REMAINDER>(A + ((0 * H + 0) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[1] = load_a<REMAINDER>(A + ((0 * H + 0) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[2] = load_a<REMAINDER>(A + ((0 * H + 0) * W + 2) * K, mask_v);
      }
    }

    if (h_in + 1 >= 0 && h_in + 1 < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[3] = load_a<REMAINDER>(A + ((0 * H + 1) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[4] = load_a<REMAINDER>(A + ((0 * H + 1) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[5] = load_a<REMAINDER>(A + ((0 * H + 1) * W + 2) * K, mask_v);
      }
    }

    if (h_in + 2 >= 0 && h_in + 2 < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[6] = load_a<REMAINDER>(A + ((0 * H + 2) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[7] = load_a<REMAINDER>(A + ((0 * H + 2) * W + 1) * K, mask_v);
      }
    }
  }

  // Multiply-accumulate taps 0-7 against the first 8 packed B registers.
  __m256i a_sum[4];
  inner_prod_packed_<8, SUM_A, REMAINDER>(
      a_v, reinterpret_cast<const __m256i*>(Bp), C, remainder, a_sum);

  // Group 2 (taps 8-15): last tap of plane t_in, then plane t_in+1 rows 0-1
  // plus the first tap of its row 2.
  a_v[0] = A_zero_point_v;
  a_v[1] = A_zero_point_v;
  a_v[2] = A_zero_point_v;
  a_v[3] = A_zero_point_v;
  a_v[4] = A_zero_point_v;
  a_v[5] = A_zero_point_v;
  a_v[6] = A_zero_point_v;
  a_v[7] = A_zero_point_v;

  if (t_in >= 0 && t_in < T) {
    if (h_in + 2 >= 0 && h_in + 2 < H) {
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[0] = load_a<REMAINDER>(A + ((0 * H + 2) * W + 2) * K, mask_v);
      }
    }
  }

  if (t_in + 1 >= 0 && t_in + 1 < T) {
    if (h_in >= 0 && h_in < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[1] = load_a<REMAINDER>(A + ((1 * H + 0) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[2] = load_a<REMAINDER>(A + ((1 * H + 0) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[3] = load_a<REMAINDER>(A + ((1 * H + 0) * W + 2) * K, mask_v);
      }
    }

    if (h_in + 1 >= 0 && h_in + 1 < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[4] = load_a<REMAINDER>(A + ((1 * H + 1) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[5] = load_a<REMAINDER>(A + ((1 * H + 1) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[6] = load_a<REMAINDER>(A + ((1 * H + 1) * W + 2) * K, mask_v);
      }
    }

    if (h_in + 2 >= 0 && h_in + 2 < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[7] = load_a<REMAINDER>(A + ((1 * H + 2) * W + 0) * K, mask_v);
      }
    }
  }

  // Accumulate taps 8-15 (note acc = true: adds on top of group 1).
  __m256i a_sum_temp[4];
  inner_prod_packed_<8, SUM_A, REMAINDER, true /* acc */>(
      a_v, reinterpret_cast<const __m256i*>(Bp) + 8, C, remainder, a_sum_temp);
  if (SUM_A) {
    a_sum[0] = _mm256_add_epi32(a_sum[0], a_sum_temp[0]);
    a_sum[1] = _mm256_add_epi32(a_sum[1], a_sum_temp[1]);
    a_sum[2] = _mm256_add_epi32(a_sum[2], a_sum_temp[2]);
    a_sum[3] = _mm256_add_epi32(a_sum[3], a_sum_temp[3]);
  }

  // Group 3 (taps 16-23): remaining taps of plane t_in+1 row 2, then plane
  // t_in+2 rows 0-1.
  a_v[0] = A_zero_point_v;
  a_v[1] = A_zero_point_v;
  a_v[2] = A_zero_point_v;
  a_v[3] = A_zero_point_v;
  a_v[4] = A_zero_point_v;
  a_v[5] = A_zero_point_v;
  a_v[6] = A_zero_point_v;
  a_v[7] = A_zero_point_v;

  if (t_in + 1 >= 0 && t_in + 1 < T) {
    if (h_in + 2 >= 0 && h_in + 2 < H) {
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[0] = load_a<REMAINDER>(A + ((1 * H + 2) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[1] = load_a<REMAINDER>(A + ((1 * H + 2) * W + 2) * K, mask_v);
      }
    }
  }

  if (t_in + 2 >= 0 && t_in + 2 < T) {
    if (h_in >= 0 && h_in < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[2] = load_a<REMAINDER>(A + ((2 * H + 0) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[3] = load_a<REMAINDER>(A + ((2 * H + 0) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[4] = load_a<REMAINDER>(A + ((2 * H + 0) * W + 2) * K, mask_v);
      }
    }

    if (h_in + 1 >= 0 && h_in + 1 < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[5] = load_a<REMAINDER>(A + ((2 * H + 1) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[6] = load_a<REMAINDER>(A + ((2 * H + 1) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[7] = load_a<REMAINDER>(A + ((2 * H + 1) * W + 2) * K, mask_v);
      }
    }
  }

  inner_prod_packed_<8, SUM_A, REMAINDER, true /* acc */>(
      a_v, reinterpret_cast<const __m256i*>(Bp) + 16, C, remainder, a_sum_temp);
  if (SUM_A) {
    a_sum[0] = _mm256_add_epi32(a_sum[0], a_sum_temp[0]);
    a_sum[1] = _mm256_add_epi32(a_sum[1], a_sum_temp[1]);
    a_sum[2] = _mm256_add_epi32(a_sum[2], a_sum_temp[2]);
    a_sum[3] = _mm256_add_epi32(a_sum[3], a_sum_temp[3]);
  }

  // Group 4 (taps 24-26): plane t_in+2, row 2.
  a_v[0] = A_zero_point_v;
  a_v[1] = A_zero_point_v;
  a_v[2] = A_zero_point_v;

  if (t_in + 2 >= 0 && t_in + 2 < T) {
    if (h_in + 2 >= 0 && h_in + 2 < H) {
      if (w_in >= 0 && w_in < W) {
        a_v[0] = load_a<REMAINDER>(A + ((2 * H + 2) * W + 0) * K, mask_v);
      }
      if (w_in + 1 >= 0 && w_in + 1 < W) {
        a_v[1] = load_a<REMAINDER>(A + ((2 * H + 2) * W + 1) * K, mask_v);
      }
      if (w_in + 2 >= 0 && w_in + 2 < W) {
        a_v[2] = load_a<REMAINDER>(A + ((2 * H + 2) * W + 2) * K, mask_v);
      }
    }
  }

  inner_prod_packed_<3, SUM_A, REMAINDER, true /* acc */>(
      a_v, reinterpret_cast<const __m256i*>(Bp) + 24, C, remainder, a_sum_temp);

  if (SUM_A) {
    // Finalize the per-channel A sums and store row_offsets = a_sum *
    // B_zero_point (broadcast scalar, or per-channel array).
    a_sum[0] = _mm256_add_epi32(a_sum[0], a_sum_temp[0]);
    a_sum[1] = _mm256_add_epi32(a_sum[1], a_sum_temp[1]);
    a_sum[2] = _mm256_add_epi32(a_sum[2], a_sum_temp[2]);
    a_sum[3] = _mm256_add_epi32(a_sum[3], a_sum_temp[3]);

    __m256i B_zero_point_v;
    for (int i = 0; i < (REMAINDER ? (remainder / 8) : 4); ++i) {
      if (PER_CHANNEL_QUANTIZATION) {
        B_zero_point_v = _mm256_loadu_si256(
            reinterpret_cast<const __m256i*>(B_zero_point + i * 8));
      } else {
        B_zero_point_v = _mm256_set1_epi32(B_zero_point[0]);
      }
      _mm256_store_si256(
          reinterpret_cast<__m256i*>(&row_offsets[i * 8]),
          _mm256_mullo_epi32(a_sum[i], B_zero_point_v));
    }
  }
}
// Computes one output position (t, h, w) of the 3x3x3 depthwise convolution
// for all K channels: int32 partial sums are accumulated per 32-channel
// chunk (plus a masked remainder), then requantized to uint8 into C_uint8
// using a single per-tensor scale.
template <
    bool FUSE_RELU,
    bool HAS_BIAS,
    bool A_SYMMETRIC,
    bool B_SYMMETRIC,
    typename BIAS_TYPE>
static ALWAYS_INLINE void depthwise_3x3x3_kernel_(
    int T,
    int H,
    int W,
    int K,
    int t,
    int h,
    int w,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const int8_t* Bp,
    float C_multiplier,
    int32_t C_zero_point,
    int32_t* C_int32,
    uint8_t* C_uint8,
    int32_t* row_offsets,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    float act_times_w_scale) {
  constexpr int R = 3, S = 3;
  constexpr int PAD_P = 1, PAD_T = 1, PAD_B = 1, PAD_L = 1, PAD_R = 1;
  int H_OUT = (H + PAD_T + PAD_B - R) / stride_h + 1;
  int W_OUT = (W + PAD_L + PAD_R - S) / stride_w + 1;
  // Front-top-left input coordinate of the receptive field; may be negative
  // or out of range — padding is handled inside the inner product.
  int t_in = -PAD_P + t * stride_t;
  int h_in = -PAD_T + h * stride_h;
  int w_in = -PAD_L + w * stride_w;

  int k;
  // Full 32-channel chunks.
  for (k = 0; k < K / 32 * 32; k += 32) {
    inner_prod_3x3x3_packed_<!B_SYMMETRIC /*SUM_A*/>(
        T,
        H,
        W,
        K,
        t_in,
        h_in,
        w_in,
        A + ((t_in * H + h_in) * W + w_in) * K + k,
        A_zero_point,
        Bp + k * 28,  // 28 packed registers per 32-channel group (packing
                      // layout of PackedDepthWiseConvMatrix — TODO confirm)
        &B_zero_point,
        C_int32 + k,
        0,
        B_SYMMETRIC ? nullptr : &row_offsets[k]);
  }
  // Masked tail for K not a multiple of 32.
  int remainder = K - k;
  if (remainder) {
    inner_prod_3x3x3_packed_<!B_SYMMETRIC /*SUM_A*/, true>(
        T,
        H,
        W,
        K,
        t_in,
        h_in,
        w_in,
        A + ((t_in * H + h_in) * W + w_in) * K + k,
        A_zero_point,
        Bp + k * 28,
        &B_zero_point,
        C_int32 + k,
        remainder,
        B_SYMMETRIC ? nullptr : &row_offsets[k]);
  }
  // Zero-point-correct, scale, add bias and (optionally) clamp at zero,
  // writing the uint8 output row for this position.
  requantize_<
      FUSE_RELU,
      HAS_BIAS,
      false, /*PER_CHAN_QUANT*/
      A_SYMMETRIC,
      B_SYMMETRIC>(
      A_zero_point,
      &C_multiplier,
      C_zero_point,
      C_int32,
      C_uint8 + ((t * H_OUT + h) * W_OUT + w) * K,
      K,
      row_offsets,
      col_offsets,
      bias,
      &act_times_w_scale);
}
// Per-channel-quantization variant of depthwise_3x3x3_kernel_: B zero points
// and output multipliers are per-channel arrays, so row offsets are always
// accumulated (SUM_A = true) and B can never be treated as symmetric.
template <bool FUSE_RELU, bool HAS_BIAS, bool A_SYMMETRIC, typename BIAS_TYPE>
static ALWAYS_INLINE void depthwise_3x3x3_per_channel_quantization_kernel_(
    int T,
    int H,
    int W,
    int K,
    int t,
    int h,
    int w,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    const int32_t* B_zero_point,
    const int8_t* Bp,
    const float* C_multiplier,
    int32_t C_zero_point,
    int32_t* C_int32,
    uint8_t* C_uint8,
    int32_t* row_offsets,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    const float* act_times_w_scale) {
  constexpr int R = 3, S = 3;
  constexpr int PAD_P = 1, PAD_T = 1, PAD_B = 1, PAD_L = 1, PAD_R = 1;
  int H_OUT = (H + PAD_T + PAD_B - R) / stride_h + 1;
  int W_OUT = (W + PAD_L + PAD_R - S) / stride_w + 1;
  // Front-top-left input coordinate of the receptive field (padding handled
  // inside the inner product).
  int t_in = -PAD_P + t * stride_t;
  int h_in = -PAD_T + h * stride_h;
  int w_in = -PAD_L + w * stride_w;

  int k;
  // Full 32-channel chunks; note B_zero_point advances with k.
  for (k = 0; k < K / 32 * 32; k += 32) {
    inner_prod_3x3x3_packed_<
        true, /*SUM_A*/
        false, /*remainder*/
        true /*per-channel*/>(
        T,
        H,
        W,
        K,
        t_in,
        h_in,
        w_in,
        A + ((t_in * H + h_in) * W + w_in) * K + k,
        A_zero_point,
        Bp + k * 28,
        B_zero_point + k,
        C_int32 + k,
        0,
        &row_offsets[k]);
  }
  // Masked tail for K not a multiple of 32.
  int remainder = K - k;
  if (remainder) {
    inner_prod_3x3x3_packed_<
        true, /*SUM_A*/
        true, /*remainder*/
        true /*per-channel*/>(
        T,
        H,
        W,
        K,
        t_in,
        h_in,
        w_in,
        A + ((t_in * H + h_in) * W + w_in) * K + k,
        A_zero_point,
        Bp + k * 28,
        B_zero_point + k,
        C_int32 + k,
        remainder,
        &row_offsets[k]);
  }
  // Requantize with per-channel multipliers into the uint8 output.
  requantize_<
      FUSE_RELU,
      HAS_BIAS,
      true, /*PER_CHAN_QUANT*/
      A_SYMMETRIC,
      false /*B_SYMM*/>(
      A_zero_point,
      C_multiplier,
      C_zero_point,
      C_int32,
      C_uint8 + ((t * H_OUT + h) * W_OUT + w) * K,
      K,
      row_offsets,
      col_offsets,
      bias,
      act_times_w_scale);
}
// Core implementation of the 3D (3x3x3) depthwise convolution with padding 1
// on every border: iterates over this thread's slice of the output tensor
// and invokes the per-position kernel. Work is partitioned across threads
// along the N, T_OUT and H_OUT dimensions.
// Fix: removed the stray ';' that followed the function body (ill-formed
// extra top-level semicolon; warns under -Wextra-semi / -pedantic).
template <
    bool FUSE_RELU,
    bool HAS_BIAS,
    bool A_SYMMETRIC,
    bool B_SYMMETRIC,
    typename BIAS_TYPE>
static ALWAYS_INLINE void depthwise_3x3x3_pad_1_(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    float C_multiplier,
    int32_t C_zero_point,
    int32_t* C_int32,
    uint8_t* C_uint8,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    float act_times_w_scale,
    int thread_id,
    int num_threads) {
  assert(K % 8 == 0);
  constexpr int K_T = 3, K_H = 3, K_W = 3;
  constexpr int PAD_P = 1, PAD_N = 1, PAD_T = 1, PAD_B = 1, PAD_L = 1,
                PAD_R = 1;
  int T_OUT = (T + PAD_P + PAD_N - K_T) / stride_t + 1;
  int H_OUT = (H + PAD_T + PAD_B - K_H) / stride_h + 1;
  int W_OUT = (W + PAD_L + PAD_R - K_W) / stride_w + 1;
  const int8_t* Bp = B.PackedMat();

  // Scratch row-offset buffer, rounded up to a multiple of 32 channels to
  // match the kernel's 32-channel chunking.
  int32_t* row_offsets = static_cast<int32_t*>(
      fbgemmAlignedAlloc(64, (K + 31) / 32 * 32 * sizeof(int32_t)));

  int n_begin, n_end, t_begin, t_end, h_begin, h_end;
  // Reuse the 3-dim partition scheme for parallelization in matrix
  // multiplication.
  thread_type_t th_info =
      fbgemmGetThreadPartition(N, T_OUT, H_OUT, thread_id, num_threads);
  // Calculate the begin and end index along the batch (N) dimension
  fbgemmPartition1D(
      th_info.g_thread_id, th_info.g_num_threads, N, n_begin, n_end);
  // Calculate the begin and end index along the T dimension
  fbgemmPartition1D(
      th_info.m_thread_id, th_info.m_num_threads, T_OUT, t_begin, t_end);
  // Calculate the begin and end index along the H dimension
  fbgemmPartition1D(
      th_info.n_thread_id, th_info.n_num_threads, H_OUT, h_begin, h_end);

  for (int n = n_begin; n < n_end; ++n) {
    const uint8_t* A_base = A + n * T * H * W * K;
    uint8_t* C_uint8_base = C_uint8 + n * T_OUT * H_OUT * W_OUT * K;
    for (int t = t_begin; t < t_end; ++t) {
      for (int h = h_begin; h < h_end; ++h) {
        // The W dimension is not partitioned; each thread covers it fully.
        for (int w = 0; w < W_OUT; ++w) {
          depthwise_3x3x3_kernel_<
              FUSE_RELU,
              HAS_BIAS,
              A_SYMMETRIC,
              B_SYMMETRIC>(
              T,
              H,
              W,
              K,
              t,
              h,
              w,
              stride_t,
              stride_h,
              stride_w,
              A_zero_point,
              A_base,
              B_zero_point,
              Bp,
              C_multiplier,
              C_zero_point,
              C_int32,
              C_uint8_base,
              row_offsets,
              col_offsets,
              bias,
              act_times_w_scale);
        } // w
      } // h
    } // t
  } // for each n
  fbgemmAlignedFree(row_offsets);
}
// Per-channel-quantization core of the padded 3x3x3 depthwise convolution;
// same threading/partitioning structure as depthwise_3x3x3_pad_1_, but
// forwards per-channel B zero points and output multipliers.
// Fix: removed the stray ';' that followed the function body (ill-formed
// extra top-level semicolon; warns under -Wextra-semi / -pedantic).
template <bool FUSE_RELU, bool HAS_BIAS, bool A_SYMMETRIC, typename BIAS_TYPE>
static ALWAYS_INLINE void depthwise_3x3x3_per_channel_quantization_pad_1_(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    const int32_t* B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    const float* C_multiplier,
    int32_t C_zero_point,
    int32_t* C_int32,
    uint8_t* C_uint8,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    const float* act_times_w_scale,
    int thread_id,
    int num_threads) {
  assert(K % 8 == 0);
  constexpr int K_T = 3, K_H = 3, K_W = 3;
  constexpr int PAD_P = 1, PAD_N = 1, PAD_T = 1, PAD_B = 1, PAD_L = 1,
                PAD_R = 1;
  int T_OUT = (T + PAD_P + PAD_N - K_T) / stride_t + 1;
  int H_OUT = (H + PAD_T + PAD_B - K_H) / stride_h + 1;
  int W_OUT = (W + PAD_L + PAD_R - K_W) / stride_w + 1;
  const int8_t* Bp = B.PackedMat();

  // Scratch row-offset buffer, rounded up to a multiple of 32 channels to
  // match the kernel's 32-channel chunking.
  int32_t* row_offsets = static_cast<int32_t*>(
      fbgemmAlignedAlloc(64, (K + 31) / 32 * 32 * sizeof(int32_t)));

  int n_begin, n_end, t_begin, t_end, h_begin, h_end;
  // Reuse the 3-dim partition scheme for parallelization in matrix
  // multiplication.
  thread_type_t th_info =
      fbgemmGetThreadPartition(N, T_OUT, H_OUT, thread_id, num_threads);
  // Calculate the begin and end index along the batch (N) dimension
  fbgemmPartition1D(
      th_info.g_thread_id, th_info.g_num_threads, N, n_begin, n_end);
  // Calculate the begin and end index along the T dimension
  fbgemmPartition1D(
      th_info.m_thread_id, th_info.m_num_threads, T_OUT, t_begin, t_end);
  // Calculate the begin and end index along the H dimension
  fbgemmPartition1D(
      th_info.n_thread_id, th_info.n_num_threads, H_OUT, h_begin, h_end);

  for (int n = n_begin; n < n_end; ++n) {
    const uint8_t* A_base = A + n * T * H * W * K;
    uint8_t* C_uint8_base = C_uint8 + n * T_OUT * H_OUT * W_OUT * K;
    for (int t = t_begin; t < t_end; ++t) {
      for (int h = h_begin; h < h_end; ++h) {
        // The W dimension is not partitioned; each thread covers it fully.
        for (int w = 0; w < W_OUT; ++w) {
          depthwise_3x3x3_per_channel_quantization_kernel_<
              FUSE_RELU,
              HAS_BIAS,
              A_SYMMETRIC,
              BIAS_TYPE>(
              T,
              H,
              W,
              K,
              t,
              h,
              w,
              stride_t,
              stride_h,
              stride_w,
              A_zero_point,
              A_base,
              B_zero_point,
              Bp,
              C_multiplier,
              C_zero_point,
              C_int32,
              C_uint8_base,
              row_offsets,
              col_offsets,
              bias,
              act_times_w_scale);
        } // w
      } // h
    } // t
  } // for each n
  fbgemmAlignedFree(row_offsets);
}
// Dispatch A_SYMMETRIC and B_SYMMETRIC
// Turns the runtime zero points into compile-time template flags: A counts
// as symmetric when A_zero_point == 0 or col_offsets is null, B when
// B_zero_point == 0. Also allocates the int32 scratch buffer (rounded up to
// a multiple of 32 channels) shared by all four instantiations.
template <bool FUSE_RELU, bool HAS_BIAS, typename BIAS_TYPE>
static void depthwise_3x3x3_pad_1_(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    float C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    float act_times_w_scale,
    int thread_id,
    int num_threads) {
  int32_t* C_int32_temp = static_cast<int32_t*>(
      fbgemmAlignedAlloc(64, (K + 31) / 32 * 32 * sizeof(int32_t)));
  if (A_zero_point == 0 || col_offsets == nullptr) {
    if (B_zero_point == 0) {
      depthwise_3x3x3_pad_1_<
          FUSE_RELU,
          HAS_BIAS,
          true /*A_symmetric*/,
          true /*B_symmetric*/,
          BIAS_TYPE>(
          N,
          T,
          H,
          W,
          K,
          stride_t,
          stride_h,
          stride_w,
          A_zero_point,
          A,
          B_zero_point,
          B,
          C_multiplier,
          C_zero_point,
          C_int32_temp,
          C,
          col_offsets,
          bias,
          act_times_w_scale,
          thread_id,
          num_threads);
    } else {
      depthwise_3x3x3_pad_1_<
          FUSE_RELU,
          HAS_BIAS,
          true /*A_symmetric*/,
          false /*B_symmetric*/,
          BIAS_TYPE>(
          N,
          T,
          H,
          W,
          K,
          stride_t,
          stride_h,
          stride_w,
          A_zero_point,
          A,
          B_zero_point,
          B,
          C_multiplier,
          C_zero_point,
          C_int32_temp,
          C,
          col_offsets,
          bias,
          act_times_w_scale,
          thread_id,
          num_threads);
    }
  } else {
    if (B_zero_point == 0) {
      depthwise_3x3x3_pad_1_<
          FUSE_RELU,
          HAS_BIAS,
          false /*A_symmetric*/,
          true /*B_symmetric*/,
          BIAS_TYPE>(
          N,
          T,
          H,
          W,
          K,
          stride_t,
          stride_h,
          stride_w,
          A_zero_point,
          A,
          B_zero_point,
          B,
          C_multiplier,
          C_zero_point,
          C_int32_temp,
          C,
          col_offsets,
          bias,
          act_times_w_scale,
          thread_id,
          num_threads);
    } else {
      depthwise_3x3x3_pad_1_<
          FUSE_RELU,
          HAS_BIAS,
          false /*A_symmetric*/,
          false /*B_symmetric*/,
          BIAS_TYPE>(
          N,
          T,
          H,
          W,
          K,
          stride_t,
          stride_h,
          stride_w,
          A_zero_point,
          A,
          B_zero_point,
          B,
          C_multiplier,
          C_zero_point,
          C_int32_temp,
          C,
          col_offsets,
          bias,
          act_times_w_scale,
          thread_id,
          num_threads);
    }
  }
  fbgemmAlignedFree(C_int32_temp);
}
// Dispatch HAS_BIAS
// Resolves the runtime "bias != nullptr" check into the compile-time
// HAS_BIAS template flag and forwards to the symmetry dispatcher.
template <bool FUSE_RELU, typename BIAS_TYPE>
static void depthwise_3x3x3_pad_1_(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    float C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    float act_times_w_scale,
    int thread_id,
    int num_threads) {
  if (bias) {
    depthwise_3x3x3_pad_1_<FUSE_RELU, true /*HAS_BIAS*/, BIAS_TYPE>(
        N,
        T,
        H,
        W,
        K,
        stride_t,
        stride_h,
        stride_w,
        A_zero_point,
        A,
        B_zero_point,
        B,
        C_multiplier,
        C_zero_point,
        C,
        col_offsets,
        bias,
        act_times_w_scale,
        thread_id,
        num_threads);
  } else {
    depthwise_3x3x3_pad_1_<FUSE_RELU, false /*HAS_BIAS*/, BIAS_TYPE>(
        N,
        T,
        H,
        W,
        K,
        stride_t,
        stride_h,
        stride_w,
        A_zero_point,
        A,
        B_zero_point,
        B,
        C_multiplier,
        C_zero_point,
        C,
        col_offsets,
        bias,
        act_times_w_scale,
        thread_id,
        num_threads);
  }
}
// Dispatch FUSE_RELU
// Public entry point for the padded 3x3x3 depthwise convolution: validates
// the arguments, then resolves the runtime fuse_relu flag into the
// compile-time FUSE_RELU template parameter.
template <typename BIAS_TYPE>
void depthwise_3x3x3_pad_1(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    float C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    bool fuse_relu,
    float act_times_w_scale,
    int thread_id,
    int num_threads) {
  // The packed weights must have been packed for a 3x3x3 kernel.
  if (B.GetKernelProduct() != 3 * 3 * 3) {
    string msg =
        "[FBGEMM_CONV_ERROR] Packed weight is expected to have kernel_prod " +
        to_string(3 * 3 * 3) + " but has " + to_string(B.GetKernelProduct());
    throw logic_error(msg);
  }
  // Guard against division by zero in the output-size / partition math.
  if (stride_t == 0 || stride_h == 0 || stride_w == 0 || num_threads == 0) {
    assert(
        0 &&
        "stride_t == 0 || stride_h == 0 || stride_w == 0 || num_threads == 0");
    return;
  }
  if (N == 0) {
    // In C2, batch size 0 is allowed, so we should just early return.
    return;
  }
  if (fuse_relu) {
    depthwise_3x3x3_pad_1_<true /*FUSE_RELU*/, BIAS_TYPE>(
        N,
        T,
        H,
        W,
        K,
        stride_t,
        stride_h,
        stride_w,
        A_zero_point,
        A,
        B_zero_point,
        B,
        C_multiplier,
        C_zero_point,
        C,
        col_offsets,
        bias,
        act_times_w_scale,
        thread_id,
        num_threads);
  } else {
    depthwise_3x3x3_pad_1_<false /*FUSE_RELU*/, BIAS_TYPE>(
        N,
        T,
        H,
        W,
        K,
        stride_t,
        stride_h,
        stride_w,
        A_zero_point,
        A,
        B_zero_point,
        B,
        C_multiplier,
        C_zero_point,
        C,
        col_offsets,
        bias,
        act_times_w_scale,
        thread_id,
        num_threads);
  }
}
// Dispatch A_SYMMETRIC: the activation is treated as symmetric when its zero
// point is 0 (or no column offsets were supplied), which lets the kernel skip
// the column-offset correction.
template <bool FUSE_RELU, bool HAS_BIAS, typename BIAS_TYPE>
static void depthwise_3x3x3_per_channel_quantization_pad_1_(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    const int32_t* B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    const float* C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    const float* act_times_w_scale,
    int thread_id,
    int num_threads) {
  // 64-byte-aligned scratch buffer for the 32-bit intermediate results,
  // rounded up to a multiple of 32 channels.
  int32_t* C_int32_temp = static_cast<int32_t*>(
      fbgemmAlignedAlloc(64, (K + 31) / 32 * 32 * sizeof(int32_t)));
  const bool a_symmetric = A_zero_point == 0 || col_offsets == nullptr;
  auto dispatch = a_symmetric
      ? &depthwise_3x3x3_per_channel_quantization_pad_1_<
            FUSE_RELU,
            HAS_BIAS,
            true /*A_SYMM*/,
            BIAS_TYPE>
      : &depthwise_3x3x3_per_channel_quantization_pad_1_<
            FUSE_RELU,
            HAS_BIAS,
            false /*A_SYMM*/,
            BIAS_TYPE>;
  dispatch(
      N,
      T,
      H,
      W,
      K,
      stride_t,
      stride_h,
      stride_w,
      A_zero_point,
      A,
      B_zero_point,
      B,
      C_multiplier,
      C_zero_point,
      C_int32_temp,
      C,
      col_offsets,
      bias,
      act_times_w_scale,
      thread_id,
      num_threads);
  fbgemmAlignedFree(C_int32_temp);
}
// Dispatch HAS_BIAS for the per-channel-quantized variant: select the
// HAS_BIAS instantiation from the bias pointer and forward all arguments.
template <bool FUSE_RELU, typename BIAS_TYPE>
static void depthwise_3x3x3_per_channel_quantization_pad_1_(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    const int32_t* B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    const float* C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const BIAS_TYPE* bias,
    const float* act_times_w_scale,
    int thread_id,
    int num_threads) {
  auto dispatch = bias
      ? &depthwise_3x3x3_per_channel_quantization_pad_1_<
            FUSE_RELU,
            true /* HAS_BIAS */,
            BIAS_TYPE>
      : &depthwise_3x3x3_per_channel_quantization_pad_1_<
            FUSE_RELU,
            false /* HAS_BIAS */,
            BIAS_TYPE>;
  dispatch(
      N,
      T,
      H,
      W,
      K,
      stride_t,
      stride_h,
      stride_w,
      A_zero_point,
      A,
      B_zero_point,
      B,
      C_multiplier,
      C_zero_point,
      C,
      col_offsets,
      bias,
      act_times_w_scale,
      thread_id,
      num_threads);
}
// Dispatch FUSE_RELU
template <typename BIAS_TYPE>
void depthwise_3x3x3_per_channel_quantization_pad_1(
int N,
int T,
int H,
int W,
int K,
int stride_t,
int stride_h,
int stride_w,
int32_t A_zero_point,
const uint8_t* A,
const int32_t* B_zero_point,
const PackedDepthWiseConvMatrix& B,
const float* C_multiplier,
int32_t C_zero_point,
uint8_t* C,
const int32_t* col_offsets,
const BIAS_TYPE* bias,
bool fuse_relu,
const float* act_times_w_scale,
int thread_id,
int num_threads) {
if (B.GetKernelProduct() != 3 * 3 * 3) {
string msg =
"[FBGEMM_CONV_ERROR] Packed weight is expected to have kernel_prod " +
to_string(3 * 3 * 3) + " but has " + to_string(B.GetKernelProduct());
throw logic_error(msg);
}
if (stride_t == 0 || stride_h == 0 || stride_w == 0 || num_threads == 0) {
assert(
0 &&
"stride_t == 0 || stride_h == 0 || stride_w == 0 || num_threads == 0");
return;
}
if (N == 0) {
// In C2, batch size 0 is allowed, so we should just early return.
return;
}
if (fuse_relu) {
depthwise_3x3x3_per_channel_quantization_pad_1_<
true /* FUSE_RELU */,
BIAS_TYPE>(
N,
T,
H,
W,
K,
stride_t,
stride_h,
stride_w,
A_zero_point,
A,
B_zero_point,
B,
C_multiplier,
C_zero_point,
C,
col_offsets,
bias,
act_times_w_scale,
thread_id,
num_threads);
} else {
depthwise_3x3x3_per_channel_quantization_pad_1_<
false /* FUSE_RELU */,
BIAS_TYPE>(
N,
T,
H,
W,
K,
stride_t,
stride_h,
stride_w,
A_zero_point,
A,
B_zero_point,
B,
C_multiplier,
C_zero_point,
C,
col_offsets,
bias,
act_times_w_scale,
thread_id,
num_threads);
}
}
// Explicit instantiations exported from the library (FBGEMM_API); one per
// supported BIAS_TYPE for each public entry point.

// depthwise_3x3x3_pad_1 with int32 bias.
template FBGEMM_API void depthwise_3x3x3_pad_1(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    float C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const int32_t* bias,
    bool fuse_relu,
    float act_times_w_scale,
    int thread_id,
    int num_threads);

// depthwise_3x3x3_pad_1 with float bias.
template FBGEMM_API void depthwise_3x3x3_pad_1(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    int32_t B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    float C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const float* bias,
    bool fuse_relu,
    float act_times_w_scale,
    int thread_id,
    int num_threads);

// Per-channel-quantized variant with int32 bias.
template FBGEMM_API void depthwise_3x3x3_per_channel_quantization_pad_1(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    const int32_t* B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    const float* C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const int32_t* bias,
    bool fuse_relu,
    const float* act_times_w_scale,
    int thread_id,
    int num_threads);

// Per-channel-quantized variant with float bias.
template FBGEMM_API void depthwise_3x3x3_per_channel_quantization_pad_1(
    int N,
    int T,
    int H,
    int W,
    int K,
    int stride_t,
    int stride_h,
    int stride_w,
    int32_t A_zero_point,
    const uint8_t* A,
    const int32_t* B_zero_point,
    const PackedDepthWiseConvMatrix& B,
    const float* C_multiplier,
    int32_t C_zero_point,
    uint8_t* C,
    const int32_t* col_offsets,
    const float* bias,
    bool fuse_relu,
    const float* act_times_w_scale,
    int thread_id,
    int num_threads);
} // namespace fbgemm
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/hlo_sharding.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/overflow_util.h"
#include "tensorflow/core/lib/core/errors.h"
namespace xla {
using absl::StrCat;
using absl::StrJoin;
// Creates a maximal sharding that places all data on a single device.
HloSharding HloSharding::AssignDevice(int64 device_id) {
  return HloSharding(device_id);
}

// Tiles a rank-1 shape across `num_tiles` devices, assigning devices
// 0..num_tiles-1 in order.
HloSharding HloSharding::Tile1D(const Shape& input_shape, int64 num_tiles) {
  CHECK_EQ(1, input_shape.rank());
  CHECK_GT(num_tiles, 1);
  std::vector<int64> dimensions(1, num_tiles);
  Array<int64> assignment(dimensions);
  std::iota(assignment.begin(), assignment.end(), 0);
  return HloSharding(assignment);
}

// Builds a partially-tiled sharding from a tile assignment over groups plus
// the device membership of each group; every device in a group replicates
// that group's tile.
HloSharding HloSharding::PartialTile(
    const Array<int64>& group_tile_assignment,
    absl::Span<const absl::Span<const int64>> replication_groups) {
  CHECK_EQ(group_tile_assignment.num_elements(), replication_groups.size());
  if (replication_groups.size() == 1) {
    // A single group means every device holds the same data.
    return Replicate();
  }
  // Append one extra (last) dimension that enumerates the devices inside
  // each replication group, then defer to the single-array overload.
  auto new_tile_dims = group_tile_assignment.dimensions();
  new_tile_dims.push_back(replication_groups[0].size());
  auto new_tile_assignment = Array<int64>(new_tile_dims);
  new_tile_assignment.Each([&](absl::Span<const int64> indices, int64* device) {
    std::vector<int64> group_index(indices.begin(), indices.end());
    group_index.pop_back();
    int64 group = group_tile_assignment(group_index);
    *device = replication_groups[group][indices.back()];
  });
  return PartialTile(new_tile_assignment);
}
// Normalizes a tile assignment whose last dimension enumerates replicas.
// Devices inside each replication group are sorted so two shardings that
// differ only in replica order compare equal.
HloSharding HloSharding::PartialTile(
    const Array<int64>& tile_assignment_last_dim_replicate) {
  if (tile_assignment_last_dim_replicate.num_dimensions() == 1) {
    // Only the replication dimension exists: fully replicated.
    return Replicate();
  }
  if (tile_assignment_last_dim_replicate.dimensions().back() == 1) {
    // Replication factor 1: drop the trailing dimension and emit a plain
    // tiled sharding.
    auto new_tile_dims = tile_assignment_last_dim_replicate.dimensions();
    new_tile_dims.pop_back();
    auto fully_tiled = tile_assignment_last_dim_replicate;
    fully_tiled.Reshape(new_tile_dims);
    return HloSharding(fully_tiled);
  }
  // One sorted device set per replication group; a group is identified by
  // all indices except the last dimension.
  std::vector<std::set<int64>> sorted_groups(
      tile_assignment_last_dim_replicate.num_elements() /
      tile_assignment_last_dim_replicate.dimensions().back());
  // Linearizes all but the last index into a group id (row-major order).
  auto get_group_id = [&](absl::Span<const int64> indices) {
    int64 group_id = 0;
    for (int64 i = 0; i < indices.size() - 1; ++i) {
      group_id *= tile_assignment_last_dim_replicate.dim(i);
      group_id += indices[i];
    }
    return group_id;
  };
  tile_assignment_last_dim_replicate.Each(
      [&](absl::Span<const int64> indices, const int64 device) {
        sorted_groups[get_group_id(indices)].insert(device);
      });
  // Rebuild the assignment, emitting each group's devices in ascending order.
  Array<int64> sorted_tile(tile_assignment_last_dim_replicate.dimensions());
  sorted_tile.Each([&](absl::Span<const int64> indices, int64* device) {
    auto begin = sorted_groups[get_group_id(indices)].begin();
    *device = *begin;
    sorted_groups[get_group_id(indices)].erase(begin);
  });
  return HloSharding(sorted_tile, /*replicate_on_last_tile_dim=*/true);
}
// Builds a tuple sharding from per-leaf shardings stored in a ShapeTree.
HloSharding HloSharding::Tuple(const ShapeTree<HloSharding>& sub_shardings) {
  std::vector<HloSharding> flattened_list;
  flattened_list.reserve(sub_shardings.leaf_count());
  for (const auto& index_to_sharding : sub_shardings.leaves()) {
    flattened_list.push_back(index_to_sharding.second);
  }
  if (flattened_list.empty()) {
    // Empty tuple sharding ends up having no leaves, but we want to allow
    // empty tuple HLO instruction results to have sharding, so we fetch the
    // root ({}) sharding value from the ShapeTree.
    // A ShapeTree created with ShapeTree<HloSharding>(shape, init) will have
    // init as value at its root.
    flattened_list.push_back(sub_shardings.element(ShapeIndex({})));
  }
  return HloSharding(flattened_list);
}

// Builds a tuple sharding from a flat list of per-leaf shardings; the list
// length must equal the leaf count of `tuple_shape`.
HloSharding HloSharding::Tuple(const Shape& tuple_shape,
                               absl::Span<const HloSharding> shardings) {
  CHECK(tuple_shape.IsTuple()) << ShapeUtil::HumanString(tuple_shape);
  for (auto& sharding : shardings) {
    // Nested tuple shardings are not allowed in the flat list.
    CHECK(!sharding.IsTuple()) << sharding.ToString();
  }
  std::vector<HloSharding> flattened_list(shardings.begin(), shardings.end());
  CHECK_EQ(flattened_list.size(), RequiredLeaves(tuple_shape))
      << "Flat list has " << flattened_list.size() << ", required "
      << RequiredLeaves(tuple_shape);
  return HloSharding(flattened_list);
}
// Replicates `sharding` across every leaf of `tuple_shape`.
HloSharding HloSharding::SingleTuple(const Shape& tuple_shape,
                                     const HloSharding& sharding) {
  CHECK(tuple_shape.IsTuple()) << ShapeUtil::HumanString(tuple_shape);
  CHECK(!sharding.IsTuple()) << sharding.ToString();
  const int64 leaf_count = RequiredLeaves(tuple_shape);
  // Build the flattened per-leaf list in one step with the fill constructor.
  std::vector<HloSharding> leaf_shardings(leaf_count, sharding);
  return HloSharding(leaf_shardings);
}

// Wraps `sharding` into a tuple sharding when `shape` is a tuple, otherwise
// returns it unchanged.
HloSharding HloSharding::Single(const Shape& shape,
                                const HloSharding& sharding) {
  if (shape.IsTuple()) {
    return SingleTuple(shape, sharding);
  }
  return sharding;
}
// Human-readable rendering, e.g. "{replicated}", "{maximal device=3}",
// "{devices=[2,2]0,1,2,3}" or a brace-wrapped list for tuple shardings.
string HloSharding::ToString() const {
  if (IsTuple()) {
    std::vector<string> parts;
    parts.reserve(tuple_elements_.size());
    for (const HloSharding& element : tuple_elements_) {
      parts.push_back(element.ToString());
    }
    return StrCat("{", absl::StrJoin(parts, ", "), "}");
  }
  if (replicated_) {
    return "{replicated}";
  }
  if (maximal_) {
    // A maximal sharding stores its single device as the first (only)
    // element of the tile assignment.
    return StrCat(
        "{maximal device=", static_cast<int64>(*tile_assignment_.begin()), "}");
  }
  return StrCat(
      "{devices=[", StrJoin(tile_assignment_.dimensions(), ","), "]",
      StrJoin(tile_assignment_, ","),
      replicate_on_last_tile_dim_ ? " last_tile_dim_replicate}" : "}");
}

// Returns true if `device` participates in this sharding. Replicated
// shardings use every device.
bool HloSharding::UsesDevice(int64 device) const {
  if (IsTuple()) {
    return absl::c_any_of(tuple_elements_, [&](const HloSharding& s) {
      return s.UsesDevice(device);
    });
  }
  const auto& devices = tile_assignment_;
  return replicated_ || absl::c_linear_search(devices, device);
}
// Returns a map from device id to the number of (tuple element) shardings
// that are uniquely assigned to that device. Elements without a unique
// device contribute nothing. If `count` is non-null it receives the number
// of elements considered (1 for a non-tuple sharding).
std::map<int64, int64> HloSharding::UsedDevices(int64* count) const {
  int64 element_count = 1;
  std::map<int64, int64> device_map;
  if (IsTuple()) {
    for (auto& tuple_element_sharding : tuple_elements()) {
      auto unique_device = tuple_element_sharding.UniqueDevice();
      if (unique_device) {
        device_map[*unique_device] += 1;
      }
    }
    element_count = tuple_elements().size();
  } else {
    auto unique_device = UniqueDevice();
    if (unique_device) {
      device_map[*unique_device] += 1;
    }
  }
  if (count != nullptr) {
    *count = element_count;
  }
  return device_map;
}
// Returns the multi-dimensional tile index owned by `device`. The trailing
// replication dimension (if any) is stripped from the result. CHECK-fails
// if `device` does not appear in the tile assignment.
std::vector<int64> HloSharding::TileIndexForDevice(int64 device) const {
  CHECK(!maximal_);
  CHECK(!IsTuple());
  std::vector<int64> ret_index;
  tile_assignment_.Each([&](absl::Span<const int64> index, int64 d) {
    if (d == device) {
      ret_index = {index.begin(), index.end()};
    }
  });
  CHECK(!ret_index.empty());
  if (replicate_on_last_tile_dim_) {
    ret_index.pop_back();
  }
  return ret_index;
}

// Inverse of TileIndexForDevice: the device holding the tile at `index`.
// With a trailing replication dimension, an index that omits it selects the
// first replica of the group.
int64 HloSharding::DeviceForTileIndex(absl::Span<const int64> index) const {
  CHECK(!replicated_);
  CHECK(!IsTuple());
  if (maximal_) {
    return *tile_assignment_.begin();
  }
  if (replicate_on_last_tile_dim_ &&
      index.size() < tile_assignment().num_dimensions()) {
    std::vector<int64> first_replicated_index(index.begin(), index.end());
    first_replicated_index.push_back(0);
    return tile_assignment_(first_replicated_index);
  }
  return tile_assignment_(index);
}
// Returns the start offsets (per dimension) of the data slice owned by
// `device`, clamped to the shape bounds.
std::vector<int64> HloSharding::TileOffsetForDevice(const Shape& shape,
                                                    int64 device) const {
  CHECK(!IsTuple());
  if (maximal_) {
    // The single device owns the whole buffer.
    return std::vector<int64>(shape.dimensions_size(), 0);
  }
  if (replicate_on_last_tile_dim_) {
    CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions() - 1);
  } else {
    CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions());
  }
  std::vector<int64> index = TileIndexForDevice(device);
  for (int64 i = 0; i < index.size(); ++i) {
    const int64 shape_dim = shape.dimensions(i);
    // Offset = tile index * tile size, clamped so uneven splits stay in
    // bounds.
    index[i] = std::min(
        index[i] * CeilOfRatio(shape_dim, tile_assignment_.dim(i)), shape_dim);
  }
  return index;
}

// Returns the (exclusive) end offsets of the data slice owned by `device`,
// clamped to the shape bounds.
std::vector<int64> HloSharding::TileLimitForDevice(const Shape& shape,
                                                   int64 device) const {
  CHECK(!IsTuple());
  if (maximal_) {
    // The single device owns the whole buffer.
    return std::vector<int64>(shape.dimensions().begin(),
                              shape.dimensions().end());
  }
  CHECK_EQ(shape.dimensions_size() + (ReplicateOnLastTileDim() ? 1 : 0),
           tile_assignment_.num_dimensions());
  std::vector<int64> index = TileIndexForDevice(device);
  for (int64 i = 0; i < index.size(); ++i) {
    const int64 shape_dim = shape.dimensions(i);
    index[i] = std::min(
        (index[i] + 1) * CeilOfRatio(shape_dim, tile_assignment_.dim(i)),
        shape_dim);
  }
  return index;
}
// Number of leaf shardings a tuple sharding for `shape` must carry.
int64 HloSharding::RequiredLeaves(const Shape& shape) {
  // Empty tuples (with arbitrary nesting) have no leaf nodes as far as
  // ShapeUtil and ShapeTree are concerned, but they do have a single
  // tuple_elements_ entry since we want to allow empty tuple results to
  // have sharding.
  const int64 leaf_count = ShapeUtil::GetLeafCount(shape);
  return (leaf_count == 0) ? 1 : leaf_count;
}

// Verifies this tuple sharding has exactly the leaf count `shape` requires.
Status HloSharding::CheckLeafCount(const Shape& shape) const {
  int64 shape_leaves = RequiredLeaves(shape);
  TF_RET_CHECK(shape_leaves == tuple_elements_.size())
      << "Shape " << ShapeUtil::HumanString(shape) << " has " << shape_leaves
      << " leaf nodes while this sharding has " << tuple_elements_.size();
  return Status::OK();
}
// Expands this sharding into a ShapeTree over `shape`: tuple shardings are
// distributed leaf-by-leaf, non-tuple shardings are broadcast to every node.
StatusOr<ShapeTree<HloSharding>> HloSharding::AsShapeTree(
    const Shape& shape) const {
  if (IsTuple()) {
    ShapeTree<HloSharding> result(shape, HloSharding::Replicate());
    TF_RETURN_IF_ERROR(CheckLeafCount(shape));
    // tuple_elements_ is stored in leaf iteration order, so a single linear
    // walk assigns each leaf its sharding.
    auto it = tuple_elements_.begin();
    for (auto& index_to_sharding : result.leaves()) {
      index_to_sharding.second = *it++;
    }
    if (ShapeUtil::IsEmptyTuple(shape)) {
      // Empty tuples have no leaves, but we want to assign them a sharding
      // anyway, so we use the root element sharding.
      *result.mutable_element(ShapeIndex({})) = *it;
    }
    return std::move(result);
  } else {
    return ShapeTree<HloSharding>(shape, *this);
  }
}

// Returns this sharding as a tuple sharding for `shape`, broadcasting a
// non-tuple sharding over all leaves.
StatusOr<HloSharding> HloSharding::GetTupleSharding(const Shape& shape) const {
  if (IsTuple()) {
    TF_RETURN_IF_ERROR(CheckLeafCount(shape));
    return *this;
  }
  return Tuple(ShapeTree<HloSharding>(shape, *this));
}
// Returns the single device this sharding assigns everything to, or nullopt
// if there is no such device (replicated, tiled, or mixed tuple elements).
absl::optional<int64> HloSharding::UniqueDevice() const {
  if (IsTuple()) {
    if (tuple_elements_.empty()) {
      return absl::nullopt;
    }
    absl::optional<int64> unique_device;
    for (auto& tuple_sharding : tuple_elements_) {
      auto device = tuple_sharding.UniqueDevice();
      // All elements must agree on one device.
      if (!device || (unique_device && *device != *unique_device)) {
        return absl::nullopt;
      }
      unique_device = device;
    }
    return unique_device;
  }
  if (!replicated_ && maximal_) {
    return static_cast<int64>(*tile_assignment_.begin());
  }
  return absl::nullopt;
}

// Like UniqueDevice(), but CHECK-fails when no unique device exists.
int64 HloSharding::GetUniqueDevice() const {
  auto device = UniqueDevice();
  CHECK(device) << "Sharding does not have a unique device: " << *this;
  return *device;
}
// Validates a tuple sharding against a tuple shape: leaf counts must match
// and every leaf sharding must be valid for its subshape.
Status HloSharding::ValidateTuple(const Shape& shape, int64 num_devices) const {
  if (!shape.IsTuple()) {
    return tensorflow::errors::InvalidArgument(
        StrCat("Sharding is tuple-shaped but validation shape is not."));
  }
  TF_RETURN_IF_ERROR(CheckLeafCount(shape));
  // Now we've validated the number of tuple elements, it's safe to request a
  // shape tree.
  ShapeTree<HloSharding> shape_tree = GetAsShapeTree(shape);
  for (const auto& index_to_sharding : shape_tree.leaves()) {
    Status status = index_to_sharding.second.ValidateNonTuple(
        ShapeUtil::GetSubshape(shape, index_to_sharding.first), num_devices);
    if (!status.ok()) {
      tensorflow::errors::AppendToMessage(
          &status, StrCat("Note: While validating sharding tuple element ",
                          index_to_sharding.first.ToString(), " which is ",
                          index_to_sharding.second.ToString()));
      return status;
    }
  }
  return Status::OK();
}

// Validates this sharding (tuple or not) against `shape`, annotating any
// failure with the sharding and shape being validated.
Status HloSharding::Validate(const Shape& shape, int64 num_devices) const {
  Status status = IsTuple() ? ValidateTuple(shape, num_devices)
                            : ValidateNonTuple(shape, num_devices);
  if (!status.ok()) {
    tensorflow::errors::AppendToMessage(
        &status, StrCat("Note: While validating sharding ", ToString(),
                        " against shape ", ShapeUtil::HumanString(shape)));
  }
  return status;
}
// Validates a non-tuple sharding: devices must be in range and unique, and
// the tile assignment rank must match the shape rank (plus one when the last
// dimension denotes replication).
Status HloSharding::ValidateNonTuple(const Shape& shape,
                                     int64 num_devices) const {
  if (shape.IsTuple()) {
    return tensorflow::errors::InvalidArgument(
        StrCat("Validation shape is a tuple but sharding is not."));
  }
  if (replicated_) {
    // Replicated shardings have no tile assignment to check.
    return Status::OK();
  }
  // All tile assignments must be less than the number of available cores and
  // unique. Note the lambda receives the device as int32, narrowing the
  // int64 stored in the assignment.
  Status status = Status::OK();
  absl::flat_hash_set<int64> seen_cores;
  tile_assignment_.Each(
      [&](absl::Span<const int64> indices, int32 core) {
        // Don't overwrite a bad status, so we report the first error.
        if (status.ok()) {
          if (core >= num_devices) {
            status = tensorflow::errors::InvalidArgument(StrCat(
                "core ", core, " > ", num_devices, " in tile assignment"));
          } else if (seen_cores.contains(core)) {
            status = tensorflow::errors::InvalidArgument(
                StrCat("core ", core, " is not unique in tile assignment"));
          }
          seen_cores.insert(core);
        }
      });
  if (!status.ok()) {
    return status;
  }
  if (IsTileMaximal()) {
    // Maximal shardings carry no meaningful tile dimensions to validate.
    return Status::OK();
  }
  // The tile assignment tensor must have the same rank as the input, or input
  // rank + 1 for replicate_on_last_tile_dim_.
  if (shape.rank() + (replicate_on_last_tile_dim_ ? 1 : 0) !=
      tile_assignment_.num_dimensions()) {
    return tensorflow::errors::InvalidArgument(
        "Number of tile assignment dimensions is different to the input rank. "
        "sharding=",
        ToString(), ", input_shape=", ShapeUtil::HumanString(shape));
  }
  // The correct constructor has to be used to create tile maximal shardings.
  if (tile_assignment_.num_elements() == 1) {
    return tensorflow::errors::InvalidArgument(
        "Tile assignment only contains a single device. If a replicated "
        "sharding was intended, use HloSharding::Replicated(). If a device "
        "placement was intended, use HloSharding::AssignDevice()");
  }
  return Status::OK();
}
// Deserializes an OpSharding proto, validating invariants (single device for
// MAXIMAL, dimension product equal to device count) before construction.
/*static*/ StatusOr<HloSharding> HloSharding::FromProto(
    const OpSharding& proto) {
  if (proto.type() == OpSharding::TUPLE) {
    std::vector<HloSharding> tuple_shardings;
    tuple_shardings.reserve(proto.tuple_shardings().size());
    for (const OpSharding& tuple_sharding_proto : proto.tuple_shardings()) {
      TF_ASSIGN_OR_RETURN(HloSharding sharding,
                          HloSharding::FromProto(tuple_sharding_proto));
      tuple_shardings.push_back(sharding);
    }
    return HloSharding(tuple_shardings);
  } else if (proto.type() == OpSharding::REPLICATED) {
    return Replicate();
  } else if (proto.tile_assignment_devices().size() == 1) {
    // A single device implies a maximal (device-assigned) sharding.
    return HloSharding(proto.tile_assignment_devices(0));
  }
  TF_RET_CHECK(proto.type() != OpSharding::MAXIMAL)
      << "Maximal sharding is expected to have single device assignment, but "
      << proto.tile_assignment_devices().size() << " has provided.";
  TF_RET_CHECK(proto.tile_assignment_devices().size() > 1);
  TF_RET_CHECK(!proto.tile_assignment_dimensions().empty());
  // RE: the product of tile assignment tensor dimensions must be
  // equal to tile_assignment_devices.size().
  int64 product_of_dimensions = 1;
  for (auto dimension : proto.tile_assignment_dimensions()) {
    TF_RET_CHECK(dimension > 0);
    // Overflow-checked multiply: MultiplyWithoutOverflow returns a negative
    // value on overflow, caught by the following check.
    product_of_dimensions =
        MultiplyWithoutOverflow(product_of_dimensions, dimension);
    TF_RET_CHECK(product_of_dimensions > 0);
  }
  TF_RET_CHECK(product_of_dimensions == proto.tile_assignment_devices().size());
  // Some versions of gcc cannot infer the TileAssignment constructor from a
  // braced initializer-list, so create one manually.
  std::vector<int64> devices(proto.tile_assignment_devices().begin(),
                             proto.tile_assignment_devices().end());
  Array<int64> tile_assignment(
      std::vector<int64>(proto.tile_assignment_dimensions().begin(),
                         proto.tile_assignment_dimensions().end()));
  std::copy(proto.tile_assignment_devices().begin(),
            proto.tile_assignment_devices().end(), tile_assignment.begin());
  return proto.replicate_on_last_tile_dim() ? PartialTile(tile_assignment)
                                            : HloSharding(tile_assignment);
}
// Serializes this sharding into an OpSharding proto (inverse of FromProto).
OpSharding HloSharding::ToProto() const {
  OpSharding result;
  if (IsTuple()) {
    for (const HloSharding& element : tuple_elements_) {
      *result.add_tuple_shardings() = element.ToProto();
    }
    result.set_type(OpSharding::TUPLE);
    return result;
  }
  // Non-tuple: emit the tile dimensions and the flattened device list.
  for (int64 dim : tile_assignment_.dimensions()) {
    result.add_tile_assignment_dimensions(dim);
  }
  for (auto device : tile_assignment_) {
    result.add_tile_assignment_devices(device);
  }
  if (IsReplicated()) {
    result.set_type(OpSharding::REPLICATED);
  } else if (IsTileMaximal()) {
    result.set_type(OpSharding::MAXIMAL);
  } else {
    result.set_type(OpSharding::OTHER);
    result.set_replicate_on_last_tile_dim(ReplicateOnLastTileDim());
  }
  return result;
}
// Returns the shape of one tile: each dimension of `shape` divided by the
// tiling along that dimension, rounded up. Tile-maximal shardings keep the
// full shape.
Shape HloSharding::TileShape(const Shape& shape) const {
  if (IsTileMaximal()) {
    return shape;
  }
  Shape tile_shape = shape;
  const int64 rank = shape.dimensions_size();
  for (int64 dim = 0; dim < rank; ++dim) {
    const int64 num_tiles = tile_assignment_.dim(dim);
    tile_shape.set_dimensions(
        dim, CeilOfRatio<int64>(shape.dimensions(dim), num_tiles));
  }
  return tile_shape;
}
// Returns the exact (possibly clamped) shape of the slice owned by `device`;
// edge tiles of unevenly divided dimensions may be smaller than TileShape().
Shape HloSharding::TileShape(const Shape& shape, int64 device) const {
  if (IsTileMaximal()) {
    return shape;
  }
  std::vector<int64> index = TileIndexForDevice(device);
  Shape result_shape = shape;
  for (int64 i = 0; i < index.size(); ++i) {
    const int64 shape_dim = shape.dimensions(i);
    // Clamp both ends to the dimension bound, then size = limit - offset.
    int64 offset = std::min(
        index[i] * CeilOfRatio(shape_dim, tile_assignment_.dim(i)), shape_dim);
    int64 limit = std::min(
        (index[i] + 1) * CeilOfRatio(shape_dim, tile_assignment_.dim(i)),
        shape_dim);
    result_shape.set_dimensions(i, limit - offset);
  }
  return result_shape;
}

// Number of distinct tiles, excluding replicas along the trailing
// replication dimension.
int64 HloSharding::NumTiles() const {
  if (IsTileMaximal()) {
    return 1;
  }
  if (ReplicateOnLastTileDim()) {
    return tile_assignment().num_elements() /
           tile_assignment().dimensions().back();
  }
  return tile_assignment().num_elements();
}
// Extracts the sharding of the sub-shape at `index` from a tuple sharding.
// Walks the shape to find the flat offset of the sub-shape's leaves inside
// tuple_elements_.
HloSharding HloSharding::GetSubSharding(const Shape& shape,
                                        const ShapeIndex& index) const {
  CHECK(IsTuple());
  int64 sharding_index = 0;
  const Shape* sub_shape = &shape;
  for (int64 idx : index) {
    // Skip the leaves of all siblings preceding this tuple position.
    for (int64 i = 0; i < idx; ++i) {
      sharding_index +=
          ShapeUtil::GetLeafCount(ShapeUtil::GetSubshape(*sub_shape, {i}));
    }
    sub_shape = &ShapeUtil::GetSubshape(*sub_shape, {idx});
  }
  if (sub_shape->IsTuple()) {
    auto begin_it = tuple_elements_.begin() + sharding_index;
    std::vector<HloSharding> sub_shardings(
        begin_it, begin_it + ShapeUtil::GetLeafCount(*sub_shape));
    return HloSharding::Tuple(*sub_shape, sub_shardings);
  } else {
    return tuple_elements_[sharding_index];
  }
}

// If every element of a tuple sharding is identical, returns that single
// sharding; non-tuple shardings return themselves.
absl::optional<HloSharding> HloSharding::ExtractSingleSharding() const {
  if (!IsTuple()) {
    return *this;
  }
  if (tuple_elements_.empty()) {
    return absl::nullopt;
  }
  for (int64 i = 1; i < tuple_elements_.size(); ++i) {
    if (tuple_elements_[0] != tuple_elements_[i]) {
      return absl::nullopt;
    }
  }
  return tuple_elements_.front();
}
// Hash consistent with operator==: combines element hashes for tuples,
// returns 0 for replicated, otherwise folds in the device list and the
// replicate-on-last-dim flag.
size_t HloSharding::Hash() const {
  if (tuple_) {
    size_t h = 0;
    for (const auto& element : tuple_elements_) {
      h = tensorflow::Hash64Combine(h, element.Hash());
    }
    return h;
  }
  if (replicated_) {
    return 0;
  }
  size_t h = 0;
  for (uint32 v : tile_assignment_) {
    h = tensorflow::Hash64Combine(h, std::hash<uint32>{}(v));
  }
  if (replicate_on_last_tile_dim_) {
    h = tensorflow::Hash64Combine(h, std::hash<uint32>{}(1));
  }
  return h;
}

// Streams the human-readable form (same text as ToString()).
std::ostream& operator<<(std::ostream& out, const HloSharding& sharding) {
  out << sharding.ToString();
  return out;
}
} // namespace xla
|
/*
* Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuContactMethodImpl.h"
#include "PxcNpCache.h"
#include "PxcContactMethodImpl.h"
namespace physx
{

// Narrow-phase contact generation entry for a (convex, mesh) shape pair.
// Thin forwarder to the shared Gu convex-vs-mesh implementation; the
// parameter names come from the CONTACT_METHOD_ARGS macro.
bool PxcContactConvexMesh2(CONTACT_METHOD_ARGS)
{
  return contactConvexMesh(shape0, shape1, transform0, transform1, contactDistance, npCache, contactBuffer);
}

}
|
#include <touchgfx/hal/Types.hpp>
FONT_GLYPH_LOCATION_FLASH_PRAGMA
// Generated glyph bitmap table for the SairaCondensed Medium 12px font,
// 4 bits per pixel (16 anti-aliasing levels). Each "Unicode:" section below
// is the packed pixel data for one glyph. Generated data — do not edit by
// hand; regenerate with the TouchGFX font converter.
KEEP extern const uint8_t unicodes_SairaCondensed_Medium_12_4bpp_0[] FONT_GLYPH_LOCATION_FLASH_ATTRIBUTE =
{
    // Unicode: [0x0030, zero]
    0xB0, 0xEE, 0x08, 0xD4, 0x20, 0x1F, 0xB6, 0x00, 0x2F, 0xB6, 0x00, 0x3E, 0xB6, 0x00, 0x3E, 0xB6,
    0x00, 0x2F, 0xD4, 0x20, 0x1F, 0xB0, 0xEE, 0x08,
    // Unicode: [0x0031, one]
    0xD4, 0x09, 0xB7, 0x09, 0x80, 0x09, 0x80, 0x09, 0x80, 0x09, 0x80, 0x09, 0x80, 0x09, 0x80, 0x09,
    // Unicode: [0x0032, two]
    0xE5, 0xDE, 0x04, 0x00, 0x80, 0x0A, 0x00, 0x60, 0x0A, 0x00, 0xD4, 0x06, 0xA1, 0x5D, 0x00, 0xC6,
    0x00, 0x00, 0x97, 0x00, 0x00, 0xF7, 0xEE, 0x0B,
    // Unicode: [0x0033, three]
    0xE8, 0xCE, 0x02, 0x00, 0xC0, 0x07, 0x00, 0xC1, 0x05, 0xA0, 0xCF, 0x00, 0x00, 0xB0, 0x06, 0x00,
    0x90, 0x08, 0x00, 0xC0, 0x07, 0xE9, 0xBE, 0x01,
    // Unicode: [0x0034, four]
    0x00, 0x6C, 0x00, 0x30, 0x0E, 0x00, 0x90, 0x38, 0x03, 0xE1, 0x82, 0x07, 0xB6, 0x90, 0x07, 0xE9,
    0xFE, 0x4F, 0x00, 0xA0, 0x07, 0x00, 0xA0, 0x07,
    // Unicode: [0x0035, five]
    0xF5, 0xEE, 0x06, 0xB5, 0x00, 0x00, 0xB5, 0x00, 0x00, 0xF5, 0xDE, 0x04, 0x11, 0x80, 0x0B, 0x00,
    0x50, 0x0C, 0x00, 0x80, 0x0B, 0xE5, 0xDE, 0x04,
    // Unicode: [0x0036, six]
    0x80, 0xEE, 0x0C, 0xE3, 0x02, 0x01, 0xB5, 0x00, 0x00, 0xD6, 0xED, 0x0A, 0xD6, 0x10, 0x2F, 0xB6,
    0x00, 0x3E, 0xD4, 0x11, 0x2F, 0xA0, 0xEE, 0x09,
    // Unicode: [0x0037, seven]
    0xEA, 0xFE, 0x09, 0x00, 0xC0, 0x05, 0x00, 0xF2, 0x01, 0x00, 0xA7, 0x00, 0x00, 0x5D, 0x00, 0x30,
    0x1E, 0x00, 0x80, 0x0A, 0x00, 0xD0, 0x05, 0x00,
    // Unicode: [0x0038, eight]
    0xB1, 0xEE, 0x0A, 0xC6, 0x10, 0x3E, 0xD4, 0x10, 0x2E, 0xB0, 0xFE, 0x0A, 0xD4, 0x10, 0x2E, 0xA7,
    0x00, 0x4C, 0xD5, 0x10, 0x3E, 0xB1, 0xEE, 0x09,
    // Unicode: [0x0039, nine]
    0xC1, 0xEE, 0x06, 0xB6, 0x30, 0x0E, 0x97, 0x10, 0x1F, 0xB6, 0x30, 0x2F, 0xD2, 0xBE, 0x2F, 0x00,
    0x10, 0x1F, 0x00, 0x50, 0x0E, 0xE2, 0xDE, 0x05,
    // Unicode: [0x003A, colon]
    0xD5, 0x21, 0x00, 0x00, 0x21, 0xD5,
    // Unicode: [0x003F, question]
    0xE9, 0x5E, 0x00, 0xC6, 0x00, 0xC5, 0x40, 0x7D, 0xC0, 0x05, 0x90, 0x02, 0x30, 0x01, 0xE0, 0x04
};
|
////////////////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "resource.h"
#include "DXPlayerMainFrame.h"
#include "DXPlayerView.h"
#include "DXPlayerDocument.h"
#include "DXPlayerControlsBar.h"
#include "DXPlayerAboutDlg.h"
#include "DXPlayerOptions.h"
#include "DXPlayerOptionsDlg.h"
#include "DXPainterHeaders.h"
#include "DXPlayerProjectInformationDlg.h"
#include "DXPlayerStatisticViewerDlg.h"
#include "DXPlayerTraceViewerDlg.h"
#include "DXPlayerTextureViewerDlg.h"
#include "DXPlayer.h"
using namespace std;
using namespace Gdiplus;
using namespace dxpainter;
////////////////////////////////////////////////////////////////////////////////
// MFC message map: routes menu/toolbar command ids to DXPlayer handlers.
BEGIN_MESSAGE_MAP(DXPlayer, CWinApp)
  ON_COMMAND(ID_APP_EXIT, OnAppExit)
  ON_COMMAND(ID_APP_ABOUT, OnHelpAbout)
  ON_COMMAND(ID_FILE_OPEN, OnFileOpen)
  ON_COMMAND(ID_FILE_PROJECTINFORMATION, OnFileProjectInformation)
  ON_COMMAND(ID_FILE_OPTIONS, OnFileOptions)
  ON_COMMAND(ID_DATA_STATISTICVIEWER, OnDataStatisticViewer)
  ON_COMMAND(ID_DATA_TRACEVIEWER, OnDataTraceViewer)
  ON_COMMAND(ID_DATA_TEXTUREVIEWER, OnDataTextureViewer)
  ON_COMMAND(ID_PLAYPAUSE, OnPlayPause)
  ON_COMMAND(ID_STOP, OnStop)
  ON_COMMAND(ID_PLAYSTEP, OnStep)
  ON_COMMAND(ID_SCREENSHOT, OnTakeScreenshot)
END_MESSAGE_MAP()
////////////////////////////////////////////////////////////////////////////////
// The one and only application object.
DXPlayer theApp;

// Name of the XML configuration file loaded in InitInstance().
const CString DXPlayer::OPTIONS_FILENAME = "dxplayer.config";
////////////////////////////////////////////////////////////////////////////////
// Constructor: zero out cached MFC object pointers and the painter state;
// the real initialization happens in InitInstance().
DXPlayer::DXPlayer() :
m_frame(NULL),
m_view(NULL),
m_document(NULL),
m_painter(NULL),
m_painterActive(false)
{
}
////////////////////////////////////////////////////////////////////////////////
// One-time application startup: initializes GDI+ and common controls, builds
// the single-document template, processes the command line, creates the
// DXPainter instance and loads the saved options. Returns FALSE to abort
// startup.
BOOL DXPlayer::InitInstance()
{
  InitGdiPlus();
  InitCommonControls();
  CWinApp::InitInstance();

  // Single-document UI: one document, one main frame, one view.
  CSingleDocTemplate* pDocTemplate = new CSingleDocTemplate(IDR_MAINFRAME, RUNTIME_CLASS(DXPlayerDocument), RUNTIME_CLASS(DXPlayerMainFrame), RUNTIME_CLASS(DXPlayerView));
  if (pDocTemplate)
  {
    AddDocTemplate(pDocTemplate);
  }
  else
  {
    return FALSE;
  }

  CCommandLineInfo cmdInfo;
  ParseCommandLine(cmdInfo);
  if (!ProcessShellCommand(cmdInfo))
  {
    return FALSE;
  }

  // Cache the frame/view/document that ProcessShellCommand created.
  m_frame = (DXPlayerMainFrame*) m_pMainWnd;
  m_view = (DXPlayerView*) m_pMainWnd->GetWindow(GW_CHILD);
  m_document = m_view->GetDocument();

  if (!(m_painter = DXPainterCreate()))
  {
    // Fix: corrected misspelled user-visible error text ("could'nt").
    AfxMessageBox("ERROR: couldn't create a DXPainter instance");
    return FALSE;
  }

  // Best effort: a missing or invalid config file leaves default options.
  m_options.LoadXML(OPTIONS_FILENAME.GetString());

  SetTitle(_T("(nothing opened)"));
  UpdatePlayStatus();

  m_pMainWnd->ShowWindow(SW_SHOW);
  m_pMainWnd->UpdateWindow();
  return TRUE;
}
////////////////////////////////////////////////////////////////////////////////
// Application shutdown: release the painter, close documents, tear down GDI+.
int DXPlayer::ExitInstance()
{
  if (m_painter)
  {
    delete m_painter;
    m_painter = NULL;
    m_painterActive = false;
  }
  CloseAllDocuments(FALSE);
  CloseGdiPlus();
  return CWinApp::ExitInstance();
}

////////////////////////////////////////////////////////////////////////////////

// Native window handle of the rendering viewport (the view's HWND).
HWND DXPlayer::GetViewportHWND()
{
  return m_view->GetHWND();
}

////////////////////////////////////////////////////////////////////////////////

// Sets the document title, which MFC reflects into the main window caption.
void DXPlayer::SetTitle(const string& title)
{
  m_document->SetTitle(title.c_str());
}

////////////////////////////////////////////////////////////////////////////////

// Shows/hides the application logo painted by the view when no trace plays.
void DXPlayer::ShowBackgroundLogo(bool show)
{
  m_view->EnableOwnerPaint(show);
}

////////////////////////////////////////////////////////////////////////////////

// Resizes the frame to fit a viewport of width x height and hides the logo.
void DXPlayer::SetViewportDimensions(UINT width, UINT height)
{
  m_frame->SetDimensions(width, height);
  ShowBackgroundLogo(false);
}

////////////////////////////////////////////////////////////////////////////////

// Refreshes the current/total frame counter shown in the frame window.
void DXPlayer::UpdatePlayStatus()
{
  m_frame->SetFrameNumber(m_painter->GetCurrentFrameCount(), m_painter->GetTotalFrameCount());
}
////////////////////////////////////////////////////////////////////////////////
CString DXPlayer::OpenFileName(const CString& initialDirectory)
{
  // Shows the common "Open File" dialog filtered to DXInterceptor run files
  // and returns the chosen path, or an empty string when the user cancels.
  //
  // Fix: use TEXT("") / CString() instead of narrow "" literals so the code
  // also compiles in UNICODE (wide TCHAR) builds.
  //
  // The filter string uses embedded '\0' separators, as required by
  // OPENFILENAME::lpstrFilter.
  TCHAR tsFilters[]= TEXT("DXInterceptor Run Files (*.DXIntRun)\0*.DXIntRun\0\0");
  TCHAR tsFile[MAX_PATH] = TEXT("");
  OPENFILENAME ofn;
  ZeroMemory(&ofn, sizeof(ofn));
  ofn.lStructSize = sizeof(ofn);
  ofn.hwndOwner = m_frame->m_hWnd;
  ofn.lpstrFilter = tsFilters;
  ofn.lpstrInitialDir = initialDirectory;
  ofn.Flags = OFN_EXPLORER | OFN_HIDEREADONLY | OFN_FILEMUSTEXIST | OFN_PATHMUSTEXIST | OFN_NOCHANGEDIR;
  ofn.nFilterIndex = 1;
  ofn.lpstrFile = tsFile;
  ofn.nMaxFile = sizeof(tsFile) / sizeof(TCHAR);
  if (GetOpenFileName(&ofn))
  {
    return CString(tsFile);
  }
  return CString();
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::InitGdiPlus()
{
// Starts GDI+; the token is kept for the matching GdiplusShutdown call in
// CloseGdiPlus().
GdiplusStartupInput gdiPlusStartupInput;
GdiplusStartup(&m_gdiPlusToken, &gdiPlusStartupInput, NULL);
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::CloseGdiPlus()
{
// Shuts down the GDI+ session started by InitGdiPlus().
GdiplusShutdown(m_gdiPlusToken);
}
////////////////////////////////////////////////////////////////////////////////
BOOL DXPlayer::OnIdle(LONG lCount)
{
  // Drives playback from MFC's idle loop: while the painter is active we
  // paint one frame per idle iteration and request more idle time by
  // returning TRUE; returning FALSE lets the message loop sleep.
  //
  // Cleanup: removed the unreachable 'break' statements that followed each
  // 'return' in the switch (dead code); logic is unchanged.
  CWinApp::OnIdle(lCount);

  if (!m_painterActive)
  {
    return FALSE;
  }

  switch (m_painter->Paint())
  {
  case DXPainter::PR_ERROR_DIRECT3D:
  case DXPainter::PR_ERROR_PAINTER:
  case DXPainter::PR_ERROR_TRACEMANAGER:
    // Fatal playback error: show the logo again and stop painting.
    ShowBackgroundLogo(true);
    m_painterActive = false;
    return FALSE;

  case DXPainter::PR_OK_NOMOREFRAMES:
    // Playback reached the end of the trace.
    UpdatePlayStatus();
    ShowBackgroundLogo(true);
    m_painterActive = false;
    return FALSE;

  case DXPainter::PR_OK_PAUSE:
  case DXPainter::PR_OK_STOP:
  case DXPainter::PR_OK_PLAYSTEPPERFRAME:
    // Playback suspended: stop consuming idle time until reactivated.
    UpdatePlayStatus();
    m_painterActive = false;
    return FALSE;

  case DXPainter::PR_OK_PLAY:
    // Still playing: ask MFC for another idle iteration.
    UpdatePlayStatus();
    return TRUE;
  }

  return FALSE;
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnAppExit()
{
// Menu handler: quit by posting WM_QUIT to this thread's message loop.
::PostQuitMessage(0);
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnHelpAbout()
{
// Menu handler: shows the modal About dialog.
DXPlayerAboutDlg aboutDlg;
aboutDlg.DoModal();
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnDataStatisticViewer()
{
// Opens the statistics viewer for the loaded project; silently does
// nothing when no project is loaded.
if (m_painter && !m_painter->GetProjectFilePath().empty())
{
DXPlayerStatisticViewerDlg statisticDlg(m_painter->GetProjectFilePath());
statisticDlg.DoModal();
}
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnDataTraceViewer()
{
// Opens the trace viewer for the loaded project; silently does nothing
// when no project is loaded.
if (m_painter && !m_painter->GetProjectFilePath().empty())
{
DXPlayerTraceViewerDlg traceDlg(m_painter->GetProjectFilePath().c_str());
traceDlg.DoModal();
}
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnDataTextureViewer()
{
// Opens the texture viewer for the loaded project; silently does nothing
// when no project is loaded.
if (m_painter && !m_painter->GetProjectFilePath().empty())
{
DXPlayerTextureViewerDlg textureDlg(m_painter->GetProjectFilePath());
textureDlg.DoModal();
}
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnFileProjectInformation()
{
// Shows the project-information dialog; if the user accepts (IDOK) the
// title, frame counter and background logo are refreshed, since the
// dialog may have modified the project.
if (m_painter && !m_painter->GetProjectFilePath().empty())
{
DXPlayerProjectInformationDlg projectInfoDlg(*m_painter);
if (projectInfoDlg.DoModal() == IDOK)
{
SetTitle(m_painter->GetProjectGameName());
UpdatePlayStatus();
ShowBackgroundLogo(true);
}
}
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnFileOptions()
{
// Shows the options dialog, then reloads the options file so any edits
// take effect immediately.
DXPlayerOptionsDlg optionsDlg(OPTIONS_FILENAME, m_frame);
optionsDlg.DoModal();
m_options.LoadXML(OPTIONS_FILENAME.GetString());
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnFileOpen()
{
  // Asks the user for a .DXIntRun trace file and (re)initializes the
  // painter with it. On success playback becomes active; on failure it is
  // disabled. Either way the status bar and logo are refreshed.
  //
  // Fix: the call below had been mangled to 'Init(¶ms)' by an encoding
  // accident ('&para' swallowed as an HTML entity); restored 'Init(&params)'.
  string projectName = OpenFileName();
  if (projectName.empty())
  {
    return;
  }

  DXPainter::InitParameters params;
  params.StartWindowed = true;
  params.ViewportWindowed = GetViewportHWND();
  params.ViewportFullScreen = NULL;
  params.EnableVSync = false;
  params.TraceFilename = projectName;

  if (m_painter->Init(&params))
  {
    SetTitle(m_painter->GetProjectGameName());
    m_painterActive = true;
  }
  else
  {
    m_painterActive = false;
  }

  UpdatePlayStatus();
  ShowBackgroundLogo(true);
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnPlayPause()
{
// Play/Pause command: reactivate idle-loop painting and toggle the painter.
m_painterActive = true;
m_painter->Play();
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnStop()
{
// Stop command: halt playback and refresh the frame counter.
m_painter->Stop();
UpdatePlayStatus();
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnStep()
{
// Step command: advance playback a single frame via the idle loop.
m_painterActive = true;
m_painter->PlayStepPerFrame();
}
////////////////////////////////////////////////////////////////////////////////
void DXPlayer::OnTakeScreenshot()
{
// Saves a screenshot to the configured destination path, falling back to
// the current working directory when the option is unset or points to a
// non-existing location.
string basePath = m_options.GetDestinationPath();
if (basePath.empty() || !::PathFileExists(basePath.c_str()))
{
TCHAR currentDir[MAX_PATH];
::GetCurrentDirectory(MAX_PATH, currentDir);
::PathAddBackslash(currentDir);
basePath = currentDir;
}
m_painter->TakeScreenshot(basePath, (DXPainter::ScreenshotFormat) m_options.GetScreenshotFormat());
}
////////////////////////////////////////////////////////////////////////////////
|
/***************************************************************************
*
* Copyright (c) 2013 Baidu, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**************************************************************************/
// Author: Wen Xiang <wenxiang@baidu.com>
#include <netinet/in.h>
#include <string>
#include <vector>
#include "boost/lexical_cast.hpp"
#include "boost/scoped_ptr.hpp"
#include "flume/runtime/common/memory_status_table.h"
#include "flume/runtime/util/iterator.h"
namespace baidu {
namespace flume {
namespace runtime {
class MemoryStatusTable::ScopeVisitorImpl : public StatusTable::ScopeVisitor {
public:
// Visitor over the entries of one scope. It holds a reference to an entry
// list owned by MemoryStatusTable, so it must not outlive the table.
explicit ScopeVisitorImpl(const EntryList& entries) : m_entries(entries) {}
virtual ~ScopeVisitorImpl() {}
virtual StatusTable::Iterator* ListEntries() {
// Each call produces a fresh iterator; all of them stay owned by this
// visitor (m_deleter frees them when the visitor is destroyed).
m_deleter.push_back(new internal::StlIterator<EntryList>(m_entries));
return &m_deleter.back();
}
virtual void Release() {
// Callers dispose of the visitor via Release(), never via delete.
delete this;
}
private:
const EntryList& m_entries;
boost::ptr_vector<StatusTable::Iterator> m_deleter;
};
class MemoryStatusTable::NodeVisitorImpl : public StatusTable::NodeVisitor {
public:
// Visitor bound to one node's value slot. The Value lives inside the
// table's map, so the visitor must not outlive the MemoryStatusTable.
explicit NodeVisitorImpl(Value* ptr) : m_ptr(ptr) {}
virtual bool IsValid() {
return m_ptr->status != Value::INVALID;
}
// return true if value is loaded successfully
virtual bool Read(std::string* value) {
// Any status other than NORMAL (i.e. never written, or invalidated)
// yields no value.
if (m_ptr->status != Value::NORMAL) {
return false;
}
*value = m_ptr->value;
return true;
}
virtual void Update(const std::string& value) {
// Writing to an invalidated slot is a programming error.
CHECK_NE(m_ptr->status, Value::INVALID);
m_ptr->status = Value::NORMAL;
m_ptr->value = value;
}
virtual void Invalidate() {
m_ptr->status = Value::INVALID;
}
virtual void Release() {
// Callers dispose of the visitor via Release(), never via delete.
delete this;
}
private:
Value* m_ptr;
};
StatusTable::ScopeVisitor*
MemoryStatusTable::GetScopeVisitor(const std::string& id,
const std::vector<toft::StringPiece>& keys) {
// Creates (if absent) the scope record for 'id' and returns a visitor over
// the entries registered under 'keys'. Caller releases it via Release().
ScopeInfo& info = m_scope_meta[id];
return new ScopeVisitorImpl(info.entries[ToPath(keys)]);
}
StatusTable::NodeVisitor*
MemoryStatusTable::GetNodeVisitor(const std::string& id,
                                  const std::vector<toft::StringPiece>& keys) {
    // Registers the node under every ancestor scope, then returns a visitor
    // bound to the node's value slot. Caller releases it via Release().
    //
    // Cleanup: dropped the deprecated std::auto_ptr (removed in C++17) by
    // allocating the visitor only after all fallible work is done, and fixed
    // the redundant 'StatusTable::StatusTable::' qualification.
    NodeInfo& node = m_node_meta[id];
    Path path = ToPath(keys);
    // Grab the value slot before 'path' is consumed below; std::map nodes
    // have stable addresses, so the pointer remains valid.
    Value* value = &node.values[path];
    // Walk up the scope chain, recording this node as an entry of each
    // ancestor scope under its progressively shorter path.
    ScopeInfo* scope = node.scope;
    while (scope != NULL && !path.empty()) {
        std::string entry = path.back();
        path.pop_back();
        scope->entries[path].insert(entry);
        scope = scope->father;
    }
    return new NodeVisitorImpl(value);
}
inline MemoryStatusTable::Path
MemoryStatusTable::ToPath(const std::vector<toft::StringPiece>& keys) {
    // Materialize the borrowed StringPiece keys into an owning Path, one
    // component per key, preserving order.
    Path result;
    typedef std::vector<toft::StringPiece>::const_iterator KeyIter;
    for (KeyIter it = keys.begin(); it != keys.end(); ++it) {
        result.push_back(it->as_string());
    }
    return result;
}
} // namespace runtime
} // namespace flume
} // namespace baidu
|
/*************************************************************************/
/* Copyright (c) 2021 David Snopek */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "sg_broadphase_2d_internal.h"
#include "sg_bodies_2d_internal.h"
void SGBroadphase2DInternal::_add_element_to_cells(SGBroadphase2DInternal::Element *p_element) {
// Inserts the element into every hash-grid cell covered by its
// precomputed [from, to] key range, creating cells on demand.
HashKey from = p_element->from;
HashKey to = p_element->to;
for (int32_t x = from.x; x <= to.x; x++) {
for (int32_t y = from.y; y <= to.y; y++) {
HashKey key(x, y);
Map<HashKey, Cell *>::Element *cell_element = cells.find(key);
Cell *cell;
if (cell_element) {
cell = cell_element->get();
}
else {
// First element in this cell: allocate it lazily.
cell = memnew(Cell);
cell_element = cells.insert(key, cell);
}
cell->elements.push_back(p_element);
}
}
}
void SGBroadphase2DInternal::_remove_element_from_cells(SGBroadphase2DInternal::Element *p_element) {
// Removes the element from every cell in its [from, to] key range;
// cells that become empty are destroyed to keep the map sparse.
HashKey from = p_element->from;
HashKey to = p_element->to;
for (int32_t x = from.x; x <= to.x; x++) {
for (int32_t y = from.y; y <= to.y; y++) {
HashKey key(x, y);
Map<HashKey, Cell *>::Element *cell_element = cells.find(key);
if (!cell_element) {
continue;
}
Cell *cell = cell_element->get();
cell->elements.erase(p_element);
if (cell->elements.size() == 0) {
cells.erase(key);
memdelete(cell);
}
}
}
}
void SGBroadphase2DInternal::_clear_cells() {
// Frees every cell and empties the map; elements themselves stay alive
// (their from/to keys become stale until re-added).
for (Map<HashKey, Cell *>::Element *E = cells.front(); E; E = E->next()) {
memdelete(E->get());
}
cells.clear();
}
SGBroadphase2DInternal::Element *SGBroadphase2DInternal::create_element(SGCollisionObject2DInternal *p_object) {
// Wraps the object in a broadphase element, computes the range of grid
// cells its bounds cover, and registers it in those cells. The broadphase
// owns the returned element (freed via delete_element / the destructor).
SGBroadphase2DInternal::Element *element = memnew(SGBroadphase2DInternal::Element);
elements.push_back(element);
element->object = p_object;
element->bounds = p_object->get_bounds();
SGFixedVector2Internal min = element->bounds.get_min();
SGFixedVector2Internal max = element->bounds.get_max();
// NOTE(review): integer '/' truncates toward zero, so coordinates on both
// sides of the origin map into the same cell index along each axis —
// presumably an accepted broadphase imprecision; confirm before changing.
element->from = HashKey(
min.x.to_int() / cell_size,
min.y.to_int() / cell_size);
element->to = HashKey(
max.x.to_int() / cell_size,
max.y.to_int() / cell_size);
_add_element_to_cells(element);
return element;
}
void SGBroadphase2DInternal::update_element(SGBroadphase2DInternal::Element *p_element) {
// Refreshes the element's bounds and, only when its covered cell range
// actually changed, moves it from the old cells to the new ones.
p_element->bounds = p_element->object->get_bounds();
SGFixedVector2Internal min = p_element->bounds.get_min();
SGFixedVector2Internal max = p_element->bounds.get_max();
HashKey from(
min.x.to_int() / cell_size,
min.y.to_int() / cell_size);
HashKey to(
max.x.to_int() / cell_size,
max.y.to_int() / cell_size);
// Common case: bounds moved within the same cells — nothing to rehash.
if (p_element->from == from && p_element->to == to) {
return;
}
_remove_element_from_cells(p_element);
p_element->from = from;
p_element->to = to;
_add_element_to_cells(p_element);
}
void SGBroadphase2DInternal::delete_element(SGBroadphase2DInternal::Element *p_element) {
// Unregisters the element from its cells and from the master list, then
// frees it.
_remove_element_from_cells(p_element);
elements.erase(p_element);
memdelete(p_element);
}
void SGBroadphase2DInternal::find_nearby(const SGFixedRect2Internal &p_bounds, SGResultHandlerInternal *p_result_handler, int p_type) const {
// Reports to p_result_handler every element whose object type matches
// p_type (bitmask test) and whose bounds intersect p_bounds. A fresh
// query id stamps visited elements so ones spanning multiple cells are
// reported only once.
// NOTE(review): this const method increments current_query_id and writes
// element->query_id — presumably those are declared mutable (or the
// const-ness is only logical); confirm against the header.
SGFixedVector2Internal min = p_bounds.get_min();
SGFixedVector2Internal max = p_bounds.get_max();
HashKey from(
min.x.to_int() / cell_size,
min.y.to_int() / cell_size);
HashKey to(
max.x.to_int() / cell_size,
max.y.to_int() / cell_size);
uint64_t query_id = (++current_query_id);
for (int32_t x = from.x; x <= to.x; x++) {
for (int32_t y = from.y; y <= to.y; y++) {
HashKey key(x, y);
const Map<HashKey, Cell *>::Element *cell_element = cells.find(key);
Cell *cell;
if (!cell_element) {
continue;
}
cell = cell_element->get();
for (List<SGBroadphase2DInternal::Element *>::Element *E = cell->elements.front(); E; E = E->next()) {
SGBroadphase2DInternal::Element *element = E->get();
// Already reported by this query via another cell.
if (element->query_id == query_id) {
continue;
}
if ((element->object->get_object_type() & p_type) && p_bounds.intersects(element->bounds)) {
element->query_id = query_id;
p_result_handler->handle_result(element->object);
}
}
}
}
}
void SGBroadphase2DInternal::set_cell_size(int p_cell_size) {
// Changes the grid resolution: tears down all cells and re-registers
// every element with keys recomputed at the new cell size. No-op when
// the size is unchanged.
if (cell_size != p_cell_size) {
cell_size = p_cell_size;
_clear_cells();
for (List<SGBroadphase2DInternal::Element *>::Element *E = elements.front(); E; E = E->next()) {
SGBroadphase2DInternal::Element *element = E->get();
SGFixedVector2Internal min = element->bounds.get_min();
SGFixedVector2Internal max = element->bounds.get_max();
element->from = HashKey(
min.x.to_int() / cell_size,
min.y.to_int() / cell_size);
element->to = HashKey(
max.x.to_int() / cell_size,
max.y.to_int() / cell_size);
_add_element_to_cells(element);
}
}
}
SGBroadphase2DInternal::SGBroadphase2DInternal(int p_cell_size) {
// Grid starts empty; query ids begin at 0 and grow monotonically.
cell_size = p_cell_size;
current_query_id = 0;
}
SGBroadphase2DInternal::~SGBroadphase2DInternal() {
// Free the cells first, then the elements they referenced.
_clear_cells();
for (List<SGBroadphase2DInternal::Element *>::Element *E = elements.front(); E; E = E->next()) {
memdelete(E->get());
}
}
|
// Copyright (c) 2004-2022 Yoshikatsu Fujita / LittleWing Company Limited.
// See LICENSE file for terms and conditions of use.
#include "core.h"
#include "vm.h"
#include "heap.h"
#include "subr.h"
#include "arith.h"
#include "violation.h"
// flonum?
scm_obj_t
subr_flonum_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flonum? obj) => #t iff obj is a flonum; requires exactly one argument.
if (argc == 1) return FLONUMP(argv[0]) ? scm_true : scm_false;
wrong_number_of_arguments_violation(vm, "flonum?", 1, 1, argc, argv);
return scm_undef;
}
// real->flonum
scm_obj_t
subr_real_to_flonum(VM* vm, int argc, scm_obj_t argv[])
{
// (real->flonum x) => x converted to an inexact flonum; x must be real.
if (argc == 1) {
if (real_pred(argv[0])) {
scm_flonum_t flonum = (scm_flonum_t)cnvt_to_inexact(vm->m_heap, argv[0]);
assert(FLONUMP(flonum));
return flonum;
}
wrong_type_argument_violation(vm, "real->flonum", 0, "real", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "real->flonum", 1, 1, argc, argv);
return scm_undef;
}
// fl=?
scm_obj_t
subr_fl_eq(VM* vm, int argc, scm_obj_t argv[])
{
// (fl=? fl1 fl2 ...) => #t iff all flonum arguments are numerically equal.
// A single argument is trivially #t. For argc >= 3 every argument is
// type-checked before any comparison is made.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) == FLONUM(argv[1])) ? scm_true : scm_false;
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return scm_true;
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
for (int i = 0; i < argc - 1; i++) {
if (FLONUM(argv[i]) == FLONUM(argv[i + 1])) continue;
return scm_false;
}
return scm_true;
}
wrong_number_of_arguments_violation(vm, "fl=?", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl=?", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl<?
scm_obj_t
subr_fl_lt(VM* vm, int argc, scm_obj_t argv[])
{
// (fl<? fl1 fl2 ...) => #t iff the flonum arguments are strictly
// increasing. A single argument is trivially #t.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) < FLONUM(argv[1])) ? scm_true : scm_false;
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return scm_true;
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
for (int i = 0; i < argc - 1; i++) {
if (FLONUM(argv[i]) < FLONUM(argv[i + 1])) continue;
return scm_false;
}
return scm_true;
}
wrong_number_of_arguments_violation(vm, "fl<?", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl<?", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl>?
scm_obj_t
subr_fl_gt(VM* vm, int argc, scm_obj_t argv[])
{
// (fl>? fl1 fl2 ...) => #t iff the flonum arguments are strictly
// decreasing. A single argument is trivially #t.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) > FLONUM(argv[1])) ? scm_true : scm_false;
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return scm_true;
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
for (int i = 0; i < argc - 1; i++) {
if (FLONUM(argv[i]) > FLONUM(argv[i + 1])) continue;
return scm_false;
}
return scm_true;
}
wrong_number_of_arguments_violation(vm, "fl>?", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl>?", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl<=?
scm_obj_t
subr_fl_le(VM* vm, int argc, scm_obj_t argv[])
{
// (fl<=? fl1 fl2 ...) => #t iff the flonum arguments are monotonically
// non-decreasing. A single argument is trivially #t.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) <= FLONUM(argv[1])) ? scm_true : scm_false;
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return scm_true;
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
for (int i = 0; i < argc - 1; i++) {
if (FLONUM(argv[i]) <= FLONUM(argv[i + 1])) continue;
return scm_false;
}
return scm_true;
}
wrong_number_of_arguments_violation(vm, "fl<=?", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl<=?", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl>=?
scm_obj_t
subr_fl_ge(VM* vm, int argc, scm_obj_t argv[])
{
// (fl>=? fl1 fl2 ...) => #t iff the flonum arguments are monotonically
// non-increasing. A single argument is trivially #t.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) >= FLONUM(argv[1])) ? scm_true : scm_false;
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return scm_true;
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
for (int i = 0; i < argc - 1; i++) {
if (FLONUM(argv[i]) >= FLONUM(argv[i + 1])) continue;
return scm_false;
}
return scm_true;
}
wrong_number_of_arguments_violation(vm, "fl>=?", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl>=?", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// flinteger?
scm_obj_t
subr_fl_integer_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flinteger? fl) => #t iff fl is a flonum with an integral value.
if (argc == 1) {
if (FLONUMP(argv[0])) return integer_pred(argv[0]) ? scm_true : scm_false;
wrong_type_argument_violation(vm, "flinteger?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flinteger?", 1, 1, argc, argv);
return scm_undef;
}
// flzero?
scm_obj_t
subr_fl_zero_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flzero? fl) => #t iff fl compares equal to 0.0 (includes -0.0).
if (argc == 1) {
if (FLONUMP(argv[0])) return (FLONUM(argv[0]) == 0.0) ? scm_true : scm_false;
wrong_type_argument_violation(vm, "flzero?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flzero?", 1, 1, argc, argv);
return scm_undef;
}
// flpositive?
scm_obj_t
subr_fl_positive_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flpositive? fl) => #t iff fl > 0.0.
if (argc == 1) {
if (FLONUMP(argv[0])) return (FLONUM(argv[0]) > 0.0) ? scm_true : scm_false;
wrong_type_argument_violation(vm, "flpositive?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flpositive?", 1, 1, argc, argv);
return scm_undef;
}
// flnegative?
scm_obj_t
subr_fl_negative_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flnegative? fl) => #t iff fl < 0.0.
if (argc == 1) {
if (FLONUMP(argv[0])) return (FLONUM(argv[0]) < 0.0) ? scm_true : scm_false;
wrong_type_argument_violation(vm, "flnegative?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flnegative?", 1, 1, argc, argv);
return scm_undef;
}
// flodd?
scm_obj_t
subr_fl_odd_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flodd? fl) => #t iff fl is an integer-valued flonum that is odd
// (computed as the negation of n_even_pred).
if (argc == 1) {
if (FLONUMP(argv[0]) && integer_valued_pred(argv[0])) return n_even_pred(argv[0]) ? scm_false : scm_true;
wrong_type_argument_violation(vm, "flodd?", 0, "integer valued flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flodd?", 1, 1, argc, argv);
return scm_undef;
}
// fleven?
scm_obj_t
subr_fl_even_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (fleven? fl) => #t iff fl is an integer-valued flonum that is even.
if (argc == 1) {
if (FLONUMP(argv[0]) && integer_valued_pred(argv[0])) return n_even_pred(argv[0]) ? scm_true : scm_false;
wrong_type_argument_violation(vm, "fleven?", 0, "integer valued flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "fleven?", 1, 1, argc, argv);
return scm_undef;
}
// flfinite?
scm_obj_t
subr_fl_finite_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flfinite? fl) => #t iff fl is neither NaN nor an infinity.
if (argc == 1) {
if (FLONUMP(argv[0])) {
if (isnan(FLONUM(argv[0]))) return scm_false;
if (isinf(FLONUM(argv[0]))) return scm_false;
return scm_true;
}
wrong_type_argument_violation(vm, "flfinite?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flfinite?", 1, 1, argc, argv);
return scm_undef;
}
// flinfinite?
scm_obj_t
subr_fl_infinite_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flinfinite? fl) => #t iff fl is +inf.0 or -inf.0.
if (argc == 1) {
if (FLONUMP(argv[0])) {
return (isinf(FLONUM(argv[0]))) ? scm_true : scm_false;
}
wrong_type_argument_violation(vm, "flinfinite?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flinfinite?", 1, 1, argc, argv);
return scm_undef;
}
// flnan?
scm_obj_t
subr_fl_nan_pred(VM* vm, int argc, scm_obj_t argv[])
{
// (flnan? fl) => #t iff fl is a NaN.
if (argc == 1) {
if (FLONUMP(argv[0])) {
return (isnan(FLONUM(argv[0]))) ? scm_true : scm_false;
}
wrong_type_argument_violation(vm, "flnan?", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flnan?", 1, 1, argc, argv);
return scm_undef;
}
// flmax
scm_obj_t
subr_fl_max(VM* vm, int argc, scm_obj_t argv[])
{
// (flmax fl1 fl2 ...) => the argument holding the largest value; on ties
// the earliest such argument object is returned (the '<' update keeps the
// first maximum). All arguments are type-checked before comparing.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) > FLONUM(argv[1])) ? argv[0] : argv[1];
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return argv[0];
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
double val = FLONUM(argv[0]);
int n = 0;
for (int i = 1; i < argc; i++) {
if (val < FLONUM(argv[i])) {
val = FLONUM(argv[i]);
n = i;
}
}
return argv[n];
}
wrong_number_of_arguments_violation(vm, "flmax", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "flmax", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// flmin
scm_obj_t
subr_fl_min(VM* vm, int argc, scm_obj_t argv[])
{
// (flmin fl1 fl2 ...) => the argument holding the smallest value; on ties
// the earliest such argument object is returned. All arguments are
// type-checked before comparing.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return (FLONUM(argv[0]) < FLONUM(argv[1])) ? argv[0] : argv[1];
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return argv[0];
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
double val = FLONUM(argv[0]);
int n = 0;
for (int i = 1; i < argc; i++) {
if (val > FLONUM(argv[i])) {
val = FLONUM(argv[i]);
n = i;
}
}
return argv[n];
}
wrong_number_of_arguments_violation(vm, "flmin", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "flmin", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl+
scm_obj_t
subr_fl_add(VM* vm, int argc, scm_obj_t argv[])
{
// (fl+ fl ...) => left-to-right sum of the flonum arguments;
// (fl+) => 0.0 (the additive identity); (fl+ fl) => fl unchanged.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return make_flonum(vm->m_heap, FLONUM(argv[0]) + FLONUM(argv[1]));
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return argv[0];
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
double val = FLONUM(argv[0]);
for (int i = 1; i < argc; i++) {
val = val + FLONUM(argv[i]);
}
return make_flonum(vm->m_heap, val);
}
return make_flonum(vm->m_heap, 0.0);
raise_bad:
wrong_type_argument_violation(vm, "fl+", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl*
scm_obj_t
subr_fl_mul(VM* vm, int argc, scm_obj_t argv[])
{
// (fl* fl ...) => left-to-right product of the flonum arguments;
// (fl*) => 1.0 (the multiplicative identity); (fl* fl) => fl unchanged.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return make_flonum(vm->m_heap, FLONUM(argv[0]) * FLONUM(argv[1]));
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return argv[0];
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
double val = FLONUM(argv[0]);
for (int i = 1; i < argc; i++) {
val = val * FLONUM(argv[i]);
}
return make_flonum(vm->m_heap, val);
}
return make_flonum(vm->m_heap, 1.0);
raise_bad:
wrong_type_argument_violation(vm, "fl*", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl-
scm_obj_t
subr_fl_sub(VM* vm, int argc, scm_obj_t argv[])
{
// (fl- fl) => negation; (fl- fl1 fl2 ...) => left-fold subtraction.
// At least one argument is required.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return make_flonum(vm->m_heap, FLONUM(argv[0]) - FLONUM(argv[1]));
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, -FLONUM(argv[0]));
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
double val = FLONUM(argv[0]);
for (int i = 1; i < argc; i++) {
val = val - FLONUM(argv[i]);
}
return make_flonum(vm->m_heap, val);
}
wrong_number_of_arguments_violation(vm, "fl-", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl-", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fl/
scm_obj_t
subr_fl_quotient(VM* vm, int argc, scm_obj_t argv[])
{
// (fl/ fl) => reciprocal (1.0 / fl); (fl/ fl1 fl2 ...) => left-fold
// division. IEEE semantics apply, so division by 0.0 yields an infinity
// or NaN rather than an error. At least one argument is required.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return make_flonum(vm->m_heap, FLONUM(argv[0]) / FLONUM(argv[1]));
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, 1.0 / FLONUM(argv[0]));
bad = 0; goto raise_bad;
}
if (argc >= 3) {
for (int i = 0; i < argc; i++) {
if (FLONUMP(argv[i])) continue;
bad = i; goto raise_bad;
}
double val = FLONUM(argv[0]);
for (int i = 1; i < argc; i++) {
val = val / FLONUM(argv[i]);
}
return make_flonum(vm->m_heap, val);
}
wrong_number_of_arguments_violation(vm, "fl/", 1, -1, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fl/", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// flabs
scm_obj_t
subr_fl_abs(VM* vm, int argc, scm_obj_t argv[])
{
// (flabs fl) => absolute value of fl.
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, fabs(FLONUM(argv[0])));
wrong_type_argument_violation(vm, "flabs", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flabs", 1, 1, argc, argv);
return scm_undef;
}
// fldiv
scm_obj_t
subr_fl_div(VM* vm, int argc, scm_obj_t argv[])
{
// (fldiv fl1 fl2) => integer-division quotient; delegates to the shared
// arith_integer_div implementation.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return arith_integer_div(vm->m_heap, argv[0], argv[1]);
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
wrong_number_of_arguments_violation(vm, "fldiv", 2, 2, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fldiv", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// fldiv0
scm_obj_t
subr_fl_div0(VM* vm, int argc, scm_obj_t argv[])
{
// (fldiv0 fl1 fl2) => centered integer-division quotient; delegates to
// the shared arith_integer_div0 implementation.
int bad;
if (argc == 2) {
if (BOTHFLONUMP(argv[0], argv[1])) return arith_integer_div0(vm->m_heap, argv[0], argv[1]);
bad = FLONUMP(argv[0]) ? 1 : 0; goto raise_bad;
}
wrong_number_of_arguments_violation(vm, "fldiv0", 2, 2, argc, argv);
return scm_undef;
raise_bad:
wrong_type_argument_violation(vm, "fldiv0", bad, "flonum", argv[bad], argc, argv);
return scm_undef;
}
// flnumerator
scm_obj_t
subr_fl_numerator(VM* vm, int argc, scm_obj_t argv[])
{
// (flnumerator fl) => numerator of fl's exact equivalent, converted back
// to inexact. 0.0 is special-cased to itself (preserving its sign).
if (argc == 1) {
if (FLONUMP(argv[0])) {
if (FLONUM(argv[0]) == 0.0) return argv[0];
scm_obj_t obj = cnvt_to_exact(vm->m_heap, argv[0]);
if (RATIONALP(obj)) return cnvt_to_inexact(vm->m_heap, ((scm_rational_t)obj)->nume);
// Exact value is an integer: it is its own numerator.
return cnvt_to_inexact(vm->m_heap, obj);
}
wrong_type_argument_violation(vm, "flnumerator", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flnumerator", 1, 1, argc, argv);
return scm_undef;
}
// fldenominator
scm_obj_t
subr_fl_denominator(VM* vm, int argc, scm_obj_t argv[])
{
// (fldenominator fl) => denominator of fl's exact equivalent, as an
// inexact number; integer-valued flonums have denominator 1.0.
if (argc == 1) {
if (FLONUMP(argv[0])) {
scm_obj_t obj = cnvt_to_exact(vm->m_heap, argv[0]);
if (RATIONALP(obj)) return cnvt_to_inexact(vm->m_heap, ((scm_rational_t)obj)->deno);
return make_flonum(vm->m_heap, 1.0);
}
wrong_type_argument_violation(vm, "fldenominator", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "fldenominator", 1, 1, argc, argv);
return scm_undef;
}
// flfloor
scm_obj_t
subr_fl_floor(VM* vm, int argc, scm_obj_t argv[])
{
// (flfloor fl) => largest integral flonum not greater than fl.
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, floor(FLONUM(argv[0])));
wrong_type_argument_violation(vm, "flfloor", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flfloor", 1, 1, argc, argv);
return scm_undef;
}
// flceiling
scm_obj_t
subr_fl_ceiling(VM* vm, int argc, scm_obj_t argv[])
{
// (flceiling fl) => smallest integral flonum not less than fl.
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, ceil(FLONUM(argv[0])));
wrong_type_argument_violation(vm, "flceiling", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flceiling", 1, 1, argc, argv);
return scm_undef;
}
// fltruncate
scm_obj_t
subr_fl_truncate(VM* vm, int argc, scm_obj_t argv[])
{
// (fltruncate fl) => fl rounded toward zero to an integral flonum.
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, trunc(FLONUM(argv[0])));
wrong_type_argument_violation(vm, "fltruncate", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "fltruncate", 1, 1, argc, argv);
return scm_undef;
}
// flround
scm_obj_t
subr_fl_round(VM* vm, int argc, scm_obj_t argv[])
{
// (flround fl) => fl rounded to the nearest integral flonum with ties
// going to the even integer ("banker's rounding"): floor(x + 0.5) handles
// the non-tie case; on an exact tie an odd result is nudged down by one.
if (argc == 1) {
if (FLONUMP(argv[0])) {
double value = FLONUM(argv[0]);
double ans = floor(value + 0.5);
if (ans != value + 0.5) return make_flonum(vm->m_heap, ans);
if (ans * 0.5 == floor(ans * 0.5)) return make_flonum(vm->m_heap, ans);
return make_flonum(vm->m_heap, ans - 1.0);
}
wrong_type_argument_violation(vm, "flround", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flround", 1, 1, argc, argv);
return scm_undef;
}
// flexp
scm_obj_t
subr_fl_exp(VM* vm, int argc, scm_obj_t argv[])
{
// (flexp fl) => e raised to the power fl.
if (argc == 1) {
if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, exp(FLONUM(argv[0])));
wrong_type_argument_violation(vm, "flexp", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flexp", 1, 1, argc, argv);
return scm_undef;
}
// flexpt
scm_obj_t
subr_fl_expt(VM* vm, int argc, scm_obj_t argv[])
{
// (flexpt fl1 fl2) => fl1 raised to the power fl2, via C pow().
if (argc == 2) {
if (FLONUMP(argv[0])) {
if (FLONUMP(argv[1])) {
double fl1 = FLONUM(argv[0]);
double fl2 = FLONUM(argv[1]);
return make_flonum(vm->m_heap, pow(fl1, fl2));
}
wrong_type_argument_violation(vm, "flexpt", 1, "flonum", argv[1], argc, argv);
return scm_undef;
}
wrong_type_argument_violation(vm, "flexpt", 0, "flonum", argv[0], argc, argv);
return scm_undef;
}
wrong_number_of_arguments_violation(vm, "flexpt", 2, 2, argc, argv);
return scm_undef;
}
// fllog
scm_obj_t
subr_fl_log(VM* vm, int argc, scm_obj_t argv[])
{
    // (fllog fl) => natural logarithm of fl; (fllog fl1 fl2) => logarithm of
    // fl1 in base fl2 (computed as log(fl1)/log(fl2)). +inf.0 maps to
    // itself; -inf.0 maps to +nan.0.
    if (argc == 1) {
        if (FLONUMP(argv[0])) {
            double value = FLONUM(argv[0]);
            if (isinf(value)) {
                if (value > 0.0) return argv[0];
                return make_flonum(vm->m_heap, VALUE_NAN);
            }
            return make_flonum(vm->m_heap, log(value));
        }
        wrong_type_argument_violation(vm, "fllog", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    if (argc == 2) {
        // Fix: was bitwise 'FLONUMP(argv[0]) & FLONUMP(argv[1])'; use the
        // logical short-circuit form like every other two-argument check in
        // this file.
        if (FLONUMP(argv[0]) && FLONUMP(argv[1])) return make_flonum(vm->m_heap, log(FLONUM(argv[0])) / log(FLONUM(argv[1])));
        if (FLONUMP(argv[0])) {
            wrong_type_argument_violation(vm, "fllog", 1, "flonum", argv[1], argc, argv);
            return scm_undef;
        }
        wrong_type_argument_violation(vm, "fllog", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    wrong_number_of_arguments_violation(vm, "fllog", 1, 2, argc, argv);
    return scm_undef;
}
// flsin
scm_obj_t
subr_fl_sin(VM* vm, int argc, scm_obj_t argv[])
{
    // (flsin fl) -> sine of fl (radians).
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "flsin", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FLONUMP(argv[0])) {
        wrong_type_argument_violation(vm, "flsin", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, sin(FLONUM(argv[0])));
}
// flcos
scm_obj_t
subr_fl_cos(VM* vm, int argc, scm_obj_t argv[])
{
    // (flcos fl) -> cosine of fl (radians).
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "flcos", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FLONUMP(argv[0])) {
        wrong_type_argument_violation(vm, "flcos", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, cos(FLONUM(argv[0])));
}
// fltan
scm_obj_t
subr_fl_tan(VM* vm, int argc, scm_obj_t argv[])
{
    // (fltan fl) -> tangent of fl (radians).
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "fltan", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FLONUMP(argv[0])) {
        wrong_type_argument_violation(vm, "fltan", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, tan(FLONUM(argv[0])));
}
// flasin
scm_obj_t
subr_fl_asin(VM* vm, int argc, scm_obj_t argv[])
{
    // (flasin fl) -> arc sine of fl.
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "flasin", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FLONUMP(argv[0])) {
        wrong_type_argument_violation(vm, "flasin", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, asin(FLONUM(argv[0])));
}
// flacos
scm_obj_t
subr_fl_acos(VM* vm, int argc, scm_obj_t argv[])
{
    // (flacos fl) -> arc cosine of fl.
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "flacos", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FLONUMP(argv[0])) {
        wrong_type_argument_violation(vm, "flacos", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, acos(FLONUM(argv[0])));
}
// flatan
scm_obj_t
subr_fl_atan(VM* vm, int argc, scm_obj_t argv[])
{
    // (flatan fl) -> arc tangent of fl.
    // (flatan fl1 fl2) -> atan2(fl1, fl2), the angle of the point (fl2, fl1).
    if (argc == 1) {
        if (FLONUMP(argv[0])) return make_flonum(vm->m_heap, atan(FLONUM(argv[0])));
        wrong_type_argument_violation(vm, "flatan", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    if (argc == 2) {
        // Fix: use logical && rather than bitwise & on the predicate results.
        if (FLONUMP(argv[0]) && FLONUMP(argv[1])) return make_flonum(vm->m_heap, atan2(FLONUM(argv[0]), FLONUM(argv[1])));
        if (FLONUMP(argv[0])) {
            wrong_type_argument_violation(vm, "flatan", 1, "flonum", argv[1], argc, argv);
            return scm_undef;
        }
        wrong_type_argument_violation(vm, "flatan", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    // Bug fix: this subr accepts 1 or 2 arguments (see the branches above and
    // the analogous fllog), so report the arity range as 1..2 -- it previously
    // reported "1, 1", giving a misleading error for a 3-argument call.
    wrong_number_of_arguments_violation(vm, "flatan", 1, 2, argc, argv);
    return scm_undef;
}
// fixnum->flonum
scm_obj_t
subr_fixnum_to_flonum(VM* vm, int argc, scm_obj_t argv[])
{
    // (fixnum->flonum fx) -> fx converted to a flonum.
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "fixnum->flonum", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FIXNUMP(argv[0])) {
        wrong_type_argument_violation(vm, "fixnum->flonum", 0, "fixnum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, FIXNUM(argv[0]));
}
// flsqrt
scm_obj_t
subr_fl_sqrt(VM* vm, int argc, scm_obj_t argv[])
{
    // (flsqrt fl) -> square root of fl.
    if (argc != 1) {
        wrong_number_of_arguments_violation(vm, "flsqrt", 1, 1, argc, argv);
        return scm_undef;
    }
    if (!FLONUMP(argv[0])) {
        wrong_type_argument_violation(vm, "flsqrt", 0, "flonum", argv[0], argc, argv);
        return scm_undef;
    }
    return make_flonum(vm->m_heap, sqrt(FLONUM(argv[0])));
}
// Registers every flonum primitive defined in this file with the runtime's
// system-procedure table under its Scheme-visible name.
void init_subr_flonum(object_heap_t* heap)
{
    #define DEFSUBR(SYM, FUNC) heap->intern_system_subr(SYM, FUNC)
    DEFSUBR("flonum?", subr_flonum_pred);
    DEFSUBR("real->flonum", subr_real_to_flonum);
    DEFSUBR("fl=?", subr_fl_eq);
    DEFSUBR("fl<?", subr_fl_lt);
    DEFSUBR("fl>?", subr_fl_gt);
    DEFSUBR("fl<=?", subr_fl_le);
    DEFSUBR("fl>=?", subr_fl_ge);
    DEFSUBR("flinteger?", subr_fl_integer_pred);
    DEFSUBR("flzero?", subr_fl_zero_pred);
    DEFSUBR("flpositive?", subr_fl_positive_pred);
    DEFSUBR("flnegative?", subr_fl_negative_pred);
    DEFSUBR("flodd?", subr_fl_odd_pred);
    DEFSUBR("fleven?", subr_fl_even_pred);
    DEFSUBR("flfinite?", subr_fl_finite_pred);
    DEFSUBR("flinfinite?", subr_fl_infinite_pred);
    DEFSUBR("flnan?", subr_fl_nan_pred);
    DEFSUBR("flmax", subr_fl_max);
    DEFSUBR("flmin", subr_fl_min);
    DEFSUBR("fl+", subr_fl_add);
    DEFSUBR("fl*", subr_fl_mul);
    DEFSUBR("fl-", subr_fl_sub);
    DEFSUBR("fl/", subr_fl_quotient);
    DEFSUBR("fldiv", subr_fl_div);
    DEFSUBR("fldiv0", subr_fl_div0);
    DEFSUBR("flnumerator", subr_fl_numerator);
    DEFSUBR("fldenominator", subr_fl_denominator);
    DEFSUBR("flfloor", subr_fl_floor);
    DEFSUBR("flceiling", subr_fl_ceiling);
    DEFSUBR("fltruncate", subr_fl_truncate);
    DEFSUBR("flround", subr_fl_round);
    DEFSUBR("flexp", subr_fl_exp);
    DEFSUBR("flexpt", subr_fl_expt);
    DEFSUBR("fllog", subr_fl_log);
    DEFSUBR("flsin", subr_fl_sin);
    DEFSUBR("flcos", subr_fl_cos);
    DEFSUBR("fltan", subr_fl_tan);
    DEFSUBR("flasin", subr_fl_asin);
    DEFSUBR("flacos", subr_fl_acos);
    DEFSUBR("flatan", subr_fl_atan);
    DEFSUBR("flabs", subr_fl_abs);
    DEFSUBR("flsqrt", subr_fl_sqrt);
    DEFSUBR("fixnum->flonum", subr_fixnum_to_flonum);
}
|
// RUN: %clang_cc1 %s -verify -fsyntax-only -pedantic
// expected-no-diagnostics
// This file tests the clang extension which allows initializing the components
// of a complex number individually using an initialization list. Basically,
// if you have an explicit init list for a complex number that contains two
// initializers, this extension kicks in to turn it into component-wise
// initialization.
//
// See also the testcase for the C version of this extension in
// test/Sema/complex-init-list.c.
// Basic testcase
// (No pedantic warning is necessary because _Complex is not part of C++.)
// The two-element init list initializes the real and imaginary components.
_Complex float valid1 = { 1.0f, 2.0f };
|
/* Based on nsURLParsers.cc from Mozilla
* -------------------------------------
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Darin Fisher (original author)
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "url/third_party/mozilla/url_parse.h"
#include <stdlib.h>
#include "base/logging.h"
#include "url/url_parse_internal.h"
#include "url/url_util.h"
#include "url/url_util_internal.h"
namespace url {
namespace {
// True when |ch| is an ASCII decimal digit, the only characters permitted in
// a URL port component.
inline bool IsPortDigit(base::char16 ch) {
  return '0' <= ch && ch <= '9';
}
// Returns the offset of the next authority terminator in the input, starting
// the scan at start_offset. Returns spec_len when no terminator exists.
template <typename CHAR>
int FindNextAuthorityTerminator(const CHAR* spec,
                                int start_offset,
                                int spec_len) {
  int offset = start_offset;
  while (offset < spec_len && !IsAuthorityTerminator(spec[offset]))
    ++offset;
  return offset;  // Equals spec_len if nothing was found.
}
// Splits the user-info section |user| at its first ':' into *username and
// *password. When no ':' is present, the whole section is the username and
// the password component is left invalid.
template <typename CHAR>
void ParseUserInfo(const CHAR* spec,
                   const Component& user,
                   Component* username,
                   Component* password) {
  // Locate the first colon, which separates <username>:<password>.
  int split = 0;
  while (split < user.len && spec[user.begin + split] != ':')
    ++split;
  if (split == user.len) {
    // No separator: everything is the username.
    *username = user;
    *password = Component();
  } else {
    *username = Component(user.begin, split);
    *password = MakeRange(user.begin + split + 1,
                          user.begin + user.len);
  }
}
// Splits the server-info section (everything after any user-info '@') into
// *hostname and *port_num, handling bracketed IPv6 literals so that colons
// inside "[...]" are not mistaken for the port separator.
template<typename CHAR>
void ParseServerInfo(const CHAR* spec,
                     const Component& serverinfo,
                     Component* hostname,
                     Component* port_num) {
  if (serverinfo.len == 0) {
    // No server info, host name is empty.
    hostname->reset();
    port_num->reset();
    return;
  }
  // If the host starts with a left-bracket, assume the entire host is an
  // IPv6 literal. Otherwise, assume none of the host is an IPv6 literal.
  // This assumption will be overridden if we find a right-bracket.
  //
  // Our IPv6 address canonicalization code requires both brackets to exist,
  // but the ability to locate an incomplete address can still be useful.
  int ipv6_terminator = spec[serverinfo.begin] == '[' ? serverinfo.end() : -1;
  int colon = -1;
  // Find the last right-bracket, and the last colon.
  for (int i = serverinfo.begin; i < serverinfo.end(); i++) {
    switch (spec[i]) {
      case ']':
        ipv6_terminator = i;
        break;
      case ':':
        colon = i;
        break;
    }
  }
  // A port separator is only a colon that appears after the IPv6 literal
  // (if any) ends.
  if (colon > ipv6_terminator) {
    // Found a port number: <hostname>:<port>
    *hostname = MakeRange(serverinfo.begin, colon);
    if (hostname->len == 0)
      hostname->reset();
    *port_num = MakeRange(colon + 1, serverinfo.end());
  } else {
    // No port: <hostname>
    *hostname = serverinfo;
    port_num->reset();
  }
}
// Given an already-identified auth section, breaks it into its constituent
// parts: username, password, hostname and port. (Note: despite the original
// comment, the port is produced here as a Component; integer conversion
// happens separately in DoParsePort.)
template<typename CHAR>
void DoParseAuthority(const CHAR* spec,
                      const Component& auth,
                      Component* username,
                      Component* password,
                      Component* hostname,
                      Component* port_num) {
  DCHECK(auth.is_valid()) << "We should always get an authority";
  if (auth.len == 0) {
    username->reset();
    password->reset();
    hostname->reset();
    port_num->reset();
    return;
  }
  // Search backwards for @, which is the separator between the user info and
  // the server info. Searching backwards means an '@' inside the user info
  // (e.g. in a mailto-style username) stays with the user info.
  int i = auth.begin + auth.len - 1;
  while (i > auth.begin && spec[i] != '@')
    i--;
  if (spec[i] == '@') {
    // Found user info: <user-info>@<server-info>
    ParseUserInfo(spec, Component(auth.begin, i - auth.begin),
                  username, password);
    ParseServerInfo(spec, MakeRange(i + 1, auth.begin + auth.len),
                    hostname, port_num);
  } else {
    // No user info, everything is server info.
    username->reset();
    password->reset();
    ParseServerInfo(spec, auth, hostname, port_num);
  }
}
// Scans |path| for the '?' and '#' separators, writing their indices into
// *query_separator and *ref_separator (both must be initialized to -1 by the
// caller; they are left at -1 when the corresponding separator is absent).
template <typename CHAR>
inline void FindQueryAndRefParts(const CHAR* spec,
                                 const Component& path,
                                 int* query_separator,
                                 int* ref_separator) {
  int path_end = path.begin + path.len;
  for (int i = path.begin; i < path_end; i++) {
    switch (spec[i]) {
      case '?':
        // Only match the query string if it precedes the reference fragment
        // and when we haven't found one already.
        if (*query_separator < 0)
          *query_separator = i;
        break;
      case '#':
        // Record the first # sign only. Everything after it is the ref, so
        // the scan can stop here.
        if (*ref_separator < 0) {
          *ref_separator = i;
          return;
        }
        break;
    }
  }
}
// Splits a full path section into file path, query and ref components.
template<typename CHAR>
void ParsePath(const CHAR* spec,
               const Component& path,
               Component* filepath,
               Component* query,
               Component* ref) {
  // path = [/]<segment1>/<segment2>/<...>/<segmentN>;<param>?<query>#<ref>
  // Special case when there is no path.
  if (path.len == -1) {
    filepath->reset();
    query->reset();
    ref->reset();
    return;
  }
  DCHECK(path.len > 0) << "We should never have 0 length paths";
  // Search for first occurrence of either ? or #.
  int query_separator = -1;  // Index of the '?'
  int ref_separator = -1;    // Index of the '#'
  FindQueryAndRefParts(spec, path, &query_separator, &ref_separator);
  // Markers pointing to the character after each of these corresponding
  // components. The code below works from the end back to the beginning,
  // and will update these indices as it finds components that exist.
  int file_end, query_end;
  // Ref fragment: from the # to the end of the path.
  int path_end = path.begin + path.len;
  if (ref_separator >= 0) {
    file_end = query_end = ref_separator;
    *ref = MakeRange(ref_separator + 1, path_end);
  } else {
    file_end = query_end = path_end;
    ref->reset();
  }
  // Query fragment: everything from the ? to the next boundary (either the end
  // of the path or the ref fragment).
  if (query_separator >= 0) {
    file_end = query_separator;
    *query = MakeRange(query_separator + 1, query_end);
  } else {
    query->reset();
  }
  // File path: treat an empty file path as no file path.
  if (file_end != path.begin)
    *filepath = MakeRange(path.begin, file_end);
  else
    filepath->reset();
}
// Extracts the scheme (everything before the first ':') from |url| into
// *scheme, skipping leading whitespace/control characters first. Returns
// false when the input is empty or contains no colon.
template <typename CHAR>
bool DoExtractScheme(const CHAR* url,
                     int url_len,
                     Component* scheme) {
  // Move past leading whitespace and control characters.
  int begin = 0;
  while (begin < url_len && ShouldTrimFromURL(url[begin]))
    begin++;
  if (begin == url_len)
    return false;  // Empty or all-whitespace input.
  // The scheme runs up to (but not including) the first colon.
  int i = begin;
  while (i < url_len) {
    if (url[i] == ':') {
      *scheme = MakeRange(begin, i);
      return true;
    }
    ++i;
  }
  return false;  // No colon found: no scheme.
}
// Fills in all members of the Parsed structure except for the scheme.
//
// |spec| is the full spec being parsed, of length |spec_len|.
// |after_scheme| is the character immediately following the scheme (after the
// colon) where we'll begin parsing.
//
// Compatibility data points. I list "host", "path" extracted:
// Input                IE6             Firefox                Us
// -----                --------------  --------------         --------------
// http://foo.com/      "foo.com", "/"  "foo.com", "/"         "foo.com", "/"
// http:foo.com/        "foo.com", "/"  "foo.com", "/"         "foo.com", "/"
// http:/foo.com/       fail(*)         "foo.com", "/"         "foo.com", "/"
// http:\foo.com/       fail(*)         "\foo.com", "/"(fail)  "foo.com", "/"
// http:////foo.com/    "foo.com", "/"  "foo.com", "/"         "foo.com", "/"
//
// (*) Interestingly, although IE fails to load these URLs, its history
// canonicalizer handles them, meaning if you've been to the corresponding
// "http://foo.com/" link, it will be colored.
template <typename CHAR>
void DoParseAfterScheme(const CHAR* spec,
                        int spec_len,
                        int after_scheme,
                        Parsed* parsed) {
  int num_slashes = CountConsecutiveSlashes(spec, after_scheme, spec_len);
  int after_slashes = after_scheme + num_slashes;
  // First split into two main parts, the authority (username, password, host,
  // and port) and the full path (path, query, and reference).
  Component authority;
  Component full_path;
  // Found "//<some data>", looks like an authority section. Treat everything
  // from there to the next slash (or end of spec) to be the authority. Note
  // that we ignore the number of slashes and treat it as the authority.
  int end_auth = FindNextAuthorityTerminator(spec, after_slashes, spec_len);
  authority = Component(after_slashes, end_auth - after_slashes);
  if (end_auth == spec_len)  // No beginning of path found.
    full_path = Component();
  else  // Everything starting from the slash to the end is the path.
    full_path = Component(end_auth, spec_len - end_auth);
  // Now parse those two sub-parts.
  DoParseAuthority(spec, authority, &parsed->username, &parsed->password,
                   &parsed->host, &parsed->port);
  ParsePath(spec, full_path, &parsed->path, &parsed->query, &parsed->ref);
}
// The main parsing function for standard URLs. Standard URLs have a scheme,
// host, path, etc.
template<typename CHAR>
void DoParseStandardURL(const CHAR* spec, int spec_len, Parsed* parsed) {
  DCHECK(spec_len >= 0);
  // Strip leading & trailing spaces and control characters.
  int begin = 0;
  TrimURL(spec, &begin, &spec_len);
  int after_scheme;
  if (DoExtractScheme(spec, spec_len, &parsed->scheme)) {
    after_scheme = parsed->scheme.end() + 1;  // Skip past the colon.
  } else {
    // Say there's no scheme when there is no colon. We could also say that
    // everything is the scheme. Both would produce an invalid URL, but this way
    // seems less wrong in more cases.
    parsed->scheme.reset();
    after_scheme = begin;
  }
  DoParseAfterScheme(spec, spec_len, after_scheme, parsed);
}
// Parses a filesystem: URL, which wraps a complete inner URL (e.g.
// "filesystem:http://host/type/path"). The inner URL is parsed recursively
// and stored via parsed->set_inner_parsed(); its components are then
// re-offset to be relative to the outer spec. On malformed input this
// returns early, leaving whatever has been filled in so far.
template<typename CHAR>
void DoParseFileSystemURL(const CHAR* spec, int spec_len, Parsed* parsed) {
  DCHECK(spec_len >= 0);
  // Get the unused parts of the URL out of the way.
  parsed->username.reset();
  parsed->password.reset();
  parsed->host.reset();
  parsed->port.reset();
  parsed->path.reset();   // May use this; reset for convenience.
  parsed->ref.reset();    // May use this; reset for convenience.
  parsed->query.reset();  // May use this; reset for convenience.
  parsed->clear_inner_parsed();  // May use this; reset for convenience.
  // Strip leading & trailing spaces and control characters.
  int begin = 0;
  TrimURL(spec, &begin, &spec_len);
  // Handle empty specs or ones that contain only whitespace or control chars.
  if (begin == spec_len) {
    parsed->scheme.reset();
    return;
  }
  int inner_start = -1;
  // Extract the scheme.  We also handle the case where there is no scheme.
  if (DoExtractScheme(&spec[begin], spec_len - begin, &parsed->scheme)) {
    // Offset the results since we gave ExtractScheme a substring.
    parsed->scheme.begin += begin;
    // Nothing follows the colon: no inner URL to parse.
    if (parsed->scheme.end() == spec_len - 1)
      return;
    inner_start = parsed->scheme.end() + 1;
  } else {
    // No scheme found; that's not valid for filesystem URLs.
    parsed->scheme.reset();
    return;
  }
  Component inner_scheme;
  const CHAR* inner_spec = &spec[inner_start];
  int inner_spec_len = spec_len - inner_start;
  if (DoExtractScheme(inner_spec, inner_spec_len, &inner_scheme)) {
    // Offset the results since we gave ExtractScheme a substring.
    inner_scheme.begin += inner_start;
    if (inner_scheme.end() == spec_len - 1)
      return;
  } else {
    // No scheme found; that's not valid for filesystem URLs.
    // The best we can do is return "filesystem://".
    return;
  }
  Parsed inner_parsed;
  if (CompareSchemeComponent(spec, inner_scheme, kFileScheme)) {
    // File URLs are special.
    ParseFileURL(inner_spec, inner_spec_len, &inner_parsed);
  } else if (CompareSchemeComponent(spec, inner_scheme, kFileSystemScheme)) {
    // Filesystem URLs don't nest.
    return;
  } else if (IsStandard(spec, inner_scheme)) {
    // All "normal" URLs.
    DoParseStandardURL(inner_spec, inner_spec_len, &inner_parsed);
  } else {
    return;
  }
  // All members of inner_parsed need to be offset by inner_start.
  // If we had any scheme that supported nesting more than one level deep,
  // we'd have to recurse into the inner_parsed's inner_parsed when
  // adjusting by inner_start.
  inner_parsed.scheme.begin += inner_start;
  inner_parsed.username.begin += inner_start;
  inner_parsed.password.begin += inner_start;
  inner_parsed.host.begin += inner_start;
  inner_parsed.port.begin += inner_start;
  inner_parsed.query.begin += inner_start;
  inner_parsed.ref.begin += inner_start;
  inner_parsed.path.begin += inner_start;
  // Query and ref move from inner_parsed to parsed.
  parsed->query = inner_parsed.query;
  inner_parsed.query.reset();
  parsed->ref = inner_parsed.ref;
  inner_parsed.ref.reset();
  parsed->set_inner_parsed(inner_parsed);
  if (!inner_parsed.scheme.is_valid() || !inner_parsed.path.is_valid() ||
      inner_parsed.inner_parsed()) {
    return;
  }
  // The path in inner_parsed should start with a slash, then have a filesystem
  // type followed by a slash. From the first slash up to but excluding the
  // second should be what it keeps; the rest goes to parsed. If the path ends
  // before the second slash, it's still pretty clear what the user meant, so
  // we'll let that through.
  if (!IsURLSlash(spec[inner_parsed.path.begin])) {
    return;
  }
  int inner_path_end = inner_parsed.path.begin + 1;  // skip the leading slash
  while (inner_path_end < spec_len &&
         !IsURLSlash(spec[inner_path_end]))
    ++inner_path_end;
  // Split the inner path at the second slash: the filesystem-type segment
  // stays with the inner URL, the remainder becomes the outer path.
  parsed->path.begin = inner_path_end;
  int new_inner_path_length = inner_path_end - inner_parsed.path.begin;
  parsed->path.len = inner_parsed.path.len - new_inner_path_length;
  parsed->inner_parsed()->path.len = new_inner_path_length;
}
// Initializes a path URL which is merely a scheme followed by a path. Examples
// include "about:foo" and "javascript:alert('bar');"
template<typename CHAR>
void DoParsePathURL(const CHAR* spec, int spec_len,
                    bool trim_path_end,
                    Parsed* parsed) {
  // Get the non-path and non-scheme parts of the URL out of the way, we never
  // use them.
  parsed->username.reset();
  parsed->password.reset();
  parsed->host.reset();
  parsed->port.reset();
  parsed->path.reset();
  parsed->query.reset();
  parsed->ref.reset();
  // Strip leading & trailing spaces and control characters.
  // |trim_path_end| controls whether trailing whitespace is also removed
  // (callers may want to preserve it in the path).
  int scheme_begin = 0;
  TrimURL(spec, &scheme_begin, &spec_len, trim_path_end);
  // Handle empty specs or ones that contain only whitespace or control chars.
  if (scheme_begin == spec_len) {
    parsed->scheme.reset();
    parsed->path.reset();
    return;
  }
  int path_begin;
  // Extract the scheme, with the path being everything following. We also
  // handle the case where there is no scheme.
  if (ExtractScheme(&spec[scheme_begin], spec_len - scheme_begin,
                    &parsed->scheme)) {
    // Offset the results since we gave ExtractScheme a substring.
    parsed->scheme.begin += scheme_begin;
    path_begin = parsed->scheme.end() + 1;
  } else {
    // No scheme case.
    parsed->scheme.reset();
    path_begin = scheme_begin;
  }
  if (path_begin == spec_len)
    return;
  DCHECK_LT(path_begin, spec_len);
  ParsePath(spec,
            MakeRange(path_begin, spec_len),
            &parsed->path,
            &parsed->query,
            &parsed->ref);
}
// Parses a mailto: URL into a scheme, a path (the recipient part) and an
// optional query; mailto URLs have no authority or ref.
template<typename CHAR>
void DoParseMailtoURL(const CHAR* spec, int spec_len, Parsed* parsed) {
  DCHECK(spec_len >= 0);
  // Get the non-path and non-scheme parts of the URL out of the way, we never
  // use them.
  parsed->username.reset();
  parsed->password.reset();
  parsed->host.reset();
  parsed->port.reset();
  parsed->ref.reset();
  parsed->query.reset();  // May use this; reset for convenience.
  // Strip leading & trailing spaces and control characters.
  int begin = 0;
  TrimURL(spec, &begin, &spec_len);
  // Handle empty specs or ones that contain only whitespace or control chars.
  if (begin == spec_len) {
    parsed->scheme.reset();
    parsed->path.reset();
    return;
  }
  int path_begin = -1;
  int path_end = -1;
  // Extract the scheme, with the path being everything following. We also
  // handle the case where there is no scheme.
  if (ExtractScheme(&spec[begin], spec_len - begin, &parsed->scheme)) {
    // Offset the results since we gave ExtractScheme a substring.
    parsed->scheme.begin += begin;
    // Only a non-empty remainder after the colon counts as a path.
    if (parsed->scheme.end() != spec_len - 1) {
      path_begin = parsed->scheme.end() + 1;
      path_end = spec_len;
    }
  } else {
    // No scheme found, just path.
    parsed->scheme.reset();
    path_begin = begin;
    path_end = spec_len;
  }
  // Split [path_begin, path_end) into a path + query.
  for (int i = path_begin; i < path_end; ++i) {
    if (spec[i] == '?') {
      parsed->query = MakeRange(i + 1, path_end);
      path_end = i;
      break;
    }
  }
  // For compatibility with the standard URL parser, treat no path as
  // -1, rather than having a length of 0
  if (path_begin == path_end) {
    parsed->path.reset();
  } else {
    parsed->path = MakeRange(path_begin, path_end);
  }
}
// Converts a port number in a string to an integer. We'd like to just call
// sscanf but our input is not NULL-terminated, which sscanf requires. Instead,
// we copy the digits to a small stack buffer (since we know the maximum number
// of digits in a valid port number) that we can NULL terminate.
//
// Returns PORT_UNSPECIFIED when the component is empty/absent, PORT_INVALID
// for non-digit characters or out-of-range values, and the numeric port
// otherwise.
template<typename CHAR>
int DoParsePort(const CHAR* spec, const Component& component) {
  // Easy success case when there is no port.
  const int kMaxDigits = 5;  // 65535 has at most 5 digits.
  if (!component.is_nonempty())
    return PORT_UNSPECIFIED;
  // Skip over any leading 0s.
  Component digits_comp(component.end(), 0);
  for (int i = 0; i < component.len; i++) {
    if (spec[component.begin + i] != '0') {
      digits_comp = MakeRange(component.begin + i, component.end());
      break;
    }
  }
  if (digits_comp.len == 0)
    return 0;  // All digits were 0.
  // Verify we don't have too many digits (we'll be copying to our buffer so
  // we need to double-check).
  if (digits_comp.len > kMaxDigits)
    return PORT_INVALID;
  // Copy valid digits to the buffer.
  char digits[kMaxDigits + 1];  // +1 for null terminator
  for (int i = 0; i < digits_comp.len; i++) {
    CHAR ch = spec[digits_comp.begin + i];
    if (!IsPortDigit(ch)) {
      // Invalid port digit, fail.
      return PORT_INVALID;
    }
    digits[i] = static_cast<char>(ch);
  }
  // Null-terminate the string and convert to integer. Since we guarantee
  // only digits, atoi's lack of error handling is OK.
  digits[digits_comp.len] = 0;
  int port = atoi(digits);
  if (port > 65535)
    return PORT_INVALID;  // Out of range.
  return port;
}
// Extracts the file name (the text between the last slash and the following
// ';' parameter separator, if any) from |path| into *file_name.
template<typename CHAR>
void DoExtractFileName(const CHAR* spec,
                       const Component& path,
                       Component* file_name) {
  // Handle empty paths: they have no file names.
  if (!path.is_nonempty()) {
    file_name->reset();
    return;
  }
  // Extract the filename range from the path which is between
  // the last slash and the following semicolon.
  int file_end = path.end();
  for (int i = path.end() - 1; i >= path.begin; i--) {
    if (spec[i] == ';') {
      // A ';' truncates the file name (it introduces a path parameter).
      file_end = i;
    } else if (IsURLSlash(spec[i])) {
      // File name is everything following this character to the end
      *file_name = MakeRange(i + 1, file_end);
      return;
    }
  }
  // No slash found, this means the input was degenerate (generally paths
  // will start with a slash). Let's call everything the file name.
  *file_name = MakeRange(path.begin, file_end);
  return;
}
// Pulls the next key/value pair off the front of *query, advancing *query
// past the consumed pair so repeated calls iterate the whole query string.
// Returns false once the query is exhausted.
template<typename CHAR>
bool DoExtractQueryKeyValue(const CHAR* spec,
                            Component* query,
                            Component* key,
                            Component* value) {
  if (!query->is_nonempty())
    return false;
  const int end = query->end();
  int cur = query->begin;
  // The key runs until '=', '&', or the end of the query.
  const int key_begin = cur;
  while (cur < end && spec[cur] != '&' && spec[cur] != '=')
    ++cur;
  *key = MakeRange(key_begin, cur);
  // Skip the key/value separator, when present.
  if (cur < end && spec[cur] == '=')
    ++cur;
  // The value runs until '&' or the end of the query.
  const int value_begin = cur;
  while (cur < end && spec[cur] != '&')
    ++cur;
  *value = MakeRange(value_begin, cur);
  // Skip the pair separator, when present.
  if (cur < end && spec[cur] == '&')
    ++cur;
  // What remains is the query for the next call.
  *query = MakeRange(cur, end);
  return true;
}
} // namespace
// Parsed special members. inner_parsed_ is a heap-allocated, owned pointer
// (used by filesystem: URLs), so copy construction/assignment deep-copy it
// and the destructor frees it.
Parsed::Parsed() : potentially_dangling_markup(false), inner_parsed_(NULL) {}
// Copy constructor: copies all components and deep-copies inner_parsed_.
Parsed::Parsed(const Parsed& other)
    : scheme(other.scheme),
      username(other.username),
      password(other.password),
      host(other.host),
      port(other.port),
      path(other.path),
      query(other.query),
      ref(other.ref),
      potentially_dangling_markup(other.potentially_dangling_markup),
      inner_parsed_(NULL) {
  if (other.inner_parsed_)
    set_inner_parsed(*other.inner_parsed_);
}
// Copy assignment: self-assignment safe; deep-copies or clears inner_parsed_.
Parsed& Parsed::operator=(const Parsed& other) {
  if (this != &other) {
    scheme = other.scheme;
    username = other.username;
    password = other.password;
    host = other.host;
    port = other.port;
    path = other.path;
    query = other.query;
    ref = other.ref;
    potentially_dangling_markup = other.potentially_dangling_markup;
    if (other.inner_parsed_)
      set_inner_parsed(*other.inner_parsed_);
    else
      clear_inner_parsed();
  }
  return *this;
}
// Destructor: releases the owned inner Parsed, if any.
Parsed::~Parsed() {
  delete inner_parsed_;
}
// Returns the length of the URL: the ref, when present, is the last
// component; otherwise ask where the ref would have begun.
int Parsed::Length() const {
  return ref.is_valid() ? ref.end() : CountCharactersBefore(REF, false);
}
// Returns the number of characters in the spec before the given component
// |type|. When |include_delimiter| is true, the component's preceding
// delimiter (':' for port, '?' for query, '#' for ref) is counted as part
// of the component, i.e. the returned count stops before it.
int Parsed::CountCharactersBefore(ComponentType type,
                                  bool include_delimiter) const {
  if (type == SCHEME)
    return scheme.begin;
  // There will be some characters after the scheme like "://" and we don't
  // know how many. Search forwards for the next thing until we find one.
  int cur = 0;
  if (scheme.is_valid())
    cur = scheme.end() + 1;  // Advance over the ':' at the end of the scheme.
  if (username.is_valid()) {
    if (type <= USERNAME)
      return username.begin;
    cur = username.end() + 1;  // Advance over the '@' or ':' at the end.
  }
  if (password.is_valid()) {
    if (type <= PASSWORD)
      return password.begin;
    cur = password.end() + 1;  // Advance over the '@' at the end.
  }
  if (host.is_valid()) {
    if (type <= HOST)
      return host.begin;
    cur = host.end();
  }
  if (port.is_valid()) {
    if (type < PORT || (type == PORT && include_delimiter))
      return port.begin - 1;  // Back over delimiter.
    if (type == PORT)
      return port.begin;  // Don't want delimiter counted.
    cur = port.end();
  }
  if (path.is_valid()) {
    if (type <= PATH)
      return path.begin;
    cur = path.end();
  }
  if (query.is_valid()) {
    if (type < QUERY || (type == QUERY && include_delimiter))
      return query.begin - 1;  // Back over delimiter.
    if (type == QUERY)
      return query.begin;  // Don't want delimiter counted.
    cur = query.end();
  }
  if (ref.is_valid()) {
    if (type == REF && !include_delimiter)
      return ref.begin;  // Don't want delimiter counted.
    // When there is a ref and we get here, the component we wanted was before
    // this and not found, so we always know the beginning of the ref is right.
    return ref.begin - 1;  // Back over delimiter.
  }
  return cur;
}
// Returns everything after the scheme's colon: from the start of the
// username through the end of the URL.
Component Parsed::GetContent() const {
  const int begin = CountCharactersBefore(USERNAME, false);
  const int len = Length() - begin;
  // For compatibility with the standard URL parser, we treat no content as
  // -1, rather than having a length of 0 (we normally wouldn't care so
  // much for these non-standard URLs).
  return len ? Component(begin, len) : Component();
}
bool ExtractScheme(const char* url, int url_len, Component* scheme) {
return DoExtractScheme(url, url_len, scheme);
}
bool ExtractScheme(const base::char16* url, int url_len, Component* scheme) {
return DoExtractScheme(url, url_len, scheme);
}
// This handles everything that may be an authority terminator, including
// backslash. For special backslash handling see DoParseAfterScheme.
bool IsAuthorityTerminator(base::char16 ch) {
return IsURLSlash(ch) || ch == '?' || ch == '#';
}
void ExtractFileName(const char* url,
const Component& path,
Component* file_name) {
DoExtractFileName(url, path, file_name);
}
void ExtractFileName(const base::char16* url,
const Component& path,
Component* file_name) {
DoExtractFileName(url, path, file_name);
}
bool ExtractQueryKeyValue(const char* url,
Component* query,
Component* key,
Component* value) {
return DoExtractQueryKeyValue(url, query, key, value);
}
bool ExtractQueryKeyValue(const base::char16* url,
Component* query,
Component* key,
Component* value) {
return DoExtractQueryKeyValue(url, query, key, value);
}
void ParseAuthority(const char* spec,
const Component& auth,
Component* username,
Component* password,
Component* hostname,
Component* port_num) {
DoParseAuthority(spec, auth, username, password, hostname, port_num);
}
void ParseAuthority(const base::char16* spec,
const Component& auth,
Component* username,
Component* password,
Component* hostname,
Component* port_num) {
DoParseAuthority(spec, auth, username, password, hostname, port_num);
}
int ParsePort(const char* url, const Component& port) {
return DoParsePort(url, port);
}
int ParsePort(const base::char16* url, const Component& port) {
return DoParsePort(url, port);
}
void ParseStandardURL(const char* url, int url_len, Parsed* parsed) {
DoParseStandardURL(url, url_len, parsed);
}
void ParseStandardURL(const base::char16* url, int url_len, Parsed* parsed) {
DoParseStandardURL(url, url_len, parsed);
}
// 8-bit wrapper: forwards to DoParsePathURL() for path-style URLs (no
// authority section); |trim_path_end| is passed through unchanged.
void ParsePathURL(const char* url,
                  int url_len,
                  bool trim_path_end,
                  Parsed* parsed) {
  DoParsePathURL(url, url_len, trim_path_end, parsed);
}
// 16-bit overload of the wrapper above.
void ParsePathURL(const base::char16* url,
                  int url_len,
                  bool trim_path_end,
                  Parsed* parsed) {
  DoParsePathURL(url, url_len, trim_path_end, parsed);
}
// 8-bit wrapper: forwards to DoParseFileSystemURL() for filesystem: URLs.
void ParseFileSystemURL(const char* url, int url_len, Parsed* parsed) {
  DoParseFileSystemURL(url, url_len, parsed);
}
// 16-bit overload of the wrapper above.
void ParseFileSystemURL(const base::char16* url, int url_len, Parsed* parsed) {
  DoParseFileSystemURL(url, url_len, parsed);
}
// 8-bit wrapper: forwards to DoParseMailtoURL() for mailto: URLs.
void ParseMailtoURL(const char* url, int url_len, Parsed* parsed) {
  DoParseMailtoURL(url, url_len, parsed);
}
// 16-bit overload of the wrapper above.
void ParseMailtoURL(const base::char16* url, int url_len, Parsed* parsed) {
  DoParseMailtoURL(url, url_len, parsed);
}
// 8-bit wrapper: splits |path| of |spec| into file path, query, and ref
// components via the public ParsePath() entry point.
void ParsePathInternal(const char* spec,
                       const Component& path,
                       Component* filepath,
                       Component* query,
                       Component* ref) {
  ParsePath(spec, path, filepath, query, ref);
}
// 16-bit overload of the wrapper above.
void ParsePathInternal(const base::char16* spec,
                       const Component& path,
                       Component* filepath,
                       Component* query,
                       Component* ref) {
  ParsePath(spec, path, filepath, query, ref);
}
// 8-bit wrapper: forwards to DoParseAfterScheme() to parse everything
// following the scheme (starting at offset |after_scheme|).
void ParseAfterScheme(const char* spec,
                      int spec_len,
                      int after_scheme,
                      Parsed* parsed) {
  DoParseAfterScheme(spec, spec_len, after_scheme, parsed);
}
// 16-bit overload of the wrapper above.
void ParseAfterScheme(const base::char16* spec,
                      int spec_len,
                      int after_scheme,
                      Parsed* parsed) {
  DoParseAfterScheme(spec, spec_len, after_scheme, parsed);
}
} // namespace url
|
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/shell/renderer/test_runner/MockWebRTCDataChannelHandler.h"
#include <assert.h>
#include "content/shell/renderer/test_runner/WebTestDelegate.h"
#include "third_party/WebKit/public/platform/WebRTCDataChannelHandlerClient.h"
using namespace blink;
namespace WebTestRunner {
// Queued task that delivers a ready-state change to |dataChannelClient| when
// the test delegate runs its posted tasks, so the notification arrives
// asynchronously rather than inside the call that triggered it.
class DataChannelReadyStateTask : public WebMethodTask<MockWebRTCDataChannelHandler> {
public:
    DataChannelReadyStateTask(MockWebRTCDataChannelHandler* object, WebRTCDataChannelHandlerClient* dataChannelClient, WebRTCDataChannelHandlerClient::ReadyState state)
        : WebMethodTask<MockWebRTCDataChannelHandler>(object)
        , m_dataChannelClient(dataChannelClient)
        , m_state(state)
    {
    }
    // Invoked by the task queue; forwards the captured state to the client.
    virtual void runIfValid() OVERRIDE
    {
        m_dataChannelClient->didChangeReadyState(m_state);
    }
private:
    WebRTCDataChannelHandlerClient* m_dataChannelClient;
    WebRTCDataChannelHandlerClient::ReadyState m_state;
};
/////////////////////
// Mock data channel handler for layout tests; captures the |init| options
// supplied by the test. No client is attached until setClient() is called.
MockWebRTCDataChannelHandler::MockWebRTCDataChannelHandler(WebString label, const WebRTCDataChannelInit& init, WebTestDelegate* delegate)
    : m_client(0)
    , m_label(label)
    , m_init(init)
    , m_delegate(delegate)
{
    // A channel counts as reliable when it is ordered and neither retransmit
    // limit is set (-1 is treated as "no limit" here).
    m_reliable = (init.ordered && init.maxRetransmits == -1 && init.maxRetransmitTime == -1);
}
void MockWebRTCDataChannelHandler::setClient(WebRTCDataChannelHandlerClient* client)
{
    m_client = client;
    // As soon as a client attaches, schedule an open notification so tests
    // observe the channel transitioning to ReadyStateOpen asynchronously.
    if (m_client)
        m_delegate->postTask(new DataChannelReadyStateTask(this, m_client, WebRTCDataChannelHandlerClient::ReadyStateOpen));
}
// Trivial accessors: the label given at construction, the derived
// reliability flag, and the values captured from the WebRTCDataChannelInit.
blink::WebString MockWebRTCDataChannelHandler::label()
{
    return m_label;
}
bool MockWebRTCDataChannelHandler::isReliable()
{
    return m_reliable;
}
bool MockWebRTCDataChannelHandler::ordered() const
{
    return m_init.ordered;
}
// NOTE(review): m_init.maxRetransmitTime/-s are compared against -1 in the
// constructor but returned here as unsigned short — confirm the field types
// match and no narrowing occurs.
unsigned short MockWebRTCDataChannelHandler::maxRetransmitTime() const
{
    return m_init.maxRetransmitTime;
}
unsigned short MockWebRTCDataChannelHandler::maxRetransmits() const
{
    return m_init.maxRetransmits;
}
WebString MockWebRTCDataChannelHandler::protocol() const
{
    return m_init.protocol;
}
bool MockWebRTCDataChannelHandler::negotiated() const
{
    return m_init.negotiated;
}
unsigned short MockWebRTCDataChannelHandler::id() const
{
    return m_init.id;
}
// The mock never buffers outgoing data, so always report an empty buffer.
unsigned long MockWebRTCDataChannelHandler::bufferedAmount()
{
    return 0;
}
bool MockWebRTCDataChannelHandler::sendStringData(const WebString& data)
{
    assert(m_client);
    // Echo the data straight back to the local client instead of sending it
    // anywhere; "send" therefore always succeeds.
    m_client->didReceiveStringData(data);
    return true;
}
bool MockWebRTCDataChannelHandler::sendRawData(const char* data, size_t size)
{
    assert(m_client);
    // Binary counterpart of sendStringData(): echo back to the local client.
    m_client->didReceiveRawData(data, size);
    return true;
}
void MockWebRTCDataChannelHandler::close()
{
    assert(m_client);
    // Deliver the closed notification asynchronously via a posted task, the
    // same way the open notification is delivered in setClient().
    m_delegate->postTask(new DataChannelReadyStateTask(this, m_client, WebRTCDataChannelHandlerClient::ReadyStateClosed));
}
}
|
//
// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
#include <float.h>
#include <string.h>
#include <stdio.h>
#include "DetourNavMesh.h"
#include "DetourNode.h"
#include "DetourCommon.h"
#include "DetourMath.h"
#include "DetourAlloc.h"
#include "DetourAssert.h"
#include <new>
// Returns true when two portal-edge "slabs" overlap closely enough to be
// linked. Each slab is a 2D segment in the side-local space produced by
// calcSlabEndPoints(): component 0 is the along-edge coordinate, component 1
// is height. |px| shrinks the horizontal extent (touching end points do not
// connect); |py| is the vertical tolerance (callers pass walkableClimb).
inline bool overlapSlabs(const float* amin, const float* amax,
						 const float* bmin, const float* bmax,
						 const float px, const float py)
{
	// Check for horizontal overlap.
	// The segment is shrunken a little so that slabs which touch
	// at end points are not connected.
	const float minx = dtMax(amin[0]+px,bmin[0]+px);
	const float maxx = dtMin(amax[0]-px,bmax[0]-px);
	if (minx > maxx)
		return false;
	// Check vertical overlap.
	// Treat each slab as the line y = d*x + k and compare heights over the
	// shared interval [minx, maxx].
	// NOTE(review): assumes non-degenerate slabs (amax[0] != amin[0],
	// bmax[0] != bmin[0]); a zero-length slab divides by zero — confirm
	// callers never produce one.
	const float ad = (amax[1]-amin[1]) / (amax[0]-amin[0]);
	const float ak = amin[1] - ad*amin[0];
	const float bd = (bmax[1]-bmin[1]) / (bmax[0]-bmin[0]);
	const float bk = bmin[1] - bd*bmin[0];
	const float aminy = ad*minx + ak;
	const float amaxy = ad*maxx + ak;
	const float bminy = bd*minx + bk;
	const float bmaxy = bd*maxx + bk;
	const float dmin = bminy - aminy;
	const float dmax = bmaxy - amaxy;
	// Crossing segments always overlap.
	if (dmin*dmax < 0)
		return true;
	// Check for overlap at endpoints.
	const float thr = dtSqr(py*2);
	if (dmin*dmin <= thr || dmax*dmax <= thr)
		return true;
	return false;
}
// Returns the coordinate of vertex |va| that is constant across the slab for
// the given portal side: x for sides 0/4, z for sides 2/6, 0 otherwise.
static float getSlabCoord(const float* va, const int side)
{
	switch (side)
	{
	case 0:
	case 4:
		return va[0];
	case 2:
	case 6:
		return va[2];
	default:
		return 0;
	}
}
// Projects edge (va,vb) into the 2D slab space for the given portal side.
// Sides 0/4 order the endpoints by z, sides 2/6 by x; other sides write
// nothing. Output packs (along-edge coordinate, height) into bmin/bmax,
// with bmin holding the endpoint with the strictly smaller coordinate
// (ties resolve to vb, matching the original comparison).
static void calcSlabEndPoints(const float* va, const float* vb, float* bmin, float* bmax, const int side)
{
	int axis;
	if (side == 0 || side == 4)
		axis = 2;
	else if (side == 2 || side == 6)
		axis = 0;
	else
		return;
	const float* lo;
	const float* hi;
	if (va[axis] < vb[axis])
	{
		lo = va;
		hi = vb;
	}
	else
	{
		lo = vb;
		hi = va;
	}
	bmin[0] = lo[axis];
	bmin[1] = lo[1];
	bmax[0] = hi[axis];
	bmax[1] = hi[1];
}
// Hashes a tile grid position into a position-lookup bucket index.
// Two large, arbitrarily chosen prime constants mix x and y; |mask| is the
// lookup table size minus one (table size is a power of two).
inline int computeTileHash(int x, int y, const int mask)
{
	const unsigned int h1 = 0x8da6b343;
	const unsigned int h2 = 0xd8163841;
	const unsigned int n = h1 * (unsigned int)x + h2 * (unsigned int)y;
	return (int)(n & (unsigned int)mask);
}
// Pops one link index off the tile's free list.
// Returns DT_NULL_LINK when the tile's link pool is exhausted.
inline unsigned int allocLink(dtMeshTile* tile)
{
	const unsigned int link = tile->linksFreeList;
	if (link == DT_NULL_LINK)
		return DT_NULL_LINK;
	tile->linksFreeList = tile->links[link].next;
	return link;
}
// Returns a link index to the tile's free list (LIFO push onto the head).
inline void freeLink(dtMeshTile* tile, unsigned int link)
{
	dtLink& freed = tile->links[link];
	freed.next = tile->linksFreeList;
	tile->linksFreeList = link;
}
// Allocates a dtNavMesh through the Detour allocator and constructs it in
// place. Returns 0 on allocation failure. Free with dtFreeNavMesh().
dtNavMesh* dtAllocNavMesh()
{
	void* mem = dtAlloc(sizeof(dtNavMesh), DT_ALLOC_PERM);
	if (!mem) return 0;
	// dtAlloc returns raw memory, so placement-new runs the constructor.
	return new(mem) dtNavMesh;
}
/// @par
///
/// This function will only free the memory for tiles with the #DT_TILE_FREE_DATA
/// flag set.
void dtFreeNavMesh(dtNavMesh* navmesh)
{
	if (!navmesh) return;
	// Mirror dtAllocNavMesh(): explicit destructor call, then release the
	// raw memory through the Detour allocator.
	navmesh->~dtNavMesh();
	dtFree(navmesh);
}
//////////////////////////////////////////////////////////////////////////////////////////
/**
@class dtNavMesh
The navigation mesh consists of one or more tiles defining three primary types of structural data:
A polygon mesh which defines most of the navigation graph. (See rcPolyMesh for its structure.)
A detail mesh used for determining surface height on the polygon mesh. (See rcPolyMeshDetail for its structure.)
Off-mesh connections, which define custom point-to-point edges within the navigation graph.
The general build process is as follows:
-# Create rcPolyMesh and rcPolyMeshDetail data using the Recast build pipeline.
-# Optionally, create off-mesh connection data.
-# Combine the source data into a dtNavMeshCreateParams structure.
-# Create a tile data array using dtCreateNavMeshData().
-# Allocate a dtNavMesh object and initialize it. (For single tile navigation meshes,
the tile data is loaded during this step.)
-# For multi-tile navigation meshes, load the tile data using dtNavMesh::addTile().
Notes:
- This class is usually used in conjunction with the dtNavMeshQuery class for pathfinding.
- Technically, all navigation meshes are tiled. A 'solo' mesh is simply a navigation mesh initialized
to have only a single tile.
- This class does not implement any asynchronous methods. So the ::dtStatus result of all methods will
always contain either a success or failure flag.
@see dtNavMeshQuery, dtCreateNavMeshData, dtNavMeshCreateParams, #dtAllocNavMesh, #dtFreeNavMesh
*/
// Constructs an empty navigation mesh. One of the init() overloads must be
// called before the mesh is usable.
dtNavMesh::dtNavMesh() :
	m_tileWidth(0),
	m_tileHeight(0),
	m_maxTiles(0),
	m_tileLutSize(0),
	m_tileLutMask(0),
	m_posLookup(0),
	m_nextFree(0),
	m_tiles(0)
{
#ifndef DT_POLYREF64
	// Ref-encoding bit counts are only computed in init() for 32-bit refs.
	m_saltBits = 0;
	m_tileBits = 0;
	m_polyBits = 0;
#endif
	memset(&m_params, 0, sizeof(dtNavMeshParams));
	m_orig[0] = 0;
	m_orig[1] = 0;
	m_orig[2] = 0;
}
dtNavMesh::~dtNavMesh()
{
	// Free tile data only for tiles the mesh owns (DT_TILE_FREE_DATA);
	// externally owned tile data is left untouched.
	for (int i = 0; i < m_maxTiles; ++i)
	{
		if (m_tiles[i].flags & DT_TILE_FREE_DATA)
		{
			dtFree(m_tiles[i].data);
			m_tiles[i].data = 0;
			m_tiles[i].dataSize = 0;
		}
	}
	dtFree(m_posLookup);
	dtFree(m_tiles);
}
// Initializes the mesh for tiled use: copies |params|, allocates the tile
// array and the position-lookup hash table, and builds the tile free list.
dtStatus dtNavMesh::init(const dtNavMeshParams* params)
{
	memcpy(&m_params, params, sizeof(dtNavMeshParams));
	dtVcopy(m_orig, params->orig);
	m_tileWidth = params->tileWidth;
	m_tileHeight = params->tileHeight;
	// Init tiles
	m_maxTiles = params->maxTiles;
	// The position lookup table is a power-of-two hash (see computeTileHash),
	// sized to roughly a quarter of the tile count.
	m_tileLutSize = dtNextPow2(params->maxTiles/4);
	if (!m_tileLutSize) m_tileLutSize = 1;
	m_tileLutMask = m_tileLutSize-1;
	m_tiles = (dtMeshTile*)dtAlloc(sizeof(dtMeshTile)*m_maxTiles, DT_ALLOC_PERM);
	if (!m_tiles)
		return DT_FAILURE | DT_OUT_OF_MEMORY;
	m_posLookup = (dtMeshTile**)dtAlloc(sizeof(dtMeshTile*)*m_tileLutSize, DT_ALLOC_PERM);
	if (!m_posLookup)
		return DT_FAILURE | DT_OUT_OF_MEMORY;
	memset(m_tiles, 0, sizeof(dtMeshTile)*m_maxTiles);
	memset(m_posLookup, 0, sizeof(dtMeshTile*)*m_tileLutSize);
	// Build the free list back-to-front so tiles are handed out in ascending
	// index order. Salt starts at 1 (a zero ref is treated as null elsewhere).
	m_nextFree = 0;
	for (int i = m_maxTiles-1; i >= 0; --i)
	{
		m_tiles[i].salt = 1;
		m_tiles[i].next = m_nextFree;
		m_nextFree = &m_tiles[i];
	}
	// Init ID generator values.
#ifndef DT_POLYREF64
	m_tileBits = dtIlog2(dtNextPow2((unsigned int)params->maxTiles));
	m_polyBits = dtIlog2(dtNextPow2((unsigned int)params->maxPolys));
	// Only allow 31 salt bits, since the salt mask is calculated using 32bit uint and it will overflow.
	m_saltBits = dtMin((unsigned int)31, 32 - m_tileBits - m_polyBits);
	if (m_saltBits < 10)
		return DT_FAILURE | DT_INVALID_PARAM;
#endif
	return DT_SUCCESS;
}
// Initializes the mesh as a single-tile mesh built directly from |data|.
// @param[in] data     Tile blob produced by dtCreateNavMeshData(); ownership
//                     follows |flags| (see #DT_TILE_FREE_DATA).
// @param[in] dataSize Size of |data| in bytes.
// @param[in] flags    Tile flags, forwarded to addTile().
// @return The status flags for the operation.
dtStatus dtNavMesh::init(unsigned char* data, const int dataSize, const int flags)
{
	// Reject null or truncated buffers before touching the header; casting a
	// too-small buffer to dtMeshHeader would read out of bounds.
	if (!data || dataSize < (int)sizeof(dtMeshHeader))
		return DT_FAILURE | DT_INVALID_PARAM;
	// Make sure the data is in right format.
	dtMeshHeader* header = (dtMeshHeader*)data;
	if (header->magic != DT_NAVMESH_MAGIC)
		return DT_FAILURE | DT_WRONG_MAGIC;
	if (header->version != DT_NAVMESH_VERSION)
		return DT_FAILURE | DT_WRONG_VERSION;
	// Derive single-tile params from the tile's own bounds.
	dtNavMeshParams params;
	dtVcopy(params.orig, header->bmin);
	params.tileWidth = header->bmax[0] - header->bmin[0];
	params.tileHeight = header->bmax[2] - header->bmin[2];
	params.maxTiles = 1;
	params.maxPolys = header->polyCount;
	dtStatus status = init(&params);
	if (dtStatusFailed(status))
		return status;
	return addTile(data, dataSize, flags, 0, 0);
}
/// @par
///
/// @note The parameters are created automatically when the single tile
/// initialization is performed.
// Returns the params the mesh was initialized with (for single-tile meshes
// these were derived from the tile header in init(data,...)).
const dtNavMeshParams* dtNavMesh::getParams() const
{
	return &m_params;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Finds polygons in |tile| whose portal edges on |side| overlap the edge
// (va,vb). For each match, con[] receives the polygon ref and conarea[] the
// overlap interval along the slab coordinate. Returns the number of matches
// written (at most |maxcon|).
int dtNavMesh::findConnectingPolys(const float* va, const float* vb,
								   const dtMeshTile* tile, int side,
								   dtPolyRef* con, float* conarea, int maxcon) const
{
	if (!tile) return 0;
	float amin[2], amax[2];
	calcSlabEndPoints(va, vb, amin, amax, side);
	const float apos = getSlabCoord(va, side);
	// Scan every polygon edge marked as a portal on 'side' and test it
	// against the query slab.
	float bmin[2], bmax[2];
	unsigned short m = DT_EXT_LINK | (unsigned short)side;
	int n = 0;
	dtPolyRef base = getPolyRefBase(tile);
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		const int nv = poly->vertCount;
		for (int j = 0; j < nv; ++j)
		{
			// Skip edges which do not point to the right side.
			if (poly->neis[j] != m) continue;
			const float* vc = &tile->verts[poly->verts[j]*3];
			const float* vd = &tile->verts[poly->verts[(j+1) % nv]*3];
			const float bpos = getSlabCoord(vc, side);
			// Segments are not close enough.
			if (dtAbs(apos-bpos) > 0.01f)
				continue;
			// Check if the segments touch.
			calcSlabEndPoints(vc,vd, bmin,bmax, side);
			if (!overlapSlabs(amin,amax, bmin,bmax, 0.01f, tile->header->walkableClimb)) continue;
			// Add return value.
			if (n < maxcon)
			{
				conarea[n*2+0] = dtMax(amin[0], bmin[0]);
				conarea[n*2+1] = dtMin(amax[0], bmax[0]);
				con[n] = base | (dtPolyRef)i;
				n++;
			}
			// At most one portal edge per polygon can match; move on.
			break;
		}
	}
	return n;
}
// Removes from |tile| every link that references a polygon in |target|
// (standard singly-linked-list removal on each polygon's link chain).
void dtNavMesh::unconnectLinks(dtMeshTile* tile, dtMeshTile* target)
{
	if (!tile || !target) return;
	const unsigned int targetNum = decodePolyIdTile(getTileRef(target));
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		unsigned int j = poly->firstLink;
		unsigned int pj = DT_NULL_LINK;	// previous link, for list unlinking
		while (j != DT_NULL_LINK)
		{
			if (decodePolyIdTile(tile->links[j].ref) == targetNum)
			{
				// Remove link.
				unsigned int nj = tile->links[j].next;
				if (pj == DT_NULL_LINK)
					poly->firstLink = nj;
				else
					tile->links[pj].next = nj;
				freeLink(tile, j);
				j = nj;
			}
			else
			{
				// Advance
				pj = j;
				j = tile->links[j].next;
			}
		}
	}
}
// Creates cross-tile links from |tile|'s portal edges to polygons in
// |target|. |side| restricts linking to one portal direction; -1 links all
// directions (used for stacked layers at the same grid location).
void dtNavMesh::connectExtLinks(dtMeshTile* tile, dtMeshTile* target, int side)
{
	if (!tile) return;
	// Connect border links.
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		// Create new links.
		// unsigned short m = DT_EXT_LINK | (unsigned short)side;
		const int nv = poly->vertCount;
		for (int j = 0; j < nv; ++j)
		{
			// Skip non-portal edges.
			if ((poly->neis[j] & DT_EXT_LINK) == 0)
				continue;
			const int dir = (int)(poly->neis[j] & 0xff);
			if (side != -1 && dir != side)
				continue;
			// Create new links
			const float* va = &tile->verts[poly->verts[j]*3];
			const float* vb = &tile->verts[poly->verts[(j+1) % nv]*3];
			dtPolyRef nei[4];
			float neia[4*2];
			int nnei = findConnectingPolys(va,vb, target, dtOppositeTile(dir), nei,neia,4);
			for (int k = 0; k < nnei; ++k)
			{
				unsigned int idx = allocLink(tile);
				if (idx != DT_NULL_LINK)
				{
					dtLink* link = &tile->links[idx];
					link->ref = nei[k];
					link->edge = (unsigned char)j;
					link->side = (unsigned char)dir;
					// Push onto the polygon's link list.
					link->next = poly->firstLink;
					poly->firstLink = idx;
					// Compress portal limits to a byte value.
					// The overlap interval is normalized along the edge and
					// quantized to [0,255].
					if (dir == 0 || dir == 4)
					{
						float tmin = (neia[k*2+0]-va[2]) / (vb[2]-va[2]);
						float tmax = (neia[k*2+1]-va[2]) / (vb[2]-va[2]);
						if (tmin > tmax)
							dtSwap(tmin,tmax);
						link->bmin = (unsigned char)(dtClamp(tmin, 0.0f, 1.0f)*255.0f);
						link->bmax = (unsigned char)(dtClamp(tmax, 0.0f, 1.0f)*255.0f);
					}
					else if (dir == 2 || dir == 6)
					{
						float tmin = (neia[k*2+0]-va[0]) / (vb[0]-va[0]);
						float tmax = (neia[k*2+1]-va[0]) / (vb[0]-va[0]);
						if (tmin > tmax)
							dtSwap(tmin,tmax);
						link->bmin = (unsigned char)(dtClamp(tmin, 0.0f, 1.0f)*255.0f);
						link->bmax = (unsigned char)(dtClamp(tmax, 0.0f, 1.0f)*255.0f);
					}
				}
			}
		}
	}
}
// Connects off-mesh connections that start in |target| and land in |tile|.
// |side| restricts which portal direction is considered; -1 means any.
// NOTE(review): only |tile| is null-checked but |target| is dereferenced
// below — confirm callers never pass a null target.
void dtNavMesh::connectExtOffMeshLinks(dtMeshTile* tile, dtMeshTile* target, int side)
{
	if (!tile) return;
	// Connect off-mesh links.
	// We are interested in links that land from the target tile into this tile.
	const unsigned char oppositeSide = (side == -1) ? 0xff : (unsigned char)dtOppositeTile(side);
	for (int i = 0; i < target->header->offMeshConCount; ++i)
	{
		dtOffMeshConnection* targetCon = &target->offMeshCons[i];
		if (targetCon->side != oppositeSide)
			continue;
		dtPoly* targetPoly = &target->polys[targetCon->poly];
		// Skip off-mesh connections which start location could not be connected at all.
		if (targetPoly->firstLink == DT_NULL_LINK)
			continue;
		const float ext[3] = { targetCon->rad, target->header->walkableClimb, targetCon->rad };
		// Find polygon to connect to.
		// pos[3..5] is the end point of the connection (pos[0..2] is the start).
		const float* p = &targetCon->pos[3];
		float nearestPt[3];
		dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt);
		if (!ref)
			continue;
		// findNearestPoly may return too optimistic results, further check to make sure.
		if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(targetCon->rad))
			continue;
		// Make sure the location is on current mesh.
		float* v = &target->verts[targetPoly->verts[1]*3];
		dtVcopy(v, nearestPt);
		// Link off-mesh connection to target poly.
		unsigned int idx = allocLink(target);
		if (idx != DT_NULL_LINK)
		{
			dtLink* link = &target->links[idx];
			link->ref = ref;
			link->edge = (unsigned char)1;
			link->side = oppositeSide;
			link->bmin = link->bmax = 0;
			// Add to linked list.
			link->next = targetPoly->firstLink;
			targetPoly->firstLink = idx;
		}
		// Link target poly to off-mesh connection.
		if (targetCon->flags & DT_OFFMESH_CON_BIDIR)
		{
			unsigned int tidx = allocLink(tile);
			if (tidx != DT_NULL_LINK)
			{
				const unsigned short landPolyIdx = (unsigned short)decodePolyIdPoly(ref);
				dtPoly* landPoly = &tile->polys[landPolyIdx];
				dtLink* link = &tile->links[tidx];
				link->ref = getPolyRefBase(target) | (dtPolyRef)(targetCon->poly);
				link->edge = 0xff;
				link->side = (unsigned char)(side == -1 ? 0xff : side);
				link->bmin = link->bmax = 0;
				// Add to linked list.
				link->next = landPoly->firstLink;
				landPoly->firstLink = tidx;
			}
		}
	}
}
// Builds the intra-tile link list for every polygon from the neighbor
// indices (neis[]) recorded at build time. Resets firstLink for all polys.
void dtNavMesh::connectIntLinks(dtMeshTile* tile)
{
	if (!tile) return;
	dtPolyRef base = getPolyRefBase(tile);
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		poly->firstLink = DT_NULL_LINK;
		if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
			continue;
		// Build edge links backwards so that the links will be
		// in the linked list from lowest index to highest.
		for (int j = poly->vertCount-1; j >= 0; --j)
		{
			// Skip hard and non-internal edges.
			if (poly->neis[j] == 0 || (poly->neis[j] & DT_EXT_LINK)) continue;
			unsigned int idx = allocLink(tile);
			if (idx != DT_NULL_LINK)
			{
				dtLink* link = &tile->links[idx];
				// neis[] stores neighbor poly index + 1; 0 means no neighbor.
				link->ref = base | (dtPolyRef)(poly->neis[j]-1);
				link->edge = (unsigned char)j;
				link->side = 0xff;
				link->bmin = link->bmax = 0;
				// Add to linked list.
				link->next = poly->firstLink;
				poly->firstLink = idx;
			}
		}
	}
}
// Anchors each off-mesh connection's start point to the nearest polygon in
// the same tile and creates the bidirectional pair of links between the
// connection polygon and that land polygon.
void dtNavMesh::baseOffMeshLinks(dtMeshTile* tile)
{
	if (!tile) return;
	dtPolyRef base = getPolyRefBase(tile);
	// Base off-mesh connection start points.
	for (int i = 0; i < tile->header->offMeshConCount; ++i)
	{
		dtOffMeshConnection* con = &tile->offMeshCons[i];
		dtPoly* poly = &tile->polys[con->poly];
		const float ext[3] = { con->rad, tile->header->walkableClimb, con->rad };
		// Find polygon to connect to.
		const float* p = &con->pos[0]; // First vertex
		float nearestPt[3];
		dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt);
		if (!ref) continue;
		// findNearestPoly may return too optimistic results, further check to make sure.
		if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(con->rad))
			continue;
		// Make sure the location is on current mesh.
		// Snap the connection's start vertex onto the land polygon.
		float* v = &tile->verts[poly->verts[0]*3];
		dtVcopy(v, nearestPt);
		// Link off-mesh connection to target poly.
		unsigned int idx = allocLink(tile);
		if (idx != DT_NULL_LINK)
		{
			dtLink* link = &tile->links[idx];
			link->ref = ref;
			link->edge = (unsigned char)0;
			link->side = 0xff;
			link->bmin = link->bmax = 0;
			// Add to linked list.
			link->next = poly->firstLink;
			poly->firstLink = idx;
		}
		// Start end-point is always connect back to off-mesh connection.
		unsigned int tidx = allocLink(tile);
		if (tidx != DT_NULL_LINK)
		{
			const unsigned short landPolyIdx = (unsigned short)decodePolyIdPoly(ref);
			dtPoly* landPoly = &tile->polys[landPolyIdx];
			dtLink* link = &tile->links[tidx];
			link->ref = base | (dtPolyRef)(con->poly);
			link->edge = 0xff;
			link->side = 0xff;
			link->bmin = link->bmax = 0;
			// Add to linked list.
			link->next = landPoly->firstLink;
			landPoly->firstLink = tidx;
		}
	}
}
// Computes the closest point on polygon |ref| to |pos|, writing it to
// |closest|. If |posOverPoly| is non-null it is set to true only when |pos|
// lies over the polygon's 2D footprint. The polygon ref is assumed valid
// (unsafe lookup). Off-mesh connections are handled as a segment instead of
// a polygon.
void dtNavMesh::closestPointOnPoly(dtPolyRef ref, const float* pos, float* closest, bool* posOverPoly) const
{
	const dtMeshTile* tile = 0;
	const dtPoly* poly = 0;
	getTileAndPolyByRefUnsafe(ref, &tile, &poly);
	// Off-mesh connections don't have detail polygons.
	if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
	{
		// Project |pos| onto the connection segment by relative distance to
		// its two endpoints.
		const float* v0 = &tile->verts[poly->verts[0]*3];
		const float* v1 = &tile->verts[poly->verts[1]*3];
		const float d0 = dtVdist(pos, v0);
		const float d1 = dtVdist(pos, v1);
		const float u = d0 / (d0+d1);
		dtVlerp(closest, v0, v1, u);
		if (posOverPoly)
			*posOverPoly = false;
		return;
	}
	const unsigned int ip = (unsigned int)(poly - tile->polys);
	const dtPolyDetail* pd = &tile->detailMeshes[ip];
	// Clamp point to be inside the polygon.
	float verts[DT_VERTS_PER_POLYGON*3];
	float edged[DT_VERTS_PER_POLYGON];
	float edget[DT_VERTS_PER_POLYGON];
	const int nv = poly->vertCount;
	for (int i = 0; i < nv; ++i)
		dtVcopy(&verts[i*3], &tile->verts[poly->verts[i]*3]);
	dtVcopy(closest, pos);
	if (!dtDistancePtPolyEdgesSqr(pos, verts, nv, edged, edget))
	{
		// Point is outside the polygon, dtClamp to nearest edge.
		float dmin = edged[0];
		int imin = 0;
		for (int i = 1; i < nv; ++i)
		{
			if (edged[i] < dmin)
			{
				dmin = edged[i];
				imin = i;
			}
		}
		const float* va = &verts[imin*3];
		const float* vb = &verts[((imin+1)%nv)*3];
		dtVlerp(closest, va, vb, edget[imin]);
		if (posOverPoly)
			*posOverPoly = false;
	}
	else
	{
		if (posOverPoly)
			*posOverPoly = true;
	}
	// Find height at the location.
	// Walk the detail triangles; the first one containing the point supplies
	// the surface height.
	for (int j = 0; j < pd->triCount; ++j)
	{
		const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4];
		const float* v[3];
		for (int k = 0; k < 3; ++k)
		{
			// Detail tri indices below vertCount refer to polygon verts;
			// the rest index into the detail vertex pool.
			if (t[k] < poly->vertCount)
				v[k] = &tile->verts[poly->verts[t[k]]*3];
			else
				v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3];
		}
		float h;
		if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h))
		{
			closest[1] = h;
			break;
		}
	}
}
// Returns the polygon in |tile| nearest to |center| within the |extents|
// query box, writing the closest point to |nearestPt|. A point lying
// directly over a polygon (within walkableClimb vertically) is favored over
// plain straight-line distance. Returns 0 when no polygon qualifies.
dtPolyRef dtNavMesh::findNearestPolyInTile(const dtMeshTile* tile,
										   const float* center, const float* extents,
										   float* nearestPt) const
{
	float bmin[3], bmax[3];
	dtVsub(bmin, center, extents);
	dtVadd(bmax, center, extents);
	// Get nearby polygons from proximity grid.
	dtPolyRef polys[128];
	int polyCount = queryPolygonsInTile(tile, bmin, bmax, polys, 128);
	// Find nearest polygon amongst the nearby polygons.
	dtPolyRef nearest = 0;
	float nearestDistanceSqr = FLT_MAX;
	for (int i = 0; i < polyCount; ++i)
	{
		dtPolyRef ref = polys[i];
		float closestPtPoly[3];
		float diff[3];
		bool posOverPoly = false;
		float d;
		closestPointOnPoly(ref, center, closestPtPoly, &posOverPoly);
		// If a point is directly over a polygon and closer than
		// climb height, favor that instead of straight line nearest point.
		dtVsub(diff, center, closestPtPoly);
		if (posOverPoly)
		{
			d = dtAbs(diff[1]) - tile->header->walkableClimb;
			d = d > 0 ? d*d : 0;
		}
		else
		{
			d = dtVlenSqr(diff);
		}
		if (d < nearestDistanceSqr)
		{
			dtVcopy(nearestPt, closestPtPoly);
			nearestDistanceSqr = d;
			nearest = ref;
		}
	}
	return nearest;
}
// Collects refs of polygons in |tile| whose bounds overlap the AABB
// [qmin,qmax]. Uses the tile's BV-tree when present, otherwise brute-forces
// polygon bounds. Returns the number of refs written (at most |maxPolys|).
int dtNavMesh::queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax,
								   dtPolyRef* polys, const int maxPolys) const
{
	if (tile->bvTree)
	{
		const dtBVNode* node = &tile->bvTree[0];
		const dtBVNode* end = &tile->bvTree[tile->header->bvNodeCount];
		const float* tbmin = tile->header->bmin;
		const float* tbmax = tile->header->bmax;
		const float qfac = tile->header->bvQuantFactor;
		// Calculate quantized box
		unsigned short bmin[3], bmax[3];
		// dtClamp query box to world box.
		float minx = dtClamp(qmin[0], tbmin[0], tbmax[0]) - tbmin[0];
		float miny = dtClamp(qmin[1], tbmin[1], tbmax[1]) - tbmin[1];
		float minz = dtClamp(qmin[2], tbmin[2], tbmax[2]) - tbmin[2];
		float maxx = dtClamp(qmax[0], tbmin[0], tbmax[0]) - tbmin[0];
		float maxy = dtClamp(qmax[1], tbmin[1], tbmax[1]) - tbmin[1];
		float maxz = dtClamp(qmax[2], tbmin[2], tbmax[2]) - tbmin[2];
		// Quantize
		// Round mins down to even and maxs up to odd so the quantized box is
		// conservative (never smaller than the query box).
		bmin[0] = (unsigned short)(qfac * minx) & 0xfffe;
		bmin[1] = (unsigned short)(qfac * miny) & 0xfffe;
		bmin[2] = (unsigned short)(qfac * minz) & 0xfffe;
		bmax[0] = (unsigned short)(qfac * maxx + 1) | 1;
		bmax[1] = (unsigned short)(qfac * maxy + 1) | 1;
		bmax[2] = (unsigned short)(qfac * maxz + 1) | 1;
		// Traverse tree
		// Internal nodes store a negative escape index used to skip their
		// subtree when the bounds do not overlap.
		dtPolyRef base = getPolyRefBase(tile);
		int n = 0;
		while (node < end)
		{
			const bool overlap = dtOverlapQuantBounds(bmin, bmax, node->bmin, node->bmax);
			const bool isLeafNode = node->i >= 0;
			if (isLeafNode && overlap)
			{
				if (n < maxPolys)
					polys[n++] = base | (dtPolyRef)node->i;
			}
			if (overlap || isLeafNode)
				node++;
			else
			{
				const int escapeIndex = -node->i;
				node += escapeIndex;
			}
		}
		return n;
	}
	else
	{
		float bmin[3], bmax[3];
		int n = 0;
		dtPolyRef base = getPolyRefBase(tile);
		for (int i = 0; i < tile->header->polyCount; ++i)
		{
			dtPoly* p = &tile->polys[i];
			// Do not return off-mesh connection polygons.
			if (p->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
				continue;
			// Calc polygon bounds.
			const float* v = &tile->verts[p->verts[0]*3];
			dtVcopy(bmin, v);
			dtVcopy(bmax, v);
			for (int j = 1; j < p->vertCount; ++j)
			{
				v = &tile->verts[p->verts[j]*3];
				dtVmin(bmin, v);
				dtVmax(bmax, v);
			}
			if (dtOverlapBounds(qmin,qmax, bmin,bmax))
			{
				if (n < maxPolys)
					polys[n++] = base | (dtPolyRef)i;
			}
		}
		return n;
	}
}
/// @par
///
/// The add operation will fail if the data is in the wrong format, the allocated tile
/// space is full, or there is a tile already at the specified reference.
///
/// The lastRef parameter is used to restore a tile with the same tile
/// reference it had previously used. In this case the #dtPolyRef's for the
/// tile will be restored to the same values they were before the tile was
/// removed.
///
/// The nav mesh assumes exclusive access to the data passed and will make
/// changes to the dynamic portion of the data. For that reason the data
/// should not be reused in other nav meshes until the tile has been successfully
/// removed from this nav mesh.
///
/// @see dtCreateNavMeshData, #removeTile
// NOTE(review): |data|/|dataSize| are not validated here beyond the magic and
// version fields; callers must pass a complete tile blob — confirm upstream
// guarantees this.
dtStatus dtNavMesh::addTile(unsigned char* data, int dataSize, int flags,
							dtTileRef lastRef, dtTileRef* result)
{
	// Make sure the data is in right format.
	dtMeshHeader* header = (dtMeshHeader*)data;
	if (header->magic != DT_NAVMESH_MAGIC)
		return DT_FAILURE | DT_WRONG_MAGIC;
	if (header->version != DT_NAVMESH_VERSION)
		return DT_FAILURE | DT_WRONG_VERSION;
	// Make sure the location is free.
	if (getTileAt(header->x, header->y, header->layer))
		return DT_FAILURE;
	// Allocate a tile.
	dtMeshTile* tile = 0;
	if (!lastRef)
	{
		// No specific ref requested: pop the head of the free list.
		if (m_nextFree)
		{
			tile = m_nextFree;
			m_nextFree = tile->next;
			tile->next = 0;
		}
	}
	else
	{
		// Try to relocate the tile to specific index with same salt.
		int tileIndex = (int)decodePolyIdTile((dtPolyRef)lastRef);
		if (tileIndex >= m_maxTiles)
			return DT_FAILURE | DT_OUT_OF_MEMORY;
		// Try to find the specific tile id from the free list.
		dtMeshTile* target = &m_tiles[tileIndex];
		dtMeshTile* prev = 0;
		tile = m_nextFree;
		while (tile && tile != target)
		{
			prev = tile;
			tile = tile->next;
		}
		// Could not find the correct location.
		if (tile != target)
			return DT_FAILURE | DT_OUT_OF_MEMORY;
		// Remove from freelist
		if (!prev)
			m_nextFree = tile->next;
		else
			prev->next = tile->next;
		// Restore salt.
		tile->salt = decodePolyIdSalt((dtPolyRef)lastRef);
	}
	// Make sure we could allocate a tile.
	if (!tile)
		return DT_FAILURE | DT_OUT_OF_MEMORY;
	// Insert tile into the position lut.
	int h = computeTileHash(header->x, header->y, m_tileLutMask);
	tile->next = m_posLookup[h];
	m_posLookup[h] = tile;
	// Patch header pointers.
	// The tile blob is one contiguous allocation: header followed by the
	// 4-byte-aligned sections below, in exactly this order.
	const int headerSize = dtAlign4(sizeof(dtMeshHeader));
	const int vertsSize = dtAlign4(sizeof(float)*3*header->vertCount);
	const int polysSize = dtAlign4(sizeof(dtPoly)*header->polyCount);
	const int linksSize = dtAlign4(sizeof(dtLink)*(header->maxLinkCount));
	const int detailMeshesSize = dtAlign4(sizeof(dtPolyDetail)*header->detailMeshCount);
	const int detailVertsSize = dtAlign4(sizeof(float)*3*header->detailVertCount);
	const int detailTrisSize = dtAlign4(sizeof(unsigned char)*4*header->detailTriCount);
	const int bvtreeSize = dtAlign4(sizeof(dtBVNode)*header->bvNodeCount);
	const int offMeshLinksSize = dtAlign4(sizeof(dtOffMeshConnection)*header->offMeshConCount);
	unsigned char* d = data + headerSize;
	tile->verts = dtGetThenAdvanceBufferPointer<float>(d, vertsSize);
	tile->polys = dtGetThenAdvanceBufferPointer<dtPoly>(d, polysSize);
	tile->links = dtGetThenAdvanceBufferPointer<dtLink>(d, linksSize);
	tile->detailMeshes = dtGetThenAdvanceBufferPointer<dtPolyDetail>(d, detailMeshesSize);
	tile->detailVerts = dtGetThenAdvanceBufferPointer<float>(d, detailVertsSize);
	tile->detailTris = dtGetThenAdvanceBufferPointer<unsigned char>(d, detailTrisSize);
	tile->bvTree = dtGetThenAdvanceBufferPointer<dtBVNode>(d, bvtreeSize);
	tile->offMeshCons = dtGetThenAdvanceBufferPointer<dtOffMeshConnection>(d, offMeshLinksSize);
	// If there are no items in the bvtree, reset the tree pointer.
	if (!bvtreeSize)
		tile->bvTree = 0;
	// Build links freelist
	tile->linksFreeList = 0;
	tile->links[header->maxLinkCount-1].next = DT_NULL_LINK;
	for (int i = 0; i < header->maxLinkCount-1; ++i)
		tile->links[i].next = i+1;
	// Init tile.
	tile->header = header;
	tile->data = data;
	tile->dataSize = dataSize;
	tile->flags = flags;
	connectIntLinks(tile);
	// Base off-mesh connections to their starting polygons and connect connections inside the tile.
	baseOffMeshLinks(tile);
	connectExtOffMeshLinks(tile, tile, -1);
	// Create connections with neighbour tiles.
	static const int MAX_NEIS = 32;
	dtMeshTile* neis[MAX_NEIS];
	int nneis;
	// Connect with layers in current tile.
	nneis = getTilesAt(header->x, header->y, neis, MAX_NEIS);
	for (int j = 0; j < nneis; ++j)
	{
		if (neis[j] == tile)
			continue;
		connectExtLinks(tile, neis[j], -1);
		connectExtLinks(neis[j], tile, -1);
		connectExtOffMeshLinks(tile, neis[j], -1);
		connectExtOffMeshLinks(neis[j], tile, -1);
	}
	// Connect with neighbour tiles.
	// Links are created in both directions so the graph stays symmetric.
	for (int i = 0; i < 8; ++i)
	{
		nneis = getNeighbourTilesAt(header->x, header->y, i, neis, MAX_NEIS);
		for (int j = 0; j < nneis; ++j)
		{
			connectExtLinks(tile, neis[j], i);
			connectExtLinks(neis[j], tile, dtOppositeTile(i));
			connectExtOffMeshLinks(tile, neis[j], i);
			connectExtOffMeshLinks(neis[j], tile, dtOppositeTile(i));
		}
	}
	if (result)
		*result = getTileRef(tile);
	return DT_SUCCESS;
}
// Returns the tile at grid position (x,y) on the given layer, or 0 when no
// such tile is loaded. Walks the hash bucket chain for (x,y).
const dtMeshTile* dtNavMesh::getTileAt(const int x, const int y, const int layer) const
{
	const int h = computeTileHash(x, y, m_tileLutMask);
	for (dtMeshTile* tile = m_posLookup[h]; tile; tile = tile->next)
	{
		const dtMeshHeader* hdr = tile->header;
		if (hdr && hdr->x == x && hdr->y == y && hdr->layer == layer)
			return tile;
	}
	return 0;
}
// Collects the tiles neighbouring grid cell (x,y) on |side| (0-7, the eight
// compass directions used by the portal system), including stacked layers,
// by delegating to getTilesAt(). Returns the number of tiles written.
// Fix: dropped the stray ';' after the switch block (empty statement that
// trips -Wextra-semi-stmt); behavior is unchanged.
int dtNavMesh::getNeighbourTilesAt(const int x, const int y, const int side, dtMeshTile** tiles, const int maxTiles) const
{
	int nx = x, ny = y;
	switch (side)
	{
		case 0: nx++; break;
		case 1: nx++; ny++; break;
		case 2: ny++; break;
		case 3: nx--; ny++; break;
		case 4: nx--; break;
		case 5: nx--; ny--; break;
		case 6: ny--; break;
		case 7: nx++; ny--; break;
	}
	return getTilesAt(nx, ny, tiles, maxTiles);
}
// Collects every tile (all layers) stored at grid position (x,y), writing up
// to |maxTiles| pointers into |tiles|. Returns the number written.
int dtNavMesh::getTilesAt(const int x, const int y, dtMeshTile** tiles, const int maxTiles) const
{
	int n = 0;
	const int h = computeTileHash(x, y, m_tileLutMask);
	for (dtMeshTile* tile = m_posLookup[h]; tile; tile = tile->next)
	{
		if (!tile->header || tile->header->x != x || tile->header->y != y)
			continue;
		if (n < maxTiles)
			tiles[n++] = tile;
	}
	return n;
}
/// @par
///
/// This function will not fail if the tiles array is too small to hold the
/// entire result set. It will simply fill the array to capacity.
// Const-pointer overload of getTilesAt() above; identical lookup logic.
int dtNavMesh::getTilesAt(const int x, const int y, dtMeshTile const** tiles, const int maxTiles) const
{
	int n = 0;
	// Find tile based on hash.
	int h = computeTileHash(x,y,m_tileLutMask);
	dtMeshTile* tile = m_posLookup[h];
	while (tile)
	{
		if (tile->header &&
			tile->header->x == x &&
			tile->header->y == y)
		{
			// Store up to the caller's capacity; extra matches are dropped.
			if (n < maxTiles)
				tiles[n++] = tile;
		}
		tile = tile->next;
	}
	return n;
}
/// Returns the reference of the tile at the given grid location and layer,
/// or 0 when no such tile exists.
dtTileRef dtNavMesh::getTileRefAt(const int x, const int y, const int layer) const
{
	const int bucket = computeTileHash(x, y, m_tileLutMask);
	for (dtMeshTile* tile = m_posLookup[bucket]; tile; tile = tile->next)
	{
		if (tile->header &&
			tile->header->x == x &&
			tile->header->y == y &&
			tile->header->layer == layer)
			return getTileRef(tile);
	}
	return 0;
}
/// Resolves a tile reference to a tile pointer, or null when the index is out
/// of range or the salt no longer matches (tile was removed/replaced).
const dtMeshTile* dtNavMesh::getTileByRef(dtTileRef ref) const
{
	if (!ref)
		return 0;
	const unsigned int tileIndex = decodePolyIdTile((dtPolyRef)ref);
	if ((int)tileIndex >= m_maxTiles)
		return 0;
	const dtMeshTile* tile = &m_tiles[tileIndex];
	const unsigned int tileSalt = decodePolyIdSalt((dtPolyRef)ref);
	return (tile->salt == tileSalt) ? tile : 0;
}
/// Capacity of the tile array (slots), not the count of tiles in use.
int dtNavMesh::getMaxTiles() const
{
	return m_maxTiles;
}
/// Unchecked access to tile slot i; caller must keep i in [0, getMaxTiles()).
dtMeshTile* dtNavMesh::getTile(int i)
{
	return m_tiles + i;
}
/// Const overload of the unchecked tile-slot accessor.
const dtMeshTile* dtNavMesh::getTile(int i) const
{
	return m_tiles + i;
}
/// Maps a world position (x,z plane) to integer tile grid coordinates,
/// relative to the mesh origin. Negative positions floor toward -inf.
void dtNavMesh::calcTileLoc(const float* pos, int* tx, int* ty) const
{
	const float dx = pos[0] - m_orig[0];
	const float dz = pos[2] - m_orig[2];
	*tx = (int)floorf(dx / m_tileWidth);
	*ty = (int)floorf(dz / m_tileHeight);
}
/// Resolves a polygon reference to its tile and polygon, validating the
/// salt, tile index, and polygon index on the way.
dtStatus dtNavMesh::getTileAndPolyByRef(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const
{
	if (!ref)
		return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	const dtMeshTile* t = &m_tiles[it];
	if (t->salt != salt || t->header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (ip >= (unsigned int)t->header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	*tile = t;
	*poly = &t->polys[ip];
	return DT_SUCCESS;
}
/// @par
///
/// @warning Only use this function if it is known that the provided polygon
/// reference is valid. This function is faster than #getTileAndPolyByRef, but
/// it does not validate the reference.
void dtNavMesh::getTileAndPolyByRefUnsafe(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const
{
unsigned int salt, it, ip;
decodePolyId(ref, salt, it, ip);
*tile = &m_tiles[it];
*poly = &m_tiles[it].polys[ip];
}
/// A ref is valid when its tile index is in range, the salt matches the
/// live tile, the tile has data, and the poly index is inside that tile.
bool dtNavMesh::isValidPolyRef(dtPolyRef ref) const
{
	if (!ref)
		return false;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return false;
	const dtMeshTile& tile = m_tiles[it];
	if (tile.salt != salt || tile.header == 0)
		return false;
	return ip < (unsigned int)tile.header->polyCount;
}
/// @par
///
/// This function returns the data for the tile so that, if desired,
/// it can be added back to the navigation mesh at a later point.
///
/// @see #addTile
dtStatus dtNavMesh::removeTile(dtTileRef ref, unsigned char** data, int* dataSize)
{
// Validate the reference: tile index must be in range and the salt must
// match the live tile (a stale salt means the tile was already replaced).
if (!ref)
return DT_FAILURE | DT_INVALID_PARAM;
unsigned int tileIndex = decodePolyIdTile((dtPolyRef)ref);
unsigned int tileSalt = decodePolyIdSalt((dtPolyRef)ref);
if ((int)tileIndex >= m_maxTiles)
return DT_FAILURE | DT_INVALID_PARAM;
dtMeshTile* tile = &m_tiles[tileIndex];
if (tile->salt != tileSalt)
return DT_FAILURE | DT_INVALID_PARAM;
// Remove tile from hash lookup.
// Unlink the tile from its hash bucket's singly-linked chain.
int h = computeTileHash(tile->header->x,tile->header->y,m_tileLutMask);
dtMeshTile* prev = 0;
dtMeshTile* cur = m_posLookup[h];
while (cur)
{
if (cur == tile)
{
if (prev)
prev->next = cur->next;
else
m_posLookup[h] = cur->next;
break;
}
prev = cur;
cur = cur->next;
}
// Remove connections to neighbour tiles.
static const int MAX_NEIS = 32;
dtMeshTile* neis[MAX_NEIS];
int nneis;
// Disconnect from other layers in current tile.
nneis = getTilesAt(tile->header->x, tile->header->y, neis, MAX_NEIS);
for (int j = 0; j < nneis; ++j)
{
if (neis[j] == tile) continue;
unconnectLinks(neis[j], tile);
}
// Disconnect from neighbour tiles.
for (int i = 0; i < 8; ++i)
{
nneis = getNeighbourTilesAt(tile->header->x, tile->header->y, i, neis, MAX_NEIS);
for (int j = 0; j < nneis; ++j)
unconnectLinks(neis[j], tile);
}
// Reset tile.
if (tile->flags & DT_TILE_FREE_DATA)
{
// Owns data
// The mesh owns the data, so free it and report null back to the caller.
dtFree(tile->data);
tile->data = 0;
tile->dataSize = 0;
if (data) *data = 0;
if (dataSize) *dataSize = 0;
}
else
{
// Caller owns the data; hand the buffer back so it can be re-added later.
if (data) *data = tile->data;
if (dataSize) *dataSize = tile->dataSize;
}
// Clear all structural pointers; they aliased into tile->data.
tile->header = 0;
tile->flags = 0;
tile->linksFreeList = 0;
tile->polys = 0;
tile->verts = 0;
tile->links = 0;
tile->detailMeshes = 0;
tile->detailVerts = 0;
tile->detailTris = 0;
tile->bvTree = 0;
tile->offMeshCons = 0;
// Update salt, salt should never be zero.
// Bumping the salt invalidates every outstanding ref to this tile slot.
#ifdef DT_POLYREF64
tile->salt = (tile->salt+1) & ((1<<DT_SALT_BITS)-1);
#else
tile->salt = (tile->salt+1) & ((1<<m_saltBits)-1);
#endif
if (tile->salt == 0)
tile->salt++;
// Add to free list.
tile->next = m_nextFree;
m_nextFree = tile;
return DT_SUCCESS;
}
/// Encodes a tile pointer into a reference: array index plus current salt,
/// with the polygon index left at 0.
dtTileRef dtNavMesh::getTileRef(const dtMeshTile* tile) const
{
	if (!tile)
		return 0;
	const unsigned int tileIndex = (unsigned int)(tile - m_tiles);
	return (dtTileRef)encodePolyId(tile->salt, tileIndex, 0);
}
/// @par
///
/// Example use case:
/// @code
///
/// const dtPolyRef base = navmesh->getPolyRefBase(tile);
/// for (int i = 0; i < tile->header->polyCount; ++i)
/// {
///     const dtPoly* p = &tile->polys[i];
///     const dtPolyRef ref = base | (dtPolyRef)i;
///
///     // Use the reference to access the polygon data.
/// }
/// @endcode
dtPolyRef dtNavMesh::getPolyRefBase(const dtMeshTile* tile) const
{
	if (!tile)
		return 0;
	const unsigned int tileIndex = (unsigned int)(tile - m_tiles);
	return encodePolyId(tile->salt, tileIndex, 0);
}
// Serialized header for a tile's non-structural state buffer
// (written by storeTileState, validated by restoreTileState).
struct dtTileState
{
int magic; // Magic number, used to identify the data.
int version; // Data version number.
dtTileRef ref; // Tile ref at the time of storing the data.
};
// Per-polygon non-structural state stored after the dtTileState header,
// one entry per polygon in tile order.
struct dtPolyState
{
unsigned short flags; // Flags (see dtPolyFlags).
unsigned char area; // Area ID of the polygon.
};
/// @see #storeTileState
int dtNavMesh::getTileStateSize(const dtMeshTile* tile) const
{
	if (!tile)
		return 0;
	// Buffer = aligned state header + aligned array of per-poly states.
	const int headerSize = dtAlign4(sizeof(dtTileState));
	const int polyStateSize = dtAlign4(sizeof(dtPolyState) * tile->header->polyCount);
	return headerSize + polyStateSize;
}
/// @par
///
/// Tile state includes non-structural data such as polygon flags, area ids, etc.
/// @note The state data is only valid until the tile reference changes.
/// @see #getTileStateSize, #restoreTileState
dtStatus dtNavMesh::storeTileState(const dtMeshTile* tile, unsigned char* data, const int maxDataSize) const
{
	// Refuse to write a partial state buffer.
	if (maxDataSize < getTileStateSize(tile))
		return DT_FAILURE | DT_BUFFER_TOO_SMALL;

	dtTileState* tileState = dtGetThenAdvanceBufferPointer<dtTileState>(data, dtAlign4(sizeof(dtTileState)));
	dtPolyState* polyStates = dtGetThenAdvanceBufferPointer<dtPolyState>(data, dtAlign4(sizeof(dtPolyState) * tile->header->polyCount));

	// Header: magic/version for validation, ref to detect tile replacement.
	tileState->magic = DT_NAVMESH_STATE_MAGIC;
	tileState->version = DT_NAVMESH_STATE_VERSION;
	tileState->ref = getTileRef(tile);

	// Snapshot each polygon's flags and area id, in tile order.
	const int polyCount = tile->header->polyCount;
	for (int i = 0; i < polyCount; ++i)
	{
		polyStates[i].flags = tile->polys[i].flags;
		polyStates[i].area = tile->polys[i].getArea();
	}
	return DT_SUCCESS;
}
/// @par
///
/// Tile state includes non-structural data such as polygon flags, area ids, etc.
/// @note This function does not impact the tile's #dtTileRef and #dtPolyRef's.
/// @see #storeTileState
dtStatus dtNavMesh::restoreTileState(dtMeshTile* tile, const unsigned char* data, const int maxDataSize)
{
	// The buffer must hold the full state for this tile.
	if (maxDataSize < getTileStateSize(tile))
		return DT_FAILURE | DT_INVALID_PARAM;

	const dtTileState* tileState = dtGetThenAdvanceBufferPointer<const dtTileState>(data, dtAlign4(sizeof(dtTileState)));
	const dtPolyState* polyStates = dtGetThenAdvanceBufferPointer<const dtPolyState>(data, dtAlign4(sizeof(dtPolyState) * tile->header->polyCount));

	// Reject data written by a different format, version, or tile instance.
	if (tileState->magic != DT_NAVMESH_STATE_MAGIC)
		return DT_FAILURE | DT_WRONG_MAGIC;
	if (tileState->version != DT_NAVMESH_STATE_VERSION)
		return DT_FAILURE | DT_WRONG_VERSION;
	if (tileState->ref != getTileRef(tile))
		return DT_FAILURE | DT_INVALID_PARAM;

	// Copy each polygon's flags and area id back, in tile order.
	const int polyCount = tile->header->polyCount;
	for (int i = 0; i < polyCount; ++i)
	{
		dtPoly& p = tile->polys[i];
		p.flags = polyStates[i].flags;
		p.setArea(polyStates[i].area);
	}
	return DT_SUCCESS;
}
/// @par
///
/// Off-mesh connections are stored in the navigation mesh as special 2-vertex
/// polygons with a single edge. At least one of the vertices is expected to be
/// inside a normal polygon. So an off-mesh connection is "entered" from a
/// normal polygon at one of its endpoints. This is the polygon identified by
/// the prevRef parameter.
dtStatus dtNavMesh::getOffMeshConnectionPolyEndPoints(dtPolyRef prevRef, dtPolyRef polyRef, float* startPos, float* endPos) const
{
unsigned int salt, it, ip;
if (!polyRef)
return DT_FAILURE;
// Get current polygon
// Validate salt, tile index and polygon index before dereferencing.
decodePolyId(polyRef, salt, it, ip);
if (it >= (unsigned int)m_maxTiles) return DT_FAILURE | DT_INVALID_PARAM;
if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE | DT_INVALID_PARAM;
const dtMeshTile* tile = &m_tiles[it];
if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE | DT_INVALID_PARAM;
const dtPoly* poly = &tile->polys[ip];
// Make sure that the current poly is indeed off-mesh link.
if (poly->getType() != DT_POLYTYPE_OFFMESH_CONNECTION)
return DT_FAILURE;
// Figure out which way to hand out the vertices.
// By default vertex 0 is the start; if the link at edge 0 does not lead back
// to prevRef, the traversal entered from the other side, so swap endpoints.
int idx0 = 0, idx1 = 1;
// Find link that points to first vertex.
for (unsigned int i = poly->firstLink; i != DT_NULL_LINK; i = tile->links[i].next)
{
if (tile->links[i].edge == 0)
{
if (tile->links[i].ref != prevRef)
{
idx0 = 1;
idx1 = 0;
}
break;
}
}
// Copy out the oriented endpoints (each vertex is 3 floats).
dtVcopy(startPos, &tile->verts[poly->verts[idx0]*3]);
dtVcopy(endPos, &tile->verts[poly->verts[idx1]*3]);
return DT_SUCCESS;
}
/// Resolves a polygon reference to its off-mesh connection record, or null
/// when the ref is invalid or does not name an off-mesh connection polygon.
const dtOffMeshConnection* dtNavMesh::getOffMeshConnectionByRef(dtPolyRef ref) const
{
	if (!ref)
		return 0;
	// Validate salt, tile index and polygon index.
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return 0;
	const dtMeshTile* tile = &m_tiles[it];
	if (tile->salt != salt || tile->header == 0)
		return 0;
	if (ip >= (unsigned int)tile->header->polyCount)
		return 0;
	// Only off-mesh connection polygons have a connection record.
	if (tile->polys[ip].getType() != DT_POLYTYPE_OFFMESH_CONNECTION)
		return 0;
	// Off-mesh polygons are stored contiguously starting at offMeshBase.
	const unsigned int idx = ip - tile->header->offMeshBase;
	dtAssert(idx < (unsigned int)tile->header->offMeshConCount);
	return &tile->offMeshCons[idx];
}
/// Overwrites the flags of the polygon named by ref after validating the
/// salt, tile index and polygon index.
dtStatus dtNavMesh::setPolyFlags(dtPolyRef ref, unsigned short flags)
{
	if (!ref)
		return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	dtMeshTile& tile = m_tiles[it];
	if (tile.salt != salt || tile.header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (ip >= (unsigned int)tile.header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	tile.polys[ip].flags = flags;
	return DT_SUCCESS;
}
/// Reads the flags of the polygon named by ref into *resultFlags after
/// validating the reference.
dtStatus dtNavMesh::getPolyFlags(dtPolyRef ref, unsigned short* resultFlags) const
{
	if (!ref)
		return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	const dtMeshTile& tile = m_tiles[it];
	if (tile.salt != salt || tile.header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (ip >= (unsigned int)tile.header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	*resultFlags = tile.polys[ip].flags;
	return DT_SUCCESS;
}
/// Sets the area id of the polygon named by ref after validating the
/// reference.
dtStatus dtNavMesh::setPolyArea(dtPolyRef ref, unsigned char area)
{
	if (!ref)
		return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	dtMeshTile& tile = m_tiles[it];
	if (tile.salt != salt || tile.header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (ip >= (unsigned int)tile.header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	tile.polys[ip].setArea(area);
	return DT_SUCCESS;
}
/// Reads the area id of the polygon named by ref into *resultArea after
/// validating the reference.
dtStatus dtNavMesh::getPolyArea(dtPolyRef ref, unsigned char* resultArea) const
{
	if (!ref)
		return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	const dtMeshTile& tile = m_tiles[it];
	if (tile.salt != salt || tile.header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (ip >= (unsigned int)tile.header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	*resultArea = tile.polys[ip].getArea();
	return DT_SUCCESS;
}
|
#include "stdafx.h"
#include "uespLogMonitor.h"
#include "OptionsDlg.h"
#include "afxdialogex.h"
#include "uespLogMonitorDlg.h"
IMPLEMENT_DYNAMIC(COptionsDlg, CDialogEx)

// Message map: route each control notification to its handler.
BEGIN_MESSAGE_MAP(COptionsDlg, CDialogEx)
	ON_BN_CLICKED(IDC_BROWSE_BUTTON, &COptionsDlg::OnBnClickedBrowseButton)
	ON_CBN_SELCHANGE(IDC_LOGNAME_LIST, &COptionsDlg::OnCbnSelchangeLognameList)
	ON_BN_CLICKED(IDC_BROWSEBACKUPDATA_BUTTON, &COptionsDlg::OnBnClickedBrowsebackupdataButton)
	// BUG FIX: the char-data browse button was wired to the *build*-data
	// handler, so it edited the wrong folder control and left
	// OnBnClickedBrowsebackupchardataButton (defined below) unreachable.
	ON_BN_CLICKED(IDC_BROWSEBACKUPCHARDATA_BUTTON, &COptionsDlg::OnBnClickedBrowsebackupchardataButton)
	ON_BN_CLICKED(IDC_BUILDDATAENABLED_CHECK, &COptionsDlg::OnBnClickedBuilddataenabledCheck)
END_MESSAGE_MAP()
// Construct the options dialog; the options struct is supplied later via
// DoModal(Options), so the pointer starts null.
COptionsDlg::COptionsDlg(CWnd* pParent) :
CDialogEx(COptionsDlg::IDD, pParent),
m_pOptions(nullptr)
{
}
// Nothing to release explicitly; members are MFC controls and a non-owning pointer.
COptionsDlg::~COptionsDlg()
{
}
// Bind each dialog control ID to its C++ control member (standard MFC DDX).
// Called by the framework during UpdateData / dialog init.
void COptionsDlg::DoDataExchange(CDataExchange* pDX)
{
CDialogEx::DoDataExchange(pDX);
DDX_Control(pDX, IDC_UPDATETIME_TEXT, m_UpdateTimeText);
DDX_Control(pDX, IDC_FORMURL_TEXT, m_FormURLText);
DDX_Control(pDX, IDC_SAVEDVARPATH_TEXT, m_SavedVarPathText);
DDX_Control(pDX, IDC_CUSTOMNAME_TEXT, m_CustomNameText);
DDX_Control(pDX, IDC_LOGNAME_LIST, m_LogNameList);
DDX_Control(pDX, IDC_LOGLEVEL_LIST, m_LogLevelList);
DDX_Control(pDX, IDC_LASTTIMESTAMP_TEXT, m_LastTimestampText);
DDX_Control(pDX, IDC_ENABLED_CHECK, m_EnabledCheck);
DDX_Control(pDX, IDC_BACKUPFILENAME_TEXT, m_BackupDataFilename);
DDX_Control(pDX, IDC_BACKUPTIMESTAMP_TEXT, m_BackupTimestampText);
DDX_Control(pDX, IDC_BUILDDATAENABLED_CHECK, m_BuildDataEnabledCheck);
DDX_Control(pDX, IDC_BUILDDATAFORMURL_TEXT, m_BuildDataFormURLText);
DDX_Control(pDX, IDC_BACKUPBUILDDATAFOLDER_TEXT, m_BackupBuildDataFolder);
DDX_Control(pDX, IDC_UESPWIKIUSERNAME_TEXT, m_UespWikiUserNameText);
DDX_Control(pDX, IDC_CHARDATAENABLED_CHECK, m_CharDataEnabledCheck);
DDX_Control(pDX, IDC_CHARDATAFORMURL_TEXT, m_CharDataFormURLText);
DDX_Control(pDX, IDC_BACKUPCHARDATAFOLDER_TEXT, m_BackupCharDataFolder);
DDX_Control(pDX, IDC_AUTODOWNLOADPRICES_CHECK, m_AutoDownloadPricesCheck);
DDX_Control(pDX, IDC_PRICESERVER_LIST, m_PriceServerList);
DDX_Control(pDX, IDC_UPLOADSCREENSHOTS_CHECK, m_UploadScreenshotsCheck);
}
// Show the dialog modally bound to the given options struct; controls are
// filled from it in OnInitDialog and written back in OnOK.
int COptionsDlg::DoModal (ulm_options_t& Options)
{
m_pOptions = &Options;
return CDialogEx::DoModal();
}
// One-time dialog setup: populate the combo boxes, then copy the current
// option values into the controls.
BOOL COptionsDlg::OnInitDialog()
{
CDialogEx::OnInitDialog();
FillLogNameList();
FillLogLevelList();
FillPriceServerList();
SetControlData();
return TRUE; // let the framework set the initial focus
}
// Append pString to the combo box and tag the new entry with Data.
// Silently does nothing if the string cannot be added.
void AddComboString (CComboBox& Combo, const char* pString, const int Data)
{
	const int index = Combo.AddString(pString);
	if (index >= 0)
		Combo.SetItemData(index, Data);
}
// Select the combo entry whose item data equals Data.
// Returns the selected index, or -1 when no entry matches.
int SelectComboItem (CComboBox& Combo, const int Data)
{
	const int count = Combo.GetCount();
	for (int index = 0; index < count; ++index)
	{
		if (Combo.GetItemData(index) != Data)
			continue;
		Combo.SetCurSel(index);
		return index;
	}
	return -1;
}
// Item data of the currently selected combo entry, or Default when nothing
// is selected.
int GetComboSelData (CComboBox& Combo, const int Default)
{
	const int index = Combo.GetCurSel();
	return (index < 0) ? Default : (int)Combo.GetItemData(index);
}
// Populate the price-server combo; item data is the server's numeric id.
// The call order defines the display order shown to the user.
void COptionsDlg::FillPriceServerList()
{
AddComboString(m_PriceServerList, "PC-NA", 1);
AddComboString(m_PriceServerList, "PC-EU", 2);
AddComboString(m_PriceServerList, "PTS", 3);
AddComboString(m_PriceServerList, "Other", 4);
}
// Populate the log-name combo; item data is the ULM_USELOGNAME_* enum value
// consumed by Get/SetControlData.
void COptionsDlg::FillLogNameList()
{
AddComboString(m_LogNameList, "Anonymous", ULM_USELOGNAME_ANONYMOUS);
AddComboString(m_LogNameList, "Character", ULM_USELOGNAME_CHARACTER);
AddComboString(m_LogNameList, "Player/Account", ULM_USELOGNAME_PLAYER);
AddComboString(m_LogNameList, "Custom", ULM_USELOGNAME_CUSTOM);
}
// Populate the log-level combo; item data is the ULM_LOGLEVEL_* enum value,
// listed from least to most verbose.
void COptionsDlg::FillLogLevelList()
{
AddComboString(m_LogLevelList, "None", ULM_LOGLEVEL_NONE);
AddComboString(m_LogLevelList, "Error", ULM_LOGLEVEL_ERROR);
AddComboString(m_LogLevelList, "Warning", ULM_LOGLEVEL_WARNING);
AddComboString(m_LogLevelList, "Info", ULM_LOGLEVEL_INFO);
AddComboString(m_LogLevelList, "Debug", ULM_LOGLEVEL_DEBUG);
AddComboString(m_LogLevelList, "All", ULM_LOGLEVEL_ALL);
}
// Persist the control values back into the options struct before closing.
void COptionsDlg::OnOK()
{
GetControlData();
CDialogEx::OnOK();
}
// Copy every control's value into *m_pOptions (controls -> options).
// Mirrors SetControlData; no-op if DoModal was never given an options struct.
void COptionsDlg::GetControlData()
{
CString Buffer;
if (m_pOptions == nullptr) return;
// Numeric fields parsed from edit boxes (atoi/_atoi64 yield 0 on bad input).
m_UpdateTimeText.GetWindowText(Buffer);
m_pOptions->UpdateTime = atoi(Buffer);
m_LastTimestampText.GetWindowText(Buffer);
m_pOptions->LastTimeStamp = _atoi64(Buffer);
m_BackupTimestampText.GetWindowText(Buffer);
m_pOptions->LastBackupTimeStamp = _atoi64(Buffer);
// Plain string fields.
m_FormURLText.GetWindowText(Buffer);
m_pOptions->FormURL = Buffer;
m_SavedVarPathText.GetWindowText(Buffer);
m_pOptions->SavedVarPath = Buffer;
m_BackupDataFilename.GetWindowText(Buffer);
m_pOptions->BackupDataFilename = Buffer;
m_BackupBuildDataFolder.GetWindowText(Buffer);
m_pOptions->BackupBuildDataFolder = Buffer;
m_CustomNameText.GetWindowText(Buffer);
m_pOptions->CustomLogName = Buffer;
m_UespWikiUserNameText.GetWindowText(Buffer);
m_pOptions->UespWikiAccountName = Buffer;
// Enum values come from the combo item data; current value is the fallback.
m_pOptions->LogLevel = static_cast<ulm_loglevel_t>(GetComboSelData(m_LogLevelList, m_pOptions->LogLevel));
m_pOptions->UseLogName = static_cast<ulm_uselogname_t>(GetComboSelData(m_LogNameList, m_pOptions->UseLogName));
m_pOptions->Enabled = m_EnabledCheck.GetCheck() != 0;
m_pOptions->BuildDataEnabled = m_BuildDataEnabledCheck.GetCheck() != 0;
m_BuildDataFormURLText.GetWindowText(Buffer);
m_pOptions->BuildDataFormURL = Buffer;
m_pOptions->CharDataEnabled = m_CharDataEnabledCheck.GetCheck() != 0;
m_BackupCharDataFolder.GetWindowText(Buffer);
m_pOptions->BackupCharDataFolder = Buffer;
m_CharDataFormURLText.GetWindowText(Buffer);
m_pOptions->CharDataFormURL = Buffer;
m_pOptions->AutoDownloadPrices = m_AutoDownloadPricesCheck.GetCheck() != 0;
// Map the display names "PC-NA"/"PC-EU" to stored values "NA"/"EU"
// (inverse of the mapping in SetControlData); other entries store as-is.
m_PriceServerList.GetWindowText(Buffer);
m_pOptions->PriceServer = Buffer;
if (Buffer == "PC-NA") m_pOptions->PriceServer = "NA";
if (Buffer == "PC-EU") m_pOptions->PriceServer = "EU";
m_pOptions->UploadScreenshots = m_UploadScreenshotsCheck.GetCheck() != 0;
}
// Copy *m_pOptions into the dialog controls (options -> controls).
// Mirrors GetControlData; no-op if DoModal was never given an options struct.
void COptionsDlg::SetControlData()
{
CString Buffer;
if (m_pOptions == nullptr) return;
// Numeric fields formatted into edit boxes.
Buffer.Format("%d", m_pOptions->UpdateTime);
m_UpdateTimeText.SetWindowText(Buffer);
Buffer.Format("%lld", m_pOptions->LastTimeStamp);
m_LastTimestampText.SetWindowText(Buffer);
Buffer.Format("%lld", m_pOptions->LastBackupTimeStamp);
m_BackupTimestampText.SetWindowText(Buffer);
// Plain string fields.
m_FormURLText.SetWindowText(m_pOptions->FormURL.c_str());
m_SavedVarPathText.SetWindowText(m_pOptions->SavedVarPath.c_str());
m_CustomNameText.SetWindowText(m_pOptions->CustomLogName.c_str());
m_UespWikiUserNameText.SetWindowText(m_pOptions->UespWikiAccountName.c_str());
m_BackupDataFilename.SetWindowText(m_pOptions->BackupDataFilename.c_str());
m_BackupBuildDataFolder.SetWindowText(m_pOptions->BackupBuildDataFolder.c_str());
m_BackupCharDataFolder.SetWindowText(m_pOptions->BackupCharDataFolder.c_str());
m_EnabledCheck.SetCheck(m_pOptions->Enabled);
SelectComboItem(m_LogLevelList, m_pOptions->LogLevel);
SelectComboItem(m_LogNameList, m_pOptions->UseLogName);
m_BuildDataEnabledCheck.SetCheck(m_pOptions->BuildDataEnabled);
m_BuildDataFormURLText.SetWindowText(m_pOptions->BuildDataFormURL.c_str());
m_CharDataEnabledCheck.SetCheck(m_pOptions->CharDataEnabled);
m_CharDataFormURLText.SetWindowText(m_pOptions->CharDataFormURL.c_str());
m_AutoDownloadPricesCheck.SetCheck(m_pOptions->AutoDownloadPrices);
// Map the stored values "NA"/"EU" back to the display names "PC-NA"/"PC-EU"
// (inverse of the mapping in GetControlData).
Buffer = m_pOptions->PriceServer.c_str();
if (Buffer == "NA") Buffer = "PC-NA";
if (Buffer == "EU") Buffer = "PC-EU";
m_PriceServerList.SelectString(-1, Buffer);
m_UploadScreenshotsCheck.SetCheck(m_pOptions->UploadScreenshots);
UpdateCustomNameState();
}
// Enable the custom-name edit box only while "Custom" log naming is selected.
void COptionsDlg::UpdateCustomNameState()
{
	const int logName = GetComboSelData(m_LogNameList, ULM_USELOGNAME_ANONYMOUS);
	const bool useCustom = (logName == ULM_USELOGNAME_CUSTOM);
	m_CustomNameText.EnableWindow(useCustom);
}
// SHBrowseForFolder callback: once the dialog is initialized, pre-select the
// folder whose path was passed via lpData (a C string set in BROWSEINFO.lParam).
static int CALLBACK BrowseCallbackProc(HWND hwnd, UINT uMsg, LPARAM lParam, LPARAM lpData)
{
	if (uMsg == BFFM_INITIALIZED && lpData)
	{
		// BUG FIX: removed a dead `std::string tmp = (const char*)lpData;`
		// copy that was never used; lpData is forwarded directly.
		SendMessage(hwnd, BFFM_SETSELECTION, TRUE, lpData);
	}
	return 0;
}
void COptionsDlg::OnBnClickedBrowseButton()
{
BROWSEINFO BrowseInfo = { 0 };
TCHAR Path[MAX_PATH + 64];
CString Buffer;
m_SavedVarPathText.GetWindowText(Buffer);
BrowseInfo.lpszTitle = "Select folder...";
BrowseInfo.ulFlags = BIF_RETURNONLYFSDIRS | BIF_NEWDIALOGSTYLE;
BrowseInfo.lpfn = BrowseCallbackProc;
BrowseInfo.lParam = (LPARAM) (const char *) Buffer;
LPITEMIDLIST pList = SHBrowseForFolder(&BrowseInfo);
if (pList == nullptr) return;
SHGetPathFromIDList (pList, Path);
IMalloc * imalloc = nullptr;
if ( SUCCEEDED( SHGetMalloc ( &imalloc )) )
{
imalloc->Free(pList);
imalloc->Release();
}
m_SavedVarPathText.SetWindowText(Path);
}
void COptionsDlg::OnBnClickedBrowsebackupdataButton()
{
CString Buffer;
m_BackupDataFilename.GetWindowText(Buffer);
CFileDialog FileDlg(FALSE, nullptr, Buffer, OFN_HIDEREADONLY, "Text Files (*.txt)|*.txt|All Files (*.*)|*.*||", this);
if (FileDlg.DoModal() != IDOK) return;
Buffer = FileDlg.GetPathName();
m_BackupDataFilename.SetWindowText(Buffer);
}
// Keep the custom-name edit box enabled/disabled in step with the selection.
void COptionsDlg::OnCbnSelchangeLognameList()
{
UpdateCustomNameState();
}
void COptionsDlg::OnBnClickedBrowsebackupbuilddataButton()
{
CFolderPickerDialog m_dlg;
CString Buffer;
m_BackupBuildDataFolder.GetWindowText(Buffer);
m_dlg.m_ofn.lpstrTitle = _T("Choose Folder for Backup Build Data:");
m_dlg.m_ofn.lpstrInitialDir = Buffer;
if (m_dlg.DoModal() != IDOK) return;
Buffer = m_dlg.GetPathName();
Buffer += _T("\\");
m_BackupBuildDataFolder.SetWindowText(Buffer);
}
void COptionsDlg::OnBnClickedBrowsebackupchardataButton()
{
CFolderPickerDialog m_dlg;
CString Buffer;
m_BackupCharDataFolder.GetWindowText(Buffer);
m_dlg.m_ofn.lpstrTitle = _T("Choose Folder for Backup Character Data:");
m_dlg.m_ofn.lpstrInitialDir = Buffer;
if (m_dlg.DoModal() != IDOK) return;
Buffer = m_dlg.GetPathName();
Buffer += _T("\\");
m_BackupCharDataFolder.SetWindowText(Buffer);
}
// Placeholder handler for the build-data checkbox; the checked state is read
// later in GetControlData(), so no immediate action is required here.
void COptionsDlg::OnBnClickedBuilddataenabledCheck()
{
// TODO: Add your control notification handler code here
}
|
/* -------------------------------------------------------------------
* @doc
* @notice Template file wizards/projects/qtquickapplication/main.cpp
*
* @copyright Arboreus (http://arboreus.systems)
* @author Alexandr Kirilov (http://alexandr.kirilov.me)
* @created 25/07/2021 at 19:36:19
* */// --------------------------------------------------------------
// System includes
#include <QGuiApplication>
#include <QQmlApplicationEngine>
// Application includes
// Constants
// Qt Quick Application
// Qt Quick entry point: create the application and QML engine, load
// qrc:/main.qml, and exit with -1 if the root object fails to create.
int main(int inCounter, char *inArguments[]) {

#if QT_VERSION < QT_VERSION_CHECK(6, 0, 0)
	QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
#endif

	QGuiApplication oApplication(inCounter, inArguments);
	QQmlApplicationEngine oEngine;

	const QUrl oURL(QStringLiteral("qrc:/main.qml"));

	// objectCreated reports a null object for the URL that failed to load;
	// queued so the handler runs on the application's event loop.
	const auto onObjectCreated = [oURL](QObject *inObject, const QUrl &inObjectURL) {
		if (!inObject && oURL == inObjectURL) {
			QCoreApplication::exit(-1);
		}
	};
	QObject::connect(
		&oEngine, &QQmlApplicationEngine::objectCreated,
		&oApplication, onObjectCreated, Qt::QueuedConnection
	);

	oEngine.load(oURL);
	return oApplication.exec();
}
|
/** \file
* Measures the distance between two graph positions along the reference path
* (approximated by the longest connecting path)
*/
#ifndef VG_ALGORITHMS_REF_PATH_DISTANCE_HPP_INCLUDED
#define VG_ALGORITHMS_REF_PATH_DISTANCE_HPP_INCLUDED
#include <structures/rank_pairing_heap.hpp>
#include "handle.hpp"
#include "position.hpp"
namespace vg {
namespace algorithms {
using namespace std;
/// Search the local region around two positions and return the longest distance between
/// them along any paths found during this search. Returns numeric_limits<int64_t>::max()
/// if no shared path is found.
int64_t ref_path_distance(const PathPositionHandleGraph* graph, const pos_t& pos_1, const pos_t& pos_2,
int64_t min_search_dist, int64_t max_search_dist);
}
}
#endif // VG_ALGORITHMS_REF_PATH_DISTANCE_HPP_INCLUDED
|
#include "graph.h"
#include "scheduler.h"
#include <cmath>
#include <set>
#include <queue>
#include <unordered_map>
#include <iostream>
// Scan f.hops from index 'start' for a hop with the same IP as 'hop'.
// Returns the matching index, or -1 when absent. While scanning, sets
// hopJump whenever the forward-TTL sequence is not contiguous with the
// start hop (a gap in the traced path).
int sameHop(Flow &f, Hop &hop, int start, bool &hopJump) {
	const int count = (int)f.hops.size();
	for (int i = start; i < count; i++) {
		if (i - start != f.hops[i].fwdttl - f.hops[start].fwdttl)
			hopJump = true;
		if (hop.ip == f.hops[i].ip)
			return i;
	}
	return -1;
}
// Insert or merge hop h into this flow's hop list, kept ordered by fwdttl.
// Scans backwards from the tail. Returns false only when h's IP already
// appears somewhere other than the tail (a router answering on rotating
// interfaces), in which case the list is left unchanged.
// NOTE(review): 'dst' appears to stop the backward scan at the destination
// hop so h is inserted after it — confirm against callers.
bool
Flow::updateHop(uint32_t dst, Hop h) {
int i;
for (i = hops.size() - 1; i >= 0; i--) {
// Reached the destination hop: stop scanning and insert after it.
if (hops[i].ip == dst) {
break;
}
if (hops[i].ip == h.ip) {
// some routers reply with rotating interfaces
if (i != hops.size() - 1)
return false;
// Same IP at the tail: keep the smaller forward TTL.
hops[i].fwdttl = min(hops[i].fwdttl, h.fwdttl);
break;
}
// Found the first hop with a smaller TTL: insert h right after it.
if (hops[i].fwdttl < h.fwdttl) {
hops.insert(hops.begin() + i + 1, h);
break;
}
}
// Scanned past the head without a break: h precedes every existing hop.
if (i < 0) hops.insert(hops.begin(), h);
return true;
}
// Index of the first hop in f whose IP matches h, or -1 when not present.
int
getHopPos(Flow &f, Hop &h)
{
	const int count = (int)f.hops.size();
	for (int pos = 0; pos < count; pos++) {
		if (f.hops[pos].ip == h.ip)
			return pos;
	}
	return -1;
}
// True when the forward-TTL step between hops i and i-dir differs from dir,
// i.e. the path has a TTL gap ("hop jump") at index i while walking in
// direction dir. The starting index never counts as a jump.
bool hasHopJump(Flow &f, int start, int i, int dir)
{
	if (i == start)
		return false;
	const int ttlStep = f.hops[i].fwdttl - f.hops[i - dir].fwdttl;
	return ttlStep != dir;
}
// Walk both flows from 'start' toward 'end' (dir = +1 forward, -1 backward)
// and pick, among positions where the two flows share an IP and both have
// measurements (cnt > 0), the shared hop pair with the largest minimum count.
// Returns default-constructed Hops (cnt == 0) when either endpoint is missing
// from a flow or no usable shared hop exists before a TTL jump.
pair<Hop, Hop>
selHop(vector<Flow> &flows, Hop start, Hop end, int dir)
{
int i, j, k;
int idx1, idx2, idx3, idx4;
bool hopJump;
vector<int> counts;
vector<pair<int, int>> indices;
// Locate start/end in both flows; bail out if any endpoint is absent.
idx1 = getHopPos(flows[0], start);
idx2 = getHopPos(flows[1], start);
idx3 = getHopPos(flows[0], end);
idx4 = getHopPos(flows[1], end);
if (idx1 == -1 || idx2 == -1 || idx3 == -1 || idx4 == -1)
return make_pair(Hop(), Hop());
// k tracks where the scan of flows[1] resumes, so matched hops stay in order.
k = idx2;
hopJump = false;
for (i = idx1; i != idx3 + dir; i += dir) {
for (j = k; j != idx4 + dir; j += dir) {
// Stop at the first TTL discontinuity in either flow.
hopJump = hasHopJump(flows[0], idx1, i, dir) || hasHopJump(flows[1], idx2, j, dir);
if (hopJump) break;
if (flows[0].hops[i].ip == flows[1].hops[j].ip) {
// Record the pair only when both hops carry measurements.
if (flows[0].hops[i].cnt > 0 && flows[1].hops[j].cnt > 0) {
// found = true;
// flowInd1 = i;
// flowInd2 = j;
counts.push_back(min(flows[0].hops[i].cnt, flows[1].hops[j].cnt));
indices.push_back(make_pair(i, j));
}
k = j + dir;
}
}
if (hopJump) break;
}
if (counts.empty())
return make_pair(Hop(), Hop());
// Choose the candidate pair with the largest min(count) across both flows.
int maxIndex = distance(counts.begin(), max_element(counts.begin(), counts.end()));
int ind1 = indices[maxIndex].first;
int ind2 = indices[maxIndex].second;
return make_pair(flows[0].hops[ind1], flows[1].hops[ind2]);
}
// A load-balancing region is multi-path when its link was classified as
// complete or incomplete multi-path.
bool isMultiPath(LBReg &lbReg) {
	switch (lbReg.link.type) {
	case CMPL_MULTI_PATH:
	case INCMPL_MULTI_PATH:
		return true;
	default:
		return false;
	}
}
// Measure the metric difference across LB regions [ind1, ind2]: select
// comparable hop pairs just before region ind1 and just after region ind2
// (falling back to the region boundary itself when the neighbouring region
// is multi-path or absent), then compare the per-flow value deltas between
// the two flows. Returns -1 when no comparable hop pair can be found.
double
LBRegDiffHelper(vector<Flow> &flows, vector<LBReg> &lbRegs, int ind1, int ind2)
{
	// BUG FIX: removed unused locals `int i, j;` (dead declarations).
	Hop start, mid1, mid2, end;

	// Extend one region backwards unless at the front or the previous region
	// is multi-path (its hops are not on a single comparable path).
	mid1 = lbRegs[ind1].start;
	if (ind1 == 0 || isMultiPath(lbRegs[ind1-1])) {
		start = mid1;
	} else {
		start = lbRegs[ind1-1].start;
	}
	// Symmetrically extend one region forwards when possible.
	mid2 = lbRegs[ind2].end;
	if (ind2 == (int)lbRegs.size() - 1 || isMultiPath(lbRegs[ind2+1])) {
		end = mid2;
	} else {
		end = lbRegs[ind2+1].end;
	}
	pair<Hop, Hop> p1 = selHop(flows, mid1, start, -1);
	pair<Hop, Hop> p2 = selHop(flows, mid2, end, 1);
	// cnt == 0 marks the default-constructed Hop selHop returns on failure.
	if (p1.first.cnt == 0 || p1.second.cnt == 0 ||
	    p2.first.cnt == 0 || p2.second.cnt == 0) {
		return -1;
	}
	// Difference of the two flows' value deltas across the region span.
	return (p2.second.val - p1.second.val) - (p2.first.val - p1.first.val);
}
// Compute the per-region metric difference for every LB region (stored into
// each region's link.val), then return the difference measured across the
// whole span from the first to the last region, or -1 when there are none.
double
Graph::calcLBRegDiff(vector<Flow> &flows, vector<LBReg> &lbRegs)
{
int i;
int firstLB = -1, lastLB = -1;
for (i = 0; i < lbRegs.size(); i++) {
//if (lbRegs[i].link.type == INCMPL_SINGLE_PATH ||
// lbRegs[i].link.type == CMPL_SINGLE_PATH)
// continue;
// Track the first and last region seen for the whole-span measurement.
if (firstLB == -1) firstLB = i;
lastLB = i;
lbRegs[i].link.val = LBRegDiffHelper(flows, lbRegs, i, i);
}
if (firstLB != -1 && lastLB != -1)
return LBRegDiffHelper(flows, lbRegs, firstLB, lastLB);
return -1;
}
// Emit the graph as one line on 'out': a "<time> <id> 2" prefix followed by
// "<ip> <fwdttl>" for each node in breadth-first order, each node printed
// once. No-op when the stream or the root is missing.
void
Graph::printGraph(uint32_t id, ostream *out) {
if (!out || !root) return;
// BFS queue of (node, depth); depth is only used by the commented-out
// debug output below.
queue<pair<Node *, int>> q;
q.push(make_pair(root, 0));
unordered_map<uint32_t, bool> visited;
*out << fixed << getCurrTime() << " " << id << " " << "2" << " ";
while (!q.empty()) {
Node *n = q.front().first;
int level = q.front().second;
q.pop();
// Print each IP once; the graph can reach a node via multiple parents.
if (visited.count(n->hop.ip) == 0) {
*out << n->hop.ip << " " << (int)n->hop.fwdttl << " ";
//printf("level:%d, ip:%lu ttls:", level, n->hop.ip);
//for (int i = 0; i < NUM_FLOWS_ENUM; i++)
// printf("%d ", n->ttls[i]);
//printf("indegree:%d\n", n->indegree);
visited[n->hop.ip] = 1;
//printf("Link: ");
//for (auto &link : n->links) {
// printf("%d ", link.type);
//}
//printf("\n");
}
for (auto &h : n->nextHops) {
q.push(make_pair(h, level + 1));
}
}
*out << endl;
}
// Merge hop h (observed on flow 'flowid') into the graph after 'curr'.
// Creates a new node when h's IP is unseen; otherwise links curr to the
// existing node, refusing links that would create a cycle. Returns the node
// now representing h, curr itself for a self-loop, or NULL when linking
// curr -> node would make the graph cyclic.
Node *
Graph::addHop(Node *curr, int flowid, Hop &h) {
Node *node;
node = findNodeInGraph(h.ip, root);
if (!node) {
// First sighting of this IP: create the node and link it after curr.
node = new Node(h);
curr->nextHops.push_back(node);
} else {
// Existing node: keep the smallest observed value, and for equal values
// the largest count.
if (h.val < node->hop.val) {
node->hop.val = h.val;
node->hop.cnt = h.cnt;
} else if (h.val == node->hop.val) {
node->hop.cnt = max(node->hop.cnt, h.cnt);
}
// curr: curr node
// node: next node
if (curr->hop.ip == node->hop.ip)
return curr;
// Reject the edge if curr is reachable from node (would form a cycle).
if (findNodeInGraph(curr->hop.ip, node))
return NULL;
vector<Node *>::iterator it;
it = find(curr->nextHops.begin(), curr->nextHops.end(), node);
if (it == curr->nextHops.end()) {
// New edge curr -> node; the matching link is pushed below.
curr->nextHops.push_back(node);
} else {
// Edge already exists: possibly upgrade it to a direct link when this
// flow shows the two hops at consecutive TTLs.
int index = distance(curr->nextHops.begin(), it);
if (h.fwdttl - curr->hop.fwdttl == 1 &&
curr->links[index].type == INDIRECT_LINK)
{
curr->links[index].type = DIRECT_LINK;
curr->links[index].val = flowid;
}
node->ttls[flowid] = h.fwdttl;
return node;
}
}
// Record this flow's TTL and classify the freshly added edge: consecutive
// TTLs mean a direct link, anything else an indirect one.
node->ttls[flowid] = h.fwdttl;
if (h.fwdttl - curr->hop.fwdttl == 1) {
curr->links.push_back(Link(DIRECT_LINK, flowid));
} else {
curr->links.push_back(Link(INDIRECT_LINK, flowid));
}
return node;
}
/*
vector<Flow>
enumPath(Node *t, uint32_t dst, unordered_map<uint32_t, bool> &v) {
Flow f;
vector<Flow> paths, subpaths;
v[t->hop.ip] = true;
f.hops.push_back(Hop(t->hop.ip, t->hop.ttl));
if (t->hop.ip == dst) {
paths.push_back(f);
} else {
for (auto &n : t->nextHops) {
if (v.count(t->hop.ip) == 0 || !v[t->hop.ip]) {
subpaths = enumPath(n, dst, v);
insert(paths.end(), subpaths.begin(), subpaths.end());
}
}
}
f.hops.pop_back();
v[t->hop.ip] = false;
return paths;
}
*/
// True when 'subpath' matches some path in 'subpaths' hop-for-hop by IP
// (lengths must match; other Hop fields are ignored).
bool
subpathExist(vector<Hop> &subpath, vector<vector<Hop>> &subpaths) {
	const size_t len = subpath.size();
	for (auto &candidate : subpaths) {
		if (candidate.size() != len)
			continue;
		size_t i = 0;
		while (i < len && subpath[i].ip == candidate[i].ip)
			i++;
		if (i == len)
			return true;
	}
	return false;
}
// Extract from each flow the hop segment running from src to dst (inclusive)
// and return the distinct segments, compared by IP sequence. Each stored hop
// is a copy whose cnt is overwritten with the index of the flow it came from,
// so callers can trace a segment back to its flow.
vector<vector<Hop>>
findUniqPaths(vector<Flow> &flows, uint32_t src, uint32_t dst) {
int i;
vector<vector<Hop>> subpaths;
for (i = 0; i < flows.size(); i++) {
vector<Hop> subpath;
bool start = false, end = false;
// 'hop' is deliberately taken by value: cnt is rewritten on the copy only.
for (auto hop : flows[i].hops) {
if (hop.ip == src)
start = true;
if (hop.ip == dst)
end = true;
hop.cnt = i;
if (start) subpath.push_back(hop);
if (end) break;
}
// Skip flows that never pass through both src and dst.
if (!start || !end) continue;
// Keep only IP-sequence-unique segments.
if (subpathExist(subpath, subpaths)) continue;
subpaths.push_back(subpath);
}
return subpaths;
}
// Walk the graph from t (each node once, via 'visited') and drop indirect
// links that are explained by multiple distinct sub-paths between the two
// endpoints: if more than one unique path exists for the flows showing the
// same TTL gap, the indirect edge is redundant and is removed. Also bumps
// each surviving child's indegree before recursing.
void
pruneIndirectLinks(Node *t, vector<Flow> flows, unordered_map<uint32_t, bool> &visited) {
if (!t || t->nextHops.empty() || visited.count(t->hop.ip))
return;
visited[t->hop.ip] = true;
int i, j;
// Indices of links selected for removal (erased in reverse below so the
// earlier indices stay valid).
vector<int> sel;
for (i = 0; i < t->links.size(); i++) {
if (t->links[i].type == INDIRECT_LINK) {
// Collect all flows showing the same TTL gap across this edge as the
// flow that created it.
vector<int> target;
int index = t->links[i].val;
int ttldiff = t->nextHops[i]->ttls[index] - t->ttls[index];
for (j = 0; j < NUM_FLOWS_ENUM; j++) {
if (t->nextHops[i]->ttls[j] - t->ttls[j] == ttldiff) {
target.push_back(j);
}
}
vector<Flow> targetFlows;
vector<vector<Hop>> paths;
for (auto j : target)
targetFlows.push_back(flows[j]);
paths = findUniqPaths(targetFlows, t->hop.ip, t->nextHops[i]->hop.ip);
// More than one unique path between the endpoints: the indirect link
// adds no information, mark it for removal.
if (paths.size() > 1) {
sel.push_back(i);
}
}
}
// Erase in reverse index order so earlier selections remain valid.
for (i = sel.size() - 1; i >= 0; i--) {
t->links.erase(t->links.begin() + sel[i]);
t->nextHops.erase(t->nextHops.begin() + sel[i]);
}
for (i = 0; i < t->nextHops.size(); i++) {
t->nextHops[i]->indegree++;
pruneIndirectLinks(t->nextHops[i], flows, visited);
}
}
/*
void
pruneDanglingBranch(uint32_t dst, Node *t) {
if (t->nextHops.empty()) { return; }
vector<Node *> nextHops = t->nextHops;
ptrdiff_t pos;
for (auto &n : nextHops) {
if (n->nextHops.empty() && n->hop.ip != dst) {
pos = find(t->nextHops.begin(), t->nextHops.end(), n) - t->nextHops.begin();
t->nextHops.erase(it);
t->links.erase();
free(n);
}
}
for (auto &n : t->nextHops) {
pruneDanglingBranch(dst, n);
}
}
*/
int
countEnds(Node *t, unordered_map<uint32_t, bool> &visited) {
    // Count distinct leaf nodes reachable from t; each IP is expanded once.
    if (visited.count(t->hop.ip)) return 0;
    visited[t->hop.ip] = true;
    if (t->nextHops.empty()) return 1;
    int total = 0;
    for (auto &next : t->nextHops)
        total += countEnds(next, visited);
    return total;
}
void
pruneMultipleEnds(Node *t,
                  uint32_t dst,
                  set<Node *> &deletedNodes,
                  unordered_map<uint32_t, bool> &visited)
{
    // Detach children of `t` that are dead ends (leaves other than the
    // destination), recording them in `deletedNodes` so the caller can free
    // them.  Recurses into surviving children; `visited` guards cycles.
    int i;
    if (!t || visited.count(t->hop.ip) || t->nextHops.empty()) { return; }
    visited[t->hop.ip] = true;
    for (i = 0; i < (int)t->nextHops.size(); i++) {
        Node *node = t->nextHops[i];
        if (node->nextHops.empty() && node->hop.ip != dst) {
            deletedNodes.insert(node);
            t->links.erase(t->links.begin() + i);
            t->nextHops.erase(t->nextHops.begin() + i);
            // erase() shifted the next sibling into slot i: re-examine it
            // (the original skipped it and relied on the caller looping again)
            --i;
        } else {
            pruneMultipleEnds(node, dst, deletedNodes, visited);
        }
    }
}
void
calcIndegree(Node *t, unordered_map<uint32_t, bool> &visited) {
    // DFS that bumps the indegree counter of every child edge; each IP is
    // expanded at most once via `visited`.
    if (t == NULL || t->nextHops.empty() || visited.count(t->hop.ip))
        return;
    visited[t->hop.ip] = true;
    for (auto &next : t->nextHops) {
        next->indegree++;
        calcIndegree(next, visited);
    }
}
void
Graph::buildGraph(uint32_t dst, vector<Flow> flows) {
    // Build the per-destination hop graph from the measured flows, then
    // compute every node's indegree.  `flows` is taken by value on purpose:
    // a sentinel destination hop is appended to the local copies.
    int i, j;
    Node *curr;
    unordered_map<uint32_t, bool> visited;
    // add destination
    Hop endHop(dst, UINT8_MAX);
    // for not being considered as no measurements
    endHop.cnt = 6;
    endHop.val = UINT16_MAX;
    for (auto &f : flows) {
        if (f.hops.size() > 0 && f.hops.back().ip == dst)
            continue;
        // NOTE(review): this also appends dst to flows with no hops at all,
        // which makes them start at the destination below — confirm intended
        f.hops.insert(f.hops.end(), endHop);
    }
    // Construct the graph
    for (i = 0; i < flows.size(); i++) {
        if (flows[i].hops.empty()) continue;
        if (!root) {
            // the first usable flow seeds the root node
            root = new Node(flows[i].hops[0]);
            curr = root;
        } else {
            // later flows must join the graph at an already-known first hop
            curr = findNodeInGraph(flows[i].hops[0].ip, root);
            if (!curr) { continue; }
        }
        // chain the remaining hops of this flow onto the graph
        for (j = 1; j < flows[i].hops.size(); j++) {
            curr = addHop(curr, i, flows[i].hops[j]);
            if (curr == NULL) break;
        }
    }
    // printGraph(dst, &std::cout);
    calcIndegree(root, visited);
    // pruneIndirectLinks(root, flows, visited);
    /*
    uint32_t lasthop = 0;
    for (auto &f : flows) {
        if (f.hops.back().ip == dst)
            lasthop = dst;
    }
    if (lasthop == 0) {
        int hopcount = 0;
        for (i = 0; i < flows.size(); i++) {
            if (hopcount <= flows[i].hops.back().ttl) {
                hopcount = flows[i].hops.back().ttl;
                lasthop = flows[i].hops.back().ip;
            }
        }
    }
    set<Node *> newNodes, totalNodes;
    do {
        visited.clear();
        newNodes.clear();
        pruneMultipleEnds(root, lasthop, newNodes, visited);
        totalNodes.insert(newNodes.begin(), newNodes.end());
    } while (!newNodes.empty());
    for (auto &node : totalNodes)
        delete node;
    */
    // printGraph(dst, &std::cout);
}
vector<LBReg>
Graph::findLBReg(uint32_t dst, vector<Flow> &flows, ofstream *outlist) {
    // Build the hop graph for `dst` and walk it in topological order
    // (Kahn-style, via indegree) to carve the path into load-balancing
    // regions:
    //   - a SINGLE_PATH region spans consecutive nodes with one next hop;
    //   - a MULTI_PATH region spans a fan-out until the frontier
    //     reconverges to a single boundary node.
    // A region is "incomplete" (INCMPL_*) when any link inside it was
    // inferred across a TTL jump (INDIRECT_LINK).
    int i;
    vector<LBReg> lbRegs;
    buildGraph(dst, flows);
    /* find LB regions */
    queue<Node *> q;
    set<Node *> boundaryIPs;  // frontier: discovered but not yet finished
    Node *u = NULL;           // last node dequeued; NULL when graph is empty
    bool hopJump = false;
    LINK_TYPE linkType;
    Node *singlePathStart, *multiPathStart;
#define SINGLE_PATH 0
#define MULTI_PATH 1
#define UNKNOWN -1
    int lbType = UNKNOWN;
    singlePathStart = multiPathStart = NULL;
    // enqueue vertices with indegree 0
    if (root) {
        q.push(root);
        boundaryIPs.insert(root);
    }
    while (!q.empty()) {
        u = q.front();
        q.pop();
        boundaryIPs.erase(boundaryIPs.find(u));
        if (lbType == UNKNOWN && u->nextHops.size() == 1) {
            lbType = SINGLE_PATH;
            singlePathStart = u;
        }
        if (u->nextHops.size() > 1) {
            if (lbType == SINGLE_PATH) {
                // close the single-path region ending at this fan-out
                linkType = (hopJump) ? INCMPL_SINGLE_PATH : CMPL_SINGLE_PATH;
                lbRegs.push_back(LBReg(singlePathStart->hop, u->hop, linkType));
                lbType = UNKNOWN;
                hopJump = false;
            }
            if (lbType == UNKNOWN) {
                multiPathStart = u;
            }
            lbType = MULTI_PATH;
        }
        for (i = 0; i < (int)u->nextHops.size(); i++) {
            Node *n = u->nextHops[i];
            boundaryIPs.insert(n);
            if (--n->indegree == 0)
                q.push(n);
            if (u->links[i].type == INDIRECT_LINK)
                hopJump = true;
        }
        if (boundaryIPs.size() == 1) {
            if (lbType == MULTI_PATH) {
                // the frontier reconverged: close the multi-path region
                linkType = (hopJump) ? INCMPL_MULTI_PATH : CMPL_MULTI_PATH;
                lbRegs.push_back(LBReg(multiPathStart->hop, (*boundaryIPs.begin())->hop, linkType));
                lbType = UNKNOWN;
                hopJump = false;
            }
        }
    }
    // Close a trailing single-path region, if any.  `u` stays NULL when the
    // graph is empty, so check it first (the original compared an
    // uninitialized pointer here — undefined behavior).
    if (u != NULL && lbType == SINGLE_PATH && singlePathStart != u) {
        linkType = (hopJump) ? INCMPL_SINGLE_PATH : CMPL_SINGLE_PATH;
        lbRegs.push_back(LBReg(singlePathStart->hop, u->hop, linkType));
    }
    return lbRegs;
}
/*
vector<LBReg> Graph::findLBReg(vector<Flow> &flows, int16_t &totalRange, ANAL_MODE mode) {
int i = 0, j = 0;
Flow &f1 = flows[0];
Flow &f2 = flows[1];
vector<LBReg> LBRegs;
if (f1.hops.size() == 0 || f2.hops.size() == 0) return LBRegs;
bool cmmLastHop, hopJump = false;
Hop firstCmmHop = f1.hops[0], lastCmmHop = firstCmmHop;
while (i < f1.hops.size() && j < f2.hops.size()) {
if (f1.hops[i].ip == f2.hops[j].ip) {
lastCmmHop = f1.hops[i];
lastCmmHop.val = (f1.hops[i].val + f2.hops[j].val) / 2;
hopJump = hopJump || (hasHopJump(f1, 0, i, 1) || hasHopJump(f2, 0, j, 1));
cmmLastHop = true;
i++; j++;
} else {
cmmLastHop = false;
if (firstCmmHop.ip != lastCmmHop.ip) {
if (hopJump) {
LBRegs.push_back(LBReg(firstCmmHop, lastCmmHop, INCMPL_SINGLE_PATH));
} else {
LBRegs.push_back(LBReg(firstCmmHop, lastCmmHop, CMPL_SINGLE_PATH));
}
}
hopJump = false;
for (; i < f1.hops.size(); i++) {
hopJump = hopJump || (hasHopJump(f1, 0, i, 1) || hasHopJump(f2, 0, j, 1));
int k = sameHop(f2, f1.hops[i], j, hopJump);
if (k != -1) {
j = k;
Hop nextCmmHop = f2.hops[j];
nextCmmHop.val = (f1.hops[i].val + f2.hops[j].val) / 2;
if (hopJump) {
LBRegs.push_back(LBReg(lastCmmHop, nextCmmHop, INCMPL_MULTI_PATH));
} else {
LBRegs.push_back(LBReg(lastCmmHop, nextCmmHop, CMPL_MULTI_PATH));
}
firstCmmHop = lastCmmHop = nextCmmHop;
hopJump = false;
cmmLastHop = true;
break;
}
}
}
}
if (firstCmmHop.ip != lastCmmHop.ip && cmmLastHop) {
if (hopJump) {
LBRegs.push_back(LBReg(firstCmmHop, lastCmmHop, INCMPL_SINGLE_PATH));
} else {
LBRegs.push_back(LBReg(firstCmmHop, lastCmmHop, CMPL_SINGLE_PATH));
}
}
if (mode == LB_ANALYSIS)
totalRange = calcLBRegDiff(flows, LBRegs);
// check if ranges at different LBRegs sum up to total
//int sum = 0;
//for (i = 0; i < LBRegs.size(); i++) {
// sum += isMultiPath(LBRegs[i]) ? LBRegs[i].link.val : 0;
//}
//if (abs(sum - lbRegRangeTotal) <= MEAS_ERROR)
// return LBRegs;
//LBRegs.clear();
return LBRegs;
}
*/
Node *
findNodeHelper(uint32_t ip, Node *root, unordered_map<uint32_t, bool> &v) {
    // DFS for the first node whose hop IP equals `ip`; `v` guards cycles.
    if (root == NULL || v.count(root->hop.ip) != 0)
        return NULL;
    v[root->hop.ip] = true;
    if (root->hop.ip == ip)
        return root;
    Node *found = NULL;
    for (auto &child : root->nextHops) {
        found = findNodeHelper(ip, child, v);
        if (found != NULL)
            break;
    }
    return found;
}
Node *
Graph::findNodeInGraph(uint32_t ip, Node *root) {
    // Look up a node by IP with a fresh visited set for this query.
    unordered_map<uint32_t, bool> seen;
    return findNodeHelper(ip, root, seen);
}
void Graph::updateGraph(vector<LBReg> &lbRegs) {
    // Merge a freshly measured chain of LB regions into the existing graph,
    // reusing existing nodes/edges where the IPs match.
    // assumes lbRegs is non-empty and root is non-NULL — TODO confirm callers
    Node *start = findNodeInGraph(lbRegs[0].start.ip, root);
    Node *node = start, *next;
    bool exist;
    if (!node) {
        // the chain's start hop is unknown: attach it directly under root
        next = new Node(lbRegs[0].start);
        root->nextHops.push_back(next);
        root->links.push_back(Link(INCMPL_SINGLE_PATH, 0));
        node = next;
    }
    for (int i = 0; i < lbRegs.size(); i++) {
        exist = false;
        // look for an existing edge node -> lbRegs[i].end
        for (int j = 0; j < node->nextHops.size(); j++) {
            next = node->nextHops[j];
            if (next->hop.ip == lbRegs[i].end.ip) {
                if (isMultiPath(lbRegs[i])) {
                    // refresh the link type and keep the smaller |val|
                    node->links[j].type = lbRegs[i].link.type;
                    if(abs(node->links[j].val) > abs(lbRegs[i].link.val)) {
                        node->links[j].val = lbRegs[i].link.val;
                    }
                }
                node = next;
                exist = true;
                break;
            }
        }
        if (!exist) {
            // reuse a node found elsewhere in the graph, else allocate one
            next = findNodeInGraph(lbRegs[i].end.ip, root);
            if (!next) next = new Node(lbRegs[i].end);
            node->nextHops.push_back(next);
            node->links.push_back(lbRegs[i].link);
            node = next;
        }
    }
    return;
}
// Flow
// findLBRouterHelper(Node *t,
// vector<Flow> &flows,
// unordered_map<Node *, bool> &visited)
// {
// int i;
// Flow routers;
// if (visited.count(t)) return routers;
// visited[t] = true;
// if (t->nextHops.size() > 1) {
// for (i = 0; i < NUM_FLOWS_ENUM; i++) {
// if (t->ttls[i] != 0) {
// Hop h(t->hop.ip, t->ttls[i]);
// h.val = i;
// h.numNextHops = t->nextHops.size();
// routers.hops.push_back(h);
// break;
// }
// }
// }
// for (auto &next : t->nextHops) {
// Flow f = findLBRouterHelper(next, flows, visited);
// routers.hops.insert(routers.hops.end(), f.hops.begin(), f.hops.end());
// }
// return routers;
// }
// Flow
// Graph::findLBRouter(uint32_t id, vector<Flow> &flows) {
// buildGraph(id, flows);
// //printGraph();
// unordered_map<Node *, bool> visited;
// return findLBRouterHelper(root, flows, visited);
// }
void
freeGraph(Node *t, unordered_map<Node *, bool> &visited)
{
    // Post-order delete of every reachable node; `visited` prevents a
    // double-free when a node is shared by several parents.
    if (t == NULL || visited.count(t) != 0)
        return;
    visited[t] = true;
    for (auto &child : t->nextHops)
        freeGraph(child, visited);
    delete t;
}
Graph::~Graph() {
    // Release every node still owned by the graph.
    unordered_map<Node *, bool> freed;
    freeGraph(root, freed);
}
|
// Copyright (c) 2007-2015 Hartmut Kaiser
// Copyright (c) 2013 Agustin Berge
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/// \file lcos/wait_any.hpp
#if !defined(HPX_LCOS_WAIT_ANY_APR_17_2012_1143AM)
#define HPX_LCOS_WAIT_ANY_APR_17_2012_1143AM
#if defined(DOXYGEN)
namespace hpx
{
/// The function \a wait_any is a non-deterministic choice operator. It
/// OR-composes all future objects given and returns after one future of
/// that list finishes execution.
///
/// \param first [in] The iterator pointing to the first element of a
/// sequence of \a future or \a shared_future objects for
/// which \a wait_any should wait.
/// \param last [in] The iterator pointing to the last element of a
/// sequence of \a future or \a shared_future objects for
/// which \a wait_any should wait.
/// \param ec [in,out] this represents the error status on exit, if
/// this is pre-initialized to \a hpx#throws the function
/// will throw on error instead.
///
/// \note The function \a wait_any returns after at least one future has
/// become ready. All input futures are still valid after \a wait_any
/// returns.
///
/// \note As long as \a ec is not pre-initialized to \a hpx::throws this
/// function doesn't throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance of
/// \a hpx::exception.
///
/// \note None of the futures in the input sequence are invalidated.
template <typename InputIter>
void wait_any(InputIter first, InputIter last, error_code& ec = throws);
/// The function \a wait_any is a non-deterministic choice operator. It
/// OR-composes all future objects given and returns after one future of
/// that list finishes execution.
///
/// \param futures [in] A vector holding an arbitrary amount of \a future or
/// \a shared_future objects for which \a wait_any should
/// wait.
/// \param ec [in,out] this represents the error status on exit, if
/// this is pre-initialized to \a hpx#throws the function
/// will throw on error instead.
///
/// \note The function \a wait_any returns after at least one future has
/// become ready. All input futures are still valid after \a wait_any
/// returns.
///
/// \note As long as \a ec is not pre-initialized to \a hpx::throws this
/// function doesn't throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance of
/// \a hpx::exception.
///
/// \note None of the futures in the input sequence are invalidated.
template <typename R>
void wait_any(std::vector<future<R>>& futures, error_code& ec = throws);
/// The function \a wait_any is a non-deterministic choice operator. It
/// OR-composes all future objects given and returns after one future of
/// that list finishes execution.
///
    /// \param futures  [in] An array holding an arbitrary amount of \a future or
/// \a shared_future objects for which \a wait_any should
/// wait.
/// \param ec [in,out] this represents the error status on exit, if
/// this is pre-initialized to \a hpx#throws the function
/// will throw on error instead.
///
/// \note The function \a wait_any returns after at least one future has
/// become ready. All input futures are still valid after \a wait_any
/// returns.
///
/// \note As long as \a ec is not pre-initialized to \a hpx::throws this
/// function doesn't throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance of
/// \a hpx::exception.
///
/// \note None of the futures in the input sequence are invalidated.
template <typename R, std:;size_t N>
void wait_any(std::array<future<R>, N>& futures, error_code& ec = throws);
/// The function \a wait_any is a non-deterministic choice operator. It
/// OR-composes all future objects given and returns after one future of
/// that list finishes execution.
///
/// \param futures [in] An arbitrary number of \a future or \a shared_future
/// objects, possibly holding different types for which
/// \a wait_any should wait.
/// \param ec [in,out] this represents the error status on exit, if
/// this is pre-initialized to \a hpx#throws the function
/// will throw on error instead.
///
/// \note The function \a wait_any returns after at least one future has
/// become ready. All input futures are still valid after \a wait_any
/// returns.
///
/// \note As long as \a ec is not pre-initialized to \a hpx::throws this
/// function doesn't throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance of
/// \a hpx::exception.
///
/// \note None of the futures in the input sequence are invalidated.
template <typename ...T>
void wait_any(error_code& ec, T&&... futures);
/// The function \a wait_any is a non-deterministic choice operator. It
/// OR-composes all future objects given and returns after one future of
/// that list finishes execution.
///
/// \param futures [in] An arbitrary number of \a future or \a shared_future
/// objects, possibly holding different types for which
/// \a wait_any should wait.
///
/// \note The function \a wait_any returns after at least one future has
/// become ready. All input futures are still valid after \a wait_any
/// returns.
///
/// \note None of the futures in the input sequence are invalidated.
template <typename ...T>
void wait_any(T&&... futures);
/// The function \a wait_any_n is a non-deterministic choice operator. It
/// OR-composes all future objects given and returns after one future of
/// that list finishes execution.
///
/// \param first [in] The iterator pointing to the first element of a
/// sequence of \a future or \a shared_future objects for
/// which \a wait_any_n should wait.
/// \param count [in] The number of elements in the sequence starting at
/// \a first.
/// \param ec [in,out] this represents the error status on exit, if
/// this is pre-initialized to \a hpx#throws the function
/// will throw on error instead.
///
/// \note The function \a wait_any_n returns after at least one future has
/// become ready. All input futures are still valid after \a wait_any_n
/// returns.
///
/// \return The function \a wait_all_n will return an iterator
/// referring to the first element in the input sequence
/// after the last processed element.
///
/// \note As long as \a ec is not pre-initialized to \a hpx::throws this
/// function doesn't throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance of
/// \a hpx::exception.
///
/// \note None of the futures in the input sequence are invalidated.
template <typename InputIter>
InputIter wait_any_n(InputIter first, std::size_t count,
error_code& ec = throws);
}
#else // DOXYGEN
#include <hpx/config.hpp>
#include <hpx/lcos/future.hpp>
#include <hpx/lcos/wait_some.hpp>
#include <hpx/pp/strip_parens.hpp>
#include <hpx/runtime/threads/thread.hpp>
#include <hpx/util/always_void.hpp>
#include <hpx/util/tuple.hpp>
#include <boost/utility/swap.hpp>
#include <array>
#include <cstddef>
#include <utility>
#include <vector>
///////////////////////////////////////////////////////////////////////////////
namespace hpx { namespace lcos
{
///////////////////////////////////////////////////////////////////////////
    // wait_any over a std::vector of futures: returns once at least one
    // future is ready (implemented as wait_some with n == 1).
    template <typename Future>
    void wait_any(std::vector<Future> const& futures, error_code& ec = throws)
    {
        return lcos::wait_some(1, futures, ec);
    }
    // Non-const lvalue overload: forwards to the const& overload above.
    template <typename Future>
    void wait_any(std::vector<Future>& lazy_values, error_code& ec = throws)
    {
        return lcos::wait_any(
            const_cast<std::vector<Future> const&>(lazy_values), ec);
    }
    // Rvalue overload: the futures remain valid after waiting, so binding
    // the argument as const& is safe; forwards to the const& overload.
    template <typename Future>
    void wait_any(std::vector<Future> && lazy_values, error_code& ec = throws)
    {
        return lcos::wait_any(
            const_cast<std::vector<Future> const&>(lazy_values), ec);
    }
///////////////////////////////////////////////////////////////////////////
    // wait_any over a std::array of futures: returns once at least one
    // future is ready (wait_some with n == 1).
    template <typename Future, std::size_t N>
    void wait_any(std::array<Future, N> const& futures, error_code& ec = throws)
    {
        return lcos::wait_some(1, futures, ec);
    }
    // Non-const lvalue overload: forwards to the const& overload above.
    template <typename Future, std::size_t N>
    void wait_any(std::array<Future, N>& lazy_values, error_code& ec = throws)
    {
        return lcos::wait_any(
            const_cast<std::array<Future, N> const&>(lazy_values), ec);
    }
    // Rvalue overload: futures stay valid, so binding as const& is safe.
    template <typename Future, std::size_t N>
    void wait_any(std::array<Future, N> && lazy_values, error_code& ec = throws)
    {
        return lcos::wait_any(
            const_cast<std::array<Future, N> const&>(lazy_values), ec);
    }
///////////////////////////////////////////////////////////////////////////
    // Iterator-range overload: waits until at least one future in
    // [begin, end) is ready.  The always_void SFINAE wrapper restricts this
    // overload to iterators over future types.
    template <typename Iterator>
    typename util::always_void<
        typename lcos::detail::future_iterator_traits<Iterator>::type
    >::type
    wait_any(Iterator begin, Iterator end, error_code& ec = throws)
    {
        return lcos::wait_some(1, begin, end, ec);
    }
    // Degenerate overload with no futures: delegates to wait_some(1, ec).
    inline void wait_any(error_code& ec = throws)
    {
        return lcos::wait_some(1, ec);
    }
///////////////////////////////////////////////////////////////////////////
    // Waits until at least one of the `count` futures starting at `begin`
    // is ready; returns the iterator past the last element in the sequence.
    // NOTE(review): calls wait_some_n unqualified (resolved within this
    // namespace), unlike the lcos::-qualified calls above — confirm intended.
    template <typename Iterator>
    Iterator
    wait_any_n(Iterator begin, std::size_t count,
        error_code& ec = throws)
    {
        return wait_some_n(1, begin, count, ec);
    }
///////////////////////////////////////////////////////////////////////////
    // Variadic overloads: wait until at least one of the given futures is
    // ready.  This form reports errors through `ec`.
    template <typename... Ts>
    void wait_any(error_code& ec, Ts&&... ts)
    {
        return lcos::wait_some(1, ec, std::forward<Ts>(ts)...);
    }
    // Same, without an error_code parameter.
    template <typename... Ts>
    void wait_any(Ts&&... ts)
    {
        return lcos::wait_some(1, std::forward<Ts>(ts)...);
    }
}}
namespace hpx
{
using lcos::wait_any;
using lcos::wait_any_n;
}
#endif // DOXYGEN
#endif
|
// Copyright (c) 2010-2021, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.
// Implementations of classes FABilinearFormExtension, EABilinearFormExtension,
// PABilinearFormExtension and MFBilinearFormExtension.
#include "nonlinearform.hpp"
#include "ceed/util.hpp"
namespace mfem
{
NonlinearFormExtension::NonlinearFormExtension(NonlinearForm *form)
   : Operator(form->FESpace()->GetTrueVSize()), n(form)
{
   // Size this Operator by the true-dof count of the form's FE space and
   // keep a back-pointer to the wrapped NonlinearForm; nothing else to do.
}
PANonlinearFormExtension::PANonlinearFormExtension(NonlinearForm *form):
   NonlinearFormExtension(form), fes(*form->FESpace())
{
   // Cache the lexicographic element restriction and size the element-wise
   // (E-vector) work buffers used by Mult().
   const ElementDofOrdering ordering = ElementDofOrdering::LEXICOGRAPHIC;
   elem_restrict_lex = fes.GetElementRestriction(ordering);
   if (elem_restrict_lex)
   {
      localX.SetSize(elem_restrict_lex->Height(), Device::GetMemoryType());
      localY.SetSize(elem_restrict_lex->Height(), Device::GetMemoryType());
      localY.UseDevice(true); // ensure 'localY = 0.0' is done on device
   }
}
void PANonlinearFormExtension::Assemble()
{
Array<NonlinearFormIntegrator*> &integrators = *n->GetDNFI();
const int Ni = integrators.Size();
for (int i = 0; i < Ni; ++i)
{
integrators[i]->AssemblePA(*n->FESpace());
}
}
void PANonlinearFormExtension::Mult(const Vector &x, Vector &y) const
{
Array<NonlinearFormIntegrator*> &integrators = *n->GetDNFI();
const int iSz = integrators.Size();
if (elem_restrict_lex && !DeviceCanUseCeed())
{
elem_restrict_lex->Mult(x, localX);
localY = 0.0;
for (int i = 0; i < iSz; ++i)
{
integrators[i]->AddMultPA(localX, localY);
}
elem_restrict_lex->MultTranspose(localY, y);
}
else
{
y.UseDevice(true); // typically this is a large vector, so store on device
y = 0.0;
for (int i = 0; i < iSz; ++i)
{
integrators[i]->AddMultPA(x, y);
}
}
}
MFNonlinearFormExtension::MFNonlinearFormExtension(NonlinearForm *form):
   NonlinearFormExtension(form), fes(*form->FESpace())
{
   // Cache the lexicographic element restriction and size the element-wise
   // (E-vector) work buffers used by Mult(); mirrors the PA extension.
   const ElementDofOrdering ordering = ElementDofOrdering::LEXICOGRAPHIC;
   elem_restrict_lex = fes.GetElementRestriction(ordering);
   if (elem_restrict_lex)
   {
      localX.SetSize(elem_restrict_lex->Height(), Device::GetMemoryType());
      localY.SetSize(elem_restrict_lex->Height(), Device::GetMemoryType());
      localY.UseDevice(true); // ensure 'localY = 0.0' is done on device
   }
}
void MFNonlinearFormExtension::Assemble()
{
Array<NonlinearFormIntegrator*> &integrators = *n->GetDNFI();
const int Ni = integrators.Size();
for (int i = 0; i < Ni; ++i)
{
integrators[i]->AssembleMF(*n->FESpace());
}
}
void MFNonlinearFormExtension::Mult(const Vector &x, Vector &y) const
{
   // Apply the matrix-free nonlinear operator: y = A(x).
   Array<NonlinearFormIntegrator*> &integrators = *n->GetDNFI();
   const int iSz = integrators.Size();
   if (elem_restrict_lex && !DeviceCanUseCeed())
   {
      // E-vector path: restrict x, accumulate each integrator's action per
      // element, then map back with the transpose of the restriction.
      elem_restrict_lex->Mult(x, localX);
      localY = 0.0;
      for (int i = 0; i < iSz; ++i)
      {
         integrators[i]->AddMultMF(localX, localY);
      }
      elem_restrict_lex->MultTranspose(localY, y);
   }
   else
   {
      // No restriction (or Ceed backend): integrators act on x/y directly.
      y.UseDevice(true); // typically this is a large vector, so store on device
      y = 0.0;
      for (int i = 0; i < iSz; ++i)
      {
         integrators[i]->AddMultMF(x, y);
      }
   }
}
}
|
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <climits>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <iomanip>
#include <iterator>
#include <list>
#include <map>
#include <numeric>
#include <queue>
#include <set>
#include <sstream>
#include <stack>
#include <string>
#include <utility>
#include <vector>
using namespace std;
const double EPS = 1e-9;
const int INF = 0x7f7f7f7f;
const double PI=acos(-1.0);
#define READ(f) freopen(f, "r", stdin)
#define WRITE(f) freopen(f, "w", stdout)
#define MP(x, y) make_pair(x, y)
#define SZ(c) (int)c.size()
#define PB(x) push_back(x)
#define F(i,L,R) for (int i = L; i < R; i++)
#define FF(i,L,R) for (int i = L; i <= R; i++)
#define FR(i,L,R) for (int i = L; i > R; i--)
#define FRF(i,L,R) for (int i = L; i >= R; i--)
#define FOREACH(i,t) for (typeof(t.begin()) i=t.begin(); i!=t.end(); i++)
#define ALL(p) p.begin(),p.end()
#define ALLR(p) p.rbegin(),p.rend()
#define SET(p) memset(p, -1, sizeof(p))
#define CLR(p) memset(p, 0, sizeof(p))
#define MEM(p, v) memset(p, v, sizeof(p))
#define CPY(d, s) memcpy(d, s, sizeof(s))
#define getI(a) scanf("%d", &a)
#define getII(a,b) scanf("%d %d", &a, &b)
#define getIII(a,b,c) scanf("%d %d %d", &a, &b, &c)
#define getL(a) scanf("%lld",&a)
#define getLL(a,b) scanf("%lld %lld",&a,&b)
#define getLLL(a,b,c) scanf("%lld %lld %lld",&a,&b,&c)
#define getC(n) scanf("%c",&n)
#define getF(n) scanf("%lf",&n)
#define getS(n) scanf("%s",n)
#define vi vector < int >
#define vii vector < vector < int > >
#define pii pair< int, int >
#define psi pair< string, int >
#define ff first
#define ss second
#define ll long long
#define ull unsigned long long
#define ui unsigned int
#define us unsigned short
#define ld long double
template< class T > inline T _abs(T n) { return ( (n) < 0 ? -(n) : (n) ); }
template< class T > inline T _max(T a, T b) { return ( ! ( (a) < (b) ) ? (a) : (b) ) ; }
template< class T > inline T _min(T a, T b) { return ( ( (a) < (b) ) ? (a) : (b) ) ; }
template< class T > inline T _swap(T &a, T &b) { T temp=a;a=b;b=temp;}
template< class T > inline T gcd(T a, T b) { return (b) == 0 ? (a) : gcd( (b), ( (a) % (b) ) ) ; }
template< class T > inline T lcm(T a, T b) { return ( (a) / gcd( (a), (b) ) * (b) ); }
//******************DELETE****************
#define shubhashis
#ifdef shubhashis
#define debug(args...) {dbg,args; cerr<<endl;}
#else
#define debug(args...) // Just strip off all debug tokens
#endif
// Comma-chained debug printer: `dbg, a, b;` streams each value (space
// separated) to stderr via the overloaded comma operator.
struct debugger{
    template<typename T> debugger& operator , (const T& v){
        cerr<<v<<" ";
        return *this;
    }
}dbg;
//******************DELETE****************
// One block type with side lengths a, b, c (any face may serve as the base).
struct data
{
    int a,b,c;
}a[34]; // assumes at most 34 block types per test case — TODO confirm input bound
int n; // number of block types in the current test case
int dp[3004][3004]; // dp[x][y] = best height under footprint (x, y); -1 = unset (~36MB static)
// Tallest stack achievable when every next block must have a strictly
// smaller base than the current (x, y) footprint.  Tries all six rotations
// of every block type; memoized in dp[x][y] (-1 means not yet computed).
// assumes 0 <= x, y < 3004 so dp[][] indexing is safe — TODO confirm limits
int test(int x,int y)
{
    int p1=0,p2=0,p3=0,p4=0,p5=0,p6=0,p=0;
    if(dp[x][y]!=-1) return dp[x][y];
    for(int i=0;i<n;i++)
    {
        // base a*b (height c) and its mirror
        if(a[i].a<x && a[i].b<y)
        {
            p1=a[i].c+test(a[i].a,a[i].b);
        }
        if(a[i].b<x && a[i].a<y)
        {
            p2=a[i].c+test(a[i].b,a[i].a);
        }
        // base a*c (height b) and its mirror
        if(a[i].a<x && a[i].c<y)
        {
            p3=a[i].b+test(a[i].a,a[i].c);
        }
        if(a[i].c<x && a[i].a<y)
        {
            p4=a[i].b+test(a[i].c,a[i].a);
        }
        // base b*c (height a) and its mirror
        if(a[i].b<x && a[i].c<y)
        {
            p5=a[i].a+test(a[i].b,a[i].c);
        }
        if(a[i].c<x && a[i].b<y)
        {
            p6=a[i].a+test(a[i].c,a[i].b);
        }
        // p1..p6 keep their last assigned value across iterations, which is
        // harmless here because only the running maximum is kept.
        // (removed unused local `flg` from the original)
        p=max(p,max(p1,max(p2,max(p3,max(p4,max(p5,p6))))));
    }
    return (dp[x][y]=p);
}
int main() {
    //READ("in.txt");
    //WRITE("out.txt");
    // Read block-type counts until EOF (a count of 0 also terminates);
    // for each test case try every block/rotation as the tower base and
    // print the tallest stackable height.
    while(~getI(n))
    {
        // reset the memo table to -1; NOTE: memsets the full 3004x3004
        // int array (~36MB) on every test case
        SET(dp);
        if(n==0) break;
        for(int i=0;i<n;i++)
        {
            getIII(a[i].a,a[i].b,a[i].c);
        }
        int res=0;
        for(int i=0;i<n;i++)
        {
            // six base orientations for the bottom block
            int r1=a[i].c+test(a[i].a,a[i].b);
            int r2=a[i].b+test(a[i].a,a[i].c);
            int r3=a[i].a+test(a[i].b,a[i].c);
            int r4=a[i].c+test(a[i].b,a[i].a);
            int r5=a[i].b+test(a[i].c,a[i].a);
            int r6=a[i].a+test(a[i].c,a[i].b);
            int r=max(r1,max(r2,max(r3,max(r4,max(r5,r6)))));
            res=max(res,r);
        }
        printf("%d\n",res);
    }
    return 0;
}
|
/**
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
*/
#include <bits/stdc++.h>
using namespace std;
int main() {
string s;
cin >> s;
int ret = 0;
auto vowels = set<char>{'a','e','i','o','u'};
auto isvowel = [&](char c) { return vowels.count(c); };
for (auto c: s)
ret += isalpha(c) ? isvowel(c) : (c-'0')%2;
cout << ret;
}
|
// Copyright 2021 RobosoftAI Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <smacc2/smacc.hpp>
namespace sm_dance_bot_lite
{
namespace f_pattern_states
{
// Sweep direction of the F-shaped coverage pattern.
enum class TDirection
{
  LEFT,
  RIGHT
};
// FORWARD DECLARATIONS OF INNER STATES
// (defined in the sti_fpattern_*.hpp headers included at the bottom of this
// file; forward-declared so the superstate can name its initial state)
template <typename SS>class StiFPatternRotate1;
template <typename SS>class StiFPatternForward1;
template <typename SS>class StiFPatternReturn1;
template <typename SS>class StiFPatternRotate2;
template <typename SS>class StiFPatternForward2;
template <typename SS>class StiFPatternStartLoop;
}  // namespace f_pattern_states
}  // namespace sm_dance_bot_lite
namespace sm_dance_bot_lite
{
namespace SS4
{
using namespace f_pattern_states;
// STATE DECLARATION
// Superstate driving one F-shaped coverage pattern; starts in
// StiFPatternStartLoop and exits to StNavigateReverse4 when the loop ends.
struct SsFPattern1
: smacc2::SmaccState<SsFPattern1, MsDanceBotRunMode, StiFPatternStartLoop<SsFPattern1>>
{
public:
  using SmaccState::SmaccState;
  // TRANSITION TABLE
  typedef mpl::list<
    Transition<EvLoopEnd<StiFPatternStartLoop<SsFPattern1>>, StNavigateReverse4, ENDLOOP> //,
    >reactions;
  // STATE VARIABLES
  // superstate parameters read by the Sti* inner states
  // (NOTE: the "lenght" spelling is part of the public interface — keep it)
  static constexpr float ray_lenght_meters() { return 3.25; }
  static constexpr float pitch_lenght_meters() { return 0.75; }
  static constexpr int total_iterations() { return 10; }
  static constexpr TDirection direction() { return TDirection::RIGHT; }
  // superstate state variables
  int iteration_count;  // presumably advanced by the loop states — TODO confirm
  // STATE FUNCTIONS
  static void staticConfigure()
  {
    //configure_orthogonal<OrObstaclePerception, CbLidarSensor>();
  }
  void runtimeConfigure() { iteration_count = 0; }
};  // struct SsFPattern1
// FORWARD DECLARATION FOR THE SUPERSTATE
} // namespace SS4
} // namespace sm_dance_bot_lite
#include <sm_dance_bot_lite/states/f_pattern_states/sti_fpattern_forward_1.hpp>
#include <sm_dance_bot_lite/states/f_pattern_states/sti_fpattern_forward_2.hpp>
#include <sm_dance_bot_lite/states/f_pattern_states/sti_fpattern_loop_start.hpp>
#include <sm_dance_bot_lite/states/f_pattern_states/sti_fpattern_return_1.hpp>
#include <sm_dance_bot_lite/states/f_pattern_states/sti_fpattern_rotate_1.hpp>
#include <sm_dance_bot_lite/states/f_pattern_states/sti_fpattern_rotate_2.hpp>
|
#include "crypto.h"
#include "ssl.h" // for OpenSSLException
#include "../assert.h"
#include "../streams/random.h"
namespace Mordor {
#define SSL_CHECK(x) if (!(x)) MORDOR_THROW_EXCEPTION(OpenSSLException()); else (void)0
const std::string CryptoStream::RANDOM_IV;
// Construct a cipher filter over `p`.
//   cipher : OpenSSL cipher (e.g. an EVP_CIPHER from the EVP API)
//   key    : raw key bytes; the length is handed to OpenSSL verbatim
//   iv     : explicit IV, or the RANDOM_IV sentinel (matched by ADDRESS,
//            not value!) to auto-generate the IV on ENCRYPT / extract it
//            in-band on DECRYPT
//   dir    : READ/WRITE, or INFER from the parent stream's capabilities
//   op     : ENCRYPT/DECRYPT, or AUTO (WRITE => ENCRYPT, READ => DECRYPT)
//   own    : whether this stream owns (and closes) the parent
CryptoStream::CryptoStream(Stream::ptr p, const EVP_CIPHER *cipher, const std::string &key,
    const std::string &iv, Direction dir, Operation op, bool own) :
    MutatingFilterStream(p, own),
    m_iv(iv),
    m_dir(dir),
    m_op(op),
    m_eof(false),
    m_iv_to_extract(0)
{
    if (m_dir == INFER) {
        // inference only works when the parent is read-xor-write capable
        MORDOR_ASSERT( parent()->supportsRead() ^ parent()->supportsWrite() );
        m_dir = parent()->supportsWrite() ? WRITE : READ;
    }
    if (m_op == AUTO) {
        m_op = (m_dir == WRITE) ? ENCRYPT : DECRYPT;
    }
    EVP_CIPHER_CTX_init(&m_ctx);
    try
    {
        // do preliminary initialization (everything except the IV)
        SSL_CHECK( EVP_CipherInit_ex(&m_ctx, cipher, NULL, NULL, NULL, (m_op == ENCRYPT) ? 1 : 0) );
        SSL_CHECK( EVP_CIPHER_CTX_set_key_length(&m_ctx, static_cast<int>(key.size())) );
        SSL_CHECK( EVP_CipherInit_ex(&m_ctx, NULL, NULL, (const unsigned char *)key.c_str(), NULL, -1) );
        m_blocksize = EVP_CIPHER_CTX_block_size(&m_ctx);
        // generate an IV, if necessary
        size_t iv_len = static_cast<size_t>(EVP_CIPHER_CTX_iv_length(&m_ctx));
        if (&iv == &RANDOM_IV) {
            if (m_op == ENCRYPT) {
                // draw iv_len random bytes and feed them to the context
                RandomStream random;
                random.read(m_buf, iv_len);
                MORDOR_ASSERT(m_buf.readAvailable() == iv_len);
                m_iv.assign((const char *)m_buf.readBuffer(iv_len, true).iov_base, iv_len);
                init_iv();
                // leave the IV in m_buf;
                // read() will return it ahead of the ciphertext;
                // write() will write it to the parent stream on its first call
            } else {
                // tell read() and write() how much data should be extracted for the iv
                m_iv_to_extract = iv_len;
            }
        } else {
            init_iv();
        }
    }
    catch(...)
    {
        // don't leak the cipher context when setup fails part-way through
        EVP_CIPHER_CTX_cleanup(&m_ctx);
        throw;
    }
}
void CryptoStream::init_iv()
{
    // Feed the pending IV (m_iv) to OpenSSL, then wipe it; no-op when m_iv
    // is empty (e.g. no IV needed, or it was already applied).
    // note: I used to verify that, if m_iv.empty(), EVP_CIPHER_CTX_iv_length returns 0
    // however, some older versions of OpenSSL return a nonzero IV length for ECB mode
    if (!m_iv.empty()) {
        // make sure the size is correct
        if (static_cast<size_t>(EVP_CIPHER_CTX_iv_length(&m_ctx)) != m_iv.size())
            MORDOR_THROW_EXCEPTION(OpenSSLException("incorrect iv length"));
        // feed openssl the IV
        SSL_CHECK( EVP_CipherInit_ex(&m_ctx, NULL, NULL, NULL, (const unsigned char *)m_iv.c_str(), -1) );
        // clear data we don't need anymore
        m_iv.clear();
    }
}
// Releases the OpenSSL cipher context; all other members clean up via RAII.
// Note: does NOT finalize the cipher -- callers must close() the write side
// to flush final padding before destruction.
CryptoStream::~CryptoStream()
{
    EVP_CIPHER_CTX_cleanup(&m_ctx);
}
// Finalizes the cipher (flushing any trailing block/padding) when the write
// side is being closed, then forwards the close to the parent stream if we
// own it.  Finalization happens at most once (guarded by m_eof).
void CryptoStream::close(CloseType type)
{
    const bool closingWrite = (type == Stream::WRITE || type == BOTH);
    if (closingWrite && !m_eof) {
        finalize();
        m_eof = true;
    }
    if (ownsParent())
        parent()->close(type);
}
// Reads up to `len` bytes of [de]crypted data into `out`.
// Loops: drain m_buf (already-processed data, which may include a generated
// IV left there by the constructor), then pull more raw data from the parent,
// extract the stream-leading IV on the first pass if required, and run the
// cipher.  Returns the number of bytes copied; fewer than `len` only at EOF.
size_t CryptoStream::read(Buffer &out, size_t len)
{
    MORDOR_ASSERT( m_dir == READ );
    size_t copied = 0;
    for(;;)
    {
        // copy out [de]crypted data
        size_t to_copy = (std::min)(m_buf.readAvailable(), len - copied);
        if (to_copy > 0) {
            out.copyIn(m_buf, to_copy);
            m_buf.consume(to_copy);
            copied += to_copy;
        }
        if (m_eof || copied == len)
            return copied;
        MORDOR_ASSERT( m_buf.readAvailable() == 0 );
        // m_tmp has no content between calls; it's a member variable
        // solely to reduce allocations/deallocations
        MORDOR_ASSERT( m_tmp.readAvailable() == 0 );
        size_t to_read = len - copied;
        // make sure to read enough that we can make progress
        // (at least one cipher block beyond the IV, plus finalization slack)
        to_read = (std::max)(to_read, 2 * m_blocksize + m_iv_to_extract);
        while(to_read > 0) {
            size_t read = parent()->read(m_tmp, to_read);
            if (read == 0)
                break;  // parent hit EOF
            to_read -= read;
        }
        // initialize the IV, if we haven't done that yet
        if (m_iv_to_extract > 0) {
            // the IV is the first m_iv_to_extract bytes of the stream;
            // a stream shorter than that is malformed
            if (m_tmp.readAvailable() < m_iv_to_extract)
                MORDOR_THROW_EXCEPTION(OpenSSLException("missing iv"));
            m_iv.assign( (char *)m_tmp.readBuffer(m_iv_to_extract, true).iov_base,
                m_iv_to_extract );
            m_tmp.consume(m_iv_to_extract);
            m_iv_to_extract = 0;
            init_iv();
        }
        // encrypt/decrypt some data
        cipher(m_tmp, m_buf, m_tmp.readAvailable());
        m_tmp.consume(m_tmp.readAvailable());
        // check for EOF: cipher produced nothing, so flush the final block
        if (m_buf.readAvailable() == 0) {
            final(m_buf);
            m_eof = true;
        }
    }
}
// ciphers len bytes from src, skipping over skip bytes at the front of the buffer
size_t CryptoStream::cipher(const Buffer &src, Buffer &dst, size_t len, size_t skip)
{
MORDOR_ASSERT(skip <= len);
len -= skip;
if (len == 0)
return 0;
int outlen = static_cast<int>(len) + m_blocksize;
SSL_CHECK(EVP_CipherUpdate(&m_ctx,
(unsigned char *)dst.writeBuffer(len + m_blocksize, true).iov_base, &outlen,
(unsigned char *)src.readBuffer(len + skip, true).iov_base + skip, static_cast<int>(len)));
dst.produce(outlen);
return outlen;
}
// finalizes the cipher and writes the last few bytes to dst
size_t CryptoStream::final(Buffer &dst)
{
int outlen = m_blocksize;
SSL_CHECK(EVP_CipherFinal(&m_ctx,
(unsigned char *)dst.writeBuffer(m_blocksize, true).iov_base, &outlen));
dst.produce(outlen);
return outlen;
}
// writes and consumes entire buffer
void CryptoStream::write_buffer(Buffer &buffer)
{
size_t to_write = buffer.readAvailable();
while(to_write > 0) {
size_t written = parent()->write(buffer, to_write);
buffer.consume(written);
to_write -= written;
}
}
// Writes `len` bytes of plaintext (encrypt) or ciphertext (decrypt).
// When decrypting with RANDOM_IV, the first bytes written are accumulated in
// m_buf until a full IV is available; when encrypting with RANDOM_IV, the
// generated IV waiting in m_buf is flushed to the parent before any payload.
// Always reports `len` consumed (the parent write loop handles short writes).
size_t CryptoStream::write(const Buffer &buffer, size_t len)
{
    MORDOR_ASSERT( m_dir == WRITE );
    size_t iv_skip = 0;
    if (m_iv_to_extract > 0) {
        // seed the IV, if we haven't done so yet
        MORDOR_ASSERT(m_op == DECRYPT);
        iv_skip = (std::min)(m_iv_to_extract, len);
        m_buf.copyIn(buffer, iv_skip);
        m_iv_to_extract -= iv_skip;
        if (m_iv_to_extract > 0)
            return len; // don't have the whole IV yet
        // now we have an IV, so we can initialize the cipher
        size_t iv_len = static_cast<size_t>(EVP_CIPHER_CTX_iv_length(&m_ctx));
        MORDOR_ASSERT(m_buf.readAvailable() == iv_len);
        m_iv.assign((char *)m_buf.readBuffer(iv_len, true).iov_base, iv_len);
        m_buf.clear();
        init_iv();
        if (iv_skip == len)
            return len; // have the IV but no payload yet
    } else if (m_buf.readAvailable() > 0) {
        // write the IV, if we haven't done so yet
        MORDOR_ASSERT(m_op == ENCRYPT);
        write_buffer(m_buf);
        MORDOR_ASSERT( m_buf.readAvailable() == 0 );
    }
    // now cipher and write the payload (skipping any IV bytes consumed above)
    MORDOR_ASSERT( m_tmp.readAvailable() == 0 );
    cipher(buffer, m_tmp, len, iv_skip);
    write_buffer(m_tmp);
    MORDOR_ASSERT( m_tmp.readAvailable() == 0 );
    return len;
}
// Flushes the cipher's final (possibly padded) block to the parent stream.
// Idempotent per stream: only runs on the write side before EOF is flagged
// (close() sets m_eof after calling this).
void CryptoStream::finalize()
{
    if (!m_eof && m_dir == WRITE) {
        // if we're encrypting, and we haven't written an IV
        // (i.e., because the user never called write(),
        // because the file is empty) then do that now
        if (m_buf.readAvailable() > 0) {
            MORDOR_ASSERT(m_op == ENCRYPT);
            write_buffer(m_buf);
            MORDOR_ASSERT( m_buf.readAvailable() == 0 );
        }
        // finalize the cipher (if we actually finished initializing it;
        // if the caller never wrote the ciphertext with the leading IV,
        // then we never even started)
        if (m_iv_to_extract == 0) {
            MORDOR_ASSERT( m_tmp.readAvailable() == 0 );
            final(m_tmp);
            write_buffer(m_tmp);
            MORDOR_ASSERT( m_tmp.readAvailable() == 0 );
        }
        m_eof = true;
    }
}
}
|
/***************************************************************************
I'm in no mood to write documentation now, so if you have any questions
about this driver, please address them to Pat Lawrence <pjl@ns.net>. I'll
be happy to help you out any way I can.
Crystal Castles memory map.
Address A A A A A A A A A A A A A A A A R D D D D D D D D Function
1 1 1 1 1 1 9 8 7 6 5 4 3 2 1 0 / 7 6 5 4 3 2 1 0
5 4 3 2 1 0 W
-------------------------------------------------------------------------------
0000 X X X X X X X X X X X X X X X X W D D D D D D D D X Coordinate
0001 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 W D D D D D D D D Y Coordinate
0002 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 R/W D D D D Bit Mode
0003-0BFF 0 0 0 0 A A A A A A A A A A A A R/W D D D D D D D D RAM (DRAM)
0C00-7FFF 0 A A A A A A A A A A A A A A A R/W D D D D D D D D Screen RAM
8000-8DFF 1 0 0 0 A A A A A A A A A A A A R/W D D D D D D D D RAM (STATIC)
8E00-8EFF 1 0 0 0 1 1 1 0 A A A A A A A A R/W D D D D D D D D MOB BUF 2
-------------------------------------------------------------------------------
8F00-8FFF 1 0 0 0 1 1 1 1 A A A A A A A A R/W D D D D D D D D MOB BUF 1
0 0 R/W D D D D D D D D MOB Picture
0 1 R/W D D D D D D D D MOB Vertical
1 0 R/W D D D D D D D D MOB Priority
1 1 R/W D D D D D D D D MOB Horizontal
-------------------------------------------------------------------------------
9000-90FF 1 0 0 1 0 0 X X A A A A A A A A R/W D D D D D D D D NOVRAM
9400-9401 1 0 0 1 0 1 0 X X X X X X X 0 A R TRAK-BALL 1
9402-9403 1 0 0 1 0 1 0 X X X X X X X 1 A R TRAK-BALL 2
9500-9501 1 0 0 1 0 1 0 X X X X X X X X A R TRAK-BALL 1 mirror
9600 1 0 0 1 0 1 1 X X X X X X X X X R IN0
R D COIN R
R D COIN L
R D COIN AUX
R D SLAM
R D SELF TEST
R D VBLANK
R D JMP1
R D JMP2
-------------------------------------------------------------------------------
9800-980F 1 0 0 1 1 0 0 X X X X X A A A A R/W D D D D D D D D CI/O 0
9A00-9A0F 1 0 0 1 1 0 1 X X X X X A A A A R/W D D D D D D D D CI/O 1
9A08 D D D Option SW
D SPARE
D SPARE
D SPARE
9C00 1 0 0 1 1 1 0 0 0 X X X X X X X W RECALL
-------------------------------------------------------------------------------
9C80 1 0 0 1 1 1 0 0 1 X X X X X X X W D D D D D D D D H Scr Ctr Load
9D00 1 0 0 1 1 1 0 1 0 X X X X X X X W D D D D D D D D V Scr Ctr Load
9D80 1 0 0 1 1 1 0 1 1 X X X X X X X W Int. Acknowledge
9E00 1 0 0 1 1 1 1 0 0 X X X X X X X W WDOG
1 0 0 1 1 1 1 0 1 X X X X A A A W D OUT0
9E80 0 0 0 W D Trak Ball Light P1
9E81 0 0 1 W D Trak Ball Light P2
9E82 0 1 0 W D Store Low
9E83 0 1 1 W D Store High
9E84 1 0 0 W D Spare
9E85 1 0 1 W D Coin Counter R
9E86 1 1 0 W D Coin Counter L
9E87 1 1 1 W D BANK0-BANK1
1 0 0 1 1 1 1 1 0 X X X X A A A W D OUT1
9F00 0 0 0 W D ^AX
9F01 0 0 1 W D ^AY
9F02 0 1 0 W D ^XINC
9F03 0 1 1 W D ^YINC
9F04 1 0 0 W D PLAYER2 (flip screen)
9F05 1 0 1 W D ^SIRE
9F06 1 1 0 W D BOTHRAM
9F07 1 1 1 W D BUF1/^BUF2 (sprite bank)
9F80-9FBF 1 0 0 1 1 1 1 1 1 X A A A A A A W D D D D D D D D COLORAM
A000-FFFF 1 A A A A A A A A A A A A A A A R D D D D D D D D Program ROM
***************************************************************************/
#include "driver.h"
#include "vidhrdw/generic.h"
extern unsigned char *screen_addr;
extern unsigned char *screen_inc;
extern unsigned char *screen_inc_enable;
extern unsigned char *sprite_bank;
extern unsigned char *ccastles_scrollx;
extern unsigned char *ccastles_scrolly;
void ccastles_paletteram_w(int offset,int data);
int ccastles_vh_start(void);
void ccastles_vh_stop(void);
void ccastles_vh_screenrefresh(struct osd_bitmap *bitmap,int full_refresh);
int ccastles_bitmode_r(int offset);
void ccastles_bitmode_w(int offset, int data);
void ccastles_flipscreen_w(int offset,int data);
/* LED outputs: the hardware is active-low, hence the complement of the
   written data before it reaches the OS LED hook. */
static void ccastles_led_w(int offset,int data)
{
	osd_led_w(offset,~data);
}
/* 9E87 write (BANK0-BANK1): selects what appears in banked region 1.
   Nonzero maps the bank-switched level-data ROMs loaded at offset 0x10000;
   zero maps the normal program ROM image at 0xa000. */
static void ccastles_bankswitch_w(int offset, int data)
{
	unsigned char *RAM = Machine->memory_region[Machine->drv->cpu[0].memory_region];

	cpu_setbank(1, data ? &RAM[0x10000] : &RAM[0xa000]);
}
/* CPU read map -- see the memory map in the header comment above */
static struct MemoryReadAddress readmem[] =
{
	{ 0x0000, 0x0001, MRA_RAM },
	{ 0x0002, 0x0002, ccastles_bitmode_r },
	{ 0x0003, 0x90ff, MRA_RAM },	/* All RAM (incl. screen RAM and NOVRAM) */
	{ 0x9400, 0x9400, input_port_2_r },	/* trackball y - player 1 */
	{ 0x9402, 0x9402, input_port_2_r },	/* trackball y - player 2 */
	{ 0x9500, 0x9500, input_port_2_r },	/* trackball y - player 1 mirror */
	{ 0x9401, 0x9401, input_port_3_r },	/* trackball x - player 1 */
	{ 0x9403, 0x9403, input_port_3_r },	/* trackball x - player 2 */
	{ 0x9501, 0x9501, input_port_3_r },	/* trackball x - player 1 mirror */
	{ 0x9600, 0x9600, input_port_0_r },	/* IN0 */
	{ 0x9800, 0x980f, pokey1_r },	/* Random # generator on a Pokey */
	{ 0x9a00, 0x9a0f, pokey2_r },	/* Random #, IN1 */
	{ 0xa000, 0xdfff, MRA_BANK1 },	/* bank-switched by ccastles_bankswitch_w */
	{ 0xe000, 0xffff, MRA_ROM },	/* ROMs/interrupt vectors */
	{ -1 }	/* end of table */
};
/* CPU write map -- see the memory map in the header comment above */
static struct MemoryWriteAddress writemem[] =
{
	{ 0x0000, 0x0001, MWA_RAM, &screen_addr },	/* X/Y coordinate latches */
	{ 0x0002, 0x0002, ccastles_bitmode_w },	/* bit-mode pixel access */
	{ 0x0003, 0x0bff, MWA_RAM },
	{ 0x0c00, 0x7fff, MWA_RAM, &videoram },	/* screen RAM */
	{ 0x8000, 0x8dff, MWA_RAM },
	{ 0x8e00, 0x8eff, MWA_RAM, &spriteram_2, &spriteram_size },	/* MOB BUF 2 */
	{ 0x8f00, 0x8fff, MWA_RAM, &spriteram },	/* MOB BUF 1 */
	{ 0x9000, 0x90ff, MWA_RAM },	/* NVRAM */
	{ 0x9800, 0x980f, pokey1_w },
	{ 0x9a00, 0x9a0f, pokey2_w },
	{ 0x9c80, 0x9c80, MWA_RAM, &ccastles_scrollx },	/* H Scr Ctr Load */
	{ 0x9d00, 0x9d00, MWA_RAM, &ccastles_scrolly },	/* V Scr Ctr Load */
	{ 0x9d80, 0x9d80, MWA_NOP },	/* Int. Acknowledge */
	{ 0x9e00, 0x9e00, MWA_NOP },	/* WDOG */
	{ 0x9e80, 0x9e81, ccastles_led_w },	/* trackball lights */
	{ 0x9e85, 0x9e86, MWA_NOP },	/* coin counters */
	{ 0x9e87, 0x9e87, ccastles_bankswitch_w },	/* BANK0-BANK1 */
	{ 0x9f00, 0x9f01, MWA_RAM, &screen_inc_enable },
	{ 0x9f02, 0x9f03, MWA_RAM, &screen_inc },
	{ 0x9f04, 0x9f04, ccastles_flipscreen_w },	/* PLAYER2 (flip screen) */
	{ 0x9f05, 0x9f06, MWA_RAM },
	{ 0x9f07, 0x9f07, MWA_RAM, &sprite_bank },	/* BUF1/^BUF2 (sprite bank) */
	{ 0x9f80, 0x9fbf, ccastles_paletteram_w },	/* COLORAM */
	{ 0xa000, 0xffff, MWA_ROM },
	{ -1 }	/* end of table */
};
/* Input port layout: IN0 at 0x9600, IN1 via pokey2 allpot, trackball
   Y/X counters on IN2/IN3 */
INPUT_PORTS_START( input_ports )
	PORT_START	/* IN0 */
	PORT_BIT ( 0x01, IP_ACTIVE_LOW, IPT_COIN2 )
	PORT_BIT ( 0x02, IP_ACTIVE_LOW, IPT_COIN1 )
	PORT_BIT ( 0x04, IP_ACTIVE_LOW, IPT_COIN3 )
	PORT_BIT ( 0x08, IP_ACTIVE_LOW, IPT_TILT )
	PORT_BITX( 0x10, 0x10, IPT_DIPSWITCH_NAME | IPF_TOGGLE, "Service Mode", OSD_KEY_F2, IP_JOY_NONE, 0 )
	PORT_DIPSETTING( 0x10, "Off" )
	PORT_DIPSETTING( 0x00, "On" )
	PORT_BIT ( 0x20, IP_ACTIVE_HIGH, IPT_VBLANK )
	PORT_BIT ( 0x40, IP_ACTIVE_LOW, IPT_BUTTON1 )	/* 1p Jump, non-cocktail start1 */
	PORT_BIT ( 0x80, IP_ACTIVE_LOW, IPT_BUTTON1 | IPF_PLAYER2 )	/* 2p Jump, non-cocktail start2 */

	PORT_START	/* IN1 */
	PORT_BIT ( 0x07, IP_ACTIVE_LOW, IPT_UNKNOWN )
	PORT_BIT ( 0x08, IP_ACTIVE_LOW, IPT_START1 )	/* cocktail only */
	PORT_BIT ( 0x10, IP_ACTIVE_LOW, IPT_START2 )	/* cocktail only */
	PORT_DIPNAME (0x20, 0x00, "Cabinet", IP_KEY_NONE )
	PORT_DIPSETTING ( 0x00, "Upright" )
	PORT_DIPSETTING ( 0x20, "Cocktail" )
	PORT_BIT ( 0xc0, IP_ACTIVE_HIGH, IPT_UNKNOWN )

	PORT_START	/* IN2 */
	PORT_ANALOGX( 0xff, 0x7f, IPT_TRACKBALL_Y | IPF_REVERSE, 10, 0, 0, 0, OSD_KEY_UP, OSD_KEY_DOWN, OSD_JOY_UP, OSD_JOY_DOWN, 30 )

	PORT_START	/* IN3 */
	PORT_ANALOGX( 0xff, 0x7f, IPT_TRACKBALL_X, 10, 0, 0, 0, OSD_KEY_LEFT, OSD_KEY_RIGHT, OSD_JOY_LEFT, OSD_JOY_RIGHT, 30 )
INPUT_PORTS_END
static struct GfxLayout ccastles_spritelayout =
{
	8,16,	/* 8*16 sprites */
	256,	/* 256 sprites */
	4,	/* 4 bits per pixel (the most significant bit is always 0) */
	/* the four bitplanes are separated (the old comment said "three",
	   but four plane offsets are listed, matching the 4bpp above) */
	{ 0x2000*8+0, 0x2000*8+4, 0, 4 },
	{ 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 },
	{ 0*16, 1*16, 2*16, 3*16, 4*16, 5*16, 6*16, 7*16,
			8*16, 9*16, 10*16, 11*16, 12*16, 13*16, 14*16, 15*16 },
	32*8	/* every sprite takes 32 consecutive bytes */
};
/* there's nothing here, this is just a placeholder to let the video hardware */
/* pick the background color table. */
static struct GfxLayout fakelayout =
{
	1,1,	/* 1x1 dummy "characters" */
	0,	/* zero of them -- never actually drawn */
	4,	/* 4 bits per pixel */
	{ 0 },
	{ 0 },
	{ 0 },
	0
};
/* region 1 holds the sprite graphics; the fake entry only reserves a
   color-table slot (starting at 16) for the bitmapped background */
static struct GfxDecodeInfo gfxdecodeinfo[] =
{
	{ 1, 0x0000, &ccastles_spritelayout, 0, 1 },
	{ 0, 0, &fakelayout, 16, 1 },
	{ -1 }	/* end of array */
};
/* Two POKEYs: sound plus input -- IN1 is read through POKEY 2's allpot port */
static struct POKEYinterface pokey_interface =
{
	2,	/* 2 chips */
	1250000,	/* 1.25 MHz??? */
	50,
	POKEY_DEFAULT_GAIN,
	NO_CLIP,
	/* The 8 pot handlers */
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	/* The allpot handler */
	{ 0, input_port_1_r },
};
static struct MachineDriver machine_driver =
{
	/* basic machine hardware */
	{
		{
			CPU_M6502,
			1500000,	/* 1.5 Mhz */
			0,
			readmem,writemem,0,0,
			interrupt,4	/* 4 interrupts per frame */
		}
	},
	60, DEFAULT_REAL_60HZ_VBLANK_DURATION,	/* frames per second, vblank duration */
	1,	/* single CPU, no need for interleaving */
	0,

	/* video hardware: 256x232 visible area */
	256, 232, { 0, 255, 0, 231 },
	gfxdecodeinfo,
	32, 32,
	0,

	VIDEO_TYPE_RASTER|VIDEO_MODIFIES_PALETTE,
	0,
	ccastles_vh_start,
	ccastles_vh_stop,
	ccastles_vh_screenrefresh,

	/* sound hardware */
	0,0,0,0,
	{
		{
			SOUND_POKEY,
			&pokey_interface
		}
	}
};
/***************************************************************************
Game driver(s)
***************************************************************************/
/* set 1: revision-3xx program ROMs */
ROM_START(ccastles_rom)
	ROM_REGION(0x14000)	/* 64k for code + 16k of banked level data */
	ROM_LOAD( "ccastles.303", 0xA000, 0x2000, 0x10e39fce )
	ROM_LOAD( "ccastles.304", 0xC000, 0x2000, 0x74510f72 )
	ROM_LOAD( "ccastles.305", 0xE000, 0x2000, 0x9418cf8a )
	ROM_LOAD( "ccastles.102", 0x10000, 0x2000, 0xf6ccfbd4 )	/* Bank switched ROMs */
	ROM_LOAD( "ccastles.101", 0x12000, 0x2000, 0xe2e17236 )	/* containing level data. */

	ROM_REGION_DISPOSE(0x4000)	/* temporary space for graphics */
	ROM_LOAD( "ccastles.107", 0x0000, 0x2000, 0x39960b7d )
	ROM_LOAD( "ccastles.106", 0x2000, 0x2000, 0x9d1d89fc )
ROM_END
/* set 2: revision-2xx program ROMs; level data and graphics are shared */
ROM_START(ccastle2_rom)
	ROM_REGION(0x14000)	/* 64k for code + 16k of banked level data */
	ROM_LOAD( "ccastles.203", 0xA000, 0x2000, 0x348a96f0 )
	ROM_LOAD( "ccastles.204", 0xC000, 0x2000, 0xd48d8c1f )
	ROM_LOAD( "ccastles.205", 0xE000, 0x2000, 0x0e4883cc )
	ROM_LOAD( "ccastles.102", 0x10000, 0x2000, 0xf6ccfbd4 )	/* Bank switched ROMs */
	ROM_LOAD( "ccastles.101", 0x12000, 0x2000, 0xe2e17236 )	/* containing level data. */

	ROM_REGION_DISPOSE(0x4000)	/* temporary space for graphics */
	ROM_LOAD( "ccastles.107", 0x0000, 0x2000, 0x39960b7d )
	ROM_LOAD( "ccastles.106", 0x2000, 0x2000, 0x9d1d89fc )
ROM_END
/* Restore the NOVRAM contents (0x9000-0x90ff) from disk.  No validity
   check is necessary; a missing file is simply ignored.  Always returns 1
   so the core treats the load as done and stops retrying. */
static int hiload(void)
{
	unsigned char *RAM = Machine->memory_region[Machine->drv->cpu[0].memory_region];
	void *f = osd_fopen(Machine->gamedrv->name,0,OSD_FILETYPE_HIGHSCORE,0);

	if (f)
	{
		osd_fread(f,&RAM[0x9000],0x100);
		osd_fclose(f);
	}
	return 1;
}
/* Persist the NOVRAM contents (0x9000-0x90ff) to the highscore file;
   silently does nothing if the file cannot be opened for writing. */
static void hisave(void)
{
	unsigned char *RAM = Machine->memory_region[Machine->drv->cpu[0].memory_region];
	void *f = osd_fopen(Machine->gamedrv->name,0,OSD_FILETYPE_HIGHSCORE,1);

	if (f)
	{
		osd_fwrite(f,&RAM[0x9000],0x100);
		osd_fclose(f);
	}
}
/* parent driver entry for set 1 */
struct GameDriver ccastles_driver =
{
	__FILE__,
	0,	/* no clone-of: this is the parent set */
	"ccastles",
	"Crystal Castles (set 1)",
	"1983",
	"Atari",
	"Pat Lawrence\nChris Hardy\nSteve Clynes\nNicola Salmoria\nBrad Oliver",
	0,
	&machine_driver,
	0,

	ccastles_rom,
	0, 0,
	0,
	0,	/* sound_prom */

	input_ports,

	0, 0, 0,
	ORIENTATION_DEFAULT,

	hiload, hisave	/* NOVRAM persistence */
};
/* clone driver entry for set 2; points back at the parent set */
struct GameDriver ccastle2_driver =
{
	__FILE__,
	&ccastles_driver,	/* clone of ccastles */
	"ccastle2",
	"Crystal Castles (set 2)",
	"1983",
	"Atari",
	"Pat Lawrence\nChris Hardy\nSteve Clynes\nNicola Salmoria\nBrad Oliver",
	0,
	&machine_driver,
	0,

	ccastle2_rom,
	0, 0,
	0,
	0,	/* sound_prom */

	input_ports,

	0, 0, 0,
	ORIENTATION_DEFAULT,

	hiload, hisave	/* NOVRAM persistence */
};
|
#include "Isis.h"
#include "Application.h"
#include "IString.h"
#include "Portal.h"
#include "ProcessMosaic.h"
#include "SpecialPixel.h"
#include "Table.h"
using namespace Isis;
using namespace std;
void testIn(int iss, int isl, int isb, int ins = 0, int inl = 0, int inb = 0);
void testOut(int piSamples, int piLines, int piBands, int piPriority,
int originBand);
/**
* Unit test for ProcessMosaic.
*
* Tests for correct area drop, tracking origin, origin band,
* priorities input, mosaic and band, options to allow HS, LS
* and NULL pixels from input to mosaic, each time displaying
* the contents of the input and mosaic pixels for the area
* under consideration
*
* Also tests for exceptions like number of input and output images to
* be exactly one each, band cannot be priority if Track is set off and
* more
*
* @author 2009-10-14 Sharmila Prasad
*
* @internal
* @history 2018-06-06 Jeannie Backer - Removed file paths from error message written to
* test output.
*/
// Driver for the ProcessMosaic unit test: runs a numbered sequence of
// mosaic scenarios (tracking, priorities, band criteria, averaging) and
// then exercises the error paths.  Each scenario prints the input and
// mosaic pixels via testIn()/testOut() for comparison against truth output.
void IsisMain() {
  Preference::Preferences(true);

  qDebug() << "Testing ProcessMosaic Class ... ";

  // Create the default output cube
  Process p;
  p.SetOutputCube("TO", 5, 5, 1);
  p.EndProcess();

  // ***********************************************************
  // Drop a small area into the middle of the output
  qDebug() << "Create output mosaic with Tracking set to True";
  qDebug() << "1. Drop a small area into the middle of the output";
  ProcessMosaic m1;
  m1.SetTrackFlag(true);
  m1.SetCreateFlag(true);
  m1.SetImageOverlay(ProcessMosaic::PlaceImagesOnTop);

  m1.SetInputCube("FROM", 1, 1, 1, 10, 5, 1);

  Cube *mosaicCube1 = m1.SetOutputCube("TO");

  m1.StartProcess(5, 2, 1); // This should be overwritten by the next StartProcess call
  m1.StartProcess(2, 2, 1);

  // Test for "Tracking" group in the mosaic cube
  if (mosaicCube1->hasGroup("Tracking")) {
    qDebug() << "";
    qDebug() << "a. SUCCESS - \"Tracking\" Group Exists in [" << mosaicCube1->fileName() << "]";
  }
  else {
    qDebug() << "";
    qDebug() << "a. FAILURE - \"Tracking\" Group does not Exist in [" << mosaicCube1->fileName() << "]";
  }

  // Test for Tracking Table "InputImages" in the tracking cube
  QString trackingBase = FileName(mosaicCube1->fileName()).removeExtension().expanded().split("/").last();
  Cube *trackingCube1 = new Cube(FileName(trackingBase + "_tracking.cub"));
  try {
    Table trackTable(ProcessMosaic::TRACKING_TABLE_NAME);
    trackingCube1->read(trackTable);
    qDebug() << "b. SUCCESS - Track Table Exists in [" << trackingCube1->fileName() << "]";
    qDebug().noquote() << Table::toString( trackTable, "\t" );
  }
  catch (IException&) {
    qDebug() << "b. FAILURE - Track Table does not Exist in [" << trackingCube1->fileName() << "]";
  }
  m1.EndProcess();

  testIn(1, 1, 1, 5, 5, 1);
  testOut(2, 2, 1, ProcessMosaic::PlaceImagesOnTop, 2);

  remove("isisMosaic_01.cub");
  remove("isisMosaic_01_tracking.cub");
  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Drop 2,2,1 into the lower right corner of band 2
  qDebug() << "2. Drop 2,2,1 into the lower right corner of band 2";
  qDebug() << "Tracking is set to False";
  ProcessMosaic m2;
  m2.SetTrackFlag(false);
  m2.SetCreateFlag(true);
  m2.SetImageOverlay(ProcessMosaic::PlaceImagesOnTop);

  m2.SetInputCube("FROM", 2, 2, 1, -1, -1, -1);

  p.SetOutputCube("TO", 5, 5, 1);
  p.EndProcess();

  m2.SetOutputCube("TO");

  m2.StartProcess(4, 4, 2);
  m2.EndProcess();

  testIn(2, 2, 1, 5, 5, 1);
  testOut(4, 4, 1, ProcessMosaic::PlaceImagesOnTop, 0);

  remove("isisMosaic_01.cub");
  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Drop 3,3,1 into the upper right corner of band 1
  qDebug() << "3. Drop 3,3,1 into the upper right corner of band 1";
  ProcessMosaic m3;
  m3.SetTrackFlag(true);
  m3.SetCreateFlag(true);
  m3.SetImageOverlay(ProcessMosaic::PlaceImagesBeneath);

  m3.SetInputCube("FROM", 3, 3, 1, 10, 1, 1);

  p.SetOutputCube("TO", 5, 5, 1);
  p.EndProcess();

  m3.SetOutputCube("TO");

  m3.StartProcess(5, 1, 1);
  m3.EndProcess();

  testIn(3, 3, 1, 5, 5, 1);
  testOut(5, 1, 1, ProcessMosaic::PlaceImagesBeneath, 2);

  remove("isisMosaic_01.cub");
  remove("isisMosaic_01_tracking.cub");
  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Drop the first 3x3x1 the upper left corner
  qDebug() << "4. Drop the first 3x3x1 to the upper left corner";
  ProcessMosaic m4;
  m4.SetTrackFlag(true);
  m4.SetCreateFlag(true);
  m4.SetImageOverlay(ProcessMosaic::PlaceImagesBeneath);

  m4.SetInputCube("FROM", 1, 1, 1, 3, 3, 1);

  p.SetOutputCube("TO", 5, 5, 1);
  p.EndProcess();

  m4.SetOutputCube("TO");

  m4.StartProcess(1, 1, 1);
  m4.EndProcess();

  testIn(1, 1, 1, 3, 3, 1);
  testOut(1, 1, 1, ProcessMosaic::PlaceImagesBeneath, 2);

  qDebug() << "***********************************************************************************";

  // Test for mosaic(beneath) priority -- reuses the mosaic from test 4
  qDebug() << "5. Test for mosaic priority with existing mosaic";
  ProcessMosaic m5;
  m5.SetImageOverlay(ProcessMosaic::PlaceImagesBeneath);

  m5.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  m5.SetOutputCube("TO");

  m5.StartProcess(1, 2, 1);
  m5.EndProcess();

  testIn(1, 1, 1, 5, 5, 1);
  testOut(1, 2, 1, ProcessMosaic::PlaceImagesBeneath, 2);

  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Test for band priority using Keywords for band id
  qDebug() << "6. Test for band priority with Keyname \"FilterName\" and value \"Red\" with "
              "Criteria \"Greater\" than in an existing mosaic";
  ProcessMosaic m6;
  m6.SetTrackFlag(true);
  m6.SetCreateFlag(true);
  m6.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
  m6.SetBandKeyword("FilterName", "red");
  m6.SetBandUseMaxValue(true);

  m6.SetInputCube("FROM", 3, 3, 1, 10, 1, 1);

  //p.SetOutputCube("TO", 5, 5, 3);
  //p.EndProcess();

  m6.SetOutputCube("TO");

  m6.StartProcess(1, 1, 1);
  m6.EndProcess();

  testIn(3, 3, 1, 10, 1, 1);
  testOut(1, 1, 1, ProcessMosaic::UseBandPlacementCriteria, 2);

  qDebug() << "***********************************************************************************";

  qDebug() << "7. Test for band priority for existing mosaic with Keyname \"OriginalBand\" and "
              "value \"1\" and Criteria \"Lesser\" than";
  ProcessMosaic m7;
  m7.SetTrackFlag(true);
  m7.SetCreateFlag(false);
  m7.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
  m7.SetBandKeyword("OriginalBand", "1");
  m7.SetBandUseMaxValue(false);
  m7.SetHighSaturationFlag(false);
  m7.SetLowSaturationFlag(false);
  m7.SetNullFlag(false);

  m7.SetInputCube("FROM", 1, 1, 1, 10, 1, 1);

  m7.SetOutputCube("TO");

  m7.StartProcess(1, 1, 1);
  m7.EndProcess();

  testIn(1, 1, 1, 10, 1, 1);
  testOut(1, 1, 1, ProcessMosaic::UseBandPlacementCriteria, 2);

  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Test for band priority using Band Number
  qDebug() << "8. Test for band priority with existing mosaic and BandNumber set";
  ProcessMosaic m8;
  m8.SetTrackFlag(true);
  m8.SetCreateFlag(false);
  m8.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
  m8.SetBandNumber(1);
  m8.SetBandUseMaxValue(false);
  m8.SetHighSaturationFlag(true);
  m8.SetLowSaturationFlag(false);
  m8.SetNullFlag(false);

  m8.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  m8.SetOutputCube("TO");

  m8.StartProcess(1, 3, 1);
  m8.EndProcess();

  testIn(1, 1, 1, 5, 5, 1);
  testOut(1, 3, 1, ProcessMosaic::UseBandPlacementCriteria, 2);

  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Test for HS value set with existing mosaic
  qDebug() << "9. Test for Null flag set with existing mosaic";
  ProcessMosaic m9;
  m9.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
  m9.SetBandNumber(1);
  m9.SetBandUseMaxValue(true);
  m9.SetHighSaturationFlag(false);
  m9.SetLowSaturationFlag(false);
  m9.SetNullFlag(true);

  m9.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  m9.SetOutputCube("TO");

  m9.StartProcess(1, 2, 1);
  m9.EndProcess();

  testIn(1, 1, 1, 5, 5, 1);
  testOut(1, 2, 1, ProcessMosaic::UseBandPlacementCriteria, 2);

  remove("isisMosaic_01.cub");
  remove("isisMosaic_01_tracking.cub");

  // ***********************************************************
  // Test Average Priority: second pass averages the input with the mosaic
  qDebug() << "";
  qDebug() << "10. Test Average Priority";
  // Create the default output cube
  p.SetOutputCube("TO_AVG", 5, 5, 2);
  p.EndProcess();

  ProcessMosaic m10;
  m10.SetTrackFlag(false);
  m10.SetImageOverlay(ProcessMosaic::AverageImageWithMosaic);
  m10.SetCreateFlag(true);

  m10.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  m10.SetOutputCube("TO_AVG");

  m10.StartProcess(1, 1, 1);
  m10.EndProcess();

  testIn(1, 1, 1, 5, 5, 1);
  testOut(1, 1, 1, ProcessMosaic::AverageImageWithMosaic, 0);

  m10.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  m10.SetOutputCube("TO_AVG");

  m10.SetTrackFlag(false);
  m10.SetImageOverlay(ProcessMosaic::AverageImageWithMosaic);
  m10.SetCreateFlag(false);

  m10.StartProcess(-1, -1, 1);
  m10.EndProcess();

  testOut(1, 1, 1, ProcessMosaic::AverageImageWithMosaic, 0);

  remove("isisMosaic_02.cub");
  qDebug() << "****** End Average **********************";

  // Test for band priority using Band Number
  qDebug() << "11. Test for band priority with Tracking Off and BandNumber set";
  ProcessMosaic m11;
  m11.SetTrackFlag(false);
  m11.SetCreateFlag(true);
  m11.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
  m11.SetBandNumber(1);
  m11.SetBandUseMaxValue(false);

  m11.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  p.SetOutputCube("TO", 5, 5, 1);
  p.EndProcess();

  m11.SetOutputCube("TO");

  m11.StartProcess(1, 3, 1);
  m11.EndProcess();

  testIn(1, 1, 1, 5, 5, 1);
  testOut(1, 3, 1, ProcessMosaic::UseBandPlacementCriteria, 0);

  ProcessMosaic m12;
  m12.SetTrackFlag(false);
  m12.SetCreateFlag(true);
  m12.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
  m12.SetBandNumber(1);
  m12.SetBandUseMaxValue(false);

  m12.SetInputCube("FROM", 1, 1, 1, 5, 5, 1);

  m12.SetOutputCube("TO");

  m12.StartProcess(1, 1, 1);
  m12.EndProcess();

  testOut(1, 1, 1, ProcessMosaic::UseBandPlacementCriteria, 0);

  qDebug() << "********* Test imagePositions() ********";
  for (int i = 0; i <= m11.imagePositions().groups() - 1; i++) {
    qDebug() << "Name: " << m11.imagePositions().group(i).name();
    qDebug() << "File: " << FileName(m11.imagePositions().group(i).findKeyword("File")[0]).name();
    qDebug() << "StartSample: " << m11.imagePositions().group(i).findKeyword("StartSample")[0];
    qDebug() << "StartLine: " << m11.imagePositions().group(i).findKeyword("StartLine")[0];
  }

  qDebug() << "***********************************************************************************";

  // ***********************************************************
  // Testing Errors

  // Try to open two input cubes
  qDebug() << "";
  qDebug() << "*** Test Error Handling ***";
  qDebug() << "Test multiple input error";
  try {
    ProcessMosaic m;
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  // ***********************************************************
  // Try to open two output cubes
  qDebug() << "Test multiple output error";
  try {
    ProcessMosaic m;
    m.SetOutputCube("TO");
    m.SetOutputCube("TO");
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  // ***********************************************************
  // Drop the input completely outside the output
  qDebug() << "Test input does not overlap mosaic";
  try {
    ProcessMosaic m;
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.SetOutputCube("TO");
    m.StartProcess(-20, 0, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  qDebug() << "Test input does not overlap mosaic";
  try {
    ProcessMosaic m;
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.SetOutputCube("TO");
    m.StartProcess(54, 23, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  // ***********************************************************
  // Don't open an input cube
  qDebug() << "Test no input cube";
  try {
    ProcessMosaic m;
    m.SetOutputCube("TO");
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  //***********************************************************
  // Don't open an output cube
  qDebug() << "Test no output cube";
  try {
    ProcessMosaic m;
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  //***********************************************************
  // Band cannot be a priority if Track is not set
  qDebug() << "Test Band cannot be a priority if Track is not set";
  try {
    ProcessMosaic m;
    m.SetTrackFlag(false);
    m.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
    m.SetBandNumber(1);

    m.SetOutputCube("TO");
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  // ***********************************************************
  // Test tracking with ontop priority and multiple bands
  qDebug() << "Test tracking with ontop priotirty and multiple bands";
  try {
    ProcessMosaic m;
    m.SetTrackFlag(true);
    m.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
    m.SetBandNumber(10);

    m.SetOutputCube("TO");
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  // ***********************************************************
  // Test Band not found with Band as Priority
  qDebug() << "Test Band not found with Band as Priority";
  try {
    ProcessMosaic m;
    m.SetTrackFlag(false);
    m.SetImageOverlay(ProcessMosaic::UseBandPlacementCriteria);
    m.SetBandNumber(10);

    m.SetOutputCube("TO");
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    e.print();
    p.EndProcess();
    qDebug() << "";
  }

  remove("isisMosaic_01.cub");
  remove("isisMosaic_01_tracking.cub");

  // ***********************************************************
  // Testing errors that can occur
  qDebug() << "***********************************************************************************";
  qDebug() << "Test Pvl Group [BandBin] for mismatch between input cube and established mosaic";
  qDebug() << "  Create output mosaic";
  qDebug() << "  Modify Group [BandBin] so it will differ";
  qDebug() << "  Mosaic the same cube to verify proper error is thrown";
  p.SetOutputCube("TO", 5, 5, 1);
  p.EndProcess();

  ProcessMosaic m13;
  m13.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
  Cube *c = m13.SetOutputCube("TO");
  m13.StartProcess(1, 1, 1);
  // tamper with the mosaic's BandBin so the next mosaic attempt mismatches
  Pvl *pvl = c->label();
  PvlKeyword &key = pvl->findKeyword("OriginalBand", Pvl::Traverse);
  key[0] = "3";
  m13.EndProcess();

  try {
    ProcessMosaic m;
    m.SetOutputCube("TO");
    m.SetInputCube("FROM", 1, 1, 1, -1, -1, -1);
    m.StartProcess(1, 1, 1);
    m.EndProcess();
  }
  catch (IException &e) {
    // strip the absolute path so output is machine-independent
    QString message = e.toString();
    qDebug().noquote() << message.replace(QRegExp("cube.*base/unitTestData"), "cube [base/unitTestData");
    p.EndProcess();
    qDebug() << "";
  }
  remove("isisMosaic_01.cub");
}
/**
* Display the contents of Input image with starting and number of
* samples, lines,bands
*
* @author sprasad (10/14/2009)
*
* @param iss - input starting sample
* @param isl - input starting line
* @param isb - input starting band
* @param ins - input number of samples
* @param inl - input number of lines
* @param inb - input number of bands
*/
// Prints the pixel values of the "FROM" cube over the requested window.
// The ins/inl/inb parameters default to 0 at the prototype, which here
// means "through the last sample/line/band of the cube".
void testIn(int iss, int isl, int isb, int ins, int inl, int inb) {
  Cube cInCube;
  UserInterface &ui = Isis::Application::GetUserInterface();
  QString sFrom = ui.GetFileName("FROM");
  cInCube.open(sFrom);

  qDebug() << "";
  qDebug() << "*** Input Image *** ";
  // 0 => extend the window to the end of the cube in that dimension
  if (ins == 0) ins = cInCube.sampleCount() - iss + 1;
  if (inl == 0) inl = cInCube.lineCount() - isl + 1;
  if (inb == 0) inb = cInCube.bandCount() - isb + 1;

  qDebug() << "Stats " << isl << ", " << iss << ", " << isb << ", "
           << inl << ", " << ins << ", " << inb;

  int iS;
  // one portal row of `ins` samples, read line by line
  Portal ciPortal(ins, 1, cInCube.pixelType());
  for (int band = isb; band <= (isb + inb - 1); band++) {
    for (int line = isl; line <= (isl + inl - 1); line++) {
      iS = iss;
      ciPortal.SetPosition(iss, line, band); //sample, line, band position
      cInCube.read(ciPortal);
      for (int iPixel = 0; iPixel < ciPortal.size(); iPixel++) {
        if (iPixel == 5) {
          qDebug() << "";
        }
        qDebug() << "(" << Isis::toString(line) << "," << Isis::toString(iS++) << ","
                 << Isis::toString(band) << ")=" << Isis::toString((int)ciPortal[iPixel]);
      }
      qDebug() << "";
    }
    qDebug() << "";
  }
  cInCube.close();
}
/**
 * Display the contents of the output (mosaic) image and, when present, its
 * companion tracking cube, for the mosaic being tested
 *
 * @author sprasad (10/14/2009)
 *
 * @param piSamples  - starting sample of the mosaic
 * @param piLines    - starting line of the mosaic
 * @param piBands    - starting band of the mosaic
 * @param piPriority - mosaic priority (e.g. ProcessMosaic::AverageImageWithMosaic)
 * @param originBand - band that holds the input-file origin information
 */
/**
 * Dump the mosaic cube to qDebug for comparison against the unit-test truth
 * data.  Prints a 5x5 window for each band from piBands up; on the origin
 * band (for non-average priorities) each pixel is also decoded into a
 * 1-based input-file index.  If the mosaic label carries a "Tracking" group,
 * the companion "<basename>_tracking.cub" cube is dumped as well.
 */
void testOut(int piSamples, int piLines,
             int piBands, int piPriority, int originBand) {
  Cube cOutCube;
  Cube trackingCube;
  UserInterface &ui = Isis::Application::GetUserInterface();
  QString sTo;
  // Average-with-mosaic runs write to a separate output parameter.
  if (piPriority == ProcessMosaic::AverageImageWithMosaic)
    sTo = ui.GetFileName("TO_AVG");
  else
    sTo = ui.GetFileName("TO");
  cOutCube.open(sTo);
  int iBands = cOutCube.bandCount();
  qDebug() << "";
  qDebug() << "*** Mosaic Image *** ";
  qDebug() << "Start Stats " << Isis::toString(piLines) << ", " << Isis::toString(piSamples)
           << ", " << Isis::toString(piBands);
  qDebug() << "Total Bands=" << Isis::toString(iBands);
  Portal coPortal(5, 1, cOutCube.pixelType());
  int band = piBands;
  while (band <= iBands) {
    for (int line = 1; line <= 5; line++) {
      coPortal.SetPosition(1, line, band); //sample, line, band position
      cOutCube.read(coPortal);
      for (int iPixel = 0; iPixel < coPortal.size(); iPixel++) {
        // Pick the NULL value and file-index offset that match the pixel size.
        int iDefault = 0;
        int iFileIndexOffset = 0;
        switch (SizeOf(cOutCube.pixelType())) {
          case 1:
            iDefault = NULL1;
            iFileIndexOffset = -VALID_MIN1;
            break;
          case 2:
            iDefault = NULL2;
            iFileIndexOffset = -VALID_MIN2;
            break;
          case 4:
            iDefault = INULL4;
            iFileIndexOffset = -ProcessMosaic::FLOAT_STORE_INT_PRECISELY_MIN_VALUE;
            break;
        }
        // Decode the origin-band pixel into a 1-based file index
        // (0 means no input file was recorded for this pixel).
        int iFileIndex = 0;
        if (piPriority != ProcessMosaic::AverageImageWithMosaic && band == originBand &&
            coPortal[iPixel] != iDefault) {
          iFileIndex = (int)coPortal[iPixel] + iFileIndexOffset + 1;
        }
        if (band == originBand && piPriority != ProcessMosaic::AverageImageWithMosaic) {//orig band
          qDebug() << "(" << Isis::toString(line) << "," << Isis::toString(iPixel + 1)
                   << "," << Isis::toString(band) << ")=" << Isis::toString((int)coPortal[iPixel])
                   << ", " << iFileIndex;
        }
        else {
          qDebug() << "(" << Isis::toString(line) << "," << Isis::toString(iPixel + 1)
                   << "," << Isis::toString(band) << ")=" << Isis::toString((int)coPortal[iPixel]);
        }
      }
      qDebug() << "";
    }
    qDebug() << "";
    band++;
    // NOTE(review): redundant guard — the while condition already terminates
    // the loop once band exceeds iBands.
    if (band > iBands) {
      break;
    }
  }
  // Test the tracking cube
  if (cOutCube.hasGroup("Tracking")) {
    qDebug() << "";
    qDebug() << "*** Tracking Cube *** ";
    // The tracking cube sits beside the mosaic as "<basename>_tracking.cub".
    QString trackingBase = FileName(cOutCube.fileName()).removeExtension().expanded().split("/").last();
    trackingCube.open(trackingBase + "_tracking.cub");
    Portal trackingPortal(5, 1, trackingCube.pixelType());
    for (int line = 1; line <= 5; line++) {
      trackingPortal.SetPosition(1, line, 1); //sample, line, band position
      trackingCube.read(trackingPortal);
      for (int iPixel = 0; iPixel < trackingPortal.size(); iPixel++) {
        QString pixelString;
        QString fileIndex;
        // Special pixels print their symbolic name; no file index is known.
        if (IsSpecial(trackingPortal[iPixel])) {
          if (trackingPortal[iPixel] == Isis::Null) {
            pixelString = "Null";
          }
          else if (trackingPortal[iPixel] == Isis::Lrs) {
            pixelString = "Lrs";
          }
          else if (trackingPortal[iPixel] == Isis::Lis) {
            pixelString = "Lis";
          }
          else if (trackingPortal[iPixel] == Isis::Hrs) {
            pixelString = "Hrs";
          }
          else if (trackingPortal[iPixel] == Isis::His) {
            pixelString = "His";
          }
          else {
            pixelString = "Unknown";
          }
          fileIndex = "Unknown";
        }
        else {
          // Normal tracking pixels: the stored value minus 2 is printed as
          // the file index (offset established by the values written here).
          pixelString = Isis::toString((unsigned int)trackingPortal[iPixel]);
          fileIndex = Isis::toString((unsigned int)trackingPortal[iPixel] - 2);
        }
        qDebug() << "(" << Isis::toString(line)
                 << "," << Isis::toString(iPixel + 1)
                 << ")=" << pixelString
                 << ", " << fileIndex;
      }
      qDebug() << "";
    }
    qDebug() << "";
  }
  cOutCube.close();
  // NOTE(review): closed unconditionally even when no tracking cube was
  // opened — assumes Cube::close() is safe on an unopened cube; confirm.
  trackingCube.close();
}
|
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// <numeric>
// UNSUPPORTED: c++98, c++03, c++11, c++14
// template<class InputIterator, class OutputIterator, class T, class BinaryOperation>
// OutputIterator
// inclusive_scan(InputIterator first, InputIterator last,
// OutputIterator result,
// BinaryOperation binary_op); // C++17
#include <numeric>
#include <vector>
#include <cassert>
#include <iostream>
#include "test_iterators.h"
/// Check inclusive_scan both out-of-place (into an empty destination via
/// back_inserter) and in-place (result aliasing the input), comparing the
/// scanned sequence against the expected range [rFirst, rLast).
///
/// FIX: the original signature declared an extra template parameter `T` that
/// was never used and could not be deduced from any argument, which made
/// every deduced call (e.g. the iterator-category tests in main) ill-formed.
/// It has been removed; all in-file call sites rely on deduction, so they
/// now work as intended.
template <class Iter1, class Op, class Iter2>
void
test(Iter1 first, Iter1 last, Op op, Iter2 rFirst, Iter2 rLast)
{
    std::vector<typename std::iterator_traits<Iter1>::value_type> v;

    // Not in place
    std::inclusive_scan(first, last, std::back_inserter(v), op);
    assert(std::equal(v.begin(), v.end(), rFirst, rLast));

    // In place
    v.clear();
    v.assign(first, last);
    std::inclusive_scan(v.begin(), v.end(), v.begin(), op);
    assert(std::equal(v.begin(), v.end(), rFirst, rLast));
}
/// Drive the scan helper through every prefix length of a fixed odd-number
/// input, for one iterator flavour, with both plus and multiplies.
template <class Iter>
void
test()
{
    int input[] = {1, 3, 5, 7, 9};
    const int sums[]     = {1, 4, 9, 16, 25};
    const int products[] = {1, 3, 15, 105, 945};
    const unsigned count = sizeof(input) / sizeof(input[0]);
    static_assert(count == sizeof(sums) / sizeof(sums[0]));         // just to be sure
    static_assert(count == sizeof(products) / sizeof(products[0])); // just to be sure

    for (unsigned int len = 0; len < count; ++len) {
        test(Iter(input), Iter(input + len), std::plus<>(), sums, sums + len);
        test(Iter(input), Iter(input + len), std::multiplies<>(), products, products + len);
    }
}
/// k-th triangular number: 1 + 2 + ... + k == k*(k+1)/2.
int triangle(int k) { return k * (k + 1) / 2; }
// Basic sanity
// Sanity checks for inclusive_scan with std::plus<> on int vectors.
void basic_tests()
{
    // Constant input 3,3,3,... : running sums are 3, 6, 9, ...
    {
        std::vector<int> v(10, 3);
        std::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<>());
        for (size_t i = 0; i < v.size(); ++i)
            assert(v[i] == (int)(i + 1) * 3);
    }
    // Input 0,1,2,... : running sums are the triangular numbers.
    {
        std::vector<int> v(10);
        std::iota(v.begin(), v.end(), 0);
        std::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<>());
        for (size_t i = 0; i < v.size(); ++i)
            assert(v[i] == (int)(i * (i + 1) / 2));
    }
    // Input 1,2,3,... : sums shift up by one triangular number.
    {
        std::vector<int> v(10);
        std::iota(v.begin(), v.end(), 1);
        std::inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<>());
        for (size_t i = 0; i < v.size(); ++i)
            assert(v[i] == (int)((i + 1) * (i + 2) / 2));
    }
    // Empty input yields an empty output range.
    {
        std::vector<int> v, res;
        std::inclusive_scan(v.begin(), v.end(), std::back_inserter(res), std::plus<>());
        assert(res.empty());
    }
}
int main()
{
    // Run the stdlib-only sanity checks.
    basic_tests();

    // All the iterator categories
    // NOTE(review): these instantiations are left disabled; before re-enabling,
    // verify that the `test` helper's template parameters are all deducible at
    // these call sites (as originally written its unused parameter T is not).
//  test<input_iterator        <const int*> >();
//  test<forward_iterator      <const int*> >();
//  test<bidirectional_iterator<const int*> >();
//  test<random_access_iterator<const int*> >();
//  test<const int*>();
//  test<      int*>();
}
|
/*
* Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "JSDestructibleObject.h"
#include "JSCellInlines.h"
#include "SymbolTable.h"
namespace JSC {
// Runtime class metadata for SharedSymbolTable (class name + method table);
// the remaining ClassInfo slots are left null here.
const ClassInfo SharedSymbolTable::s_info = { "SharedSymbolTable", 0, 0, 0, CREATE_METHOD_TABLE(SharedSymbolTable) };
// Slow path for copying from a fat entry: clone `other`'s FatEntry first,
// then release our own, then adopt the clone (order keeps us safe even if
// the two entries share state).
SymbolTableEntry& SymbolTableEntry::copySlow(const SymbolTableEntry& other)
{
    ASSERT(other.isFat());
    FatEntry* clone = new FatEntry(*other.fatEntry());
    freeFatEntry();
    m_bits = bitwise_cast<intptr_t>(clone);
    return *this;
}
// Cell-destruction hook: run the destructor via an explicit qualified call
// (the cell's storage itself is not freed here).
void SharedSymbolTable::destroy(JSCell* cell)
{
    jsCast<SharedSymbolTable*>(cell)->SharedSymbolTable::~SharedSymbolTable();
}
// Slow path of freeFatEntry(): actually deletes the out-of-line FatEntry.
// Precondition: this entry is fat.
void SymbolTableEntry::freeFatEntrySlow()
{
    ASSERT(isFat());
    delete fatEntry();
}
// True only when this entry is fat, owns a WatchpointSet, and that set is
// still valid; any missing link means the entry cannot be watched.
bool SymbolTableEntry::couldBeWatched()
{
    if (!isFat())
        return false;
    if (WatchpointSet* watchpoints = fatEntry()->m_watchpoints.get())
        return watchpoints->isStillValid();
    return false;
}
// Inflate this entry to its fat form and lazily create its WatchpointSet
// (in the InitializedWatching state) if it does not exist yet.
void SymbolTableEntry::attemptToWatch()
{
    FatEntry* entry = inflate();
    if (!entry->m_watchpoints)
        entry->m_watchpoints = adoptRef(new WatchpointSet(InitializedWatching));
}
// Address of the watchpoint set's is-watched flag.
// Precondition: couldBeWatched().
bool* SymbolTableEntry::addressOfIsWatched()
{
    ASSERT(couldBeWatched());
    return fatEntry()->m_watchpoints->addressOfIsWatched();
}
// Register a watchpoint with this entry's WatchpointSet.
// Precondition: couldBeWatched().
void SymbolTableEntry::addWatchpoint(Watchpoint* watchpoint)
{
    ASSERT(couldBeWatched());
    fatEntry()->m_watchpoints->add(watchpoint);
}
// Slow path of notifyWrite(): fire the watchpoint set, if one exists.
void SymbolTableEntry::notifyWriteSlow()
{
    if (WatchpointSet* watchpoints = fatEntry()->m_watchpoints.get())
        watchpoints->notifyWrite();
}
// Slow path of inflate(): move the current inline bits into a freshly
// allocated FatEntry and repoint m_bits at it.
SymbolTableEntry::FatEntry* SymbolTableEntry::inflateSlow()
{
    FatEntry* entry = new FatEntry(m_bits);
    m_bits = bitwise_cast<intptr_t>(entry);
    return entry;
}
} // namespace JSC
|
#include "intset.h"
#include "utils.h"
/* Per-thread log pointer (thread-local storage). */
__thread thread_log_t* my_log;
/*
 * Allocate a detached list node from the epoch allocator and initialize its
 * key, value, mark, and next pointer (NULL).  FIX: the allocation result is
 * now checked, matching the failure handling in new_node_and_set_next_l(),
 * and the volatile pointer is cast on return as the sibling does.
 * NOTE(review): unlike the sibling, this path issues only an sfence and does
 * not INIT_LOCK/write_data_wait — confirm callers rely on that intentionally.
 */
node_l_t* new_node_l(skey_t key, svalue_t val, EpochThread epoch){
	volatile node_l_t *the_node;
	the_node = (node_l_t*)EpochAllocNode(epoch, sizeof(node_l_t));
	if (the_node == NULL)
	{
		perror("malloc @ new_node");
		exit(1);
	}
	the_node->key = key;
	the_node->val = val;
	the_node->next = NULL;
	the_node->marked = 0;
	/* Order the initializing stores before the node is published. */
	_mm_sfence();
	return (node_l_t*) the_node;
}
/*
 * Allocate a list node from the epoch allocator, fill in key/val/next and the
 * unmarked flag, initialize its lock, and write back its cache lines before
 * returning.  Aborts the process if allocation fails.
 * NOTE(review): the `initializing` parameter is accepted but not used here.
 */
node_l_t*
new_node_and_set_next_l(skey_t key, svalue_t val, node_l_t* next, int initializing, EpochThread epoch)
{
	volatile node_l_t *the_node;
	the_node = (node_l_t*)EpochAllocNode(epoch, sizeof(node_l_t));
	if (the_node == NULL)
	{
		perror("malloc @ new_node");
		exit(1);
	}
	the_node->key = key;
	the_node->val = val;
	the_node->next = next;
	the_node->marked = 0;
	INIT_LOCK(ND_GET_LOCK(the_node));
	/* Flush the node's cache lines before it becomes reachable. */
	write_data_wait((void*)the_node, CACHE_LINES_PER_NV_NODE);
	return (node_l_t*) the_node;
}
/*
 * Allocate and build an empty list-based set: a KEY_MIN head sentinel linked
 * to a KEY_MAX tail sentinel.  Aborts on allocation failure.
 */
intset_l_t *set_new_l(EpochThread epoch)
{
	intset_l_t *set;
	node_l_t *min, *max;
	if ((set = (intset_l_t *)malloc(sizeof(intset_l_t))) == NULL)
	{
		perror("malloc");
		exit(1);
	}
	/* Tail sentinel first, so the head sentinel can link to it. */
	max = new_node_and_set_next_l(KEY_MAX, 0, NULL, 1, epoch);
	/* ssalloc_align_alloc(0); */
	min = new_node_and_set_next_l(KEY_MIN, 0, max, 1, epoch);
	set->head = min;
#if defined(LL_GLOBAL_LOCK)
	/* One coarse lock for the whole set when global locking is compiled in. */
	set->lock = (volatile ptlock_t*) malloc(sizeof(ptlock_t));
	if (set->lock == NULL)
	{
		perror("malloc");
		exit(1);
	}
	GL_INIT_LOCK(set->lock);
#endif
	MEM_BARRIER;
	return set;
}
/*
 * Initialize a pre-allocated (bucket) set in place inside an epoch: build the
 * KEY_MIN/KEY_MAX sentinel pair, write back each node and the set header, and
 * (under LL_GLOBAL_LOCK) attach and initialize the caller-supplied lock.
 */
void
bucket_set_init(intset_l_t* set, ptlock_t* lock, EpochThread epoch)
{
	EpochStart(epoch);
	volatile node_l_t* max = new_node_and_set_next_l(KEY_MAX, 0, NULL, 1, epoch);
	write_data_wait((void*)max, CACHE_LINES_PER_NV_NODE);
	volatile node_l_t* min = new_node_and_set_next_l(KEY_MIN, 0, max, 1, epoch);
	write_data_wait((void*)min, CACHE_LINES_PER_NV_NODE);
	set->head = min;
#if defined(LL_GLOBAL_LOCK)
	set->lock = lock;
	GL_INIT_LOCK(set->lock);
#endif
	/* Write back the header so the head pointer is durable before use. */
	write_data_wait((void*)set, 1);
	MEM_BARRIER;
	EpochEnd(epoch);
}
/*
 * Intentionally a no-op: the original teardown is commented out below.
 * NOTE(review): presumably node memory is reclaimed through the epoch
 * allocator instead — confirm before re-enabling explicit frees.
 */
void
node_delete_l(node_l_t *node)
{
	/*DESTROY_LOCK(&node->lock);*/
	/*#if GC == 1*/
	/*ssfree((void*) node);*/
	/*#endif*/
}
/*
 * Intentionally a no-op: the original list walk and frees are commented out
 * (the inline TODO mentions fixing this with ssmem).
 */
void set_delete_l(intset_l_t *set)
{
	/*node_l_t *node, *next;*/
	/*node = set->head;*/
	/*while (node != NULL) */
	/*{*/
	/*next = node->next;*/
	/*DESTROY_LOCK(&node->lock);*/
	/*[> free(node); <]*/
	/*ssfree((void*) node); [> TODO : fix with ssmem <]*/
	/*node = next;*/
	/*}*/
	/*ssfree(set);*/
}
/*
 * Count the elements strictly between the two sentinel nodes.
 * The list always contains at least the KEY_MIN and KEY_MAX sentinels.
 */
int set_size_l(intset_l_t *set)
{
	int count = 0;
	node_l_t *cur;
	/* Start past the head sentinel; stop before the tail sentinel. */
	for (cur = set->head->next; cur->next != NULL; cur = cur->next)
	{
		count++;
	}
	return count;
}
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <string>
// int compare(size_type pos, size_type n1, const charT *s) const;
// When back-deploying to macosx10.7, the RTTI for exception classes
// incorrectly provided by libc++.dylib is mixed with the one in
// libc++abi.dylib and exceptions are not caught properly.
// XFAIL: with_system_cxx_lib=macosx10.7
#include <string.hxx>
#include <stdexcept.hxx>
#include <cassert.hxx>
#include "min_allocator.h"
#include "test_macros.h"
// Map x to -1, 0, or +1 according to its sign.
int sign(int x)
{
    if (x < 0)
        return -1;
    return x > 0 ? 1 : 0;
}
// Compare s.compare(pos1, n1, str) against the expected sign x when pos1 is
// in range; otherwise verify that std::out_of_range is thrown.
template <class S>
void
test(const S& s, typename S::size_type pos1, typename S::size_type n1,
     const typename S::value_type* str, int x)
{
    if (pos1 <= s.size())
    {
        assert(sign(s.compare(pos1, n1, str)) == sign(x));
        return;
    }
#ifndef TEST_HAS_NO_EXCEPTIONS
    try
    {
        TEST_IGNORE_NODISCARD s.compare(pos1, n1, str);
        assert(false);
    }
    catch (const std::out_of_range&)
    {
        assert(pos1 > s.size());
    }
#endif
}
// Table-driven cases for compare(pos, n, s): source lengths 0 and 5, all
// boundary positions/counts, including the out-of-range pos > size() cases.
template <class S>
void test0()
{
    test(S(""), 0, 0, "", 0);
    test(S(""), 0, 0, "abcde", -5);
    test(S(""), 0, 0, "abcdefghij", -10);
    test(S(""), 0, 0, "abcdefghijklmnopqrst", -20);
    test(S(""), 0, 1, "", 0);
    test(S(""), 0, 1, "abcde", -5);
    test(S(""), 0, 1, "abcdefghij", -10);
    test(S(""), 0, 1, "abcdefghijklmnopqrst", -20);
    test(S(""), 1, 0, "", 0);
    test(S(""), 1, 0, "abcde", 0);
    test(S(""), 1, 0, "abcdefghij", 0);
    test(S(""), 1, 0, "abcdefghijklmnopqrst", 0);
    test(S("abcde"), 0, 0, "", 0);
    test(S("abcde"), 0, 0, "abcde", -5);
    test(S("abcde"), 0, 0, "abcdefghij", -10);
    test(S("abcde"), 0, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcde"), 0, 1, "", 1);
    test(S("abcde"), 0, 1, "abcde", -4);
    test(S("abcde"), 0, 1, "abcdefghij", -9);
    test(S("abcde"), 0, 1, "abcdefghijklmnopqrst", -19);
    test(S("abcde"), 0, 2, "", 2);
    test(S("abcde"), 0, 2, "abcde", -3);
    test(S("abcde"), 0, 2, "abcdefghij", -8);
    test(S("abcde"), 0, 2, "abcdefghijklmnopqrst", -18);
    test(S("abcde"), 0, 4, "", 4);
    test(S("abcde"), 0, 4, "abcde", -1);
    test(S("abcde"), 0, 4, "abcdefghij", -6);
    test(S("abcde"), 0, 4, "abcdefghijklmnopqrst", -16);
    test(S("abcde"), 0, 5, "", 5);
    test(S("abcde"), 0, 5, "abcde", 0);
    test(S("abcde"), 0, 5, "abcdefghij", -5);
    test(S("abcde"), 0, 5, "abcdefghijklmnopqrst", -15);
    test(S("abcde"), 0, 6, "", 5);
    test(S("abcde"), 0, 6, "abcde", 0);
    test(S("abcde"), 0, 6, "abcdefghij", -5);
    test(S("abcde"), 0, 6, "abcdefghijklmnopqrst", -15);
    test(S("abcde"), 1, 0, "", 0);
    test(S("abcde"), 1, 0, "abcde", -5);
    test(S("abcde"), 1, 0, "abcdefghij", -10);
    test(S("abcde"), 1, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcde"), 1, 1, "", 1);
    test(S("abcde"), 1, 1, "abcde", 1);
    test(S("abcde"), 1, 1, "abcdefghij", 1);
    test(S("abcde"), 1, 1, "abcdefghijklmnopqrst", 1);
    test(S("abcde"), 1, 2, "", 2);
    test(S("abcde"), 1, 2, "abcde", 1);
    test(S("abcde"), 1, 2, "abcdefghij", 1);
    test(S("abcde"), 1, 2, "abcdefghijklmnopqrst", 1);
    test(S("abcde"), 1, 3, "", 3);
    test(S("abcde"), 1, 3, "abcde", 1);
    test(S("abcde"), 1, 3, "abcdefghij", 1);
    test(S("abcde"), 1, 3, "abcdefghijklmnopqrst", 1);
    test(S("abcde"), 1, 4, "", 4);
    test(S("abcde"), 1, 4, "abcde", 1);
    test(S("abcde"), 1, 4, "abcdefghij", 1);
    test(S("abcde"), 1, 4, "abcdefghijklmnopqrst", 1);
    test(S("abcde"), 1, 5, "", 4);
    test(S("abcde"), 1, 5, "abcde", 1);
    test(S("abcde"), 1, 5, "abcdefghij", 1);
    test(S("abcde"), 1, 5, "abcdefghijklmnopqrst", 1);
    test(S("abcde"), 2, 0, "", 0);
    test(S("abcde"), 2, 0, "abcde", -5);
    test(S("abcde"), 2, 0, "abcdefghij", -10);
    test(S("abcde"), 2, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcde"), 2, 1, "", 1);
    test(S("abcde"), 2, 1, "abcde", 2);
    test(S("abcde"), 2, 1, "abcdefghij", 2);
    test(S("abcde"), 2, 1, "abcdefghijklmnopqrst", 2);
    test(S("abcde"), 2, 2, "", 2);
    test(S("abcde"), 2, 2, "abcde", 2);
    test(S("abcde"), 2, 2, "abcdefghij", 2);
    test(S("abcde"), 2, 2, "abcdefghijklmnopqrst", 2);
    test(S("abcde"), 2, 3, "", 3);
    test(S("abcde"), 2, 3, "abcde", 2);
    test(S("abcde"), 2, 3, "abcdefghij", 2);
    test(S("abcde"), 2, 3, "abcdefghijklmnopqrst", 2);
    test(S("abcde"), 2, 4, "", 3);
    test(S("abcde"), 2, 4, "abcde", 2);
    test(S("abcde"), 2, 4, "abcdefghij", 2);
    test(S("abcde"), 2, 4, "abcdefghijklmnopqrst", 2);
    test(S("abcde"), 4, 0, "", 0);
    test(S("abcde"), 4, 0, "abcde", -5);
    test(S("abcde"), 4, 0, "abcdefghij", -10);
    test(S("abcde"), 4, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcde"), 4, 1, "", 1);
    test(S("abcde"), 4, 1, "abcde", 4);
    test(S("abcde"), 4, 1, "abcdefghij", 4);
    test(S("abcde"), 4, 1, "abcdefghijklmnopqrst", 4);
    test(S("abcde"), 4, 2, "", 1);
    test(S("abcde"), 4, 2, "abcde", 4);
    test(S("abcde"), 4, 2, "abcdefghij", 4);
    test(S("abcde"), 4, 2, "abcdefghijklmnopqrst", 4);
    test(S("abcde"), 5, 0, "", 0);
    test(S("abcde"), 5, 0, "abcde", -5);
    test(S("abcde"), 5, 0, "abcdefghij", -10);
    test(S("abcde"), 5, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcde"), 5, 1, "", 0);
    test(S("abcde"), 5, 1, "abcde", -5);
    test(S("abcde"), 5, 1, "abcdefghij", -10);
    test(S("abcde"), 5, 1, "abcdefghijklmnopqrst", -20);
}
// Table-driven cases for compare(pos, n, s): source length 10, plus the
// pos-past-the-end throwing cases carried over from the length-5 source.
template <class S>
void test1()
{
    test(S("abcde"), 6, 0, "", 0);
    test(S("abcde"), 6, 0, "abcde", 0);
    test(S("abcde"), 6, 0, "abcdefghij", 0);
    test(S("abcde"), 6, 0, "abcdefghijklmnopqrst", 0);
    test(S("abcdefghij"), 0, 0, "", 0);
    test(S("abcdefghij"), 0, 0, "abcde", -5);
    test(S("abcdefghij"), 0, 0, "abcdefghij", -10);
    test(S("abcdefghij"), 0, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghij"), 0, 1, "", 1);
    test(S("abcdefghij"), 0, 1, "abcde", -4);
    test(S("abcdefghij"), 0, 1, "abcdefghij", -9);
    test(S("abcdefghij"), 0, 1, "abcdefghijklmnopqrst", -19);
    test(S("abcdefghij"), 0, 5, "", 5);
    test(S("abcdefghij"), 0, 5, "abcde", 0);
    test(S("abcdefghij"), 0, 5, "abcdefghij", -5);
    test(S("abcdefghij"), 0, 5, "abcdefghijklmnopqrst", -15);
    test(S("abcdefghij"), 0, 9, "", 9);
    test(S("abcdefghij"), 0, 9, "abcde", 4);
    test(S("abcdefghij"), 0, 9, "abcdefghij", -1);
    test(S("abcdefghij"), 0, 9, "abcdefghijklmnopqrst", -11);
    test(S("abcdefghij"), 0, 10, "", 10);
    test(S("abcdefghij"), 0, 10, "abcde", 5);
    test(S("abcdefghij"), 0, 10, "abcdefghij", 0);
    test(S("abcdefghij"), 0, 10, "abcdefghijklmnopqrst", -10);
    test(S("abcdefghij"), 0, 11, "", 10);
    test(S("abcdefghij"), 0, 11, "abcde", 5);
    test(S("abcdefghij"), 0, 11, "abcdefghij", 0);
    test(S("abcdefghij"), 0, 11, "abcdefghijklmnopqrst", -10);
    test(S("abcdefghij"), 1, 0, "", 0);
    test(S("abcdefghij"), 1, 0, "abcde", -5);
    test(S("abcdefghij"), 1, 0, "abcdefghij", -10);
    test(S("abcdefghij"), 1, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghij"), 1, 1, "", 1);
    test(S("abcdefghij"), 1, 1, "abcde", 1);
    test(S("abcdefghij"), 1, 1, "abcdefghij", 1);
    test(S("abcdefghij"), 1, 1, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghij"), 1, 4, "", 4);
    test(S("abcdefghij"), 1, 4, "abcde", 1);
    test(S("abcdefghij"), 1, 4, "abcdefghij", 1);
    test(S("abcdefghij"), 1, 4, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghij"), 1, 8, "", 8);
    test(S("abcdefghij"), 1, 8, "abcde", 1);
    test(S("abcdefghij"), 1, 8, "abcdefghij", 1);
    test(S("abcdefghij"), 1, 8, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghij"), 1, 9, "", 9);
    test(S("abcdefghij"), 1, 9, "abcde", 1);
    test(S("abcdefghij"), 1, 9, "abcdefghij", 1);
    test(S("abcdefghij"), 1, 9, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghij"), 1, 10, "", 9);
    test(S("abcdefghij"), 1, 10, "abcde", 1);
    test(S("abcdefghij"), 1, 10, "abcdefghij", 1);
    test(S("abcdefghij"), 1, 10, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghij"), 5, 0, "", 0);
    test(S("abcdefghij"), 5, 0, "abcde", -5);
    test(S("abcdefghij"), 5, 0, "abcdefghij", -10);
    test(S("abcdefghij"), 5, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghij"), 5, 1, "", 1);
    test(S("abcdefghij"), 5, 1, "abcde", 5);
    test(S("abcdefghij"), 5, 1, "abcdefghij", 5);
    test(S("abcdefghij"), 5, 1, "abcdefghijklmnopqrst", 5);
    test(S("abcdefghij"), 5, 2, "", 2);
    test(S("abcdefghij"), 5, 2, "abcde", 5);
    test(S("abcdefghij"), 5, 2, "abcdefghij", 5);
    test(S("abcdefghij"), 5, 2, "abcdefghijklmnopqrst", 5);
    test(S("abcdefghij"), 5, 4, "", 4);
    test(S("abcdefghij"), 5, 4, "abcde", 5);
    test(S("abcdefghij"), 5, 4, "abcdefghij", 5);
    test(S("abcdefghij"), 5, 4, "abcdefghijklmnopqrst", 5);
    test(S("abcdefghij"), 5, 5, "", 5);
    test(S("abcdefghij"), 5, 5, "abcde", 5);
    test(S("abcdefghij"), 5, 5, "abcdefghij", 5);
    test(S("abcdefghij"), 5, 5, "abcdefghijklmnopqrst", 5);
    test(S("abcdefghij"), 5, 6, "", 5);
    test(S("abcdefghij"), 5, 6, "abcde", 5);
    test(S("abcdefghij"), 5, 6, "abcdefghij", 5);
    test(S("abcdefghij"), 5, 6, "abcdefghijklmnopqrst", 5);
    test(S("abcdefghij"), 9, 0, "", 0);
    test(S("abcdefghij"), 9, 0, "abcde", -5);
    test(S("abcdefghij"), 9, 0, "abcdefghij", -10);
    test(S("abcdefghij"), 9, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghij"), 9, 1, "", 1);
    test(S("abcdefghij"), 9, 1, "abcde", 9);
    test(S("abcdefghij"), 9, 1, "abcdefghij", 9);
    test(S("abcdefghij"), 9, 1, "abcdefghijklmnopqrst", 9);
    test(S("abcdefghij"), 9, 2, "", 1);
    test(S("abcdefghij"), 9, 2, "abcde", 9);
    test(S("abcdefghij"), 9, 2, "abcdefghij", 9);
    test(S("abcdefghij"), 9, 2, "abcdefghijklmnopqrst", 9);
    test(S("abcdefghij"), 10, 0, "", 0);
    test(S("abcdefghij"), 10, 0, "abcde", -5);
    test(S("abcdefghij"), 10, 0, "abcdefghij", -10);
    test(S("abcdefghij"), 10, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghij"), 10, 1, "", 0);
    test(S("abcdefghij"), 10, 1, "abcde", -5);
    test(S("abcdefghij"), 10, 1, "abcdefghij", -10);
    test(S("abcdefghij"), 10, 1, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghij"), 11, 0, "", 0);
    test(S("abcdefghij"), 11, 0, "abcde", 0);
    test(S("abcdefghij"), 11, 0, "abcdefghij", 0);
    test(S("abcdefghij"), 11, 0, "abcdefghijklmnopqrst", 0);
}
// Table-driven cases for compare(pos, n, s): source length 20, including the
// pos == size(), n past-the-end, and pos > size() throwing cases.
template <class S>
void test2()
{
    test(S("abcdefghijklmnopqrst"), 0, 0, "", 0);
    test(S("abcdefghijklmnopqrst"), 0, 0, "abcde", -5);
    test(S("abcdefghijklmnopqrst"), 0, 0, "abcdefghij", -10);
    test(S("abcdefghijklmnopqrst"), 0, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghijklmnopqrst"), 0, 1, "", 1);
    test(S("abcdefghijklmnopqrst"), 0, 1, "abcde", -4);
    test(S("abcdefghijklmnopqrst"), 0, 1, "abcdefghij", -9);
    test(S("abcdefghijklmnopqrst"), 0, 1, "abcdefghijklmnopqrst", -19);
    test(S("abcdefghijklmnopqrst"), 0, 10, "", 10);
    test(S("abcdefghijklmnopqrst"), 0, 10, "abcde", 5);
    test(S("abcdefghijklmnopqrst"), 0, 10, "abcdefghij", 0);
    test(S("abcdefghijklmnopqrst"), 0, 10, "abcdefghijklmnopqrst", -10);
    test(S("abcdefghijklmnopqrst"), 0, 19, "", 19);
    test(S("abcdefghijklmnopqrst"), 0, 19, "abcde", 14);
    test(S("abcdefghijklmnopqrst"), 0, 19, "abcdefghij", 9);
    test(S("abcdefghijklmnopqrst"), 0, 19, "abcdefghijklmnopqrst", -1);
    test(S("abcdefghijklmnopqrst"), 0, 20, "", 20);
    test(S("abcdefghijklmnopqrst"), 0, 20, "abcde", 15);
    test(S("abcdefghijklmnopqrst"), 0, 20, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 0, 20, "abcdefghijklmnopqrst", 0);
    test(S("abcdefghijklmnopqrst"), 0, 21, "", 20);
    test(S("abcdefghijklmnopqrst"), 0, 21, "abcde", 15);
    test(S("abcdefghijklmnopqrst"), 0, 21, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 0, 21, "abcdefghijklmnopqrst", 0);
    test(S("abcdefghijklmnopqrst"), 1, 0, "", 0);
    test(S("abcdefghijklmnopqrst"), 1, 0, "abcde", -5);
    test(S("abcdefghijklmnopqrst"), 1, 0, "abcdefghij", -10);
    test(S("abcdefghijklmnopqrst"), 1, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghijklmnopqrst"), 1, 1, "", 1);
    test(S("abcdefghijklmnopqrst"), 1, 1, "abcde", 1);
    test(S("abcdefghijklmnopqrst"), 1, 1, "abcdefghij", 1);
    test(S("abcdefghijklmnopqrst"), 1, 1, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghijklmnopqrst"), 1, 9, "", 9);
    test(S("abcdefghijklmnopqrst"), 1, 9, "abcde", 1);
    test(S("abcdefghijklmnopqrst"), 1, 9, "abcdefghij", 1);
    test(S("abcdefghijklmnopqrst"), 1, 9, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghijklmnopqrst"), 1, 18, "", 18);
    test(S("abcdefghijklmnopqrst"), 1, 18, "abcde", 1);
    test(S("abcdefghijklmnopqrst"), 1, 18, "abcdefghij", 1);
    test(S("abcdefghijklmnopqrst"), 1, 18, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghijklmnopqrst"), 1, 19, "", 19);
    test(S("abcdefghijklmnopqrst"), 1, 19, "abcde", 1);
    test(S("abcdefghijklmnopqrst"), 1, 19, "abcdefghij", 1);
    test(S("abcdefghijklmnopqrst"), 1, 19, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghijklmnopqrst"), 1, 20, "", 19);
    test(S("abcdefghijklmnopqrst"), 1, 20, "abcde", 1);
    test(S("abcdefghijklmnopqrst"), 1, 20, "abcdefghij", 1);
    test(S("abcdefghijklmnopqrst"), 1, 20, "abcdefghijklmnopqrst", 1);
    test(S("abcdefghijklmnopqrst"), 10, 0, "", 0);
    test(S("abcdefghijklmnopqrst"), 10, 0, "abcde", -5);
    test(S("abcdefghijklmnopqrst"), 10, 0, "abcdefghij", -10);
    test(S("abcdefghijklmnopqrst"), 10, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghijklmnopqrst"), 10, 1, "", 1);
    test(S("abcdefghijklmnopqrst"), 10, 1, "abcde", 10);
    test(S("abcdefghijklmnopqrst"), 10, 1, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 10, 1, "abcdefghijklmnopqrst", 10);
    test(S("abcdefghijklmnopqrst"), 10, 5, "", 5);
    test(S("abcdefghijklmnopqrst"), 10, 5, "abcde", 10);
    test(S("abcdefghijklmnopqrst"), 10, 5, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 10, 5, "abcdefghijklmnopqrst", 10);
    test(S("abcdefghijklmnopqrst"), 10, 9, "", 9);
    test(S("abcdefghijklmnopqrst"), 10, 9, "abcde", 10);
    test(S("abcdefghijklmnopqrst"), 10, 9, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 10, 9, "abcdefghijklmnopqrst", 10);
    test(S("abcdefghijklmnopqrst"), 10, 10, "", 10);
    test(S("abcdefghijklmnopqrst"), 10, 10, "abcde", 10);
    test(S("abcdefghijklmnopqrst"), 10, 10, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 10, 10, "abcdefghijklmnopqrst", 10);
    test(S("abcdefghijklmnopqrst"), 10, 11, "", 10);
    test(S("abcdefghijklmnopqrst"), 10, 11, "abcde", 10);
    test(S("abcdefghijklmnopqrst"), 10, 11, "abcdefghij", 10);
    test(S("abcdefghijklmnopqrst"), 10, 11, "abcdefghijklmnopqrst", 10);
    test(S("abcdefghijklmnopqrst"), 19, 0, "", 0);
    test(S("abcdefghijklmnopqrst"), 19, 0, "abcde", -5);
    test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghij", -10);
    test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghijklmnopqrst"), 19, 1, "", 1);
    test(S("abcdefghijklmnopqrst"), 19, 1, "abcde", 19);
    test(S("abcdefghijklmnopqrst"), 19, 1, "abcdefghij", 19);
    test(S("abcdefghijklmnopqrst"), 19, 1, "abcdefghijklmnopqrst", 19);
    test(S("abcdefghijklmnopqrst"), 19, 2, "", 1);
    test(S("abcdefghijklmnopqrst"), 19, 2, "abcde", 19);
    test(S("abcdefghijklmnopqrst"), 19, 2, "abcdefghij", 19);
    test(S("abcdefghijklmnopqrst"), 19, 2, "abcdefghijklmnopqrst", 19);
    test(S("abcdefghijklmnopqrst"), 20, 0, "", 0);
    test(S("abcdefghijklmnopqrst"), 20, 0, "abcde", -5);
    test(S("abcdefghijklmnopqrst"), 20, 0, "abcdefghij", -10);
    test(S("abcdefghijklmnopqrst"), 20, 0, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghijklmnopqrst"), 20, 1, "", 0);
    test(S("abcdefghijklmnopqrst"), 20, 1, "abcde", -5);
    test(S("abcdefghijklmnopqrst"), 20, 1, "abcdefghij", -10);
    test(S("abcdefghijklmnopqrst"), 20, 1, "abcdefghijklmnopqrst", -20);
    test(S("abcdefghijklmnopqrst"), 21, 0, "", 0);
    test(S("abcdefghijklmnopqrst"), 21, 0, "abcde", 0);
    test(S("abcdefghijklmnopqrst"), 21, 0, "abcdefghij", 0);
    test(S("abcdefghijklmnopqrst"), 21, 0, "abcdefghijklmnopqrst", 0);
}
int main(int, char**)
{
    // Run every compare(pos, n, const char*) case with std::string.
    {
        typedef std::string S;
        test0<S>();
        test1<S>();
        test2<S>();
    }
#if TEST_STD_VER >= 11
    // Repeat with a minimal-allocator string to check allocator independence.
    {
        typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
        test0<S>();
        test1<S>();
        test2<S>();
    }
#endif
    return 0;
}
|
/* The copyright in this software is being made available under the BSD
* License, included below. This software may be subject to other third party
* and contributor rights, including patent rights, and no such rights are
* granted under this license.
*
* Copyright (c) 2010-2020, ITU/ISO/IEC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file DecSlice.cpp
\brief slice decoder class
*/
#include "DecSlice.h"
#include "CommonLib/UnitTools.h"
#include "CommonLib/dtrace_next.h"
#include <vector>
//! \ingroup DecoderLib
//! \{
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
// Nothing to construct here; the decoder pointers are wired up in init().
DecSlice::DecSlice()
{
}
// Nothing to tear down here.
DecSlice::~DecSlice()
{
}
// Intentionally empty; no per-instance resources are allocated.
void DecSlice::create()
{
}
// Intentionally empty; mirrors create().
void DecSlice::destroy()
{
}
// Store the CABAC decoder and CU decoder used later by decompressSlice().
void DecSlice::init( CABACDecoder* cabacDecoder, DecCu* pcCuDecoder )
{
  m_CABACDecoder    = cabacDecoder;
  m_pcCuDecoder     = pcCuDecoder;
}
// Entropy-decodes and reconstructs every CTU of one slice.
//
// slice     - slice to decode; supplies SPS/PPS/APS pointers, the CTU list,
//             slice QP, and reference picture lists.
// bitstream - the slice data; split below into one substream per entry point
//             (tiles / wavefront rows).
// debugCTU  - CTU raster address at which decoding is aborted early (debug aid).
void DecSlice::decompressSlice( Slice* slice, InputBitstream* bitstream, int debugCTU )
{
  //-- For time output for each slice
  slice->startProcessingTimer();

  const SPS* sps = slice->getSPS();
  Picture* pic = slice->getPic();
  CABACReader& cabacReader = *m_CABACDecoder->getCABACReader( 0 );

  // setup coding structure: point the picture-wide CodingStructure at this
  // slice's parameter sets before any CTU is parsed.
  CodingStructure& cs = *pic->cs;
  cs.slice = slice;
  cs.sps = sps;
  cs.pps = slice->getPPS();
  memcpy(cs.alfApss, slice->getAlfAPSs(), sizeof(cs.alfApss));
  cs.lmcsAps = slice->getPicHeader()->getLmcsAPS();
  cs.scalinglistAps = slice->getPicHeader()->getScalingListAPS();
  cs.pcv = slice->getPPS()->pcv;
  cs.chromaQpAdj = 0;
  cs.picture->resizeSAO(cs.pcv->sizeInCtus, 0);
  // Reset the palette predictor at the start of each slice.
  cs.resetPrevPLT(cs.prevPLT);

  // ALF per-CTU bookkeeping is (re)sized once per picture, i.e. only for the
  // slice that starts at CTU address 0.
  if (slice->getFirstCtuRsAddrInSlice() == 0)
  {
    cs.picture->resizeAlfCtuEnableFlag( cs.pcv->sizeInCtus );
    cs.picture->resizeAlfCtbFilterIndex(cs.pcv->sizeInCtus);
    cs.picture->resizeAlfCtuAlternative( cs.pcv->sizeInCtus );
  }

  const unsigned numSubstreams = slice->getNumberOfSubstreamSizes() + 1;

  // init each couple {EntropyDecoder, Substream}
  // Table of extracted substreams.
  std::vector<InputBitstream*> ppcSubstreams( numSubstreams );
  for( unsigned idx = 0; idx < numSubstreams; idx++ )
  {
    // Substream sizes are signalled in bytes (hence << 3 to bits); the last
    // substream consumes whatever remains of the slice payload.
    ppcSubstreams[idx] = bitstream->extractSubstream( idx+1 < numSubstreams ? ( slice->getSubstreamSize(idx) << 3 ) : bitstream->getNumBitsLeft() );
  }

  const unsigned widthInCtus = cs.pcv->widthInCtus;
  const bool wavefrontsEnabled = cs.sps->getEntropyCodingSyncEnabledFlag();
  const bool entryPointPresent = cs.sps->getEntryPointsPresentFlag();

  cabacReader.initBitstream( ppcSubstreams[0] );
  cabacReader.initCtxModels( *slice );

  // Quantization parameter
  pic->m_prevQP[0] = pic->m_prevQP[1] = slice->getSliceQp();
  CHECK( pic->m_prevQP[0] == std::numeric_limits<int>::max(), "Invalid previous QP" );

  DTRACE( g_trace_ctx, D_HEADER, "=========== POC: %d ===========\n", slice->getPOC() );

  // Select the MV clipping routine: inter slices whose first L0 reference
  // picture contains multiple subpictures must clip motion vectors to the
  // subpicture boundary.
  if (slice->getSliceType() != I_SLICE && slice->getRefPic(REF_PIC_LIST_0, 0)->numSubpics > 1)
  {
    clipMv = clipMvInSubpic;
  }
  else
  {
    clipMv = clipMvInPic;
  }

  // for every CTU in the slice segment...
  unsigned subStrmId = 0;
  for( unsigned ctuIdx = 0; ctuIdx < slice->getNumCtuInSlice(); ctuIdx++ )
  {
    // Derive the CTU's raster position and the tile geometry around it.
    const unsigned ctuRsAddr = slice->getCtuAddrInSlice(ctuIdx);
    const unsigned ctuXPosInCtus = ctuRsAddr % widthInCtus;
    const unsigned ctuYPosInCtus = ctuRsAddr / widthInCtus;
    const unsigned tileColIdx = slice->getPPS()->ctuToTileCol( ctuXPosInCtus );
    const unsigned tileRowIdx = slice->getPPS()->ctuToTileRow( ctuYPosInCtus );
    const unsigned tileXPosInCtus = slice->getPPS()->getTileColumnBd( tileColIdx );
    const unsigned tileYPosInCtus = slice->getPPS()->getTileRowBd( tileRowIdx );
    const unsigned tileColWidth = slice->getPPS()->getTileColumnWidth( tileColIdx );
    const unsigned tileRowHeight = slice->getPPS()->getTileRowHeight( tileRowIdx );
    const unsigned tileIdx = slice->getPPS()->getTileIdx( ctuXPosInCtus, ctuYPosInCtus);
    const unsigned maxCUSize = sps->getMaxCUWidth();
    Position pos( ctuXPosInCtus*maxCUSize, ctuYPosInCtus*maxCUSize) ;
    UnitArea ctuArea(cs.area.chromaFormat, Area( pos.x, pos.y, maxCUSize, maxCUSize ) );
    const SubPic &curSubPic = slice->getPPS()->getSubPicFromPos(pos);

    // padding/restore at slice level: before the first CTU of a
    // treated-as-picture subpicture, save and pad the subpicture borders of
    // every reference picture that itself has multiple subpictures.
    if (slice->getPPS()->getNumSubPics()>=2 && curSubPic.getTreatedAsPicFlag() && ctuIdx==0)
    {
      int subPicX = (int)curSubPic.getSubPicLeft();
      int subPicY = (int)curSubPic.getSubPicTop();
      int subPicWidth = (int)curSubPic.getSubPicWidthInLumaSample();
      int subPicHeight = (int)curSubPic.getSubPicHeightInLumaSample();
      for (int rlist = REF_PIC_LIST_0; rlist < NUM_REF_PIC_LIST_01; rlist++)
      {
        int n = slice->getNumRefIdx((RefPicList)rlist);
        for (int idx = 0; idx < n; idx++)
        {
          Picture *refPic = slice->getRefPic((RefPicList)rlist, idx);
          // The saved flag prevents padding the same reference twice when it
          // appears in both lists.
          if (!refPic->getSubPicSaved() && refPic->numSubpics > 1)
          {
            refPic->saveSubPicBorder(refPic->getPOC(), subPicX, subPicY, subPicWidth, subPicHeight);
            refPic->extendSubPicBorder(refPic->getPOC(), subPicX, subPicY, subPicWidth, subPicHeight);
            refPic->setSubPicSaved(true);
          }
        }
      }
    }

    DTRACE_UPDATE( g_trace_ctx, std::make_pair( "ctu", ctuRsAddr ) );

    cabacReader.initBitstream( ppcSubstreams[subStrmId] );

    // set up CABAC contexts' state for this CTU
    if( ctuXPosInCtus == tileXPosInCtus && ctuYPosInCtus == tileYPosInCtus )
    {
      // First CTU of a tile: restart contexts and palette predictor.
      if( ctuIdx != 0 ) // if it is the first CTU, then the entropy coder has already been reset
      {
        cabacReader.initCtxModels( *slice );
        cs.resetPrevPLT(cs.prevPLT);
      }
      pic->m_prevQP[0] = pic->m_prevQP[1] = slice->getSliceQp();
    }
    else if( ctuXPosInCtus == tileXPosInCtus && wavefrontsEnabled )
    {
      // Synchronize cabac probabilities with top CTU if it's available and at the start of a line.
      if( ctuIdx != 0 ) // if it is the first CTU, then the entropy coder has already been reset
      {
        cabacReader.initCtxModels( *slice );
        cs.resetPrevPLT(cs.prevPLT);
      }
      if( cs.getCURestricted( pos.offset(0, -1), pos, slice->getIndependentSliceIdx(), tileIdx, CH_L ) )
      {
        // Top is available, so use it.
        cabacReader.getCtx() = m_entropyCodingSyncContextState;
        cs.setPrevPLT(m_palettePredictorSyncState);
      }
      pic->m_prevQP[0] = pic->m_prevQP[1] = slice->getSliceQp();
    }

    // Reset the BCW weight ordering once at the start of a B slice.
    bool updateBcwCodingOrder = cs.slice->getSliceType() == B_SLICE && ctuIdx == 0;
    if(updateBcwCodingOrder)
    {
      resetBcwCodingOrder(true, cs);
    }

    // Clear HMVP / IBC lookup tables at each tile-column start.
    if ((cs.slice->getSliceType() != I_SLICE || cs.sps->getIBCFlag()) && ctuXPosInCtus == tileXPosInCtus)
    {
      cs.motionLut.lut.resize(0);
      cs.motionLut.lutIbc.resize(0);
      cs.resetIBCBuffer = true;
    }

    if( !cs.slice->isIntra() )
    {
      pic->mctsInfo.init( &cs, getCtuAddr( ctuArea.lumaPos(), *( cs.pcv ) ) );
    }

    // NOTE(review): ctuRsAddr is unsigned, debugCTU is int; a negative
    // sentinel (e.g. -1) converts to a huge unsigned and never matches —
    // presumably intended ("no debug CTU"), but confirm.
    if( ctuRsAddr == debugCTU )
    {
      break;
    }

    // Parse and reconstruct this CTU.
    cabacReader.coding_tree_unit( cs, ctuArea, pic->m_prevQP, ctuRsAddr );
    m_pcCuDecoder->decompressCtu( cs, ctuArea );

    // Store CABAC/palette state after the first CTU of a wavefront row so the
    // row below can synchronize from it.
    if( ctuXPosInCtus == tileXPosInCtus && wavefrontsEnabled )
    {
      m_entropyCodingSyncContextState = cabacReader.getCtx();
      cs.storePrevPLT(m_palettePredictorSyncState);
    }

    if( ctuIdx == slice->getNumCtuInSlice()-1 )
    {
      // Last CTU of the slice: a terminating bit must be present.
      unsigned binVal = cabacReader.terminating_bit();
      CHECK( !binVal, "Expecting a terminating bit" );
#if DECODER_CHECK_SUBSTREAM_AND_SLICE_TRAILING_BYTES
      cabacReader.remaining_bytes( false );
#endif
    }
    else if( ( ctuXPosInCtus + 1 == tileXPosInCtus + tileColWidth ) &&
             ( ctuYPosInCtus + 1 == tileYPosInCtus + tileRowHeight || wavefrontsEnabled ) )
    {
      // The sub-stream/stream should be terminated after this CTU.
      // (end of slice-segment, end of tile, end of wavefront-CTU-row)
      unsigned binVal = cabacReader.terminating_bit();
      CHECK( !binVal, "Expecting a terminating bit" );
      if( entryPointPresent )
      {
#if DECODER_CHECK_SUBSTREAM_AND_SLICE_TRAILING_BYTES
        cabacReader.remaining_bytes( true );
#endif
        // Advance to the substream behind the next entry point.
        subStrmId++;
      }
    }

    // Undo the reference-picture border padding done before the first CTU.
    if (slice->getPPS()->getNumSubPics() >= 2 && curSubPic.getTreatedAsPicFlag() && ctuIdx == (slice->getNumCtuInSlice() - 1))
    // for last Ctu in the slice
    {
      int subPicX = (int)curSubPic.getSubPicLeft();
      int subPicY = (int)curSubPic.getSubPicTop();
      int subPicWidth = (int)curSubPic.getSubPicWidthInLumaSample();
      int subPicHeight = (int)curSubPic.getSubPicHeightInLumaSample();
      for (int rlist = REF_PIC_LIST_0; rlist < NUM_REF_PIC_LIST_01; rlist++)
      {
        int n = slice->getNumRefIdx((RefPicList)rlist);
        for (int idx = 0; idx < n; idx++)
        {
          Picture *refPic = slice->getRefPic((RefPicList)rlist, idx);
          if (refPic->getSubPicSaved())
          {
            refPic->restoreSubPicBorder(refPic->getPOC(), subPicX, subPicY, subPicWidth, subPicHeight);
            refPic->setSubPicSaved(false);
          }
        }
      }
    }
  }

  // deallocate all created substreams, including internal buffers.
  for( auto substr: ppcSubstreams )
  {
    delete substr;
  }

  slice->stopProcessingTimer();
}
//! \}
|
// Copyright (c) 2011-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "qvaluecombobox.h"
QValueComboBox::QValueComboBox(QWidget *parent) :
    QComboBox(parent), role(Qt::UserRole)
{
    // Use the functor-based connect so the signal/slot match is verified at
    // compile time; the string-based SIGNAL()/SLOT() form only fails at
    // runtime (with a console warning) if a signature is misspelled.
    // qOverload<int> picks the int overload of currentIndexChanged.
    connect(this, qOverload<int>(&QComboBox::currentIndexChanged), this,
            &QValueComboBox::handleSelectionChanged);
}
// Returns the data stored under the configured item-data role for the
// currently selected entry (invalid QVariant when nothing is selected).
QVariant QValueComboBox::value() const
{
    return itemData(currentIndex(), role);
}
// Selects the entry whose item data (under the configured role) equals
// `value`. findData() returns -1 when no entry matches, which clears the
// selection via setCurrentIndex(-1).
void QValueComboBox::setValue(const QVariant &value)
{
    setCurrentIndex(findData(value, role));
}
void QValueComboBox::setRole(int _role)
{
this->role = _role;
}
void QValueComboBox::handleSelectionChanged(int idx)
{
Q_EMIT valueChanged();
}
|
/* Copyright © 2017 Apple Inc. All rights reserved.
*
* Use of this source code is governed by a BSD-3-clause license that can
* be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
*/
#ifndef TURI_ACTIVITY_CLASSIFIER_H_
#define TURI_ACTIVITY_CLASSIFIER_H_
#include <core/logging/table_printer/table_printer.hpp>
#include <model_server/lib/extensions/ml_model.hpp>
#include <core/data/sframe/gl_sframe.hpp>
#include <toolkits/activity_classification/ac_data_iterator.hpp>
#include <toolkits/coreml_export/mlmodel_wrapper.hpp>
#include <ml/neural_net/compute_context.hpp>
#include <ml/neural_net/model_backend.hpp>
#include <ml/neural_net/model_spec.hpp>
namespace turi {
namespace activity_classification {
/**
 * Toolkit model that classifies fixed-rate sensor sequences ("sessions")
 * into activity labels using a neural network. Exposes train / predict /
 * evaluate / export entry points to the Unity server via the registration
 * macros below, plus an iterative-training API for client-driven loops.
 */
class EXPORT activity_classifier : public ml_model_base {
 public:
  // TODO: Move this model-spec generation code into a separate file, ideally
  // in the neural_net directory.

  // Builds the initial (untrained) network topology for the given target,
  // feature columns, prediction window and class count.
  static std::unique_ptr<neural_net::model_spec> init_model(
      const std::string& target, const std::vector<std::string>& features,
      size_t prediction_window, size_t num_classes, bool use_random_init,
      int random_seed);

  // Splits `data` into (train, validation) such that every row of a given
  // session lands on the same side of the split.
  static std::tuple<gl_sframe, gl_sframe> random_split_by_session(
      gl_sframe data, std::string session_id_column_name, float fraction,
      size_t seed);

  // ml_model_base interface
  void init_options(const std::map<std::string, flexible_type>& opts) override;
  size_t get_version() const override;
  void save_impl(oarchive& oarc) const override;
  void load_version(iarchive& iarc, size_t version) override;

  // Interface exposed via Unity server
  void train(gl_sframe data, std::string target_column_name,
             std::string session_id_column_name, variant_type validation_data,
             std::map<std::string, flexible_type> opts);
  gl_sarray predict(gl_sframe data, std::string output_type);
  gl_sframe predict_per_window(gl_sframe data, std::string output_type);
  gl_sframe classify(gl_sframe data, std::string output_frequency);
  gl_sframe predict_topk(gl_sframe data, std::string output_type, size_t k,
                         std::string output_frequency);
  variant_map_type evaluate(gl_sframe data, std::string metric);
  std::shared_ptr<coreml::MLModelWrapper> export_to_coreml(
      std::string filename, std::string short_description,
      std::map<std::string, flexible_type> additional_user_defined);
  void import_from_custom_model(variant_map_type model_data, size_t version);

  // Support for iterative training.
  virtual void init_training(gl_sframe data, std::string target_column_name,
                             std::string session_id_column_name,
                             variant_type validation_data,
                             std::map<std::string, flexible_type> opts);
  virtual void resume_training(gl_sframe data, variant_type validation_data);
  virtual void iterate_training();
  virtual void synchronize_training();
  virtual void finalize_training();

  BEGIN_CLASS_MEMBER_REGISTRATION("activity_classifier")

  IMPORT_BASE_CLASS_REGISTRATION(ml_model_base);

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::train, "data", "target",
                                 "session_id", "validation_data", "options");
  register_defaults("train",
                    {{"validation_data", to_variant(std::string("auto"))},
                     {"options",
                      to_variant(std::map<std::string, flexible_type>())}});
  REGISTER_CLASS_MEMBER_DOCSTRING(
      activity_classifier::train,
      "----------\n"
      "data : SFrame\n"
      " Input data which consists of `sessions` of data where each session "
      "is\n"
      " a sequence of data. The data must be in `stacked` format, grouped "
      "by\n"
      " session. Within each session, the data is assumed to be sorted\n"
      " temporally. Columns in `features` will be used to train a model "
      "that\n"
      " will make a prediction using labels in the `target` column.\n"
      "target : string\n"
      " Name of the column containing the target variable. The values in "
      "this\n"
      " column must be of string or integer type.\n"
      "session_id : string\n"
      " Name of the column that contains a unique ID for each session.\n"
      "validation_data : SFrame or string\n"
      " A dataset for monitoring the model's generalization performance to\n"
      " prevent the model from overfitting to the training data.\n"
      "\n"
      " For each row of the progress table, accuracy is measured over the\n"
      " provided training dataset and the `validation_data`. The format of\n"
      " this SFrame must be the same as the training set.\n"
      "\n"
      " When set to 'auto', a validation set is automatically sampled from "
      "the\n"
      " training data (if the training data has > 100 sessions).\n"
      "options : dict\n"
      "\n"
      "Options\n"
      "-------\n"
      "features : list[string]\n"
      " Name of the columns containing the input features that will be "
      "used\n"
      " for classification. If not set, all columns except `session_id` "
      "and\n"
      " `target` will be used.\n"
      "prediction_window : int\n"
      " Number of time units between predictions. For example, if your "
      "input\n"
      " data is sampled at 100Hz, and the `prediction_window` is set to "
      "100\n"
      " (the default), then this model will make a prediction every 1 "
      "second.\n"
      "max_iterations : int\n"
      " Maximum number of iterations/epochs made over the data during the\n"
      " training phase. The default is 10 iterations.\n"
      "batch_size : int\n"
      " Number of sequence chunks used per training step. Must be greater "
      "than\n"
      " the number of GPUs in use. The default is 32.\n"
      "random_seed : int\n"
      " The given seed is used for random weight initialization and "
      "sampling\n"
      " during training\n");

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::init_training, "data",
                                 "target", "session_id", "validation_data",
                                 "options");
  register_defaults("init_training",
                    {{"validation_data", to_variant(std::string("auto"))},
                     {"options",
                      to_variant(std::map<std::string, flexible_type>())}});
  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::resume_training, "data",
                                 "validation_data");
  register_defaults("resume_training",
                    {{"validation_data", to_variant(std::string("auto"))}});
  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::iterate_training);
  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::synchronize_training);
  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::finalize_training);

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::predict, "data",
                                 "output_type");
  register_defaults("predict", {{"output_type", std::string("")}});
  REGISTER_CLASS_MEMBER_DOCSTRING(
      activity_classifier::predict,
      "----------\n"
      "data : SFrame\n"
      " Dataset of new observations. Must include columns with the same\n"
      " names as the features used for model training, but does not require\n"
      " a target column. Additional columns are ignored.\n"
      "output_type : {'class', 'probability_vector'}, optional\n"
      " Form of each prediction which is one of:\n"
      " - 'probability_vector': Prediction probability associated with each\n"
      " class as a vector. The probability of the first class (sorted\n"
      " alphanumerically by name of the class in the training set) is in\n"
      " position 0 of the vector, the second in position 1 and so on.\n"
      " - 'class': Class prediction. This returns the class with maximum\n"
      " probability.\n"
  );

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::predict_per_window,
                                 "data", "output_type");
  register_defaults("predict_per_window", {{"output_type", std::string("")}});
  REGISTER_CLASS_MEMBER_DOCSTRING(
      activity_classifier::predict_per_window,
      "----------\n"
      "data : SFrame\n"
      " Dataset of new observations. Must include columns with the same\n"
      " names as the features used for model training, but does not "
      "require\n"
      " a target column. Additional columns are ignored.\n"
      "output_type : {'class', 'probability_vector'}, optional\n"
      " Form of each prediction which is one of:\n"
      " - 'probability_vector': Prediction probability associated with "
      "each\n"
      " class as a vector. The probability of the first class (sorted\n"
      " alphanumerically by name of the class in the training set) is in\n"
      " position 0 of the vector, the second in position 1 and so on. \n"
      " A probability_vector is given per prediction_window. \n"
      " - 'class': Class prediction. This returns the class with maximum\n"
      " probability per prediction_window.\n");

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::classify, "data",
                                 "output_frequency");
  register_defaults("classify", {{"output_frequency", "per_row"}});

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::predict_topk, "data",
                                 "output_type", "k", "output_frequency");
  register_defaults("predict_topk", {{"output_type", "probability"},
                                     {"k", 3},
                                     {"output_frequency", "per_row"}});

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::evaluate, "data",
                                 "metric");
  register_defaults("evaluate", {{"metric", std::string("auto")}});
  REGISTER_CLASS_MEMBER_DOCSTRING(
      activity_classifier::evaluate,
      "----------\n"
      "data : SFrame\n"
      " Dataset of new observations. Must include columns with the same\n"
      " names as the features used for model training, but does not require\n"
      " a target column. Additional columns are ignored.\n"
      "metric : str, optional\n"
      " Name of the evaluation metric. Possible values are:\n"
      " - 'auto' : Returns all available metrics\n"
      " - 'accuracy' : Classification accuracy (micro average)\n"
      " - 'auc' : Area under the ROC curve (macro average)\n"
      " - 'precision' : Precision score (macro average)\n"
      " - 'recall' : Recall score (macro average)\n"
      " - 'f1_score' : F1 score (macro average)\n"
      " - 'log_loss' : Log loss\n"
      " - 'confusion_matrix' : An SFrame with counts of possible\n"
      " prediction/true label combinations.\n"
      " - 'roc_curve' : An SFrame containing information needed for an\n"
      " ROC curve\n"
  );

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::export_to_coreml,
                                 "filename", "short_description",
                                 "additional_user_defined");
  register_defaults("export_to_coreml",
                    {{"short_description", ""},
                     {"additional_user_defined",
                      to_variant(std::map<std::string, flexible_type>())}});

  REGISTER_CLASS_MEMBER_FUNCTION(activity_classifier::import_from_custom_model,
                                 "model_data", "version");

  END_CLASS_MEMBER_REGISTRATION

 protected:
  // Override points allowing subclasses to inject dependencies

  // Factory for data_iterator
  virtual std::unique_ptr<data_iterator> create_iterator(
      gl_sframe data, bool requires_labels, bool infer_class_labels,
      bool is_train, bool use_data_augmentation) const;

  // Factory for compute_context
  virtual std::unique_ptr<neural_net::compute_context> create_compute_context()
      const;

  // Returns the initial neural network to train
  virtual std::unique_ptr<neural_net::model_spec> init_model(
      bool use_random_init) const;

  // Resolves `validation_data` (an SFrame or "auto") into concrete
  // (train, validation) SFrames.
  virtual std::tuple<gl_sframe, gl_sframe> init_data(
      gl_sframe data, variant_type validation_data,
      std::string session_id_column_name) const;

  virtual std::tuple<float, float> compute_validation_metrics(
      size_t prediction_window, size_t num_classes, size_t batch_size);

  virtual void init_table_printer(bool has_validation);

  // Returns an SFrame where each row corresponds to one prediction, and
  // containing four columns: "session_id" indicating the session ID shared by
  // the samples in the prediction window, "prediction_id" indicating the index
  // of the prediction window within the session, "preds" containing the class
  // probability vector for the prediction window, and "num_samples" indicating
  // the number of corresponding rows from the original SFrame (at most the
  // prediction window size).
  virtual gl_sframe perform_inference(data_iterator* data) const;

  // Utility code

  // Typed accessor into the ml_model_base state map.
  template <typename T>
  T read_state(const std::string& key) const {
    return variant_get_value<T>(get_state().at(key));
  }

 private:
  const neural_net::model_spec* read_model_spec() const;

  // Whether to include loss in the progress table, in addition to accuracy
  bool show_loss_ = true;

  // Primary representation for the trained model.
  // TODO: Replace model_spec with a Checkpoint class that encapsulates
  // serialization.
  mutable bool nn_spec_synchronized_ = false;
  std::unique_ptr<neural_net::model_spec> nn_spec_;

  // Primary dependencies for training. These should be nonnull while training
  // is in progress.
  gl_sframe training_data_;  // TODO: Avoid storing gl_sframe AND data_iterator.
  gl_sframe validation_data_;
  std::unique_ptr<data_iterator> training_data_iterator_;
  std::unique_ptr<data_iterator> validation_data_iterator_;
  std::unique_ptr<neural_net::compute_context> training_compute_context_;
  std::unique_ptr<neural_net::model_backend> training_model_;

  // Nonnull while training is in progress, if progress printing is enabled.
  std::unique_ptr<table_printer> training_table_printer_;
};
} // namespace activity_classification
} // namespace turi
#endif //TURI_ACTIVITY_CLASSIFIER_H_
|
// Copyright 1998-2015 Epic Games, Inc. All Rights Reserved.
#include "MessagingDebuggerPrivatePCH.h"
#define LOCTEXT_NAMESPACE "SMessagingEndpointDetails"
/* SMessagingEndpointDetails interface
*****************************************************************************/
/**
 * Builds the endpoint-details panel: a grid with received/sent message
 * counters on top and the endpoint's address list (with registration times)
 * filling the rest of the space below.
 */
void SMessagingEndpointDetails::Construct( const FArguments& InArgs, const FMessagingDebuggerModelRef& InModel, const TSharedRef<ISlateStyle>& InStyle )
{
	Model = InModel;
	Style = InStyle;

	ChildSlot
	[
		SNew(SVerticalBox)

		+ SVerticalBox::Slot()
			.AutoHeight()
			.Padding(4.0f, 2.0f)
			[
				SNew(SGridPanel)
					.FillColumn(1, 1.0f)

				// received messages count
				+ SGridPanel::Slot(0, 0)
					.Padding(0.0f, 4.0f)
					[
						SNew(STextBlock)
							.Text(LOCTEXT("EndpointDetailsReceivedMessagesLabel", "Messages Received:"))
					]

				+ SGridPanel::Slot(1, 0)
					.HAlign(HAlign_Right)
					.Padding(0.0f, 4.0f)
					[
						SNew(STextBlock)
							.Text(this, &SMessagingEndpointDetails::HandleEndpointDetailsReceivedMessagesText)
					]

				// sent messages count
				// NOTE(review): the LOCTEXT key below says "Received" but the
				// label is "Messages Sent:" — looks like a copy/paste key.
				// Confirm before renaming; changing a key invalidates any
				// existing translations for it.
				+ SGridPanel::Slot(0, 1)
					.Padding(0.0f, 4.0f)
					[
						SNew(STextBlock)
							.Text(LOCTEXT("EndpointDetailsReceivedLabel", "Messages Sent:"))
					]

				+ SGridPanel::Slot(1, 1)
					.HAlign(HAlign_Right)
					.Padding(0.0f, 4.0f)
					[
						SNew(STextBlock)
							.Text(this, &SMessagingEndpointDetails::HandleEndpointDetailsSentMessagesText)
					]
			]

		+ SVerticalBox::Slot()
			.FillHeight(1.0f)
			.Padding(0.0f, 8.0f, 0.0f, 0.0f)
			[
				SNew(SBorder)
					.BorderImage(InStyle->GetBrush("GroupBorder"))
					.Padding(0.0f)
					[
						// address list (rows regenerated via HandleAddressListGenerateRow;
						// AddressList itself is repopulated every frame in Tick)
						SAssignNew(AddressListView, SListView<FMessageTracerAddressInfoPtr>)
							.ItemHeight(24.0f)
							.ListItemsSource(&AddressList)
							.SelectionMode(ESelectionMode::None)
							.OnGenerateRow(this, &SMessagingEndpointDetails::HandleAddressListGenerateRow)
							.HeaderRow
							(
								SNew(SHeaderRow)

								+ SHeaderRow::Column("Address")
									.DefaultLabel(FText::FromString(TEXT("Addresses")))
									.FillWidth(1.0f)

								+ SHeaderRow::Column("TimeRegistered")
									.DefaultLabel(LOCTEXT("AddressListTimeRegisteredColumnHeader", "Time Registered"))
									.FixedWidth(112.0f)
									.HAlignCell(HAlign_Right)
									.HAlignHeader(HAlign_Right)

								+ SHeaderRow::Column("TimeUnregistered")
									.DefaultLabel(LOCTEXT("AddressListTimeUnregisteredColumnHeader", "Time Unregistered"))
									.FixedWidth(112.0f)
									.HAlignCell(HAlign_Right)
									.HAlignHeader(HAlign_Right)
							)
					]
			]
	];
}
/* SCompoundWidget overrides
*****************************************************************************/
void SMessagingEndpointDetails::Tick( const FGeometry& AllottedGeometry, const double InCurrentTime, const float InDeltaTime )
{
	// Poll the model every frame so the address list tracks the currently
	// selected endpoint without needing change notifications.
	RefreshAddressInfo();
}
/* SMessagingMessageDetails implementation
*****************************************************************************/
// Rebuilds AddressList from the currently selected endpoint (or empties it
// when no endpoint is selected), then asks the list view to redraw.
void SMessagingEndpointDetails::RefreshAddressInfo()
{
	FMessageTracerEndpointInfoPtr SelectedEndpoint = Model->GetSelectedEndpoint();

	if (!SelectedEndpoint.IsValid())
	{
		AddressList.Reset();
	}
	else
	{
		SelectedEndpoint->AddressInfos.GenerateValueArray(AddressList);
	}

	AddressListView->RequestListRefresh();
}
/* SMessagingEndpointDetails event handlers
*****************************************************************************/
// Creates one table row widget for an entry of the address list view.
TSharedRef<ITableRow> SMessagingEndpointDetails::HandleAddressListGenerateRow( FMessageTracerAddressInfoPtr AddressInfo, const TSharedRef<STableViewBase>& OwnerTable )
{
	return SNew(SMessagingAddressTableRow, OwnerTable, Model.ToSharedRef())
		.AddressInfo(AddressInfo)
		.Style(Style);
}
// Text getter for the "Messages Received:" counter; empty text when no
// endpoint is selected.
FText SMessagingEndpointDetails::HandleEndpointDetailsReceivedMessagesText() const
{
	FMessageTracerEndpointInfoPtr Endpoint = Model->GetSelectedEndpoint();

	if (!Endpoint.IsValid())
	{
		return FText::GetEmpty();
	}

	return FText::AsNumber(Endpoint->ReceivedMessages.Num());
}
// Text getter for the "Messages Sent:" counter; empty text when no endpoint
// is selected.
FText SMessagingEndpointDetails::HandleEndpointDetailsSentMessagesText() const
{
	FMessageTracerEndpointInfoPtr Endpoint = Model->GetSelectedEndpoint();

	if (!Endpoint.IsValid())
	{
		return FText::GetEmpty();
	}

	return FText::AsNumber(Endpoint->SentMessages.Num());
}
#undef LOCTEXT_NAMESPACE
|
// Copyright Carl Philipp Reh 2006 - 2019.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <sge/image/size_type.hpp>
#include <sge/image/color/element_count.hpp>
#include <sge/image/color/format.hpp>
#include <fcppt/assert/unreachable.hpp>
// Maps each pixel format onto the number of channels it stores per pixel.
sge::image::size_type sge::image::color::element_count(sge::image::color::format const _format)
{
	using fmt = sge::image::color::format;

	switch (_format)
	{
	// single channel: alpha-only, luminance-only, single float
	case fmt::a8:
	case fmt::l8:
	case fmt::r32f:
		return 1u;
	// two channels: luminance + alpha
	case fmt::la8:
		return 2u;
	// three channels: RGB/BGR in integer, float, and sRGB variants
	case fmt::rgb8:
	case fmt::bgr8:
	case fmt::rgb32f:
	case fmt::bgr32f:
	case fmt::srgb8:
	case fmt::sbgr8:
		return 3u;
	// four channels: with alpha or a padding byte ('x')
	case fmt::rgba8:
	case fmt::rgbx8:
	case fmt::bgra8:
	case fmt::bgrx8:
	case fmt::rgba32f:
	case fmt::bgra32f:
	case fmt::srgba8:
	case fmt::sbgra8:
		return 4u;
	}

	// All enumerators are handled above; reaching this is a logic error.
	FCPPT_ASSERT_UNREACHABLE;
}
|
/*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2017-2021 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#include <inviwopy/pyproperties.h>
#include <inviwo/core/properties/propertyfactory.h>
#include <inviwopy/inviwopy.h>
#include <inviwo/core/properties/constraintbehavior.h>
#include <inviwo/core/properties/buttonproperty.h>
#include <inviwo/core/properties/buttongroupproperty.h>
#include <inviwo/core/properties/transferfunctionproperty.h>
#include <inviwo/core/properties/isovalueproperty.h>
#include <inviwo/core/properties/isotfproperty.h>
#include <inviwo/core/properties/stringproperty.h>
#include <inviwo/core/properties/fileproperty.h>
#include <inviwo/core/properties/directoryproperty.h>
#include <inviwo/core/properties/filepatternproperty.h>
#include <inviwo/core/properties/boolproperty.h>
#include <inviwo/core/properties/boolcompositeproperty.h>
#include <inviwo/core/properties/propertyeditorwidget.h>
#include <inviwo/core/util/stdextensions.h>
#include <inviwo/core/util/colorconversion.h>
#include <inviwo/core/datastructures/tfprimitive.h>
//#include <inviwo/core/datastructures/tfprimitiveset.h>
#include <pybind11/functional.h>
#include <fmt/format.h>
namespace py = pybind11;
namespace inviwo {
template <typename P, typename... Extra>
using PyPropertyClass = py::class_<P, Extra..., PropertyPtr<P>>;
void exposeProperties(py::module& m) {
py::enum_<ConstraintBehavior>(m, "ConstraintBehavior")
.value("Editable", ConstraintBehavior::Editable)
.value("Mutable", ConstraintBehavior::Mutable)
.value("Immutable", ConstraintBehavior::Immutable)
.value("Ignore", ConstraintBehavior::Ignore);
py::enum_<InvalidationLevel>(m, "InvalidationLevel")
.value("Valid", InvalidationLevel::Valid)
.value("InvalidOutput", InvalidationLevel::InvalidOutput)
.value("InvalidResources", InvalidationLevel::InvalidResources);
py::class_<PropertySemantics>(m, "PropertySemantics")
.def(py::init())
.def(py::init<std::string>(), py::arg("semantic"))
.def("getString", &PropertySemantics::getString)
// clang-format off
.def_property_readonly_static("Default", [](py::object) { return PropertySemantics::Default; })
.def_property_readonly_static("Text", [](py::object) { return PropertySemantics::Text; })
.def_property_readonly_static("SpinBox", [](py::object) { return PropertySemantics::SpinBox; })
.def_property_readonly_static("Color", [](py::object) { return PropertySemantics::Color; })
.def_property_readonly_static("LightPosition", [](py::object) { return PropertySemantics::LightPosition; })
.def_property_readonly_static("TextEditor", [](py::object) { return PropertySemantics::TextEditor; })
.def_property_readonly_static("Multiline", [](py::object) { return PropertySemantics::Multiline; })
.def_property_readonly_static("ImageEditor", [](py::object) { return PropertySemantics::ImageEditor; })
.def_property_readonly_static("ShaderEditor", [](py::object) { return PropertySemantics::ShaderEditor; })
.def_property_readonly_static("PythonEditor", [](py::object) { return PropertySemantics::PythonEditor; })
// clang-format on
.def("__repr__", [](const PropertySemantics& s) {
return fmt::format("<PropertySemantics: '{}'>", s.getString());
});
py::class_<PropertyFactory>(m, "PropertyFactory")
.def("hasKey", [](PropertyFactory* pf, std::string key) { return pf->hasKey(key); })
.def_property_readonly("keys", [](PropertyFactory* pf) { return pf->getKeys(); })
.def("create", [](PropertyFactory* pf, std::string key) { return pf->create(key); });
py::class_<PropertyWidget>(m, "PropertyWidget")
.def_property_readonly("editorWidget", &PropertyWidget::getEditorWidget,
py::return_value_policy::reference)
.def_property_readonly("property", &PropertyWidget::getProperty,
py::return_value_policy::reference);
py::class_<PropertyEditorWidget>(m, "PropertyEditorWidget")
.def_property("visible", &PropertyEditorWidget::isVisible,
&PropertyEditorWidget::setVisible)
.def_property("dimensions", &PropertyEditorWidget::getDimensions,
&PropertyEditorWidget::setDimensions)
.def_property("position", &PropertyEditorWidget::getPosition,
&PropertyEditorWidget::setPosition);
PyPropertyClass<Property>(m, "Property")
.def_property("identifier", &Property::getIdentifier, &Property::setIdentifier)
.def_property("displayName", &Property::getDisplayName, &Property::setDisplayName)
.def_property("readOnly", &Property::getReadOnly, &Property::setReadOnly)
.def_property("visible", &Property::getVisible, &Property::setVisible)
.def_property("semantics", &Property::getSemantics, &Property::setSemantics)
.def_property_readonly("classIdentifierForWidget", &Property::getClassIdentifierForWidget)
.def_property_readonly("path", &Property::getPath)
.def_property_readonly("invalidationLevel", &Property::getInvalidationLevel)
.def_property_readonly("widgets", &Property::getWidgets)
.def_property_readonly("isModified", &Property::isModified)
.def("hasWidgets", &Property::hasWidgets)
.def("setCurrentStateAsDefault", &Property::setCurrentStateAsDefault)
.def("resetToDefaultState", &Property::resetToDefaultState)
.def("onChange", [](Property* p, std::function<void()> func) { p->onChange(func); })
.def("visibilityDependsOn",
[](Property* p, Property* other, std::function<bool(Property&)> func) {
p->visibilityDependsOn(*other, func);
})
.def("readonlyDependsOn",
[](Property* p, Property* other, std::function<bool(Property&)> func) {
p->readonlyDependsOn(*other, func);
});
PyPropertyClass<CompositeProperty, Property, PropertyOwner>(m, "CompositeProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new CompositeProperty(identifier, displayName, invalidationLevel,
semantics);
}),
py::arg("identifier"), py::arg("displayName"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidResources,
py::arg("semantics") = PropertySemantics::Default)
.def("setCollapsed", &CompositeProperty::setCollapsed)
.def("isCollapsed", &CompositeProperty::isCollapsed)
.def_property("collapsed", &BoolCompositeProperty::isCollapsed,
&BoolCompositeProperty::setCollapsed)
.def(
"__getattr__",
[](CompositeProperty& po, const std::string& key) {
if (auto prop = po.getPropertyByIdentifier(key)) {
return prop;
} else {
throw py::attribute_error{"CompositeProperty (" + po.getPath() +
") does not have a property with identifier: '" +
key + "'"};
}
},
py::return_value_policy::reference);
PyPropertyClass<BoolCompositeProperty, CompositeProperty, PropertyOwner>(
m, "BoolCompositeProperty")
.def(
py::init([](const std::string& identifier, const std::string& displayName, bool checked,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new BoolCompositeProperty(identifier, displayName, checked,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("checked"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidResources,
py::arg("semantics") = PropertySemantics::Default)
.def("isChecked", &BoolCompositeProperty::isChecked)
.def("setChecked", &BoolCompositeProperty::setChecked)
.def_property("checked", &BoolCompositeProperty::isChecked,
&BoolCompositeProperty::setChecked)
.def("__bool__", &BoolCompositeProperty::isChecked);
PyPropertyClass<BaseOptionProperty, Property>(m, "BaseOptionProperty")
.def_property_readonly("clearOptions", &BaseOptionProperty::clearOptions)
.def_property_readonly("size", &BaseOptionProperty::size)
.def_property("selectedIndex", &BaseOptionProperty::getSelectedIndex,
&BaseOptionProperty::setSelectedIndex)
.def_property("selectedIdentifier", &BaseOptionProperty::getSelectedIdentifier,
&BaseOptionProperty::setSelectedIdentifier)
.def_property("selectedDisplayName", &BaseOptionProperty::getSelectedDisplayName,
&BaseOptionProperty::setSelectedDisplayName)
.def("isSelectedIndex", &BaseOptionProperty::isSelectedIndex)
.def("isSelectedIdentifier", &BaseOptionProperty::isSelectedIdentifier)
.def("isSelectedDisplayName", &BaseOptionProperty::isSelectedDisplayName)
.def_property_readonly("identifiers", &BaseOptionProperty::getIdentifiers)
.def_property_readonly("displayName", &BaseOptionProperty::getDisplayNames);
using OptionPropetyTypes = std::tuple<double, float, int, std::string>;
using MinMaxPropertyTypes = std::tuple<float, double, size_t, glm::i64, int>;
using OrdinalPropetyTypes = std::tuple<float, int, size_t, glm::i64, double, vec2, vec3, vec4,
dvec2, dvec3, dvec4, ivec2, ivec3, ivec4, size2_t,
size3_t, size4_t, mat2, mat3, mat4, dmat2, dmat3, dmat4>;
util::for_each_type<OrdinalPropetyTypes>{}(OrdinalPropertyHelper{}, m);
util::for_each_type<OrdinalPropetyTypes>{}(OrdinalRefPropertyHelper{}, m);
util::for_each_type<OptionPropetyTypes>{}(OptionPropertyHelper{}, m);
util::for_each_type<MinMaxPropertyTypes>{}(MinMaxHelper{}, m);
PyPropertyClass<TransferFunctionProperty, Property>(m, "TransferFunctionProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
const TransferFunction& value, VolumeInport* volumeInport,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new TransferFunctionProperty(identifier, displayName, value, volumeInport,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("value"),
py::arg("inport") = nullptr,
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def_property("mask", &TransferFunctionProperty::getMask,
&TransferFunctionProperty::setMask)
.def_property("zoomH", &TransferFunctionProperty::getZoomH,
&TransferFunctionProperty::setZoomH)
.def_property("zoomV", &TransferFunctionProperty::getZoomV,
&TransferFunctionProperty::setZoomV)
.def("save",
[](TransferFunctionProperty* tf, std::string filename) { tf->get().save(filename); })
.def("load",
[](TransferFunctionProperty* tf, std::string filename) { tf->get().load(filename); })
.def("clear", [](TransferFunctionProperty& tp) { tp.get().clear(); })
.def_property(
"value",
py::cpp_function(
[](TransferFunctionProperty& tp) -> TransferFunction& { return tp.get(); },
py::return_value_policy::reference_internal),
py::overload_cast<const TransferFunction&>(&TransferFunctionProperty::set))
.def("add", [](TransferFunctionProperty& tp, double value,
const vec4& color) { tp.get().add(value, color); })
.def("add", [](TransferFunctionProperty& tp, const dvec2& pos) { tp.get().add(pos); })
.def("add", [](TransferFunctionProperty& tp, const TFPrimitiveData& v) { tp.get().add(v); })
.def("add", [](TransferFunctionProperty& tp,
const std::vector<TFPrimitiveData>& values) { tp.get().add(values); })
.def("setValues",
[](TransferFunctionProperty& tp, const std::vector<TFPrimitiveData>& values) {
tp.get().clear();
tp.get().add(values);
})
.def("getValues",
[](TransferFunctionProperty& tp) -> std::vector<TFPrimitiveData> {
return tp.get().get();
})
.def("__repr__", [](const TransferFunctionProperty& tp) {
std::ostringstream oss;
oss << "<TransferFunctionProperty: " << tp.get().size() << " TF points";
for (auto& p : tp.get()) {
oss << "\n " << p.getPosition() << ", " << color::rgba2hex(p.getColor());
}
oss << ">";
return oss.str();
});
PyPropertyClass<IsoValueProperty>(m, "IsoValueProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
const IsoValueCollection& value, VolumeInport* volumeInport,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new IsoValueProperty(identifier, displayName, value, volumeInport,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("value"),
py::arg("inport") = nullptr,
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def_property("zoomH", &IsoValueProperty::getZoomH, &IsoValueProperty::setZoomH)
.def_property("zoomV", &IsoValueProperty::getZoomV, &IsoValueProperty::setZoomV)
.def("save", [](IsoValueProperty* ivp, std::string filename) { ivp->get().save(filename); })
.def("load", [](IsoValueProperty* ivp, std::string filename) { ivp->get().load(filename); })
.def("clear", [](IsoValueProperty& ivp) { ivp.get().clear(); })
.def_property(
"value",
py::cpp_function([](IsoValueProperty& tp) -> IsoValueCollection& { return tp.get(); },
py::return_value_policy::reference_internal),
py::overload_cast<const IsoValueCollection&>(&IsoValueProperty::set))
.def("add", [](IsoValueProperty& ivp, double value,
const vec4& color) { ivp.get().add(value, color); })
.def("add", [](IsoValueProperty& ivp, const dvec2& pos) { ivp.get().add(pos); })
.def("add", [](IsoValueProperty& ivp, const TFPrimitiveData& v) { ivp.get().add(v); })
.def("add", [](IsoValueProperty& ivp,
const std::vector<TFPrimitiveData>& values) { ivp.get().add(values); })
.def("setValues",
[](IsoValueProperty& ivp, const std::vector<TFPrimitiveData>& values) {
ivp.get().clear();
ivp.get().add(values);
})
.def("getValues",
[](IsoValueProperty& ivp) -> std::vector<TFPrimitiveData> { return ivp.get().get(); })
.def("__repr__", [](const IsoValueProperty& ivp) {
std::ostringstream oss;
oss << "<IsoValueProperty: " << ivp.get().size() << " isovalues";
for (auto& p : ivp.get()) {
oss << "\n " << p.getPosition() << ", " << color::rgba2hex(p.getColor());
}
oss << ">";
return oss.str();
});
PyPropertyClass<IsoTFProperty>(m, "IsoTFProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
const IsoValueCollection& isovalues, const TransferFunction& tf,
VolumeInport* volumeInport, InvalidationLevel invalidationLevel,
PropertySemantics semantics) {
return new IsoTFProperty(identifier, displayName, isovalues, tf, volumeInport,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("isovalues"), py::arg("tf"),
py::arg("inport") = nullptr,
py::arg("invalidationLevel") = InvalidationLevel::InvalidResources,
py::arg("semantics") = PropertySemantics::Default)
.def(py::init([](const std::string& identifier, const std::string& displayName,
VolumeInport* volumeInport, InvalidationLevel invalidationLevel,
PropertySemantics semantics) {
return new IsoTFProperty(identifier, displayName, volumeInport, invalidationLevel,
semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("inport"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidResources,
py::arg("semantics") = PropertySemantics::Default)
.def_property_readonly(
"isovalues",
py::cpp_function([](IsoTFProperty& tp) -> IsoValueProperty& { return tp.isovalues_; },
py::return_value_policy::reference_internal))
.def_property_readonly(
"tf",
py::cpp_function([](IsoTFProperty& tp) -> TransferFunctionProperty& { return tp.tf_; },
py::return_value_policy::reference_internal))
.def_property("mask", &IsoTFProperty::getMask, &IsoTFProperty::setMask)
.def_property("zoomH", &IsoTFProperty::getZoomH, &IsoTFProperty::setZoomH)
.def_property("zoomV", &IsoTFProperty::getZoomV, &IsoTFProperty::setZoomV);
PyPropertyClass<StringProperty, Property> strProperty(m, "StringProperty");
strProperty.def(py::init([](const std::string& identifier, const std::string& displayName,
const std::string& value, InvalidationLevel invalidationLevel,
PropertySemantics semantics) {
return new StringProperty(identifier, displayName, value, invalidationLevel,
semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("value") = "",
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default);
pyTemplateProperty<std::string, StringProperty>(strProperty);
py::enum_<AcceptMode>(m, "AcceptMode")
.value("Open", AcceptMode::Open)
.value("Save", AcceptMode::Save);
py::enum_<FileMode>(m, "FileMode")
.value("AnyFile", FileMode::AnyFile)
.value("ExistingFile", FileMode::ExistingFile)
.value("Directory", FileMode::Directory)
.value("ExistingFiles", FileMode::ExistingFiles)
.value("DirectoryOnly", FileMode::DirectoryOnly);
py::class_<FileExtension>(m, "FileExtension")
.def(py::init<>())
.def(py::init<std::string, std::string>(), py::arg("ext"), py::arg("desc"))
.def("toString", &FileExtension::toString)
.def("empty", &FileExtension::empty)
.def("matchesAll", &FileExtension::matchesAll)
.def("matches", &FileExtension::matches)
.def_static("all", &FileExtension::all)
.def_readwrite("extension", &FileExtension::extension_)
.def_readwrite("description", &FileExtension::description_);
PyPropertyClass<FileProperty, Property> fileProperty(m, "FileProperty");
fileProperty
.def(py::init([](const std::string& identifier, const std::string& displayName,
const std::string& value, const std::string& contentType,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new FileProperty(identifier, displayName, value, contentType,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("value") = "",
py::arg("contentType") = "default",
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def("requestFile", &FileProperty::requestFile)
.def("addNameFilter",
static_cast<void (FileProperty::*)(std::string)>(&FileProperty::addNameFilter))
.def("addNameFilter",
static_cast<void (FileProperty::*)(FileExtension)>(&FileProperty::addNameFilter))
.def("clearNameFilters", &FileProperty::clearNameFilters)
.def("getNameFilters", &FileProperty::getNameFilters)
.def_property("acceptMode", &FileProperty::getAcceptMode, &FileProperty::setAcceptMode)
.def_property("fileMode", &FileProperty::getFileMode, &FileProperty::setFileMode)
.def_property("contentType", &FileProperty::getContentType, &FileProperty::setContentType)
.def_property("selectedExtension", &FileProperty::getSelectedExtension,
&FileProperty::setSelectedExtension);
pyTemplateProperty<std::string, FileProperty>(fileProperty);
PyPropertyClass<DirectoryProperty, FileProperty> dirProperty(m, "DirectoryProperty");
dirProperty.def(py::init([](const std::string& identifier, const std::string& displayName,
const std::string& value, const std::string& contentType,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new DirectoryProperty(identifier, displayName, value, contentType,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("value") = "",
py::arg("contentType") = "default",
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default);
pyTemplateProperty<std::string, DirectoryProperty>(dirProperty);
PyPropertyClass<FilePatternProperty, CompositeProperty>(m, "FilePatternProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
const std::string& pattern, const std::string& directory,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new FilePatternProperty(identifier, displayName, pattern, directory,
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("pattern") = "",
py::arg("directory") = "",
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def_property_readonly("filePattern", &FilePatternProperty::getFilePattern)
.def_property_readonly("filePatternPath", &FilePatternProperty::getFilePatternPath)
.def_property_readonly("fileList", &FilePatternProperty::getFileList)
.def_property_readonly("fileIndices", &FilePatternProperty::getFileIndices)
.def_property_readonly("outOfRangeMatches", &FilePatternProperty::hasOutOfRangeMatches)
.def_property_readonly("rangeSelection", &FilePatternProperty::hasRangeSelection)
.def_property_readonly("range",
[](FilePatternProperty* p) {
return std::make_tuple(p->getMinRange(), p->getMaxRange());
})
.def_property("selectedExtension", &FilePatternProperty::getSelectedExtension,
&FilePatternProperty::setSelectedExtension)
.def("addNameFilter", static_cast<void (FilePatternProperty::*)(std::string)>(
&FilePatternProperty::addNameFilter))
.def("addNameFilter", static_cast<void (FilePatternProperty::*)(FileExtension)>(
&FilePatternProperty::addNameFilter))
.def("clearNameFilters", &FilePatternProperty::clearNameFilters);
PyPropertyClass<BoolProperty, Property> boolProperty(m, "BoolProperty");
boolProperty
.def(py::init([](const std::string& identifier, const std::string& displayName, bool value,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new BoolProperty(identifier, displayName, value, invalidationLevel,
semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("value") = false,
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def("__bool__", &BoolProperty::get);
pyTemplateProperty<bool, BoolProperty>(boolProperty);
PyPropertyClass<ButtonProperty, Property>(m, "ButtonProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new ButtonProperty(identifier, displayName, invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def(py::init([](const std::string& identifier, const std::string& displayName,
std::function<void()> action, InvalidationLevel invalidationLevel,
PropertySemantics semantics) {
return new ButtonProperty(identifier, displayName, action, invalidationLevel,
semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("action"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def("press", &ButtonProperty::pressButton);
py::class_<ButtonGroupProperty::Button>(m, "ButtonGroupPropertyButton")
.def(py::init<std::optional<std::string>, std::optional<std::string>,
std::optional<std::string>, std::function<void()>>());
PyPropertyClass<ButtonGroupProperty, Property>(m, "ButtonGroupProperty")
.def(py::init([](const std::string& identifier, const std::string& displayName,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new ButtonGroupProperty(identifier, displayName, invalidationLevel,
semantics);
}),
py::arg("identifier"), py::arg("displayName"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def(py::init([](const std::string& identifier, const std::string& displayName,
std::vector<ButtonGroupProperty::Button> buttons,
InvalidationLevel invalidationLevel, PropertySemantics semantics) {
return new ButtonGroupProperty(identifier, displayName, std::move(buttons),
invalidationLevel, semantics);
}),
py::arg("identifier"), py::arg("displayName"), py::arg("buttons"),
py::arg("invalidationLevel") = InvalidationLevel::InvalidOutput,
py::arg("semantics") = PropertySemantics::Default)
.def("press", &ButtonGroupProperty::pressButton);
}
} // namespace inviwo
|
#include "lua.h"
#include "../../include/lua/lua.hpp"
#include "../assets/assets.h"
#include "../assets/scene.h"
#include "../gameobject/gameobject.h"
#include "../gameobject/geometry.h"
#include "../gameobject/material.h"
#include "../gameobject/camera.h"
#include "../gameobject/rigidbody.h"
#include "../colliders/boxcollider.h"
#include "../gameobject/particle_system.h"
#include "../gameobject/script.h"
#include "../controller/camera_rts.h"
#include "../controller/camera_facing.h"
#include "../geometry/mesh.h"
#include "../gameobject/uipanel.h"
#include <sstream>
#include <QTime>
namespace LuaLib {
/// Print every Lua argument to the Qt info log.
/// Each value is rendered according to its Lua type and values are
/// separated by single spaces. Returns nothing to Lua.
int Print(void * state) {
    lua_State * L = (lua_State *)state;
    const int argc = lua_gettop(L);
    std::stringstream out;
    for (int i = 1; i <= argc; ++i) {
        switch (lua_type(L, i)) {
            case LUA_TNIL:
                out << "nil";
                break;
            case LUA_TBOOLEAN:
                out << (lua_toboolean(L, i) ? "true" : "false");
                break;
            case LUA_TNUMBER:
                out << (float)lua_tonumber(L, i);
                break;
            case LUA_TSTRING:
                out << "\"" << lua_tostring(L, i) << "\"";
                break;
            case LUA_TTABLE:
                out << "array";
                break;
            case LUA_TFUNCTION:
                out << "function";
                break;
            case LUA_TLIGHTUSERDATA:
                out << (void *)lua_topointer(L, i);
                break;
            default:
                out << "unknow";
        }
        if (i < argc)
            out << " ";
    }
    qInfo() << out.str().c_str();
    return 0;
}
/// Split a string on a separator character.
///
/// Parameters :
/// - string to split
/// - separator string, first character used (optional, default ' ')
///
/// Returns every piece (empty pieces included) or nil when no string
/// argument was supplied.
int Split(void * state) {
    lua_State * L = (lua_State *)state;
    const int argc = lua_gettop(L);
    if (argc < 1) {
        lua_pushnil(L);
        return 1;
    }
    const std::string input = lua_tostring(L, 1);
    char sep = ' ';
    if (argc > 1)
        sep = lua_tostring(L, 2)[0];
    int pushed = 0;
    std::string piece;
    for (char c : input) {
        if (c == sep) {
            // Separator reached: emit the accumulated piece, start a new one.
            ++pushed;
            lua_pushstring(L, piece.c_str());
            piece.clear();
        } else {
            piece += c;
        }
    }
    // The tail after the last separator (or the whole string) is always pushed.
    lua_pushstring(L, piece.c_str());
    return pushed + 1;
}
/// Get a prefab by name
///
/// Parameters :
/// - prefab asset name
///
/// Return prefab pointer if success and nil otherwise
int GetPrefab(void * state) {
    lua_State * L = (lua_State *)state;
    if (lua_gettop(L) < 1) {
        lua_pushnil(L);
        return 1;
    }
    Asset * asset = Asset::Find(lua_tostring(L, 1));
    // Missing asset or an asset that holds no GameObject both yield nil.
    GameObject * prefab = asset ? asset->getData<GameObject>() : 0;
    if (prefab)
        lua_pushlightuserdata(L, (void *)prefab);
    else
        lua_pushnil(L);
    return 1;
}
/// Get time in seconds
int GetTime(void * state) {
lua_pushnumber((lua_State *)state, (float)Scene::startedTime.msecsTo(QTime::currentTime()) * 0.001f);
return 1;
}
////////////////////////
////// GAMEOBJECT //////
////////////////////////
/// Add a component to an object
///
/// Parameters :
/// - asset name / gameobject pointer
/// - component name
///
/// Return asset pointer if success and nil otherwise
int GameObject_AddComponent(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushnil(L);
return 1;
}
GameObject * gm = 0;
if (lua_type(L, 1) == LUA_TSTRING) {
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<GameObject>()) {
lua_pushnil(L);
return 1;
}
gm = asset->getData<GameObject>();
}
else
gm = (GameObject *)lua_topointer(L, 1);
std::string comp = lua_tostring(L, 2);
void * result = 0;
if (comp == "Transform")
result = (void *)gm->addComponent<Transform>();
else if (comp == "Geometry")
result = (void *)gm->addComponent<Geometry>();
else if (comp == "Material")
result = (void *)gm->addComponent<Material>();
else if (comp == "Camera")
result = (void *)gm->addComponent<Camera>();
else if (comp == "CameraRTSController")
result = (void *)gm->addComponent<CameraRTSController>();
else if (comp == "CameraFacingController")
result = (void *)gm->addComponent<CameraFacingController>();
else if (comp == "Rigidbody")
result = (void *)gm->addComponent<Rigidbody>();
else if (comp == "BoxCollider")
result = (void *)gm->addComponent<BoxCollider>();
else if (comp == "ParticleSystem")
result = (void *)gm->addComponent<ParticleSystem>();
else if (comp == "UIPanel")
result = (void *)gm->addComponent<UIPanel>();
else if (comp == "Script")
result = (void *)gm->addComponent<ScriptComponent>();
else {
Asset * scpt = Asset::Find(comp.c_str());
if (scpt == 0 || scpt->getData<std::string>() == 0) {
lua_pushnil(L);
return 1;
}
result = (void *)gm->addComponent<ScriptComponent>();
if (((ScriptComponent *)result)->assign(comp.c_str())) {
lua_pushnil(L);
return 1;
}
}
if (result == 0)
lua_pushnil(L);
else
lua_pushlightuserdata(L, result);
return 1;
}
/// Get a component of an object
///
/// Parameters :
/// - asset name / gameobject pointer
/// - component name (built-in component, "Script", or a script name)
///
/// Return component pointer if success and nil otherwise
int GameObject_GetComponent(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 2) {
        lua_pushnil(L);
        return 1;
    }
    GameObject * gm = 0;
    if (lua_type(L, 1) == LUA_TSTRING) {
        Asset * asset = Asset::Find(lua_tostring(L, 1));
        // BUG FIX: the check used to be `asset->getData<GameObject>()`,
        // which rejected the asset exactly when the lookup SUCCEEDED.
        // Reject only when the asset is missing or holds no GameObject.
        if (asset == 0 || asset->getData<GameObject>() == 0) {
            lua_pushnil(L);
            return 1;
        }
        gm = asset->getData<GameObject>();
    }
    else
        gm = (GameObject *)lua_topointer(L, 1);
    std::string comp = lua_tostring(L, 2);
    void * result = 0;
    if (comp == "Transform")
        result = (void *)gm->getComponent<Transform>();
    else if (comp == "Geometry")
        result = (void *)gm->getComponent<Geometry>();
    else if (comp == "Material")
        result = (void *)gm->getComponent<Material>();
    else if (comp == "Camera")
        result = (void *)gm->getComponent<Camera>();
    // CONSISTENCY FIX: AddComponent registers this component under
    // "CameraRTSController"; accept that name here too (the historical
    // "RTSCameraController" spelling keeps working).
    else if (comp == "RTSCameraController" || comp == "CameraRTSController")
        result = (void *)gm->getComponent<CameraRTSController>();
    else if (comp == "CameraFacingController")
        result = (void *)gm->getComponent<CameraFacingController>();
    else if (comp == "Rigidbody")
        result = (void *)gm->getComponent<Rigidbody>();
    else if (comp == "BoxCollider")
        result = (void *)gm->getComponent<BoxCollider>();
    else if (comp == "ParticleSystem")
        result = (void *)gm->getComponent<ParticleSystem>();
    else if (comp == "UIPanel")
        result = (void *)gm->getComponent<UIPanel>();
    else if (comp == "Script")
        result = (void *)gm->getComponent<ScriptComponent>();
    else {
        // Unknown name: look for an attached script with that name.
        for (auto scpt : gm->getComponents<ScriptComponent>()) {
            if (scpt->getScriptName() == comp) {
                result = (void *)scpt;
                break;
            }
        }
    }
    if (result == 0)
        lua_pushnil(L);
    else
        lua_pushlightuserdata(L, result);
    return 1;
}
/// Instanciate a gameobject
///
/// Parameters :
/// - prefab asset name / prefab pointer
/// - parent gameobject pointer (optional)
///
/// Return gameobject pointer if success and nil otherwise
int GameObject_Instanciate(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
GameObject * gm = 0;
if (lua_type(L, 1) == LUA_TSTRING) {
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<GameObject>() == 0) {
lua_pushnil(L);
return 1;
}
gm = asset->getData<GameObject>();
}
else
gm = (GameObject *)lua_topointer(L, 1);
GameObject * result = new GameObject();
gm->clone(result);
if (argc < 2)
Scene::main->addGameObject(result);
else
((GameObject *)lua_topointer(L, 2))->addChild(result);
lua_pushlightuserdata(L, (void *)result);
return 1;
}
/// Copy a gameobject
///
/// Parameters :
/// - gameobject pointer to assign
/// - gameobject pointer to copy
///
/// Return true if success and false otherwise
int GameObject_Copy(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushboolean(L, 0);
return 1;
}
GameObject * gm_src = (GameObject *)lua_topointer(L, 1);
GameObject * gm_dest = (GameObject *)lua_topointer(L, 2);
gm_dest->clone(gm_src);
lua_pushboolean(L, 1);
return 1;
}
/// Find a gameobject on scene root
///
/// Parameters :
/// - gameobject name
///
/// Return gameobject pointer if success and nil otherwise
int GameObject_Find(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
GameObject * gm = Scene::main->findGameObject(lua_tostring(L, 1));
if (gm == 0)
lua_pushnil(L);
else
lua_pushlightuserdata(L, (void *)gm);
return 1;
}
/// Add a child in a gameobject
///
/// Parameters :
/// - gameobject pointer
/// - gameobject child pointer / gameobject name
///
/// Return gameobject child pointer if success and nil otherwise
int GameObject_AddChild(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushnil(L);
return 1;
}
GameObject * gm = (GameObject *)lua_topointer(L, 1);
if (gm == 0) {
lua_pushnil(L);
return 1;
}
GameObject * result = 0;
if (lua_type(L, 2) == LUA_TSTRING) {
result = new GameObject();
result->name = lua_tostring(L, 2);
gm->addChild(result);
}
else {
result = (GameObject *)lua_topointer(L, 2);
gm->addChild(result);
}
lua_pushlightuserdata(L, (void *)result);
return 1;
}
/// Get a child in a gameobject
///
/// Parameters :
/// - gameobject pointer
/// - gameobject name
///
/// Return gameobject child pointer if success and nil otherwise
int GameObject_GetChild(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 2) {
        lua_pushnil(L);
        return 1;
    }
    GameObject * gm = (GameObject *)lua_topointer(L, 1);
    if (gm == 0) {
        lua_pushnil(L);
        return 1;
    }
    GameObject * result = gm->getChild(lua_tostring(L, 2));
    // BUG FIX: a missing child used to be returned as a NULL light
    // userdata, which is truthy in Lua; return nil as documented instead.
    if (result == 0)
        lua_pushnil(L);
    else
        lua_pushlightuserdata(L, (void *)result);
    return 1;
}
/// Get children in a gameobject
///
/// Parameters :
/// - gameobject pointer
/// - gameobjects name
///
/// Return gameobject children pointers if success and nil otherwise
int GameObject_GetChildren(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushnil(L);
return 1;
}
GameObject * gm = (GameObject *)lua_topointer(L, 1);
if (gm == 0) {
lua_pushnil(L);
return 1;
}
std::vector<GameObject *> result = gm->getChildren(lua_tostring(L, 2));
for (int i = 0; i < result.size(); i++)
lua_pushlightuserdata(L, (void *)result[i]);
return (int)result.size();
}
/// Get gameobject name
///
/// Parameters :
/// - asset name / gameobject pointer
///
/// Return name if success and nil otherwise
int GameObject_GetName(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
GameObject * gm = 0;
if (lua_type(L, 1) == LUA_TSTRING) {
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<GameObject>() == 0) {
lua_pushnil(L);
return 1;
}
gm = asset->getData<GameObject>();
}
else
gm = (GameObject *)lua_topointer(L, 1);
lua_pushstring(L, gm->name.c_str());
return 1;
}
/// Set gameobject name
///
/// Parameters :
/// - asset name / gameobject pointer
/// - name
///
/// Return true if success and false otherwise
int GameObject_SetName(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 2) {
        lua_pushboolean(L, 0);
        return 1;
    }
    GameObject * gm = 0;
    if (lua_type(L, 1) == LUA_TSTRING) {
        Asset * asset = Asset::Find(lua_tostring(L, 1));
        if (asset == 0 || asset->getData<GameObject>() == 0) {
            // CONSISTENCY FIX: this failure path used to push nil although the
            // documented contract is boolean. Both values are falsy in Lua,
            // so truthiness-checking callers are unaffected.
            lua_pushboolean(L, 0);
            return 1;
        }
        gm = asset->getData<GameObject>();
    }
    else
        gm = (GameObject *)lua_topointer(L, 1);
    gm->name = lua_tostring(L, 2);
    lua_pushboolean(L, 1);
    return 1;
}
/// Get gameobject position
///
/// Parameters :
/// - asset name / gameobject pointer
///
/// Return x, y, z if success and nil otherwise
int GameObject_GetPosition(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
GameObject * gm = 0;
if (lua_type(L, 1) == LUA_TSTRING) {
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<GameObject>() == 0) {
lua_pushnil(L);
return 1;
}
gm = asset->getData<GameObject>();
}
else
gm = (GameObject *)lua_topointer(L, 1);
QVector3D pos = gm->transform().position();
lua_pushnumber(L, pos.x());
lua_pushnumber(L, pos.y());
lua_pushnumber(L, pos.z());
return 3;
}
/// Get gameobject rotation
///
/// Parameters :
/// - asset name / gameobject pointer
///
/// Return x, y, z if success and nil otherwise
int GameObject_GetRotation(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
GameObject * gm = 0;
if (lua_type(L, 1) == LUA_TSTRING) {
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<GameObject>() == 0) {
lua_pushnil(L);
return 1;
}
gm = asset->getData<GameObject>();
}
else
gm = (GameObject *)lua_topointer(L, 1);
QQuaternion rot = gm->transform().rotation();
lua_pushnumber(L, rot.x());
lua_pushnumber(L, rot.y());
lua_pushnumber(L, rot.z());
return 3;
}
/// Get gameobject scale
///
/// Parameters :
/// - asset name / gameobject pointer
///
/// Return x, y, z if success and nil otherwise
int GameObject_GetScale(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
GameObject * gm = 0;
if (lua_type(L, 1) == LUA_TSTRING) {
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<GameObject>() == 0) {
lua_pushnil(L);
return 1;
}
gm = asset->getData<GameObject>();
}
else
gm = (GameObject *)lua_topointer(L, 1);
QVector3D scale = gm->transform().scale();
lua_pushnumber(L, scale.x());
lua_pushnumber(L, scale.y());
lua_pushnumber(L, scale.z());
return 3;
}
/// Set gameobject position
///
/// Parameters :
/// - asset name / gameobject pointer
/// - x coordinate
/// - y coordinate
/// - z coordinate
///
/// Return true if success and false otherwise
int GameObject_SetPosition(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 4) {
        lua_pushboolean(L, 0);
        return 1;
    }
    GameObject * gm = 0;
    if (lua_type(L, 1) == LUA_TSTRING) {
        Asset * asset = Asset::Find(lua_tostring(L, 1));
        if (asset == 0 || asset->getData<GameObject>() == 0) {
            // CONSISTENCY FIX: this failure path used to push nil although the
            // documented contract is boolean. Both values are falsy in Lua.
            lua_pushboolean(L, 0);
            return 1;
        }
        gm = asset->getData<GameObject>();
    }
    else
        gm = (GameObject *)lua_topointer(L, 1);
    gm->transform().setPosition(QVector3D(lua_tonumber(L, 2), lua_tonumber(L, 3), lua_tonumber(L, 4)));
    lua_pushboolean(L, 1);
    return 1;
}
/// Set gameobject rotation
///
/// Parameters :
/// - asset name / gameobject pointer
/// - x Euler angle
/// - y Euler angle
/// - z Euler angle
///
/// Return true if success and false otherwise
int GameObject_SetRotation(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 4) {
        lua_pushboolean(L, 0);
        return 1;
    }
    GameObject * gm = 0;
    if (lua_type(L, 1) == LUA_TSTRING) {
        Asset * asset = Asset::Find(lua_tostring(L, 1));
        if (asset == 0 || asset->getData<GameObject>() == 0) {
            // CONSISTENCY FIX: this failure path used to push nil although the
            // documented contract is boolean. Both values are falsy in Lua.
            lua_pushboolean(L, 0);
            return 1;
        }
        gm = asset->getData<GameObject>();
    }
    else
        gm = (GameObject *)lua_topointer(L, 1);
    // The three numbers are interpreted as Euler angles (degrees, per Qt).
    gm->transform().setRotation(QQuaternion::fromEulerAngles(QVector3D(lua_tonumber(L, 2), lua_tonumber(L, 3), lua_tonumber(L, 4))));
    lua_pushboolean(L, 1);
    return 1;
}
/// Set gameobject scale
///
/// Parameters :
/// - asset name / gameobject pointer
/// - x scale factor
/// - y scale factor
/// - z scale factor
///
/// Return true if success and false otherwise
int GameObject_SetScale(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 4) {
        lua_pushboolean(L, 0);
        return 1;
    }
    GameObject * gm = 0;
    if (lua_type(L, 1) == LUA_TSTRING) {
        Asset * asset = Asset::Find(lua_tostring(L, 1));
        if (asset == 0 || asset->getData<GameObject>() == 0) {
            // CONSISTENCY FIX: this failure path used to push nil although the
            // documented contract is boolean. Both values are falsy in Lua.
            lua_pushboolean(L, 0);
            return 1;
        }
        gm = asset->getData<GameObject>();
    }
    else
        gm = (GameObject *)lua_topointer(L, 1);
    gm->transform().setScale(QVector3D(lua_tonumber(L, 2), lua_tonumber(L, 3), lua_tonumber(L, 4)));
    lua_pushboolean(L, 1);
    return 1;
}
///////////////////////
////// COMPONENT //////
///////////////////////
/// Get gameobject of a component
///
/// Parameters :
/// - component pointer
///
/// Return gameobject if success and nil otherwise
int Component_GetGameObject(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushnil(L);
return 1;
}
Component * comp = (Component *)lua_topointer(L, 1);
lua_pushlightuserdata(L, (void *)&comp->gameObject());
return 1;
}
//////////////////////
////// GEOMETRY //////
//////////////////////
/// Assign mesh geometry
///
/// Parameters :
/// - geometry pointer
/// - mesh asset name
///
/// Return true if success and false otherwise
int Geometry_Assign(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushboolean(L, 0);
return 1;
}
Geometry * geometry = (Geometry *)lua_topointer(L, 1);
if (geometry == 0 || !geometry->assignMesh(lua_tostring(L, 2)))
lua_pushboolean(L, 0);
else
lua_pushboolean(L, 1);
return 1;
}
//////////////////////
////// MATERIAL //////
//////////////////////
/// Assign texture material
///
/// Parameters :
/// - material pointer
/// - texture asset name
///
/// Return true if success and false otherwise
int Material_Assign(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushboolean(L, 0);
return 1;
}
Material * mat = (Material *)lua_topointer(L, 1);
if (mat == 0 || !mat->assignTexture(lua_tostring(L, 2)))
lua_pushboolean(L, 0);
else
lua_pushboolean(L, 1);
return 1;
}
////////////////////
////// CAMERA //////
////////////////////
/// Get main camera
int Camera_GetMain(void * state) {
lua_pushlightuserdata((lua_State *)state, (void *)Camera::mainCamera);
return 1;
}
/// Get camera ray by screen coordinates
///
/// Parameters :
/// - x coordinate
/// - y coordinate
///
/// Return x, y, z, dir_x, dir_y, dir_z if success and nil otherwise
int Camera_GetRay(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushboolean(L, 0);
return 1;
}
Ray ray = Camera::mainCamera->getRay((int)lua_tonumber(L, 1), (int)lua_tonumber(L, 2));
lua_pushnumber(L, ray.origin.x());
lua_pushnumber(L, ray.origin.y());
lua_pushnumber(L, ray.origin.z());
lua_pushnumber(L, ray.direction.x());
lua_pushnumber(L, ray.direction.y());
lua_pushnumber(L, ray.direction.z());
return 6;
}
///////////////////////
////// RIGIDBODY //////
///////////////////////
/// Get rigidbody gravity
///
/// Parameters :
/// - rigidbody pointer
///
/// Return x, y, z if success and nil otherwise
int Rigidbody_GetGravity(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 1) {
        lua_pushnil(L);
        return 1;
    }
    Rigidbody * rb = (Rigidbody *)lua_topointer(L, 1);
    // Bug fix: the original pushed nil but fell through and dereferenced the
    // null pointer anyway (and would have returned 3 with a corrupt stack).
    if (rb == 0) {
        lua_pushnil(L);
        return 1;
    }
    lua_pushnumber(L, rb->gravity.x());
    lua_pushnumber(L, rb->gravity.y());
    lua_pushnumber(L, rb->gravity.z());
    return 3;
}
/// Set rigidbody gravity
///
/// Parameters :
/// - rigidbody pointer
/// - x coordinate
/// - y coordinate
/// - z coordinate
///
/// Return true if success and false otherwise
int Rigidbody_SetGravity(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 4) {
        lua_pushboolean(L, 0);
        return 1;
    }
    Rigidbody * rb = (Rigidbody *)lua_topointer(L, 1);
    // Bug fix: the original pushed false but still dereferenced the null
    // pointer below; return immediately instead.
    if (rb == 0) {
        lua_pushboolean(L, 0);
        return 1;
    }
    rb->gravity.setX(lua_tonumber(L, 2));
    rb->gravity.setY(lua_tonumber(L, 3));
    rb->gravity.setZ(lua_tonumber(L, 4));
    lua_pushboolean(L, 1);
    return 1;
}
//////////////////////////
////// BOX COLLIDER //////
//////////////////////////
/// Get box collider offset
///
/// Parameters :
/// - box collider pointer
///
/// Return x, y, z if success and nil otherwise
int BoxCollider_GetOffset(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 1) {
        lua_pushnil(L);
        return 1;
    }
    BoxCollider * bc = (BoxCollider *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing nil caused a null dereference.
    if (bc == 0) {
        lua_pushnil(L);
        return 1;
    }
    lua_pushnumber(L, bc->offset.x());
    lua_pushnumber(L, bc->offset.y());
    lua_pushnumber(L, bc->offset.z());
    return 3;
}
/// Set box collider offset
///
/// Parameters :
/// - box collider pointer
/// - x coordinate
/// - y coordinate
/// - z coordinate
///
/// Return true if success and false otherwise
int BoxCollider_SetOffset(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 4) {
        lua_pushboolean(L, 0);
        return 1;
    }
    BoxCollider * bc = (BoxCollider *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing false caused a null dereference.
    if (bc == 0) {
        lua_pushboolean(L, 0);
        return 1;
    }
    bc->offset.setX(lua_tonumber(L, 2));
    bc->offset.setY(lua_tonumber(L, 3));
    bc->offset.setZ(lua_tonumber(L, 4));
    lua_pushboolean(L, 1);
    return 1;
}
/// Get box collider size
///
/// Parameters :
/// - box collider pointer
///
/// Return x, y, z if success and nil otherwise
int BoxCollider_GetSize(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 1) {
        lua_pushnil(L);
        return 1;
    }
    BoxCollider * bc = (BoxCollider *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing nil caused a null dereference.
    if (bc == 0) {
        lua_pushnil(L);
        return 1;
    }
    lua_pushnumber(L, bc->size.x());
    lua_pushnumber(L, bc->size.y());
    lua_pushnumber(L, bc->size.z());
    return 3;
}
/// Set box collider size
///
/// Parameters :
/// - box collider pointer
/// - x coordinate
/// - y coordinate
/// - z coordinate
///
/// Return true if success and false otherwise
int BoxCollider_SetSize(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 4) {
        lua_pushboolean(L, 0);
        return 1;
    }
    BoxCollider * bc = (BoxCollider *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing false caused a null dereference.
    if (bc == 0) {
        lua_pushboolean(L, 0);
        return 1;
    }
    bc->size.setX(lua_tonumber(L, 2));
    bc->size.setY(lua_tonumber(L, 3));
    bc->size.setZ(lua_tonumber(L, 4));
    lua_pushboolean(L, 1);
    return 1;
}
/////////////////////////////
////// PARTICLE SYSTEM //////
/////////////////////////////
/// Assign particule emitter
///
/// Parameters :
/// - material pointer
/// - mesh asset name
///
/// Return true if success and false otherwise
int ParticleSystem_AssignEmitter(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushboolean(L, 0);
return 1;
}
ParticleSystem * ps = (ParticleSystem *)lua_topointer(L, 1);
if (ps == 0 || !ps->assignEmitter(lua_tostring(L, 2)))
lua_pushboolean(L, 0);
else
lua_pushboolean(L, 1);
return 1;
}
/// Assign particule prefab
///
/// Parameters :
/// - material pointer
/// - prefab asset name
///
/// Return true if success and false otherwise
int ParticleSystem_AssignParticule(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushboolean(L, 0);
return 1;
}
ParticleSystem * ps = (ParticleSystem *)lua_topointer(L, 1);
if (ps == 0 || !ps->assignParticle(lua_tostring(L, 2)))
lua_pushboolean(L, 0);
else
lua_pushboolean(L, 1);
return 1;
}
/// Get particle duration
///
/// Parameters :
/// - particle system pointer
///
/// Return duration if success and nil otherwise
int ParticleSystem_GetParticuleDuration(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 1) {
        lua_pushnil(L);
        return 1;
    }
    ParticleSystem * ps = (ParticleSystem *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing nil caused a null dereference.
    if (ps == 0) {
        lua_pushnil(L);
        return 1;
    }
    lua_pushnumber(L, ps->ParticleDuration);
    return 1;
}
/// Get particle frequency
///
/// Parameters :
/// - particle system pointer
///
/// Return frequency if success and nil otherwise
int ParticleSystem_GetParticuleFrequency(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 1) {
        lua_pushnil(L);
        return 1;
    }
    ParticleSystem * ps = (ParticleSystem *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing nil caused a null dereference.
    if (ps == 0) {
        lua_pushnil(L);
        return 1;
    }
    lua_pushnumber(L, ps->ParticleFrequency);
    return 1;
}
/// Set particle duration
///
/// Parameters :
/// - particle system pointer
/// - duration
///
/// Return true if success and false otherwise
int ParticleSystem_SetParticuleDuration(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 2) {
        lua_pushboolean(L, 0);
        return 1;
    }
    ParticleSystem * ps = (ParticleSystem *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing false caused a null dereference.
    if (ps == 0) {
        lua_pushboolean(L, 0);
        return 1;
    }
    ps->ParticleDuration = lua_tonumber(L, 2);
    lua_pushboolean(L, 1);
    return 1;
}
/// Set particle frequency
///
/// Parameters :
/// - particle system pointer
/// - frequency
///
/// Return true if success and false otherwise
int ParticleSystem_SetParticuleFrequency(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 2) {
        lua_pushboolean(L, 0);
        return 1;
    }
    ParticleSystem * ps = (ParticleSystem *)lua_topointer(L, 1);
    // Bug fix: missing return after pushing false caused a null dereference.
    if (ps == 0) {
        lua_pushboolean(L, 0);
        return 1;
    }
    ps->ParticleFrequency = lua_tonumber(L, 2);
    lua_pushboolean(L, 1);
    return 1;
}
////////////////////
////// SCRIPT //////
////////////////////
/// Get script variable
///
/// Parameters :
/// - script pointer
/// - variable name
///
/// Return variable if success and nil otherwise
int Script_GetVariable(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushnil(L);
return 1;
}
ScriptComponent * scpt = (ScriptComponent *)lua_topointer(L, 1);
if (scpt == 0) {
lua_pushnil(L);
return 1;
}
LuaScript lscpt_dest(state);
lscpt_dest.pushVariable(scpt->script.getVariable(lua_tostring(L, 2)));
return 1;
}
/// Get script variable
///
/// Parameters :
/// - script pointer
/// - function name
///
/// Return function return if success and nil otherwise
int Script_CallFunction(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 2) {
lua_pushnil(L);
return 1;
}
ScriptComponent * scpt = (ScriptComponent *)lua_topointer(L, 1);
if (scpt == 0) {
lua_pushnil(L);
return 1;
}
LuaScript lscpt_dest(state);
std::vector<LuaScript::Variable> args;
for (int i = 0, sz = argc - 2; i < sz; i++)
args.push_back(lscpt_dest.getVariable(i + 3));
std::vector<LuaScript::Variable> results = scpt->script.callFunction(lua_tostring(L, 2), args.data(), argc - 2);
for (int i = 0; i < results.size(); i++)
lscpt_dest.pushVariable(results[i]);
return (int)results.size();
}
////////////////////
////// UIPANEL //////
////////////////////
/// Set UIPanel position
///
/// Parameters :
/// - ui panel pointer
/// - x coordinate
/// - y coordinate
///
/// Return true if success and false otherwise
int UIPanel_SetPosition(void * state) {
    lua_State * L = (lua_State *)state;
    int argc = lua_gettop(L);
    if (argc < 3) {
        lua_pushboolean(L, 0);
        return 1;
    }
    UIPanel * panel = (UIPanel *)lua_topointer(L, 1);
    // Bug fix: lua_topointer yields NULL for non-pointer arguments; fail
    // gracefully instead of dereferencing it.
    if (panel == 0) {
        lua_pushboolean(L, 0);
        return 1;
    }
    panel->position.setX(lua_tonumber(L, 2));
    panel->position.setY(lua_tonumber(L, 3));
    lua_pushboolean(L, 1);
    return 1;
}
////////////////////
////// SCRIPT //////
////////////////////
/// Send raycast and return first touched object
///
/// Parameters :
/// - x coordinate
/// - y coordinate
/// - z coordinate
/// - dir_x coordinate
/// - dir_y coordinate
/// - dir_z coordinate
///
/// Return (collider, x, y, z) * NB_COLLIDED if success and nil otherwise
int Physics_Raycast(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 6) {
lua_pushnil(L);
return 1;
}
Ray ray(QVector3D(lua_tonumber(L, 1), lua_tonumber(L, 2), lua_tonumber(L, 3)),
QVector3D(lua_tonumber(L, 4), lua_tonumber(L, 5), lua_tonumber(L, 6)));
ray.direction.normalize();
std::vector<std::pair<Collider *, float>> res = Collider::Raycast(ray);
for (unsigned int i = 0; i < res.size(); i++) {
lua_pushlightuserdata(L, (void *)&res[i].first->gameObject());
QVector3D point = ray.origin + ray.direction * res[i].second;
lua_pushnumber(L, point.x());
lua_pushnumber(L, point.y());
lua_pushnumber(L, point.z());
}
return (int)(res.size() * 4);
}
///////////////////
////// SOUND //////
///////////////////
/// Play sound
///
/// Parameters :
/// - sound asset name
/// - volume (optional)
///
/// Return true if success and false otherwise
int Sound_Play(void * state) {
lua_State * L = (lua_State *)state;
int argc = lua_gettop(L);
if (argc < 1) {
lua_pushboolean(L, 0);
return 1;
}
Asset * asset = Asset::Find(lua_tostring(L, 1));
if (asset == 0 || asset->getData<QUrl>() == 0) {
lua_pushboolean(L, 0);
return 1;
}
Scene::player.setMedia(*asset->getData<QUrl>());
if (argc > 1)
Scene::player.setVolume(lua_tointeger(L, 2));
Scene::player.play();
lua_pushboolean(L, 1);
return 1;
}
/// Stop sound playback; pushes nothing.
int Sound_Stop(void * state) {
    (void)state;
    Scene::player.stop();
    return 0;
}
/// Get volume
int Sound_GetVolume(void * state) {
lua_tonumber((lua_State *)state, Scene::player.volume());
return 1;
}
/// Set volume
///
/// Parameter :
/// - volume
///
/// Return true if success and false otherwise
int Sound_SetVolume(void * state) {
    lua_State * L = (lua_State *)state;
    if (lua_gettop(L) < 1) {
        lua_pushboolean(L, 0);
        return 1;
    }
    Scene::player.setVolume(lua_tointeger(L, 1));
    lua_pushboolean(L, 1);
    return 1;
}
}
void LuaScript::loadLibScript() {
lua_State * L = (lua_State *)state;
lua_pushcfunction(L, (lua_CFunction)LuaLib::Print);
lua_setglobal(L, "print");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Split);
lua_setglobal(L, "split");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GetPrefab);
lua_setglobal(L, "GetPrefab");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GetTime);
lua_setglobal(L, "GetTime");
/// GAMEOBJECT ///
lua_createtable(L, 0, 0);
int id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_AddComponent);
lua_setfield(L, id, "AddComponent");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetComponent);
lua_setfield(L, id, "GetComponent");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_Instanciate);
lua_setfield(L, id, "Instanciate");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_Copy);
lua_setfield(L, id, "Copy");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_Find);
lua_setfield(L, id, "Find");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_AddChild);
lua_setfield(L, id, "AddChild");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetChild);
lua_setfield(L, id, "GetChild");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetChildren);
lua_setfield(L, id, "GetChildren");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetName);
lua_setfield(L, id, "GetName");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_SetName);
lua_setfield(L, id, "SetName");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetPosition);
lua_setfield(L, id, "GetPosition");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetRotation);
lua_setfield(L, id, "GetRotation");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_GetScale);
lua_setfield(L, id, "GetScale");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_SetPosition);
lua_setfield(L, id, "SetPosition");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_SetRotation);
lua_setfield(L, id, "SetRotation");
lua_pushcfunction(L, (lua_CFunction)LuaLib::GameObject_SetScale);
lua_setfield(L, id, "SetScale");
lua_setglobal(L, "GameObject");
/// COMPONENT ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Component_GetGameObject);
lua_setfield(L, id, "GetGameObject");
lua_setglobal(L, "Component");
/// GEOMETRY ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Geometry_Assign);
lua_setfield(L, id, "Assign");
lua_setglobal(L, "Geometry");
/// MATERIAl ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Material_Assign);
lua_setfield(L, id, "Assign");
lua_setglobal(L, "Material");
/// CAMERA ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Camera_GetMain);
lua_setfield(L, id, "GetMain");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Camera_GetRay);
lua_setfield(L, id, "GetRay");
lua_setglobal(L, "Camera");
/// RIGIDBODY ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Rigidbody_GetGravity);
lua_setfield(L, id, "GetGravity");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Rigidbody_SetGravity);
lua_setfield(L, id, "SetGravity");
lua_setglobal(L, "Rigidbody");
/// BOX COLLIDER ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::BoxCollider_GetOffset);
lua_setfield(L, id, "GetOffset");
lua_pushcfunction(L, (lua_CFunction)LuaLib::BoxCollider_SetOffset);
lua_setfield(L, id, "SetOffset");
lua_pushcfunction(L, (lua_CFunction)LuaLib::BoxCollider_GetSize);
lua_setfield(L, id, "GetSize");
lua_pushcfunction(L, (lua_CFunction)LuaLib::BoxCollider_SetSize);
lua_setfield(L, id, "SetSize");
lua_setglobal(L, "BoxCollider");
/// PARTICULE SYSTEM ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::ParticleSystem_AssignEmitter);
lua_setfield(L, id, "AssignEmitter");
lua_pushcfunction(L, (lua_CFunction)LuaLib::ParticleSystem_AssignParticule);
lua_setfield(L, id, "AssignParticle");
lua_pushcfunction(L, (lua_CFunction)LuaLib::ParticleSystem_GetParticuleDuration);
lua_setfield(L, id, "GetParticleDuration");
lua_pushcfunction(L, (lua_CFunction)LuaLib::ParticleSystem_GetParticuleFrequency);
lua_setfield(L, id, "GetParticleFrequency");
lua_pushcfunction(L, (lua_CFunction)LuaLib::ParticleSystem_SetParticuleDuration);
lua_setfield(L, id, "SetParticleDuration");
lua_pushcfunction(L, (lua_CFunction)LuaLib::ParticleSystem_SetParticuleFrequency);
lua_setfield(L, id, "SetParticleFrequency");
lua_setglobal(L, "ParticleSystem");
/// SCRIPT ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Script_GetVariable);
lua_setfield(L, id, "GetVariable");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Script_CallFunction);
lua_setfield(L, id, "CallFunction");
lua_setglobal(L, "Script");
/// PHYSICS ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Physics_Raycast);
lua_setfield(L, id, "Raycast");
lua_setglobal(L, "Physics");
/// UIPANEL ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::UIPanel_SetPosition);
lua_setfield(L, id, "SetPosition");
lua_setglobal(L, "UIPanel");
/// SOUND ///
lua_createtable(L, 0, 0);
id = lua_gettop(L);
lua_pushcfunction(L, (lua_CFunction)LuaLib::Sound_Play);
lua_setfield(L, id, "Play");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Sound_Stop);
lua_setfield(L, id, "Stop");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Sound_GetVolume);
lua_setfield(L, id, "GetVolume");
lua_pushcfunction(L, (lua_CFunction)LuaLib::Sound_SetVolume);
lua_setfield(L, id, "SetVolume");
lua_setglobal(L, "Sound");
}
|
// Copyright 2022 The Google Research Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "scann/hashes/internal/asymmetric_hashing_impl.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <utility>
#include "absl/random/distributions.h"
#include "scann/data_format/datapoint.h"
#include "scann/distance_measures/one_to_many/one_to_many.h"
#include "scann/hashes/internal/asymmetric_hashing_postprocess.h"
#include "scann/oss_wrappers/scann_random.h"
#include "scann/oss_wrappers/scann_status.h"
#include "scann/projection/chunking_projection.h"
#include "scann/utils/common.h"
#include "scann/utils/gmm_utils.h"
#include "scann/utils/noise_shaping_utils.h"
#include "scann/utils/top_n_amortized_constant.h"
#include "scann/utils/types.h"
namespace research_scann {
namespace asymmetric_hashing_internal {
// Distance measure used for noise-shaped AH training. It decomposes the
// quantization residual (y - x) into a component parallel to x and a
// perpendicular remainder, and weights the parallel part by
// parallel_cost_multiplier_. Only the dense path is implemented.
class ParallelPerpendicularDistance : public DistanceMeasure {
 public:
  SCANN_DECLARE_DISTANCE_MEASURE_VIRTUAL_METHODS(NOT_SPECIALLY_OPTIMIZED);
  // Weight applied to the squared parallel residual component.
  void set_parallel_cost_multiplier(double x) { parallel_cost_multiplier_ = x; }
 private:
  template <typename T>
  SCANN_INLINE double GetDistanceDenseImpl(
      const DatapointPtr<T>& x_dptr, const DatapointPtr<T>& y_dptr) const {
    DCHECK(x_dptr.IsDense());
    DCHECK(y_dptr.IsDense());
    auto x = x_dptr.values_slice();
    auto y = y_dptr.values_slice();
    DCHECK_EQ(x.size(), y.size());
    // Single pass: accumulate <y - x, x> and ||y - x||^2.
    double parallel_error = 0.0;
    double residual_squared_norm = 0.0;
    for (size_t i : IndicesOf(x)) {
      const double residual_coord =
          static_cast<double>(y[i]) - static_cast<double>(x[i]);
      parallel_error += residual_coord * x[i];
      residual_squared_norm += Square(residual_coord);
    }
    parallel_error = Square(parallel_error);
    const double perpendicular_error = residual_squared_norm - parallel_error;
    return parallel_cost_multiplier_ * parallel_error + perpendicular_error;
  }
  // Sparse and hybrid inputs are unsupported by design.
  template <typename T>
  SCANN_INLINE double GetDistanceSparseImpl(
      const DatapointPtr<T>& x_dptr, const DatapointPtr<T>& y_dptr) const {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }
  template <typename T>
  SCANN_INLINE double GetDistanceHybridImpl(
      const DatapointPtr<T>& x_dptr, const DatapointPtr<T>& y_dptr) const {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }
  double parallel_cost_multiplier_ = 1.0;
};
SCANN_DEFINE_DISTANCE_MEASURE_VIRTUAL_METHODS(ParallelPerpendicularDistance,
                                              numeric_limits<size_t>::max());
// Identifier reported by this distance measure.
string_view ParallelPerpendicularDistance::name() const {
  return "ParallelPerpendicularDistance";
}
namespace {
double ComputeNormBiasCorrection(const DenseDataset<double>& db,
DatapointPtr<double> center,
ConstSpan<DatapointIndex> cluster_members) {
double mean_norm = 0.0;
for (DatapointIndex idx : cluster_members) {
mean_norm += std::sqrt(SquaredL2Norm(db[idx]));
}
mean_norm /= cluster_members.size();
const double center_norm = std::sqrt(SquaredL2Norm(center));
return (center_norm == 0.0) ? 1.0 : (mean_norm / center_norm);
}
} // namespace
// Trains one codebook (set of k-means centers) per projection block from a
// (possibly subsampled) dataset. Returns num_blocks center datasets, each in
// a cache-aware permuted order.
//
// Fix: L28968 contained the mojibake "¢ers" (an HTML-entity corruption of
// "&centers,"), which does not compile; restored the address-of expression.
template <typename T>
StatusOr<vector<DenseDataset<double>>> AhImpl<T>::TrainAsymmetricHashing(
    const TypedDataset<T>& dataset, const TrainingOptionsT& opts,
    shared_ptr<ThreadPool> pool) {
  if (dataset.empty()) {
    return InvalidArgumentError("Cannot train AH on an empty dataset.");
  }
  // Project the first datapoint only to discover the block structure.
  ChunkedDatapoint<double> chunked_vec;
  if (opts.preprocessing_function()) {
    TF_ASSIGN_OR_RETURN(Datapoint<T> preprocessed,
                        opts.preprocessing_function()(dataset[0]));
    SCANN_RETURN_IF_ERROR(
        opts.projector()->ProjectInput(preprocessed.ToPtr(), &chunked_vec));
  } else {
    SCANN_RETURN_IF_ERROR(
        opts.projector()->ProjectInput(dataset[0], &chunked_vec));
  }
  int32_t num_blocks = chunked_vec.size();
  vector<DenseDataset<double>> chunked_dataset(num_blocks);
  // Sampling fraction either derives from an expected sample size or is
  // taken directly from the config.
  const float sampling_fraction =
      opts.config().has_expected_sample_size()
          ? std::min(1.0,
                     static_cast<double>(opts.config().expected_sample_size()) /
                         static_cast<double>(dataset.size()))
          : opts.config().sampling_fraction();
  if (sampling_fraction == 1.0) {
    // Whole dataset will be used; reserve up front to avoid reallocation.
    for (int32_t i = 0; i < num_blocks; ++i) {
      DenseDataset<double>& ds = chunked_dataset[i];
      ds.set_dimensionality(chunked_vec[i].dimensionality());
      ds.Reserve(dataset.size());
    }
  }
  // Deterministic Bernoulli sampling of training points.
  MTRandom rng(kDeterministicSeed * (opts.config().sampling_seed() + 1));
  vector<DatapointIndex> sample;
  for (DatapointIndex i = 0; i < dataset.size(); ++i) {
    if (absl::Uniform<double>(rng, 0, 1.0) < sampling_fraction) {
      sample.push_back(i);
    }
  }
  if (sample.size() > opts.config().max_sample_size()) {
    // Downsample uniformly, then restore index order.
    std::shuffle(sample.begin(), sample.end(), rng);
    sample.resize(opts.config().max_sample_size());
    std::sort(sample.begin(), sample.end());
  }
  if (sample.size() < opts.config().num_clusters_per_block()) {
    return InvalidArgumentError(absl::StrCat(
        "Number of clusters per block (",
        opts.config().num_clusters_per_block(),
        ") is greater than asymmetric hashing training data size (",
        sample.size(), ")."));
  }
  // Append the current chunked projection to every per-block dataset.
  auto append_chunked_blocks = [&] {
    for (size_t j = 0; j < num_blocks; ++j) {
      chunked_dataset[j].AppendOrDie(chunked_vec[j], "");
    }
  };
  if (opts.preprocessing_function()) {
    for (DatapointIndex i : sample) {
      TF_ASSIGN_OR_RETURN(Datapoint<T> preprocessed,
                          opts.preprocessing_function()(dataset[i]));
      SCANN_RETURN_IF_ERROR(
          opts.projector()->ProjectInput(preprocessed.ToPtr(), &chunked_vec));
      append_chunked_blocks();
    }
  } else {
    for (DatapointIndex i : sample) {
      SCANN_RETURN_IF_ERROR(
          opts.projector()->ProjectInput(dataset[i], &chunked_vec));
      append_chunked_blocks();
    }
  }
  auto quantization_distance = opts.quantization_distance();
  GmmUtils::Options gmm_opts;
  gmm_opts.seed = opts.config().clustering_seed();
  gmm_opts.max_iterations = opts.config().max_clustering_iterations();
  gmm_opts.epsilon = opts.config().clustering_convergence_tolerance();
  gmm_opts.parallelization_pool = std::move(pool);
  // Optionally replace the quantization distance with the noise-shaped
  // parallel/perpendicular distance.
  if (!std::isnan(opts.config().noise_shaping_threshold()) &&
      opts.config().use_noise_shaped_training()) {
    gmm_opts.parallel_cost_multiplier = ComputeParallelCostMultiplier(
        opts.config().noise_shaping_threshold(), 1.0, dataset.dimensionality());
    auto d = make_shared<ParallelPerpendicularDistance>();
    d->set_parallel_cost_multiplier(gmm_opts.parallel_cost_multiplier);
    quantization_distance = d;
  }
  GmmUtils gmm(quantization_distance, gmm_opts);
  vector<DenseDataset<double>> all_centers(num_blocks);
  for (size_t i : Seq(num_blocks)) {
    DenseDataset<double> centers;
    vector<vector<DatapointIndex>> subpartitions;
    SCANN_RETURN_IF_ERROR(gmm.GenericKmeans(
        chunked_dataset[i], opts.config().num_clusters_per_block(), &centers,
        &subpartitions));
    if (opts.config().use_norm_biasing_correction()) {
      for (size_t center_idx : IndicesOf(centers)) {
        const double norm_bias_correction = ComputeNormBiasCorrection(
            chunked_dataset[i], centers[center_idx], subpartitions[center_idx]);
        for (double& d : centers.mutable_data(center_idx)) {
          d *= norm_bias_correction;
        }
      }
    }
    // Free per-block training data as soon as its codebook is built.
    chunked_dataset[i].clear();
    chunked_dataset[i].ShrinkToFit();
    // Order centers by descending cluster population, then rotate/reverse
    // per block so popular centers land on different cache lines.
    vector<uint32_t> centers_permutation(centers.size());
    std::iota(centers_permutation.begin(), centers_permutation.end(), 0U);
    std::sort(centers_permutation.begin(), centers_permutation.end(),
              [&subpartitions](uint32_t a, uint32_t b) {
                return subpartitions[a].size() > subpartitions[b].size();
              });
    constexpr size_t kAssumedCacheLineSize = 64;
    constexpr size_t kFloatsPerCacheLine =
        kAssumedCacheLineSize / sizeof(float);
    const uint64_t cache_lines_per_row =
        std::max(static_cast<size_t>(1), centers.size() / kFloatsPerCacheLine);
    const size_t num_rotate =
        ((i / 2) % cache_lines_per_row) * kFloatsPerCacheLine;
    std::rotate(centers_permutation.begin(),
                centers_permutation.begin() + num_rotate,
                centers_permutation.end());
    if (i & 1) {
      std::reverse(centers_permutation.begin(), centers_permutation.end());
    }
    for (uint32_t j : centers_permutation) {
      all_centers[i].AppendOrDie(centers[j], "");
    }
  }
  return std::move(all_centers);
}
// Quantizes `input`: projects it into per-block subspaces and writes the
// index of the nearest center of each block into result[block].
template <typename T>
Status AhImpl<T>::IndexDatapoint(const DatapointPtr<T>& input,
                                 const ChunkingProjection<T>& projection,
                                 const DistanceMeasure& quantization_distance,
                                 ConstSpan<DenseDataset<FloatT>> centers,
                                 MutableSpan<uint8_t> result) {
  DCHECK(!centers.empty());
  ChunkedDatapoint<FloatT> projected;
  SCANN_RETURN_IF_ERROR(projection.ProjectInput(input, &projected));
  DCHECK_LE(projected.size(), result.size());
  // Codes are uint8_t, so at most 256 centers per block are supported.
  vector<float> distances(centers[0].size());
  DCHECK_GE(distances.size(), 1);
  DCHECK_LE(distances.size(), 256);
  for (size_t i = 0; i < projected.size(); ++i) {
    DCHECK_EQ(centers[0].size(), centers[i].size());
    size_t closest = 0;
    const DatapointPtr<FloatT> projected_ptr = projected[i];
    const DenseDataset<FloatT>& cur_centers = centers[i];
    const size_t centers_size = cur_centers.size();
    if (projected_ptr.IsSparse()) {
      // Sparse chunk: brute-force scan with the hybrid distance.
      double closest_distance = numeric_limits<double>::infinity();
      for (size_t j = 0; j < centers_size; ++j) {
        const double distance = quantization_distance.GetDistanceHybrid(
            projected_ptr, cur_centers[j]);
        if (ABSL_PREDICT_FALSE(distance < closest_distance)) {
          closest_distance = distance;
          closest = j;
        }
      }
    } else {
      // Dense chunk: batched one-to-many distance computation, then argmin.
      DCHECK_EQ(distances.size(), cur_centers.size());
      DenseDistanceOneToMany(quantization_distance, projected_ptr, cur_centers,
                             MutableSpan<float>(distances));
      auto min_it = std::min_element(distances.begin(), distances.end());
      closest = min_it - distances.begin();
    }
    result[i] = closest;
  }
  return OkStatus();
}
// Convenience overload: sizes `result` (one code per subspace) and delegates
// to the span-based IndexDatapoint above.
template <typename T>
Status AhImpl<T>::IndexDatapoint(const DatapointPtr<T>& input,
                                 const ChunkingProjection<T>& projection,
                                 const DistanceMeasure& quantization_distance,
                                 ConstSpan<DenseDataset<FloatT>> centers,
                                 Datapoint<uint8_t>* result) {
  const DatapointIndex num_codes = centers.size();
  DCHECK_EQ(num_codes, projection.num_blocks());
  result->clear();
  result->mutable_values()->resize(num_codes, 0);
  return AhImpl<T>::IndexDatapoint(input, projection, quantization_distance,
                                   centers, result->mutable_values_slice());
}
namespace {
// Returns value * value; works for any type with operator*.
template <typename T>
T Square(T value) {
  return value * value;
}
// Ratio of the per-unit cost of parallel residual error to perpendicular
// residual error implied by noise-shaping threshold t, for a vector with the
// given squared L2 norm and dimensionality.
double ComputeParallelCostMultiplier(double t, double squared_l2_norm,
                                     DimensionIndex dims) {
  const double normalized_t_squared = (t * t) / squared_l2_norm;
  const double parallel_cost = normalized_t_squared;
  const double perpendicular_cost = (1.0 - normalized_t_squared) / (dims - 1.0);
  return parallel_cost / perpendicular_cost;
}
// Per-(subspace, cluster) statistics of the quantization residual.
struct SubspaceResidualStats {
  // Squared L2 norm of the residual within this subspace.
  double residual_norm = 0.0;
  // This subspace's contribution to the residual component parallel to the
  // normalized original datapoint.
  double parallel_residual_component = 0.0;
};
// Computes, for one candidate cluster center `quantized`, the squared residual
// norm and the residual's parallel component (relative to the normalized
// original datapoint) within a single subspace.
template <typename T>
SubspaceResidualStats ComputeResidualStatsForCluster(
    ConstSpan<T> maybe_residual_dptr, ConstSpan<T> original_dptr,
    double inv_norm, ConstSpan<FloatingTypeFor<T>> quantized) {
  DCHECK_EQ(maybe_residual_dptr.size(), quantized.size());
  SubspaceResidualStats stats;
  for (size_t dim = 0; dim < maybe_residual_dptr.size(); ++dim) {
    const double residual = static_cast<double>(maybe_residual_dptr[dim]) -
                            static_cast<double>(quantized[dim]);
    stats.residual_norm += Square(residual);
    stats.parallel_residual_component += residual * original_dptr[dim] * inv_norm;
  }
  return stats;
}
// Projects both the (possibly residualized) datapoint and the original
// datapoint into each subspace, then computes residual statistics for every
// (subspace, cluster) pair.
//
// Fix: removed the dead local `result` vector, which allocated num_subspaces
// empty vectors and was never read or returned.
template <typename T>
StatusOr<vector<std::vector<SubspaceResidualStats>>> ComputeResidualStats(
    DatapointPtr<T> maybe_residual_dptr, DatapointPtr<T> original_dptr,
    ConstSpan<DenseDataset<FloatingTypeFor<T>>> centers,
    const ChunkingProjection<T>& projection) {
  const size_t num_subspaces = centers.size();
  DCHECK_GE(num_subspaces, 1);
  vector<std::vector<SubspaceResidualStats>> residual_stats(num_subspaces);
  const size_t num_clusters_per_block = centers[0].size();
  using FloatT = FloatingTypeFor<T>;
  ChunkedDatapoint<FloatT> maybe_residual_dptr_chunked;
  ChunkedDatapoint<FloatT> original_dptr_chunked;
  SCANN_RETURN_IF_ERROR(projection.ProjectInput(maybe_residual_dptr,
                                                &maybe_residual_dptr_chunked));
  SCANN_RETURN_IF_ERROR(
      projection.ProjectInput(original_dptr, &original_dptr_chunked));
  SCANN_RET_CHECK_EQ(maybe_residual_dptr_chunked.size(), num_subspaces);
  SCANN_RET_CHECK_EQ(original_dptr_chunked.size(), num_subspaces);
  // Norm of the projected original datapoint; normalizes the parallel
  // residual component below.
  double chunked_norm = 0.0;
  for (size_t subspace_idx : Seq(num_subspaces)) {
    for (FloatT x : original_dptr_chunked[subspace_idx].values_slice()) {
      chunked_norm += Square<double>(x);
    }
  }
  chunked_norm = std::sqrt(chunked_norm);
  double inverse_chunked_norm = 1.0 / chunked_norm;
  for (size_t subspace_idx : Seq(num_subspaces)) {
    auto& cur_subspace_residual_stats = residual_stats[subspace_idx];
    cur_subspace_residual_stats.resize(num_clusters_per_block);
    const DenseDataset<FloatingTypeFor<T>>& cur_subspace_centers =
        centers[subspace_idx];
    for (size_t cluster_idx : Seq(num_clusters_per_block)) {
      ConstSpan<FloatingTypeFor<T>> center =
          cur_subspace_centers[cluster_idx].values_slice();
      ConstSpan<FloatT> maybe_residual_dptr_span =
          maybe_residual_dptr_chunked[subspace_idx].values_slice();
      ConstSpan<FloatT> original_dptr_span =
          original_dptr_chunked[subspace_idx].values_slice();
      cur_subspace_residual_stats[cluster_idx] = ComputeResidualStatsForCluster(
          maybe_residual_dptr_span, original_dptr_span, inverse_chunked_norm,
          center);
    }
  }
  return residual_stats;
}
// For each subspace, picks the cluster with the smallest residual norm (first
// one wins on ties) as the initial quantization code.
void InitializeToMinResidualNorm(
    ConstSpan<std::vector<SubspaceResidualStats>> residual_stats,
    MutableSpan<uint8_t> result) {
  DCHECK_EQ(result.size(), residual_stats.size());
  for (size_t subspace_idx : IndicesOf(residual_stats)) {
    const std::vector<SubspaceResidualStats>& stats =
        residual_stats[subspace_idx];
    size_t best = 0;
    for (size_t cluster_idx = 1; cluster_idx < stats.size(); ++cluster_idx) {
      if (stats[cluster_idx].residual_norm < stats[best].residual_norm) {
        best = cluster_idx;
      }
    }
    result[subspace_idx] = best;
  }
}
// Sums the parallel residual components of the currently selected cluster in
// every subspace, yielding the whole-point parallel residual component.
double ComputeParallelResidualComponent(
    ConstSpan<uint8_t> quantized,
    ConstSpan<std::vector<SubspaceResidualStats>> residual_stats) {
  double total = 0.0;
  for (size_t subspace_idx : IndicesOf(quantized)) {
    total += residual_stats[subspace_idx][quantized[subspace_idx]]
                 .parallel_residual_component;
  }
  return total;
}
// Outcome of optimizing one subspace during coordinate descent.
struct CoordinateDescentResult {
  // Best center for the subspace (may equal the current one).
  uint8_t new_center_idx = 0;
  // Change in total cost if the new center is adopted; <= 0, and 0 means
  // "keep the current center".
  double cost_delta = 0.0;
  // Whole-point parallel residual component after adopting the new center.
  double new_parallel_residual_component = 0.0;
};
// One coordinate-descent step: given the current center for one subspace and
// the whole-point parallel residual component, scans all other centers in the
// subspace and returns the one minimizing the noise-shaped cost
// (parallel_cost_multiplier * parallel + perpendicular), along with its cost
// delta and updated parallel component.
CoordinateDescentResult OptimizeSingleSubspace(
    ConstSpan<SubspaceResidualStats> cur_subspace_residual_stats,
    const uint8_t cur_center_idx, const double parallel_residual_component,
    const double parallel_cost_multiplier) {
  CoordinateDescentResult result;
  result.new_center_idx = cur_center_idx;
  result.new_parallel_residual_component = parallel_residual_component;
  const double old_subspace_residual_norm =
      cur_subspace_residual_stats[cur_center_idx].residual_norm;
  const double old_subspace_parallel_component =
      cur_subspace_residual_stats[cur_center_idx].parallel_residual_component;
  for (size_t new_center_idx : IndicesOf(cur_subspace_residual_stats)) {
    if (new_center_idx == cur_center_idx) continue;
    const SubspaceResidualStats& rs =
        cur_subspace_residual_stats[new_center_idx];
    // Whole-point parallel component if this candidate were adopted: swap
    // out the current subspace's contribution for the candidate's.
    const double new_parallel_residual_component =
        parallel_residual_component - old_subspace_parallel_component +
        rs.parallel_residual_component;
    const double parallel_norm_delta = Square(new_parallel_residual_component) -
                                       Square(parallel_residual_component);
    // Skip candidates that increase the (heavily weighted) parallel error.
    if (parallel_norm_delta > 0.0) continue;
    const double residual_norm_delta =
        rs.residual_norm - old_subspace_residual_norm;
    const double perpendicular_norm_delta =
        residual_norm_delta - parallel_norm_delta;
    const double cost_delta = parallel_cost_multiplier * parallel_norm_delta +
                              perpendicular_norm_delta;
    if (cost_delta < result.cost_delta) {
      result.new_center_idx = new_center_idx;
      result.cost_delta = cost_delta;
      result.new_parallel_residual_component = new_parallel_residual_component;
    }
  }
  return result;
}
// Noise-shaped asymmetric-hashing quantization via coordinate descent.
// Starts from the per-subspace minimum-residual assignment, then greedily
// switches single subspace centers while the anisotropic cost (controlled by
// `threshold`) decreases. On success, `result` holds one center index per
// subspace; the optional out-pointers report the number of changed
// subspaces, the final residual norm and the squared parallel component.
template <typename T>
Status CoordinateDescentAHQuantize(
    DatapointPtr<T> maybe_residual_dptr, DatapointPtr<T> original_dptr,
    ConstSpan<DenseDataset<FloatingTypeFor<T>>> centers,
    const ChunkingProjection<T>& projection, double threshold,
    MutableSpan<uint8_t> result, int* num_changes = nullptr,
    double* residual_ptr = nullptr, double* parallel_residual_ptr = nullptr) {
  SCANN_RET_CHECK_EQ(result.size(), centers.size());
  SCANN_RET_CHECK_EQ(maybe_residual_dptr.dimensionality(),
                     original_dptr.dimensionality());
  TF_ASSIGN_OR_RETURN(auto residual_stats,
                      ComputeResidualStats(maybe_residual_dptr, original_dptr,
                                           centers, projection));
  const double parallel_cost_multiplier = ComputeParallelCostMultiplier(
      threshold, SquaredL2Norm(original_dptr), original_dptr.dimensionality());
  // Greedy initialization: each subspace picks its minimum-residual center.
  InitializeToMinResidualNorm(residual_stats, result);
  double parallel_residual_component =
      ComputeParallelResidualComponent(result, residual_stats);
  vector<uint16_t> subspace_idxs(result.size());
  std::iota(subspace_idxs.begin(), subspace_idxs.end(), 0U);
  vector<double> subspace_residual_norms(result.size());
  for (size_t subspace_idx : IndicesOf(result)) {
    const uint8_t cluster_idx = result[subspace_idx];
    subspace_residual_norms[subspace_idx] =
        residual_stats[subspace_idx][cluster_idx].residual_norm;
  }
  // Visit subspaces in order of decreasing residual norm. ZipSort keeps the
  // center assignments (result_sorted) and subspace indices aligned with the
  // sorted norms, so index i below refers to the same subspace in all three.
  std::vector<uint8_t> result_sorted(result.begin(), result.end());
  ZipSortBranchOptimized(
      std::greater<double>(), subspace_residual_norms.begin(),
      subspace_residual_norms.end(), result_sorted.begin(), result_sorted.end(),
      subspace_idxs.begin(), subspace_idxs.end());
  enum { kMaxRounds = 10 };
  bool cur_round_changes = true;
  if (num_changes) *num_changes = 0;
  // Full sweeps over all subspaces until convergence or the round limit.
  for (int round = 0; cur_round_changes && round < kMaxRounds; ++round) {
    cur_round_changes = false;
    for (size_t i : IndicesOf(subspace_idxs)) {
      const size_t subspace_idx = subspace_idxs[i];
      ConstSpan<SubspaceResidualStats> cur_subspace_residual_stats =
          residual_stats[subspace_idx];
      const uint8_t cur_center_idx = result_sorted[i];
      auto subspace_result = OptimizeSingleSubspace(
          cur_subspace_residual_stats, cur_center_idx,
          parallel_residual_component, parallel_cost_multiplier);
      if (subspace_result.new_center_idx != cur_center_idx) {
        if (num_changes) ++*num_changes;
        parallel_residual_component =
            subspace_result.new_parallel_residual_component;
        result_sorted[i] = subspace_result.new_center_idx;
        cur_round_changes = true;
      }
    }
  }
  // Scatter the optimized assignments back into subspace order and
  // accumulate the final residual norm.
  double final_residual_norm = 0.0;
  for (size_t i : IndicesOf(result_sorted)) {
    const size_t subspace_idx = subspace_idxs[i];
    const uint8_t center_idx = result_sorted[i];
    result[subspace_idx] = center_idx;
    final_residual_norm +=
        residual_stats[subspace_idx][center_idx].residual_norm;
  }
  if (residual_ptr) *residual_ptr = final_residual_norm;
  if (parallel_residual_ptr) {
    *parallel_residual_ptr = Square(parallel_residual_component);
  }
  return OkStatus();
}
} // namespace
// Public entry point for noise-shaped indexing: quantizes
// `maybe_residual_dptr` against the per-subspace `centers`, shaping the
// quantization noise relative to `original_dptr` via coordinate descent
// (see CoordinateDescentAHQuantize above).
template <typename T>
Status AhImpl<T>::IndexDatapointNoiseShaped(
    const DatapointPtr<T>& maybe_residual_dptr,
    const DatapointPtr<T>& original_dptr,
    const ChunkingProjection<T>& projection,
    ConstSpan<DenseDataset<FloatingTypeFor<T>>> centers, double threshold,
    MutableSpan<uint8_t> result) {
  return CoordinateDescentAHQuantize<T>(maybe_residual_dptr, original_dptr,
                                        centers, projection, threshold, result);
}
// Builds the raw float asymmetric-hashing lookup table for `query`: for each
// projection chunk i and each of its centers j, the table stores
// lookup_distance(projected_query_chunk_i, center_j). The result is laid out
// row-major with num_clusters_per_block entries per chunk.
template <typename T>
StatusOr<vector<float>> AhImpl<T>::CreateRawFloatLookupTable(
    const DatapointPtr<T>& query, const ChunkingProjection<T>& projection,
    const DistanceMeasure& lookup_distance,
    ConstSpan<DenseDataset<FloatT>> centers, int32_t num_clusters_per_block) {
  ChunkedDatapoint<FloatT> projected;
  SCANN_RETURN_IF_ERROR(projection.ProjectInput(query, &projected));
  SCANN_RET_CHECK_EQ(centers.size(), projected.size());
  vector<float> result(num_clusters_per_block * projected.size());
  float* result_row_start = result.data();
  for (size_t i = 0; i < centers.size();
       ++i, result_row_start += num_clusters_per_block) {
    const DatapointPtr<FloatT> projected_ptr = projected[i];
    const DenseDataset<FloatT>& cur_centers = centers[i];
    if (projected_ptr.IsSparse()) {
      // Sparse chunks fall back to one-at-a-time hybrid distance evaluation.
      for (size_t j = 0; j < num_clusters_per_block; ++j) {
        result_row_start[j] = static_cast<float>(
            lookup_distance.GetDistanceHybrid(projected_ptr, cur_centers[j]));
      }
    } else {
      // Dense chunks use the batched one-to-many kernel. For limited inner
      // product the table is filled with plain dot-product distances instead
      // of the lookup distance itself.
      if (lookup_distance.specially_optimized_distance_tag() ==
          DistanceMeasure::LIMITED_INNER_PRODUCT) {
        DenseDistanceOneToMany(
            DotProductDistance(), projected_ptr, cur_centers,
            MutableSpan<float>(result_row_start, num_clusters_per_block));
      } else {
        DenseDistanceOneToMany(
            lookup_distance, projected_ptr, cur_centers,
            MutableSpan<float>(result_row_start, num_clusters_per_block));
      }
    }
  }
  return std::move(result);
}
namespace {
// Computes the scale factor that maps lookup-table values into
// [-max_integer_value, max_integer_value] such that (approximately)
// `quantile` of the absolute values fall in range after scaling; the rest
// will be clamped by the caller.
float ComputeMultiplierByQuantile(ConstSpan<float> raw_lookup, float quantile,
                                  int32_t max_integer_value) {
  // k = number of largest-magnitude elements at/above the quantile boundary;
  // k == 1 corresponds to quantile == 1.0 (scale by the overall maximum).
  const size_t k = raw_lookup.size() * (1.0 - quantile) + 1;
  if (k == 1) {
    // Epsilon floor guards against division by zero for an all-zero table.
    const float max_abs_lookup_element = std::max(
        std::sqrt(numeric_limits<float>::epsilon()), MaxAbsValue(raw_lookup));
    return max_integer_value / max_abs_lookup_element;
  } else {
    DCHECK_LT(quantile, 1.0f);
    // Track the k largest absolute values; the smallest of them is the
    // quantile boundary used as the scaling denominator.
    TopNAmortizedConstant<float> tn(k);
    for (auto& elem : raw_lookup) {
      tn.push(std::abs(elem));
    }
    // NOTE(review): unlike the k == 1 path, there is no epsilon floor here;
    // a table whose k-th largest magnitude is 0 would divide by zero —
    // confirm callers exclude that case.
    return max_integer_value / tn.exact_bottom();
  }
}
// Scales every raw lookup value by `multiplier`, converts it to an integer
// with the supplied lambda, and re-biases it into the unsigned fixed-point
// domain of T.
template <typename T, typename Lambda>
inline vector<T> ConvertLookupToFixedPointImpl(ConstSpan<float> raw_lookup,
                                               Lambda convert_to_int_lambda,
                                               float multiplier) {
  constexpr T kBias = FixedPointBias<T>();
  vector<T> fixed;
  fixed.reserve(raw_lookup.size());
  for (const float raw : raw_lookup) {
    fixed.push_back(convert_to_int_lambda(raw * multiplier) + kBias);
  }
  return fixed;
}
} // namespace
// Converts a raw float lookup table into biased fixed-point values of type T
// (uint8_t or uint16_t). The scale factor is derived from the configured
// quantile and written to *multiplier. When the quantile is exactly 1.0 no
// value can exceed the integer range, so the clamping lambda is skipped;
// otherwise out-of-range values are compressed to the signed bounds first.
template <typename T>
vector<T> ConvertLookupToFixedPoint(
    ConstSpan<float> raw_lookup,
    const AsymmetricHasherConfig::FixedPointLUTConversionOptions&
        conversion_options,
    float* multiplier) {
  DCHECK_GT(conversion_options.multiplier_quantile(), 0.0f);
  DCHECK_LE(conversion_options.multiplier_quantile(), 1.0f);
  using SignedT = make_signed_t<T>;
  *multiplier = ComputeMultiplierByQuantile(
      raw_lookup, conversion_options.multiplier_quantile(),
      numeric_limits<SignedT>::max());
  constexpr int kRound =
      AsymmetricHasherConfig::FixedPointLUTConversionOptions::ROUND;
  if (conversion_options.multiplier_quantile() == 1.0f) {
    // No clamping needed: the multiplier already bounds all scaled values.
    if (conversion_options.float_to_int_conversion_method() == kRound) {
      return ConvertLookupToFixedPointImpl<T>(
          raw_lookup, [](float f) { return std::lround(f); }, *multiplier);
    } else {
      // Non-ROUND method truncates toward zero via static_cast.
      return ConvertLookupToFixedPointImpl<T>(
          raw_lookup, [](float f) { return static_cast<SignedT>(f); },
          *multiplier);
    }
  } else {
    // Values beyond the chosen quantile may overflow SignedT; clamp first.
    auto compress_to_bounds = [](float f) {
      f = std::min<float>(f, numeric_limits<SignedT>::max());
      return std::max<float>(f, numeric_limits<SignedT>::min());
    };
    if (conversion_options.float_to_int_conversion_method() == kRound) {
      return ConvertLookupToFixedPointImpl<T>(
          raw_lookup,
          [&](float f) {
            return static_cast<SignedT>(std::lround(compress_to_bounds(f)));
          },
          *multiplier);
    } else {
      return ConvertLookupToFixedPointImpl<T>(
          raw_lookup,
          [&](float f) { return static_cast<SignedT>(compress_to_bounds(f)); },
          *multiplier);
    }
  }
}
// Explicit instantiations for the two fixed-point LUT element widths used by
// the asymmetric hasher.
template vector<uint8_t> ConvertLookupToFixedPoint<uint8_t>(
    ConstSpan<float> raw_lookup,
    const AsymmetricHasherConfig::FixedPointLUTConversionOptions&,
    float* multiplier);
template vector<uint16_t> ConvertLookupToFixedPoint<uint16_t>(
    ConstSpan<float> raw_lookup,
    const AsymmetricHasherConfig::FixedPointLUTConversionOptions&,
    float* multiplier);
// Determines whether summing `num_blocks` unbiased int8 lookup values (one
// per block) can be accumulated in an int16 without overflow. Only the
// 16-centers-per-block layout is supported.
bool CanUseInt16Accumulator(ConstSpan<uint8_t> lookup_table,
                            size_t num_blocks) {
  const size_t num_centers_per_block = lookup_table.size() / num_blocks;
  DCHECK_EQ(lookup_table.size() % num_blocks, 0);
  if (num_centers_per_block != 16) return false;
  constexpr uint8_t kBias = FixedPointBias<uint8_t>();
  // With unbiased values confined to [int8 min, int8 max], any sum of at most
  // kGuaranteedToWork terms fits in int16 regardless of the actual data
  // (int16 min / int8 min and int16 max / int8 max, whichever is smaller).
  constexpr size_t kMinMin =
      numeric_limits<int16_t>::min() / numeric_limits<int8_t>::min();
  constexpr size_t kMaxMax =
      numeric_limits<int16_t>::max() / numeric_limits<int8_t>::max();
  constexpr size_t kGuaranteedToWork = (kMaxMax < kMinMin) ? kMaxMax : kMinMin;
  if (num_blocks <= kGuaranteedToWork) return true;
  // Otherwise bound the worst case by summing each block's extreme unbiased
  // values: if even the sums of per-block minima/maxima stay in range, every
  // achievable accumulation does too.
  int32_t sum_of_maxes = 0, sum_of_mins = 0;
  auto block_start = lookup_table.begin();
  for (size_t block = num_blocks; block != 0;
       --block, block_start += num_centers_per_block) {
    int8_t max_val = numeric_limits<int8_t>::min();
    int8_t min_val = numeric_limits<int8_t>::max();
    for (size_t i = 0; i < 16; ++i) {
      // Remove the unsigned storage bias to recover the signed value.
      const int8_t unbiased = block_start[i] - kBias;
      max_val = std::max<int8_t>(unbiased, max_val);
      min_val = std::min<int8_t>(unbiased, min_val);
    }
    sum_of_mins += static_cast<int32_t>(min_val);
    sum_of_maxes += static_cast<int32_t>(max_val);
  }
  return sum_of_maxes <= numeric_limits<int16_t>::max() &&
         sum_of_mins >= numeric_limits<int16_t>::min();
}
// Packs 4-bit hash codes into the interleaved layout consumed by the LUT16
// kernels: datapoints are processed in groups of 32, and for each block the
// codes of datapoints m and m+16 share one byte (m in the low nibble,
// m+16 in the high nibble).
vector<uint8_t> CreatePackedDataset(
    const DenseDataset<uint8_t>& hashed_database) {
  vector<uint8_t> packed_dataset;
  if (hashed_database.empty()) {
    return packed_dataset;
  }
  DimensionIndex num_blocks = hashed_database[0].nonzero_entries();
  // Size is rounded up to a multiple of 32 datapoints, at half a byte per
  // (datapoint, block) pair.
  packed_dataset.resize(num_blocks * ((hashed_database.size() + 31) & (~31)) /
                        2);
  DatapointIndex k = 0;
  // Full groups of 32 datapoints.
  for (; k < hashed_database.size() / 32; ++k) {
    size_t start = k * 16 * num_blocks;
    for (size_t j = 0; j < num_blocks; ++j) {
      for (size_t m = 0; m < 16; m++) {
        uint8_t u0 = hashed_database[k * 32 + m].values()[j];
        uint8_t u1 = hashed_database[k * 32 + m + 16].values()[j];
        // high nibble: datapoint m+16, low nibble: datapoint m
        packed_dataset[start + j * 16 + m] = u1 * 16 + u0;
      }
    }
  }
  // Final partial group: indices beyond the dataset are clamped to the last
  // datapoint, so the padding bytes still contain valid codes.
  if (k * 32 < hashed_database.size()) {
    size_t start = k * 16 * num_blocks;
    for (size_t j = 0; j < num_blocks; ++j) {
      for (size_t m = 0; m < 16; m++) {
        DatapointIndex dp_idx = k * 32 + m;
        dp_idx = dp_idx >= hashed_database.size() ? (hashed_database.size() - 1)
                                                  : dp_idx;
        uint8_t u0 = hashed_database[dp_idx].values()[j];
        dp_idx = k * 32 + m + 16;
        dp_idx = dp_idx >= hashed_database.size() ? (hashed_database.size() - 1)
                                                  : dp_idx;
        uint8_t u1 = hashed_database[dp_idx].values()[j];
        packed_dataset[start + j * 16 + m] = u1 * 16 + u0;
      }
    }
  }
  return packed_dataset;
}
// Explicit instantiations of the iterator templates for each postprocess
// functor variant used by this library.
template class UnrestrictedIndexIterator<6, IdentityPostprocessFunctor>;
template class UnrestrictedIndexIterator<6, AddBiasFunctor>;
template class UnrestrictedIndexIterator<6, LimitedInnerFunctor>;
template class PopulateDistancesIterator<6, IdentityPostprocessFunctor>;
template class PopulateDistancesIterator<6, AddBiasFunctor>;
template class PopulateDistancesIterator<6, LimitedInnerFunctor>;
// Instantiate AhImpl for all supported datapoint types.
SCANN_INSTANTIATE_TYPED_CLASS(, AhImpl);
} // namespace asymmetric_hashing_internal
} // namespace research_scann
|
/**
* \file TestCsvReader.cpp
* \author Karsten Rink
* \date 2015-04-09
*
* \copyright
* Copyright (c) 2012-2016, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*
*
*/
#include <cstdio>
#include "gtest/gtest.h"
#include "BaseLib/BuildInfo.h"
#include "GeoLib/IO/CsvInterface.h"
#include "GeoLib/Point.h"
/// Test fixture that writes a small tab-separated CSV file with deliberately
/// irregular rows (an extra trailing field, missing values, non-numeric
/// entries) into the tests temp directory, and deletes it again afterwards.
class CsvInterfaceTest : public ::testing::Test
{
public:
    CsvInterfaceTest()
        : _file_name(BaseLib::BuildInfo::tests_tmp_path+"test.csv")
    {
        std::ofstream out(_file_name);
        // Header declares 7 columns; several data rows deviate on purpose:
        // row 0 carries an extra field, rows 1, 3, 5-9 omit trailing values.
        out << "id\tx\ty\tz\tname\tvalue1\tvalue_two\n";
        out << "0\t642015.538\t5724666.445\t391.759\ttest_a\t11.05303121\t436.913\t133\n";
        out << "1\t642015.49\t724667.426\t391.85\ttest_b\t51.65503659\n";
        out << "2\t642015.379\t5724668.424\t391.914\ttest_c\t437.068\t135\t2\n";
        out << "3\t642015.318\t5724669.411\t392.033\ttest_d\t51.65505447\t11.05302923\n";
        out << "4\t642015.275\t5724670.403\t392.172\ttest_e\t437.326\t137\t392.172\n";
        out << "5\t642015.288\t5724671.407\t392.232\ttest_f\n";
        out << "6\t642015.231\t5724672.403\t392.281\ttest_g\t\t437.435\n";
        out << "7\t642015.232\t5724673.384\t392.385\ttest_h\t11.05302961\t437.539\n";
        out << "8\t642015.153\t5724674.372\t392.428\ttest_i\t51.65509909\t11.05302887\n";
        out << "9\t642015.137\t5724675.377\t392.485\ttest_j\t51.65510812\t11.05302905\n";
        out.close();
    }
    ~CsvInterfaceTest()
    {
        // Remove the temporary file so repeated runs start clean.
        std::remove(_file_name.c_str());
    }
protected:
    int _result;            // return value of the most recent CsvInterface call
    std::string _file_name; // path of the temporary csv file
};
/// Reading 3D points
TEST_F(CsvInterfaceTest, SimpleReadPoints)
{
    // Read once without column names (positional) and once selecting the
    // x/y/z columns explicitly; both must succeed and yield all ten rows.
    std::vector<GeoLib::Point*> points;
    std::vector<GeoLib::Point*> points2;
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points);
    ASSERT_EQ(0, _result);
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points2, "x", "y", "z");
    ASSERT_EQ(0, _result);
    ASSERT_TRUE(points.size() == 10);
    ASSERT_TRUE(points2.size() == 10);
    // NOTE(review): positional read appears to be shifted by one column
    // relative to the named read (points[i][1] == points2[i][0]) — presumably
    // the positional variant starts at the "id" column; confirm in
    // CsvInterface::readPoints.
    for (std::size_t i=0; i<points.size(); ++i)
    {
        ASSERT_TRUE((*points[i])[1] == (*points2[i])[0]);
        ASSERT_TRUE((*points[i])[2] == (*points2[i])[1]);
    }
    for (auto p : points)
        delete p;
    for (auto p : points2)
        delete p;
}
/// Dealing with data types that cannot be converted
TEST_F(CsvInterfaceTest, StringInPointColumn)
{
    // Requesting the textual "name" column as the third coordinate must fail
    // for every one of the ten data rows and produce no points at all.
    std::vector<GeoLib::Point*> read_points;
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', read_points, "x", "y", "name");
    ASSERT_EQ(10, _result);
    ASSERT_TRUE(read_points.empty());
}
/// Dealing with not existing columns
TEST_F(CsvInterfaceTest, WrongColumnName)
{
    // A non-existing column name must make readPoints fail with -1 and leave
    // the output vector empty, regardless of which coordinate it names.
    std::vector<GeoLib::Point*> points;
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points, "x", "y", "wrong_column_name");
    ASSERT_EQ(-1, _result);
    ASSERT_TRUE(points.empty());
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points, "wrong_column_name", "y", "id");
    ASSERT_EQ(-1, _result);
    ASSERT_TRUE(points.empty());
}
/// Dealing with missing values
TEST_F(CsvInterfaceTest, MissingValues)
{
    // Three rows lack a value1 and/or value_two entry, so three rows fail
    // and seven points remain.
    std::vector<GeoLib::Point*> points;
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points, "z", "value1", "value_two");
    ASSERT_EQ(3, _result);
    ASSERT_EQ(7, points.size());
    ASSERT_NEAR(437.539, (*points[4])[2], std::numeric_limits<double>::epsilon());
    for (auto p : points)
        delete p;
}
/// Reading 2D points
TEST_F(CsvInterfaceTest, Points2D)
{
    // When only x and y are requested, the z coordinate of every point must
    // default to zero.
    std::vector<GeoLib::Point*> points;
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points, "x", "y");
    ASSERT_EQ(0, _result);
    ASSERT_EQ(10, points.size());
    for (std::size_t i=0; i<points.size(); ++i)
        ASSERT_NEAR(0, (*points[i])[2], std::numeric_limits<double>::epsilon());
    for (auto p : points)
        delete p;
}
/// Dealing with non-sequential column order
TEST_F(CsvInterfaceTest, CoordinateOrder)
{
    // Read the same columns in three different orders and verify that each
    // value ends up in the coordinate slot matching its requested position.
    std::vector<GeoLib::Point*> points1;
    std::vector<GeoLib::Point*> points2;
    std::vector<GeoLib::Point*> points3;
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points1, "id", "y", "z");
    ASSERT_EQ(0, _result);
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points2, "id", "z", "y");
    ASSERT_EQ(0, _result);
    _result = GeoLib::IO::CsvInterface::readPoints(_file_name, '\t', points3, "y", "id", "z");
    ASSERT_EQ(0, _result);
    ASSERT_EQ(10, points1.size());
    ASSERT_EQ(10, points2.size());
    ASSERT_EQ(10, points3.size());
    for (std::size_t i=0; i<points1.size(); ++i)
    {
        // points1 = (id, y, z); points2 = (id, z, y); points3 = (y, id, z)
        ASSERT_EQ((*points1[i])[1], (*points2[i])[2]);
        ASSERT_EQ((*points1[i])[2], (*points2[i])[1]);
        ASSERT_EQ((*points3[i])[0], (*points2[i])[2]);
        ASSERT_EQ((*points3[i])[1], (*points2[i])[0]);
        ASSERT_EQ((*points1[i])[0], (*points3[i])[1]);
        ASSERT_EQ((*points1[i])[1], (*points3[i])[0]);
    }
    for (auto p : points1)
        delete p;
    for (auto p : points2)
        delete p;
    for (auto p : points3)
        delete p;
}
/// Getting single columns
TEST_F(CsvInterfaceTest, GetColumn)
{
    // The "name" column is fully populated: all ten entries are read.
    std::vector<std::string> names;
    _result = GeoLib::IO::CsvInterface::readColumn<std::string>(_file_name, '\t', names, "name");
    ASSERT_EQ(0, _result);
    ASSERT_EQ(10, names.size());
    // "value_two" is missing in two rows: two errors, eight values.
    std::vector<double> values;
    _result = GeoLib::IO::CsvInterface::readColumn<double>(_file_name, '\t', values, "value_two");
    ASSERT_EQ(2, _result);
    ASSERT_EQ(8, values.size());
}
/// Dealing with non-existing column
TEST_F(CsvInterfaceTest, NonExistingColumn)
{
    // "value2" is not a header name (the actual column is "value_two"), so
    // the reader must report failure and leave the output untouched.
    std::vector<double> read_values;
    _result = GeoLib::IO::CsvInterface::readColumn<double>(_file_name, '\t', read_values, "value2");
    ASSERT_EQ(-1, _result);
    ASSERT_TRUE(read_values.empty());
}
/// Dealing with wrong data type
TEST_F(CsvInterfaceTest, WrongDataType)
{
    // Reading the textual "name" column as double fails for all ten rows.
    std::vector<double> values;
    _result = GeoLib::IO::CsvInterface::readColumn<double>(_file_name, '\t', values, "name");
    ASSERT_EQ(10, _result);
    ASSERT_TRUE(values.empty());
    // Reading numeric "value1" as strings succeeds where a value exists:
    // two rows have no entry, eight are read.
    std::vector<std::string> names;
    _result = GeoLib::IO::CsvInterface::readColumn<std::string>(_file_name, '\t', names, "value1");
    ASSERT_EQ(2, _result);
    ASSERT_EQ(8, names.size());
}
|
#if _DEBUG
// Debug-only demonstration: dump the raw bytes of the test string to afxDump
// via CDumpContext::HexDump, 20 bytes per line, prefixing each line with ".".
TCHAR test[] = _T("This is a test of CDumpContext::HexDump\n");
afxDump.HexDump(_T("."), (BYTE*)test, sizeof(test), 20);
#endif
|
/*
* The Apache Software License, Version 1.1
*
* Copyright (c) 1999-2000 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Xerces" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache\@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation, and was
* originally based on software copyright (c) 1999, International
* Business Machines, Inc., http://www.ibm.com . For more information
* on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*/
/*
* $Log: Win32TransService.hpp,v $
* Revision 1.1 2003/11/12 01:58:42 AnJingBin
* *** empty log message ***
*
* Revision 1.1 2003/10/23 20:58:27 AnJingBin
* *** empty log message ***
*
* Revision 1.1.1.1 2002/02/01 22:22:37 peiyongz
* sane_include
*
* Revision 1.10 2000/05/09 00:22:45 andyh
* Memory Cleanup. XMLPlatformUtils::Terminate() deletes all lazily
* allocated memory; memory leak checking tools will no longer report
* that leaks exist. (DOM GetElementsByTagID temporarily removed
* as part of this.)
*
* Revision 1.9 2000/03/18 00:00:04 roddey
* Initial updates for two way transcoding support
*
* Revision 1.8 2000/03/07 23:45:36 roddey
* First cut for additions to Win32 xcode. Based very loosely on a
* prototype from Eric Ulevik.
*
* Revision 1.7 2000/03/02 19:55:36 roddey
* This checkin includes many changes done while waiting for the
* 1.1.0 code to be finished. I can't list them all here, but a list is
* available elsewhere.
*
* Revision 1.6 2000/02/06 07:48:34 rahulj
* Year 2K copyright swat.
*
* Revision 1.5 2000/01/25 22:49:58 roddey
* Moved the supportsSrcOfs() method from the individual transcoder to the
* transcoding service, where it should have been to begin with.
*
* Revision 1.4 2000/01/25 19:19:09 roddey
* Simple addition of a getId() method to the xcode and netacess abstractions to
* allow each impl to give back an id string.
*
* Revision 1.3 1999/12/18 00:22:33 roddey
* Changes to support the new, completely orthagonal, transcoder architecture.
*
* Revision 1.2 1999/12/15 19:44:02 roddey
* Now implements the new transcoding abstractions, with separate interface
* classes for XML transcoders and local code page transcoders.
*
* Revision 1.1.1.1 1999/11/09 01:06:06 twl
* Initial checkin
*
* Revision 1.2 1999/11/08 20:45:35 rahul
* Swat for adding in Product name and CVS comment log variable.
*
*/
#ifndef WIN32TRANSSERVICE_HPP
#define WIN32TRANSSERVICE_HPP
#include <xercesc/util/TransService.hpp>
#include <xercesc/util/RefHashTableOf.hpp>
#include <windows.h>
class CPMapEntry;
//---------------------------------------------------------------------------
//
// class Win32TransService
//
//---------------------------------------------------------------------------
// Transcoding service implementation for Win32. Implements the string
// comparison/classification primitives of XMLTransService and acts as a
// factory for code-page based transcoders. Encoding-name-to-code-page
// resolution is backed by the fCPMap hash table (see below).
class XMLUTIL_EXPORT Win32TransService : public XMLTransService
{
public :
    // -----------------------------------------------------------------------
    //  Constructors and Destructor
    // -----------------------------------------------------------------------
    Win32TransService();
    virtual ~Win32TransService();
    // -----------------------------------------------------------------------
    //  Implementation of the virtual transcoding service API
    // -----------------------------------------------------------------------
    // Case-insensitive comparison of two null-terminated XMLCh strings.
    virtual int compareIString
    (
        const XMLCh* const comp1
        , const XMLCh* const comp2
    );
    // Case-insensitive comparison of at most maxChars characters.
    virtual int compareNIString
    (
        const XMLCh* const comp1
        , const XMLCh* const comp2
        , const unsigned int maxChars
    );
    virtual const XMLCh* getId() const;
    virtual bool isSpace(const XMLCh toCheck) const;
    // Creates the transcoder used for the local code page.
    virtual XMLLCPTranscoder* makeNewLCPTranscoder();
    virtual bool supportsSrcOfs() const;
    virtual void upperCase(XMLCh* const toUpperCase) const;
protected :
    // -----------------------------------------------------------------------
    //  Protected virtual methods, implemented in Win32TransService2.cpp
    // -----------------------------------------------------------------------
    virtual XMLTranscoder* makeNewXMLTranscoder
    (
        const XMLCh* const encodingName
        , XMLTransService::Codes& resValue
        , const unsigned int blockSize
    );
private :
    // -----------------------------------------------------------------------
    //  Unimplemented constructors and operators
    // -----------------------------------------------------------------------
    Win32TransService(const Win32TransService&);
    void operator=(const Win32TransService&);
    //  This is a hash table of entries which map encoding names to their
    //  Windows specific code pages. The code page allows us to create
    //  transcoders for those encodings. The encoding names come from XML
    //  files.
    //
    //  This map is shared unsynchronized among all threads of the process,
    //  which is cool since it will be read only once its initialized.
    // Helper that checks whether a registry encoding key is an alias entry,
    // optionally copying the alias target into aliasBuf.
    static bool isAlias(const HKEY encodingKey
    , char* const aliasBuf = 0
    , const unsigned int nameBufSz = 0);
    RefHashTableOf<CPMapEntry> *fCPMap;
};
//---------------------------------------------------------------------------
//
// class Win32Transcoder
//
//---------------------------------------------------------------------------
// Per-encoding transcoder backed by a pair of Windows code pages (one
// standard Windows code page plus an Internet Explorer variant; see the
// fIECP/fWinCP member comments below).
class XMLUTIL_EXPORT Win32Transcoder : public XMLTranscoder
{
public :
    // -----------------------------------------------------------------------
    //  Constructors and Destructor
    // -----------------------------------------------------------------------
    Win32Transcoder
    (
        const XMLCh* const encodingName
        , const unsigned int winCP
        , const unsigned int ieCP
        , const unsigned int blockSize);
    ~Win32Transcoder();
    // -----------------------------------------------------------------------
    //  Implementation of the virtual transcoder interface
    // -----------------------------------------------------------------------
    // Decodes up to srcCount external bytes into at most maxChars XMLCh
    // units; reports consumed bytes and per-character source byte sizes.
    virtual unsigned int transcodeFrom
    (
        const XMLByte* const srcData
        , const unsigned int srcCount
        , XMLCh* const toFill
        , const unsigned int maxChars
        , unsigned int& bytesEaten
        , unsigned char* const charSizes
    );
    // Encodes up to srcCount XMLCh units into at most maxBytes external
    // bytes; `options` controls handling of unrepresentable characters.
    virtual unsigned int transcodeTo
    (
        const XMLCh* const srcData
        , const unsigned int srcCount
        , XMLByte* const toFill
        , const unsigned int maxBytes
        , unsigned int& charsEaten
        , const UnRepOpts options
    );
    // Returns whether the code point can be represented in this encoding.
    virtual bool canTranscodeTo
    (
        const unsigned int toCheck
    ) const;
private :
    // -----------------------------------------------------------------------
    //  Unimplemented constructors and operators
    // -----------------------------------------------------------------------
    Win32Transcoder(const Win32Transcoder&);
    void operator=(const Win32Transcoder&);
    // -----------------------------------------------------------------------
    //  Private data members
    //
    //  fIECP
    //      This is the internet explorer code page for this encoding.
    //
    //  fWinCP
    //      This is the windows code page for this encoding.
    // -----------------------------------------------------------------------
    unsigned int fIECP;
    unsigned int fWinCP;
};
//---------------------------------------------------------------------------
//
// class Win32LCPTranscoder
//
//---------------------------------------------------------------------------
// Local-code-page transcoder: converts between XMLCh strings and char
// strings in the process's default Windows code page.
class XMLUTIL_EXPORT Win32LCPTranscoder : public XMLLCPTranscoder
{
public :
    // -----------------------------------------------------------------------
    //  Constructors and Destructor
    // -----------------------------------------------------------------------
    Win32LCPTranscoder();
    ~Win32LCPTranscoder();
    // -----------------------------------------------------------------------
    //  Implementation of the virtual transcoder interface
    // -----------------------------------------------------------------------
    // Size (in output units) required to transcode the given string.
    virtual unsigned int calcRequiredSize(const char* const srcText);
    virtual unsigned int calcRequiredSize(const XMLCh* const srcText);
    // Allocating conversions; the caller owns the returned buffer.
    virtual char* transcode(const XMLCh* const toTranscode);
    virtual XMLCh* transcode(const char* const toTranscode);
    // Non-allocating conversions into a caller-supplied buffer of at most
    // maxChars units; return false on failure.
    virtual bool transcode
    (
        const char* const toTranscode
        , XMLCh* const toFill
        , const unsigned int maxChars
    );
    virtual bool transcode
    (
        const XMLCh* const toTranscode
        , char* const toFill
        , const unsigned int maxChars
    );
private :
    // -----------------------------------------------------------------------
    //  Unimplemented constructors and operators
    // -----------------------------------------------------------------------
    Win32LCPTranscoder(const Win32LCPTranscoder&);
    void operator=(const Win32LCPTranscoder&);
};
#endif
|
#include "performance_monitor.h"
#include <cgv/utils/file.h>
#include <stack>
#include <stdio.h>
namespace cgv {
namespace render {
/// return the frame currently being recorded; it is always the last entry of the measurement buffer
performance_monitor::frame_data& performance_monitor::current_frame()
{
	return data.back();
}
/// append a single measurement to the frame currently being recorded
void performance_monitor::add_measurement(const performance_measurement& pm)
{
	current_frame().push_back(pm);
}
/// initialize monitoring state, default placement and the default bar configuration
performance_monitor::performance_monitor() : plot_color(0.3f,1,1)
{
	// fps < 0 marks "no frame measured yet" (see finish_frame)
	fps = -1;
	fps_alpha = 0.1;
	time_scale = 60;
	enabled = true;
	frame_finished = true;
	// default on-screen placement in pixel coordinates
	placement.ref_min_pnt().set(10,10);
	placement.ref_max_pnt().set(310,110);
	nr_display_cycles = 2;
	bar_line_width = 5;
	frame_id = 0;
	init_tasks();
	// default bar shows maximum, current and minimum timings
	bar_config.push_back(PMB_MAX);
	bar_config.push_back(PMB_CUR);
	bar_config.push_back(PMB_MIN);
}
/// enable performance monitoring
void performance_monitor::enable()
{
	// all measurement entry points early-out while disabled
	enabled = true;
}
/// disable performance monitoring
void performance_monitor::disable()
{
	// turns all start/finish calls into no-ops
	enabled = false;
}
/// set the file into which finish_frame logs measurements; an empty name disables logging
void performance_monitor::set_file_name(const std::string& _file_name)
{
	file_name = _file_name;
}
/// initialize the list of tasks to one for the frame and one for each default rendering pass
/// initialize the list of tasks to one for the frame and one for each default rendering pass
void performance_monitor::init_tasks()
{
	tasks.clear();
	// task 0 is the whole frame; the remaining entries correspond to the
	// default rendering passes, each with its own plot color
	add_task("frame", Col(0.5f,0.5f,0.5f));
	add_task("main", Col(0.8f,0.2f,0.2f));
	add_task("stereo", Col(0.2f,0.2f,0.8f));
	add_task("shadow_map", Col(0.1f, 0.1f, 0.1f));
	add_task("shadow_volume", Col(0.2f, 0.2f, 0.2f));
	add_task("opaque_surface", Col(1,1,0));
	add_task("transparent_surfaces", Col(0.9f,0.7f,0.5f));
	add_task("pick", Col(0, 0, 1));
	add_task("user", Col(0, 1, 0));
}
/// removes all items of the bar config and hides the bar
void performance_monitor::clear_bar()
{
	// drop all configured bar items
	bar_config.clear();
}
/// add a bar item to the bar config
/// add a bar item to the bar config
void performance_monitor::add_bar_item(PerformanceMonitoringBar item)
{
	bar_config.push_back(item);
}
/// place the performance monitor on screen in pixel coordinates
/// place the performance monitor on screen in pixel coordinates
void performance_monitor::set_placement(const Rec& rectangle)
{
	placement = rectangle;
}
/// set the number of display cycles to by drawn for the performance monitor
/// set the number of display cycles to be drawn for the performance monitor
void performance_monitor::set_nr_display_cycles(unsigned _nr_cycles)
{
	nr_display_cycles = _nr_cycles;
}
/// add a new task to be monitored and return its index
/// register a task with the given name and plot color and return its index
int performance_monitor::add_task(const std::string& name, const cgv::media::color<float>& col)
{
	tasks.push_back(performance_task(name, col));
	return (int)tasks.size() - 1;
}
/// start performance measurement of a new frame
/// start performance measurement of a new frame
void performance_monitor::start_frame()
{
	if (!enabled)
		return;
	// implicitly finish the previous frame if the user did not
	if (!frame_finished)
		finish_frame();
	// keep the frame buffer bounded
	while (data.size() >= get_buffer_size())
		data.pop_front();
	data.push_back(frame_data());
	// restart the stop watch so all measurements are relative to frame start
	watch.restart();
	// record the frame task (id 0) as started
	performance_measurement pm(watch.get_elapsed_time(), 0, true);
	add_measurement(pm);
	frame_finished = false;
	++frame_id;
}
/// record the start of the task with the given id within the current frame
/// record the start time of the task with the given id within the current frame
void performance_monitor::start_task(int task_id)
{
	if (!enabled)
		return;
	performance_measurement pm(watch.get_elapsed_time(), task_id, true);
	add_measurement(pm);
}
/// record the finish time of the task with the given id within the current frame
void performance_monitor::finish_task(int task_id)
{
	if (!enabled)
		return;
	performance_measurement pm(watch.get_elapsed_time(), task_id, false);
	add_measurement(pm);
}
/// finish measurement of a frame, if this is not called by hand, it is called automatically in the next call to start_frame.
void performance_monitor::finish_frame()
{
if (!enabled)
return;
const char* start_or_finish[] = { "start", "finish" };
performance_measurement pm(watch.get_elapsed_time(), 0, false);
add_measurement(pm);
double new_fps = 1.0 / (data.back().back().time - data.back().front().time);
if (fps < 0)
fps = new_fps;
else
fps = fps_alpha * new_fps + (1.0 - fps_alpha)*fps;
frame_finished = true;
if (file_name.empty())
return;
bool need_header = cgv::utils::file::exists(file_name);
FILE* fp = fopen(file_name.c_str(), "wa");
if (!fp)
return;
int i;
if (need_header) {
const frame_data& cf = current_frame();
for (i=0; i<(int)cf.size(); ++i) {
const performance_measurement& pm = cf[i];
fprintf(fp, i==0?"%s %s":",%s %s", start_or_finish[pm.start ? 0 : 1], tasks[pm.task_id].name.c_str());
}
fprintf(fp, "\n");
}
fprintf(fp, "%d", frame_id);
for (i=0; i<(int)current_frame().size(); ++i)
fprintf(fp, ", %f", current_frame()[i].time);
fprintf(fp, "\n");
fclose(fp);
}
/// derive one color per plotted line segment from the task nesting of the given frame
void performance_monitor::compute_colors(const frame_data& fdata)
{
	// two color entries (segment start/end vertex) per measurement, except
	// for the frame-closing measurement
	colors.resize(2*fdata.size()-2);
	// stack of currently nested task ids; task 0 (frame) is the implicit root
	std::stack<int> task_stack;
	task_stack.push(0);
	for (unsigned t=0; t<fdata.size()-1; ++t) {
		int task_id = fdata[t].task_id;
		if (fdata[t].start) {
			// a newly started task is drawn in its own color
			colors[2*t+1] = colors[2*t] = tasks[task_id].col;
			task_stack.push(task_id);
		}
		else {
			// after a task finishes, fall back to the enclosing task's color
			task_stack.pop();
			colors[2*t+1] = colors[2*t] = tasks[task_stack.top()].col;
		}
	}
}
/// derive one vertex pair per plotted line segment, mapping measurement times along the (dx,dy) direction starting at (x0,y0)
void performance_monitor::compute_positions(int x0, int y0, int dx, int dy, const frame_data& fdata)
{
	// two vertices per segment, matching the layout of compute_colors
	positions.resize(2*fdata.size()-2);
	double scale_x = time_scale*dx;
	double scale_y = time_scale*dy;
	int x = x0, y = y0;
	for (unsigned t=0; t < fdata.size()-1; ++t) {
		positions[2*t].set(x,y);
		// each segment ends at the pixel position of the next time stamp
		x = x0 + (int)(fdata[t+1].time*scale_x+0.5);
		y = y0 + (int)(fdata[t+1].time*scale_y+0.5);
		positions[2*t+1].set(x,y);
	}
}
}
}
|
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Electronic Arts Inc. All rights reserved.
///////////////////////////////////////////////////////////////////////////////
#include <EAStdC/internal/Config.h>
#include <EAStdC/Int128_t.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include <math.h>
#include <EAAssert/eaassert.h>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4723) // potential divide by 0
#pragma warning(disable: 4365) // 'argument' : conversion from 'int' to 'uint32_t', signed/unsigned mismatch
#pragma warning(disable: 4146) // unary minus operator applied to unsigned type, result still unsigned
#endif
namespace EA
{
namespace StdC
{
///////////////////////////////////////////////////////////////////////////////
// Constants
// EASTDC_INT128_MIN is equal to: -170141183460469231731687303715884105728;
const int128_t EASTDC_INT128_MIN(0x00000000, 0x00000000, 0x00000000, 0x80000000);
// EASTDC_INT128_MAX is equal to: 170141183460469231731687303715884105727;
const int128_t EASTDC_INT128_MAX(0xffffffff, 0xffffffff, 0xffffffff, 0x7fffffff);
// EASTDC_UINT128_MIN is equal to: 0;
const uint128_t EASTDC_UINT128_MIN(0x00000000, 0x00000000, 0x00000000, 0x00000000);
// EASTDC_UINT128_MAX is equal to: 340282366920938463463374607431768211455;
const uint128_t EASTDC_UINT128_MAX(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff);
///////////////////////////////////////////////////////////////////////////////
// int128_t
///////////////////////////////////////////////////////////////////////////////
// Default constructor: initialize to zero. mPart0 is always the least
// significant part in both storage configurations.
int128_t_base::int128_t_base()
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = 0;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = 0;
	mPart0 = 0;
#endif
}
// Construct from four 32 bit parts; nPart0 is least significant, nPart3 most.
int128_t_base::int128_t_base(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3)
{
#if EA_INT128_USE_INT64
	mPart1 = ((uint64_t)nPart3 << 32) + nPart2;
	mPart0 = ((uint64_t)nPart1 << 32) + nPart0;
#else
	mPart3 = nPart3;
	mPart2 = nPart2;
	mPart1 = nPart1;
	mPart0 = nPart0;
#endif
}
// Construct from two 64 bit parts; nPart0 is least significant.
int128_t_base::int128_t_base(uint64_t nPart0, uint64_t nPart1)
{
#if EA_INT128_USE_INT64
	mPart1 = nPart1;
	mPart0 = nPart0;
#else
	mPart3 = (uint32_t)(nPart1 >> 32);
	mPart2 = (uint32_t) nPart1;
	mPart1 = (uint32_t)(nPart0 >> 32);
	mPart0 = (uint32_t) nPart0;
#endif
}
// Construct from an unsigned 8 bit value (zero-extended to 128 bits).
int128_t_base::int128_t_base(uint8_t value)
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = value;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = 0;
	mPart0 = value;
#endif
}
// Construct from an unsigned 16 bit value (zero-extended to 128 bits).
int128_t_base::int128_t_base(uint16_t value)
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = value;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = 0;
	mPart0 = value;
#endif
}
// Construct from an unsigned 32 bit value (zero-extended to 128 bits).
int128_t_base::int128_t_base(uint32_t value)
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = value;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = 0;
	mPart0 = value;
#endif
}
// Optional overload for a platform-specific unsigned integer type (e.g. where
// size_t/unsigned long differs from the fixed-width types above).
#if defined(INT128_UINT_TYPE)
int128_t_base::int128_t_base(INT128_UINT_TYPE value)
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = value;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = 0;
	mPart0 = value;
#endif
}
#endif
// Construct from an unsigned 64 bit value (zero-extended to 128 bits).
int128_t_base::int128_t_base(uint64_t value)
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = value;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = (uint32_t) ((value >> 32) & 0xffffffff);
	mPart0 = (uint32_t) ((value >> 0) & 0xffffffff);
#endif
}
// Copy constructor: member-wise copy of the storage parts.
int128_t_base::int128_t_base(const int128_t_base& value)
{
#if EA_INT128_USE_INT64
	mPart1 = value.mPart1;
	mPart0 = value.mPart0;
#else
	mPart3 = value.mPart3;
	mPart2 = value.mPart2;
	mPart1 = value.mPart1;
	mPart0 = value.mPart0;
#endif
}
// Copy assignment: member-wise copy of the storage parts.
// Self-assignment is harmless here, so it is not special-cased.
int128_t_base& int128_t_base::operator=(const int128_t_base& value)
{
#if EA_INT128_USE_INT64
	mPart1 = value.mPart1;
	mPart0 = value.mPart0;
#else
	mPart3 = value.mPart3;
	mPart2 = value.mPart2;
	mPart1 = value.mPart1;
	mPart0 = value.mPart0;
#endif
	return *this;
}
///////////////////////////////////////////////////////////////////////////////
// operatorPlus
//
// Returns: (value1 + value2) into result.
// The output 'result' *is* allowed to point to the same memory as one of the inputs.
// To consider: Fix 'defect' of this function whereby it doesn't implement overflow wraparound.
//
// 128 bit addition with modular (wraparound) semantics; see banner comment above.
void int128_t_base::operatorPlus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
{
#if defined(EA_ASM_STYLE_INTEL) && defined(EA_PROCESSOR_X86)
	// x86: ripple the hardware carry flag through four 32 bit adds (add, then adc x3).
	__asm
	{
		mov ebx, value1
		mov ecx, value2
		mov edx, result
		mov eax, [ebx]
		add eax, [ecx] ;(nCarry, tmp) = value1.mPart0 + value2.mPart0
		mov [edx], eax ;result.mPart0 = value1.mPart0 + value2.mPart0
		mov eax, [ebx+4]
		adc eax, [ecx+4] ;(nCarry, tmp) = value1.mPart1 + value2.mPart1
		mov [edx+4], eax ;result.mPart1 = value1.mPart1 + value2.mPart1 + nCarry
		mov eax, [ebx+8]
		adc eax, [ecx+8] ;(nCarry, tmp) = value1.mPart2 + value2.mPart2
		mov [edx+8], eax ;result.mPart2 = value1.mPart2 + value2.mPart2 + nCarry
		mov eax, [ebx+12]
		adc eax, [ecx+12] ;(nCarry, tmp) = value1.mPart3 + value2.mPart3
		mov [edx+12], eax ;result.mPart3 = value1.mPart3 + value2.mPart3 + nCarry
	}
#elif EA_INT128_USE_INT64
	// Unsigned wraparound detection: after t = a + b (mod 2^64), both
	// (t < a) and (t < b) hold exactly when the addition wrapped.
	uint64_t t = value1.mPart0 + value2.mPart0;
	uint64_t nCarry = (t < value1.mPart0) && (t < value2.mPart0);
	result.mPart0 = t;
	result.mPart1 = value1.mPart1 + value2.mPart1 + nCarry;
#else
	// Add part by part in 64 bit intermediates; bit 32 of t is the carry out.
	uint64_t t = ((uint64_t)value1.mPart0) + ((uint64_t)value2.mPart0);
	uint32_t nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart0 = (uint32_t) t;
	t = ((uint64_t)value1.mPart1) + ((uint64_t)value2.mPart1) + nCarry;
	nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart1 = (uint32_t) t;
	t = ((uint64_t)value1.mPart2) + ((uint64_t)value2.mPart2) + nCarry;
	nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart2 = (uint32_t) t;
	t = ((uint64_t)value1.mPart3) + ((uint64_t)value2.mPart3) + nCarry;
	// carry out of the top part is intentionally discarded (wraparound)
	//nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart3 = (uint32_t) t;
#endif
}
///////////////////////////////////////////////////////////////////////////////
// operatorMinus
//
// Returns: (value1 - value2) into result.
// The output 'result' *is* allowed to point to the same memory as one of the inputs.
// To consider: Fix 'defect' of this function whereby it doesn't implement overflow wraparound.
//
// 128 bit subtraction with modular (wraparound) semantics; see banner comment above.
void int128_t_base::operatorMinus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
{
#if EA_INT128_USE_INT64
	// A borrow occurred exactly when the minuend part is smaller than the
	// subtrahend part (unsigned comparison before the wrapping subtraction).
	uint64_t t = (value1.mPart0 - value2.mPart0);
	uint64_t nCarry = (value1.mPart0 < value2.mPart0) ? 1 : 0;
	result.mPart0 = t;
	result.mPart1 = (value1.mPart1 - value2.mPart1) - nCarry;
#else
	// Subtract part by part in 64 bit intermediates; on underflow the
	// 64 bit difference wraps above 0xffffffff, which signals the borrow.
	uint64_t t = ((uint64_t)value1.mPart0) - ((uint64_t)value2.mPart0);
	uint32_t nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart0 = (uint32_t) t;
	t = (((uint64_t)value1.mPart1) - ((uint64_t)value2.mPart1)) - nCarry;
	nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart1 = (uint32_t) t;
	t = (((uint64_t)value1.mPart2) - ((uint64_t)value2.mPart2)) - nCarry;
	nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart2 = (uint32_t) t;
	t = (((uint64_t)value1.mPart3) - ((uint64_t)value2.mPart3)) - nCarry;
	// borrow out of the top part is intentionally discarded (wraparound)
	//nCarry = (uint32_t)((t > 0xffffffff) ? 1 : 0);
	result.mPart3 = (uint32_t) t;
#endif
}
///////////////////////////////////////////////////////////////////////////////
// operatorMul
//
// 32 bit systems:
// The way this works is like decimal multiplication by hand with a pencil and
// paper. The difference is that we work with blocks of 32 bits instead of blocks
// of ten. Here is a multiplication of 0x00000008000000040000000200000001 x
// the same value done like you do with pencil and paper:
//
// Part 3 2 1 0
// 00000008 00000004 00000002 00000001
// x 00000008 00000004 00000002 00000001
// -------------------------------------------
// | 00000008 00000004 00000002 00000001
// 00000010 | 00000008 00000004 00000002 (00000000)
// 00000020 00000010 | 00000008 00000004 (00000000)(00000000)
// + 00000040 00000020 00000010 | 00000008 (00000000)(00000000)(00000000)
// -------------------------------------------------------------------------
//
// That the numbers above have columns each with the same values is a coincidence
// of the choice of the two multiplying numbers and in reality numbers would
// likely be much more complicated. But the above is easy to show. Note that
// the numbers to the left of the column with 00000008 are outside the range
// of 128 bits. As a result, in our implementation below, we skip the steps that
// create these values, as they would just get lost anyway.
//
// 64 bit systems:
// This is how it would be able to work if we could get a 128 bit result from
// two 64 bit values. None of the 64 bit systems that we are currently working
// with have C language support for multiplying two 64 bit numbers and retrieving
// the 128 bit result. However, many 64 bit platforms have support at the asm
// level for doing such a thing.
// Part 1 Part 0
// 0000000000000002 0000000000000001
// x 0000000000000002 0000000000000001
// -------------------------------------------
// | 0000000000000002 0000000000000001
// + 0000000000000004 | 0000000000000002 (0000000000000000)
// -------------------------------------------------------------------------
//
// Schoolbook 128 bit multiplication from 32x32->64 bit partial products;
// see the banner comment above for the pencil-and-paper derivation.
// Each partial product vXY contributes at bit offset 32*(X+Y); products
// whose offset would be >= 128 bits are skipped since they cannot affect
// the (wrapping) 128 bit result.
void int128_t_base::operatorMul(const int128_t_base& a, const int128_t_base& b, int128_t_base& result)
{
	// To consider: Use compiler or OS-provided custom functionality here, such as
	// Windows UnsignedMultiply128 and GCC's built-in int128_t.
#if EA_INT128_USE_INT64
	#if defined(DISABLED_PLATFORM_WIN64)
		// To do: Implement x86-64 asm here.
	#else
		// Else we are stuck doing something less efficient. In this case we
		// fall back to doing 32 bit multiplies as with 32 bit platforms.
		result = (a.mPart0 & 0xffffffff) * (b.mPart0 & 0xffffffff);
		int128_t v01 = (a.mPart0 & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff);
		int128_t v02 = (a.mPart0 & 0xffffffff) * (b.mPart1 & 0xffffffff);
		int128_t v03 = (a.mPart0 & 0xffffffff) * ((b.mPart1 >> 32) & 0xffffffff);
		int128_t v10 = ((a.mPart0 >> 32) & 0xffffffff) * (b.mPart0 & 0xffffffff);
		int128_t v11 = ((a.mPart0 >> 32) & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff);
		int128_t v12 = ((a.mPart0 >> 32) & 0xffffffff) * (b.mPart1 & 0xffffffff);
		int128_t v20 = (a.mPart1 & 0xffffffff) * (b.mPart0 & 0xffffffff);
		int128_t v21 = (a.mPart1 & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff);
		int128_t v30 = ((a.mPart1 >> 32) & 0xffffffff) * (b.mPart0 & 0xffffffff);
		// Do row addition, shifting as needed.
		operatorPlus(result, v01 << 32, result);
		operatorPlus(result, v02 << 64, result);
		operatorPlus(result, v03 << 96, result);
		operatorPlus(result, v10 << 32, result);
		operatorPlus(result, v11 << 64, result);
		operatorPlus(result, v12 << 96, result);
		operatorPlus(result, v20 << 64, result);
		operatorPlus(result, v21 << 96, result);
		operatorPlus(result, v30 << 96, result);
	#endif
#else
	// Do part-by-part multiplication, skipping overflowing combinations.
	result = ((uint64_t)a.mPart0) * ((uint64_t)b.mPart0);
	uint128_t v01 = ((uint64_t)a.mPart0) * ((uint64_t)b.mPart1);
	uint128_t v02 = ((uint64_t)a.mPart0) * ((uint64_t)b.mPart2);
	uint128_t v03 = ((uint64_t)a.mPart0) * ((uint64_t)b.mPart3);
	uint128_t v10 = ((uint64_t)a.mPart1) * ((uint64_t)b.mPart0);
	uint128_t v11 = ((uint64_t)a.mPart1) * ((uint64_t)b.mPart1);
	uint128_t v12 = ((uint64_t)a.mPart1) * ((uint64_t)b.mPart2);
	uint128_t v20 = ((uint64_t)a.mPart2) * ((uint64_t)b.mPart0);
	uint128_t v21 = ((uint64_t)a.mPart2) * ((uint64_t)b.mPart1);
	uint128_t v30 = ((uint64_t)a.mPart3) * ((uint64_t)b.mPart0);
	// Do row addition, shifting as needed.
	operatorPlus(result, v01 << 32, result);
	operatorPlus(result, v02 << 64, result);
	operatorPlus(result, v03 << 96, result);
	operatorPlus(result, v10 << 32, result);
	operatorPlus(result, v11 << 64, result);
	operatorPlus(result, v12 << 96, result);
	operatorPlus(result, v20 << 64, result);
	operatorPlus(result, v21 << 96, result);
	operatorPlus(result, v30 << 96, result);
#endif
}
///////////////////////////////////////////////////////////////////////////////
// operatorShiftRight
//
// Returns: value >> nShift into result
// The output 'result' may *not* be the same as one of the inputs.
// With rightward shifts of negative numbers, shift in zero from the left side.
//
// Logical right shift of 'value' by nShift bits into 'result' (zeros shifted
// in from the left; negative nShift delegates to operatorShiftLeft).
// 'result' must not alias 'value': higher parts of 'result' are written
// before the corresponding parts of 'value' are re-read.
void int128_t_base::operatorShiftRight(const int128_t_base& value, int nShift, int128_t_base& result)
{
#if EA_INT128_USE_INT64
	if(nShift >= 0)
	{
		if(nShift < 64)
		{ // 0 - 63
			result.mPart1 = (value.mPart1 >> nShift);
			if(nShift == 0)
				// special-cased: (mPart1 << (64 - 0)) below would be an undefined shift count
				result.mPart0 = (value.mPart0 >> nShift);
			else
				result.mPart0 = (value.mPart0 >> nShift) | (value.mPart1 << (64 - nShift));
		}
		else
		{ // 64+
			// NOTE(review): for nShift >= 128 the shift count (nShift - 64)
			// is >= 64 and thus undefined here, while the 32 bit branch
			// below clamps that case to zero -- confirm callers never pass
			// 128+ in this configuration.
			result.mPart1 = 0;
			result.mPart0 = (value.mPart1 >> (nShift - 64));
		}
	}
	else // (nShift < 0)
		operatorShiftLeft(value, -nShift, result);
#else
	if(nShift >= 0)
	{
		if(nShift <= 32)
		{
			if(nShift == 32)
			{ // We can't use the code further below for 0-31 because 32 bit
			  // processors (e.g. Intel) often implement a shift of 32 as a no-op.
				result.mPart0 = value.mPart1;
				result.mPart1 = value.mPart2;
				result.mPart2 = value.mPart3;
				result.mPart3 = 0;
			}
			else
			{ // 0 - 31
				result.mPart3 = (value.mPart3 >> nShift);
				result.mPart2 = (value.mPart2 >> nShift) | (value.mPart3 << (32 - nShift));
				result.mPart1 = (value.mPart1 >> nShift) | (value.mPart2 << (32 - nShift));
				result.mPart0 = (value.mPart0 >> nShift) | (value.mPart1 << (32 - nShift));
			}
		}
		else if(nShift <= 64)
		{
			if(nShift == 64)
			{ // Exact multiple-of-32 shift handled by moving whole parts; see
			  // the no-op note for the nShift == 32 case above.
				result.mPart0 = value.mPart2;
				result.mPart1 = value.mPart3;
				result.mPart2 = 0;
				result.mPart3 = 0;
			}
			else
			{ // 33 - 63
				result.mPart3 = 0;
				result.mPart2 = (value.mPart3 >> (nShift - 32));
				result.mPart1 = (value.mPart2 >> (nShift - 32)) | (value.mPart3 << (64 - nShift));
				result.mPart0 = (value.mPart1 >> (nShift - 32)) | (value.mPart2 << (64 - nShift));
			}
		}
		else if(nShift <= 96)
		{
			if(nShift == 96)
			{ // Exact multiple-of-32 shift handled by moving whole parts; see
			  // the no-op note for the nShift == 32 case above.
				result.mPart0 = value.mPart3;
				result.mPart1 = 0;
				result.mPart2 = 0;
				result.mPart3 = 0;
			}
			else
			{ // 65 - 95
				result.mPart3 = 0;
				result.mPart2 = 0;
				result.mPart1 = (value.mPart3 >> (nShift - 64));
				result.mPart0 = (value.mPart2 >> (nShift - 64)) | (value.mPart3 << (96 - nShift));
			}
		}
		else if(nShift < 128)
		{ // 96 - 127
			result.mPart3 = 0;
			result.mPart2 = 0;
			result.mPart1 = 0;
			result.mPart0 = (value.mPart3 >> (nShift - 96));
		}
		else
		{ // 128+
			result.mPart3 = 0;
			result.mPart2 = 0;
			result.mPart1 = 0;
			result.mPart0 = 0;
		}
	}
	else // (nShift < 0)
		operatorShiftLeft(value, -nShift, result);
#endif
}
///////////////////////////////////////////////////////////////////////////////
// operatorShiftLeft
//
// Returns: value << nShift into result
// The output 'result' may *not* be the same as one of the inputs.
// Zeros are shifted in from the right side; a negative nShift performs a right shift.
//
// Left shift of 'value' by nShift bits into 'result' (zeros shifted in from
// the right; negative nShift delegates to operatorShiftRight).
// 'result' must not alias 'value': lower parts of 'result' are written
// before the corresponding parts of 'value' are re-read.
void int128_t_base::operatorShiftLeft(const int128_t_base& value, int nShift, int128_t_base& result)
{
#if EA_INT128_USE_INT64
	if(nShift >= 0)
	{
		if(nShift < 64)
		{
			if(nShift) // We need to have a special case because CPUs convert a shift by 64 to a no-op.
			{
				// 1 - 63
				result.mPart0 = (value.mPart0 << nShift);
				result.mPart1 = (value.mPart1 << nShift) | (value.mPart0 >> (64 - nShift));
			}
			else
			{
				// nShift == 0: plain copy, avoiding the undefined (>> 64) above
				result.mPart0 = value.mPart0;
				result.mPart1 = value.mPart1;
			}
		}
		else
		{ // 64+
			// NOTE(review): for nShift >= 128 the shift count (nShift - 64)
			// is >= 64 and thus undefined here, while the 32 bit branch
			// below clamps that case to zero -- confirm callers never pass
			// 128+ in this configuration.
			result.mPart0 = 0;
			result.mPart1 = (value.mPart0 << (nShift - 64));
		}
	}
	else // (nShift < 0)
		operatorShiftRight(value, -nShift, result);
#else
	if(nShift >= 0)
	{
		if(nShift <= 32)
		{
			if(nShift == 32)
			{ // We can't use the code further below for 32 because 32 bit
			  // processors (e.g. Intel) often implement a shift of 32 as a no-op.
				result.mPart0 = 0;
				result.mPart1 = value.mPart0;
				result.mPart2 = value.mPart1;
				result.mPart3 = value.mPart2;
			}
			else if(nShift)
			{ // 1 - 31
				result.mPart0 = (value.mPart0 << nShift);
				result.mPart1 = (value.mPart1 << nShift) | (value.mPart0 >> (32 - nShift));
				result.mPart2 = (value.mPart2 << nShift) | (value.mPart1 >> (32 - nShift));
				result.mPart3 = (value.mPart3 << nShift) | (value.mPart2 >> (32 - nShift));
			}
			else
			{
				// nShift == 0: plain copy
				result.mPart0 = value.mPart0;
				result.mPart1 = value.mPart1;
				result.mPart2 = value.mPart2;
				result.mPart3 = value.mPart3;
			}
		}
		else if(nShift <= 64)
		{
			if(nShift == 64)
			{ // Exact multiple-of-32 shift handled by moving whole parts; see
			  // the no-op note for the nShift == 32 case above.
				result.mPart0 = 0;
				result.mPart1 = 0;
				result.mPart2 = value.mPart0;
				result.mPart3 = value.mPart1;
			}
			else
			{ // 33 - 63
				result.mPart0 = 0;
				result.mPart1 = (value.mPart0 << (nShift - 32));
				result.mPart2 = (value.mPart1 << (nShift - 32)) | (value.mPart0 >> (64 - nShift));
				result.mPart3 = (value.mPart2 << (nShift - 32)) | (value.mPart1 >> (64 - nShift));
			}
		}
		else if(nShift <= 96)
		{
			if(nShift == 96)
			{ // Exact multiple-of-32 shift handled by moving whole parts; see
			  // the no-op note for the nShift == 32 case above.
				result.mPart0 = 0;
				result.mPart1 = 0;
				result.mPart2 = 0;
				result.mPart3 = value.mPart0;
			}
			else
			{ // 65 - 95
				result.mPart0 = 0;
				result.mPart1 = 0;
				result.mPart2 = (value.mPart0 << (nShift - 64));
				result.mPart3 = (value.mPart1 << (nShift - 64)) | (value.mPart0 >> (96 - nShift));
			}
		}
		else if(nShift < 128)
		{ // 96 - 127
			result.mPart0 = 0;
			result.mPart1 = 0;
			result.mPart2 = 0;
			result.mPart3 = (value.mPart0 << (nShift - 96));
		}
		else
		{ // 128+
			result.mPart3 = 0;
			result.mPart2 = 0;
			result.mPart1 = 0;
			result.mPart0 = 0;
		}
	}
	else // (nShift < 0)
		operatorShiftRight(value, -nShift, result);
#endif
}
// Logical negation: true iff the value is zero.
bool int128_t_base::operator!() const
{
	// Delegates to IsZero(), which performs the identical all-parts-zero
	// test in both storage configurations.
	return IsZero();
}
///////////////////////////////////////////////////////////////////////////////
// operatorXOR
//
// Returns: value1 ^ value2 into result
// The output 'result' may be the same as one the input.
//
// Bitwise XOR of value1 and value2 into result (aliasing is safe: each part
// is read before it is written).
void int128_t_base::operatorXOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
{
#if EA_INT128_USE_INT64
	result.mPart0 = (value1.mPart0 ^ value2.mPart0);
	result.mPart1 = (value1.mPart1 ^ value2.mPart1);
#else
	result.mPart0 = (value1.mPart0 ^ value2.mPart0);
	result.mPart1 = (value1.mPart1 ^ value2.mPart1);
	result.mPart2 = (value1.mPart2 ^ value2.mPart2);
	result.mPart3 = (value1.mPart3 ^ value2.mPart3);
#endif
}
///////////////////////////////////////////////////////////////////////////////
// operatorOR
//
// Returns: value1 | value2 into result
// The output 'result' may be the same as one the input.
//
// Bitwise OR of value1 and value2 into result (aliasing is safe: each part
// is read before it is written).
void int128_t_base::operatorOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
{
#if EA_INT128_USE_INT64
	result.mPart0 = (value1.mPart0 | value2.mPart0);
	result.mPart1 = (value1.mPart1 | value2.mPart1);
#else
	result.mPart0 = (value1.mPart0 | value2.mPart0);
	result.mPart1 = (value1.mPart1 | value2.mPart1);
	result.mPart2 = (value1.mPart2 | value2.mPart2);
	result.mPart3 = (value1.mPart3 | value2.mPart3);
#endif
}
///////////////////////////////////////////////////////////////////////////////
// operatorAND
//
// Returns: value1 & value2 into result
// The output 'result' may be the same as one the input.
//
// Bitwise AND of value1 and value2 into result (aliasing is safe: each part
// is read before it is written).
void int128_t_base::operatorAND(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
{
#if EA_INT128_USE_INT64
	result.mPart0 = (value1.mPart0 & value2.mPart0);
	result.mPart1 = (value1.mPart1 & value2.mPart1);
#else
	result.mPart0 = (value1.mPart0 & value2.mPart0);
	result.mPart1 = (value1.mPart1 & value2.mPart1);
	result.mPart2 = (value1.mPart2 & value2.mPart2);
	result.mPart3 = (value1.mPart3 & value2.mPart3);
#endif
}
bool int128_t_base::AsBool() const
{
#if EA_INT128_USE_INT64
return (mPart0 || mPart1);
#else
return (mPart0 || mPart1 || mPart2 || mPart3);
#endif
}
// Truncating conversion to the low 8 bits.
uint8_t int128_t_base::AsUint8() const
{
	// OK for EA_INT128_USE_INT64
	return (uint8_t) mPart0;
}
// Truncating conversion to the low 16 bits.
uint16_t int128_t_base::AsUint16() const
{
	// OK for EA_INT128_USE_INT64
	return (uint16_t) mPart0;
}
// Truncating conversion to the low 32 bits.
uint32_t int128_t_base::AsUint32() const
{
	// OK for EA_INT128_USE_INT64
	return (uint32_t) mPart0;
}
// Truncating conversion to the low 64 bits.
uint64_t int128_t_base::AsUint64() const
{
#if EA_INT128_USE_INT64
	return mPart0;
#else
	return (((uint64_t) mPart1) << 32) + mPart0;
#endif
}
// Return bit nIndex (0 = least significant, valid range [0,127]) as 0 or 1.
// Out-of-range indices return 0.
int int128_t_base::GetBit(int nIndex) const
{
	// EA_ASSERT((nIndex >= 0) && (nIndex < 128));
#if EA_INT128_USE_INT64
	// mask for the bit's position within its 64 bit part
	const uint64_t nBitMask = ((uint64_t)1 << (nIndex % 64));
	if(nIndex < 64)
		return ((mPart0 & nBitMask) ? 1 : 0);
	else if(nIndex < 128)
		return ((mPart1 & nBitMask) ? 1 : 0);
	return 0;
#else
	// mask for the bit's position within its 32 bit part
	const uint32_t nBitMask = ((uint32_t)1 << (nIndex % 32));
	if(nIndex < 32)
		return ((mPart0 & nBitMask) ? 1 : 0);
	else if(nIndex < 64)
		return ((mPart1 & nBitMask) ? 1 : 0);
	else if(nIndex < 96)
		return ((mPart2 & nBitMask) ? 1 : 0);
	else if(nIndex < 128)
		return ((mPart3 & nBitMask) ? 1 : 0);
	return 0;
#endif
}
// Set bit nIndex (0 = least significant, valid range [0,127]) to 1 if
// 'value' is nonzero, else clear it. Out-of-range indices are ignored.
void int128_t_base::SetBit(int nIndex, int value)
{
	// EA_ASSERT((nIndex >= 0) && (nIndex < 128));
#if EA_INT128_USE_INT64
	const uint64_t nBitMask = ((uint64_t)1 << (nIndex % 64));
	if(nIndex < 64)
	{
		if(value)
			mPart0 = mPart0 | nBitMask;
		else
			mPart0 = mPart0 & ~nBitMask;
	}
	else if(nIndex < 128)
	{
		if(value)
			mPart1 = mPart1 | nBitMask;
		else
			mPart1 = mPart1 & ~nBitMask;
	}
#else
	const uint32_t nBitMask = ((uint32_t)1 << (nIndex % 32));
	if(nIndex < 32)
	{
		if(value)
			mPart0 = mPart0 | nBitMask;
		else
			mPart0 = mPart0 & ~nBitMask;
	}
	else if(nIndex < 64)
	{
		if(value)
			mPart1 = mPart1 | nBitMask;
		else
			mPart1 = mPart1 & ~nBitMask;
	}
	else if(nIndex < 96)
	{
		if(value)
			mPart2 = mPart2 | nBitMask;
		else
			mPart2 = mPart2 & ~nBitMask;
	}
	else if(nIndex < 128)
	{
		if(value)
			mPart3 = mPart3 | nBitMask;
		else
			mPart3 = mPart3 & ~nBitMask;
	}
#endif
}
// part is in the range of [0,15]
// Return byte nIndex of the value (0 = least significant byte, range [0,15]).
// Indices outside [0,15] fall through the switch and read as 0.
uint8_t int128_t_base::GetPartUint8(int nIndex) const
{
#if EA_INT128_USE_INT64
	uint64_t value(0);
	switch (nIndex / 8)
	{
		case 0:
			value = mPart0;
			break;
		case 1:
			value = mPart1;
			break;
	}
	// byte offset within the selected 64 bit part, converted to a bit shift
	nIndex = ((nIndex % 8) * 8);
	return (uint8_t)((value & ((uint64_t)0xff << nIndex)) >> nIndex);
#else
	uint32_t value(0);
	switch (nIndex / 4)
	{
		case 0:
			value = mPart0;
			break;
		case 1:
			value = mPart1;
			break;
		case 2:
			value = mPart2;
			break;
		case 3:
			value = mPart3;
			break;
	}
	// byte offset within the selected 32 bit part, converted to a bit shift
	nIndex = ((nIndex % 4) * 8);
	return (uint8_t)(((value & ((uint32_t)0xff << nIndex))) >> nIndex);
#endif
}
// part is in the range of [0,7]
// Return 16 bit word nIndex of the value (0 = least significant, range [0,7]).
// Indices outside [0,7] fall through the switch and read as 0.
uint16_t int128_t_base::GetPartUint16(int nIndex) const
{
#if EA_INT128_USE_INT64
	uint64_t value(0);
	switch (nIndex / 4)
	{
		case 0:
			value = mPart0;
			break;
		case 1:
			value = mPart1;
			break;
	}
	// word offset within the selected 64 bit part, converted to a bit shift
	nIndex = ((nIndex % 4) * 16);
	return (uint16_t)(((value & ((uint64_t)0xffff << nIndex))) >> nIndex);
#else
	uint32_t value(0);
	switch (nIndex / 2)
	{
		case 0:
			value = mPart0;
			break;
		case 1:
			value = mPart1;
			break;
		case 2:
			value = mPart2;
			break;
		case 3:
			value = mPart3;
			break;
	}
	// odd indices are the high half of the 32 bit part, even indices the low half
	if(nIndex % 2)
		return (uint16_t)(value >> 16);
	else
		return (uint16_t)(value);
#endif
}
// part is in the range of [0,3]
// Return 32 bit word nIndex of the value (0 = least significant, range [0,3]).
// Out-of-range indices return 0.
uint32_t int128_t_base::GetPartUint32(int nIndex) const
{
#if EA_INT128_USE_INT64
	switch (nIndex)
	{
		case 0:
			return (uint32_t) mPart0;
		case 1:
			return (uint32_t)(mPart0 >> 32);
		case 2:
			return (uint32_t) mPart1;
		case 3:
			return (uint32_t)(mPart1 >> 32);
	}
	return 0;
#else
	switch (nIndex)
	{
		case 0:
			return mPart0;
		case 1:
			return mPart1;
		case 2:
			return mPart2;
		case 3:
			return mPart3;
	}
	return 0;
#endif
}
// part is in the range of [0,1]
// Return 64 bit word nIndex of the value (0 = least significant, range [0,1]).
// Out-of-range indices return 0.
uint64_t int128_t_base::GetPartUint64(int nIndex) const
{
#if EA_INT128_USE_INT64
	if(nIndex == 0)
		return mPart0;
	else if(nIndex == 1)
		return mPart1;
	return 0;
#else
	if(nIndex == 0)
		return uint64_t((uint64_t(mPart1) << 32) + mPart0);
	else if(nIndex == 1)
		return uint64_t((uint64_t(mPart3) << 32) + mPart2);
	return 0;
#endif
}
// Replace byte nIndex of the value (0 = least significant byte, range [0,15]).
// Out-of-range indices are ignored.
void int128_t_base::SetPartUint8(int nIndex, uint8_t value)
{
#if EA_INT128_USE_INT64
	uint64_t* pValue;
	switch (nIndex / 8)
	{
		case 0:
			pValue = &mPart0;
			break;
		case 1:
			pValue = &mPart1;
			break;
		default:
			return;
	}
	// clear the target byte, then merge in the new one at its bit offset
	nIndex %= 8;
	*pValue = ((*pValue & ~(UINT64_C(0xff) << (nIndex * 8))) + ((uint64_t)value << (nIndex * 8)));
#else
	uint32_t* pValue;
	switch (nIndex / 4)
	{
		case 0:
			pValue = &mPart0;
			break;
		case 1:
			pValue = &mPart1;
			break;
		case 2:
			pValue = &mPart2;
			break;
		case 3:
			pValue = &mPart3;
			break;
		default:
			return;
	}
	switch (nIndex % 4)
	{
		case 0:
			*pValue = ((*pValue & 0xffffff00) + (value << 0));
			break;
		case 1:
			*pValue = ((*pValue & 0xffff00ff) + (value << 8));
			break;
		case 2:
			*pValue = ((*pValue & 0xff00ffff) + (value << 16));
			break;
		case 3:
			*pValue = ((*pValue & 0x00ffffff) + (value << 24));
			break;
	}
#endif
}
// Replace 16 bit word nIndex of the value (0 = least significant, range [0,7]).
// Out-of-range indices are ignored.
void int128_t_base::SetPartUint16(int nIndex, uint16_t value)
{
#if EA_INT128_USE_INT64
	uint64_t* pValue;
	switch (nIndex / 4)
	{
		case 0:
			pValue = &mPart0;
			break;
		case 1:
			pValue = &mPart1;
			break;
		default:
			return;
	}
	// clear the target word, then merge in the new one at its bit offset
	nIndex %= 4;
	*pValue = ((*pValue & ~(UINT64_C(0xffff) << (nIndex * 16))) + ((uint64_t)value << (nIndex * 16)));
#else
	uint32_t* pValue;
	switch (nIndex / 2)
	{
		case 0:
			pValue = &mPart0;
			break;
		case 1:
			pValue = &mPart1;
			break;
		case 2:
			pValue = &mPart2;
			break;
		case 3:
			pValue = &mPart3;
			break;
		default:
			return;
	}
	// odd indices replace the high half of the 32 bit part, even the low half
	if(nIndex % 2)
		*pValue = ((*pValue & 0x0000ffff) + (value << 16));
	else
		*pValue = ((*pValue & 0xffff0000) + (value));
#endif
}
// Replace 32 bit word nIndex of the value (0 = least significant, range [0,3]).
// Out-of-range indices are ignored.
void int128_t_base::SetPartUint32(int nIndex, uint32_t value)
{
#if EA_INT128_USE_INT64
	switch (nIndex)
	{
		case 0:
			mPart0 = (mPart0 & UINT64_C(0xffffffff00000000)) + value;
			break;
		case 1:
			mPart0 = (mPart0 & UINT64_C(0x00000000ffffffff)) + ((uint64_t)value << 32);
			break;
		case 2:
			mPart1 = (mPart1 & UINT64_C(0xffffffff00000000)) + value;
			break;
		case 3:
			mPart1 = (mPart1 & UINT64_C(0x00000000ffffffff)) + ((uint64_t)value << 32);
			break;
	}
#else
	switch (nIndex)
	{
		case 0:
			mPart0 = value;
			break;
		case 1:
			mPart1 = value;
			break;
		case 2:
			mPart2 = value;
			break;
		case 3:
			mPart3 = value;
			break;
	}
#endif
}
// Replace 64 bit word nIndex of the value (0 = least significant, range [0,1]).
// Out-of-range indices are ignored.
void int128_t_base::SetPartUint64(int nIndex, uint64_t value)
{
#if EA_INT128_USE_INT64
	if(nIndex == 0)
		mPart0 = value;
	else if(nIndex == 1)
		mPart1 = value;
#else
	if(nIndex == 0)
	{
		mPart0 = (uint32_t)(value);
		mPart1 = (uint32_t)(value >> 32);
	}
	else if(nIndex == 1)
	{
		mPart2 = (uint32_t)(value);
		mPart3 = (uint32_t)(value >> 32);
	}
#endif
}
// True iff every storage part is zero.
bool int128_t_base::IsZero() const
{
#if EA_INT128_USE_INT64
	return (mPart0 == 0) && // Check mPart0 first as this will likely yield faster execution.
		   (mPart1 == 0);
#else
	return (mPart0 == 0) && // Check mPart0 first as this will likely yield faster execution.
		   (mPart1 == 0) &&
		   (mPart2 == 0) &&
		   (mPart3 == 0);
#endif
}
// Clear all storage parts to zero.
void int128_t_base::SetZero()
{
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = 0;
#else
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = 0;
	mPart0 = 0;
#endif
}
// In-place arithmetic negation: invert all bits, then add one.
void int128_t_base::TwosComplement()
{
#if EA_INT128_USE_INT64
	mPart1 = ~mPart1;
	mPart0 = ~mPart0;
#else
	mPart3 = ~mPart3;
	mPart2 = ~mPart2;
	mPart1 = ~mPart1;
	mPart0 = ~mPart0;
#endif
	// What we want to do, but isn't available at this level:
	// operator++();
	// Alternative:
	int128_t_base one((uint32_t)1);
	operatorPlus(*this, one, *this);
}
// In-place inverse of TwosComplement(): subtract one, then invert all bits.
// (Mathematically this is again negation, applied in reverse order.)
void int128_t_base::InverseTwosComplement()
{
	// What we want to do, but isn't available at this level:
	// operator--();
	// Alternative:
	int128_t_base one((uint32_t)1);
	operatorMinus(*this, one, *this);
#if EA_INT128_USE_INT64
	mPart1 = ~mPart1;
	mPart0 = ~mPart0;
#else
	mPart3 = ~mPart3;
	mPart2 = ~mPart2;
	mPart1 = ~mPart1;
	mPart0 = ~mPart0;
#endif
}
// Store the magnitude (absolute value) of 'value' into this object.
// Limited to inputs whose magnitude fits 64 bits of integer; larger inputs
// are silently truncated through the double->uint64_t conversion.
void int128_t_base::DoubleToUint128(double value)
{
	// Currently this function is limited to 64 bits of integer input.
	// We need to make a better version of this function. Perhaps we should implement
	// it via dissecting the IEEE floating point format (sign, exponent, matissa).
	// EA_ASSERT(fabs(value) < 18446744073709551616.0); // Assert that the input is <= 64 bits of integer.
#if EA_INT128_USE_INT64
	mPart1 = 0;
	mPart0 = (value >= 0 ? (uint64_t)value : (uint64_t)-value);
#else
	const uint64_t value64 = (value >= 0 ? (uint64_t)value : (uint64_t)-value);
	mPart3 = 0;
	mPart2 = 0;
	mPart1 = (uint32_t) (value64 >> 32);
	mPart0 = (uint32_t)((value64 >> 0) & 0xffffffff);
	// Below is a version I have been working on a version that works up to the full 128 bits.
	// The implementation below has a roundoff problem for some cases and would have to be reworked.
	/*
	double valueTemp(value);
	if(value < 0)
		valueTemp = -valueTemp;
	//Get part3
	mPart3 = (uint32_t)(valueTemp / 79228162514264337593543950336.0); // 79228162514264337593543950336.0 is the same as 0xffffffffffffffffffffffff + 1, or 0x1000000000000000000000000.
	valueTemp -= (mPart3 * 79228162514264337593543950336.0);
	//Get part2
	mPart2 = (uint32_t)(valueTemp / 18446744073709551616.0); // 18446744073709551616.0 is the same as 0xffffffffffffffff + 1, or 0x10000000000000000.
	valueTemp -= (mPart2 * 18446744073709551616.0);
	//Get part1
	mPart1 = (uint32_t)(valueTemp / 4294967296.0); // 4294967296.0 is the same as 0xffffffff + 1, or 0x100000000.
	valueTemp -= (mPart1 * 4294967296.0);
	//Get part0
	mPart0 = (uint32_t)(valueTemp);
	*/
#endif
}
///////////////////////////////////////////////////////////////////////////////
// int128_t
///////////////////////////////////////////////////////////////////////////////
// Default constructor: zero.
int128_t::int128_t()
#if EA_INT128_USE_INT64
	: int128_t_base(0, 0)
#else
	: int128_t_base(0, 0, 0, 0)
#endif
{
}
// Construct from four 32 bit parts; nPart0 is least significant.
int128_t::int128_t(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3)
	: int128_t_base(nPart0, nPart1, nPart2, nPart3) // OK for EA_INT128_USE_INT64
{
}
// Construct from two 64 bit parts; nPart0 is least significant.
int128_t::int128_t(uint64_t nPart0, uint64_t nPart1)
	: int128_t_base(nPart0, nPart1) // OK for EA_INT128_USE_INT64
{
}
// Construct from an unsigned 8 bit value (zero-extended).
int128_t::int128_t(uint8_t value)
	: int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Construct from an unsigned 16 bit value (zero-extended).
int128_t::int128_t(uint16_t value)
	: int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Construct from an unsigned 32 bit value (zero-extended).
int128_t::int128_t(uint32_t value)
	: int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Optional overload for a platform-specific unsigned integer type,
// forwarded through the uint64_t base constructor.
#if defined(INT128_UINT_TYPE)
int128_t::int128_t(INT128_UINT_TYPE value)
	: int128_t_base((uint64_t)value) // OK for EA_INT128_USE_INT64
{
}
#endif
// Construct from an unsigned 64 bit value (zero-extended).
int128_t::int128_t(uint64_t value)
	: int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Construct from a signed 8 bit value (sign-extended).
// Negative inputs are built from their magnitude and then negated.
// Note: -value is computed after integer promotion to int, so it is well
// defined even for INT8_MIN.
int128_t::int128_t(int8_t value)
{
	if(value < 0)
	{
		*this = int128_t((uint8_t)-value);
		TwosComplement();
	}
	else
	{
#if EA_INT128_USE_INT64
		mPart1 = 0;
		mPart0 = value;
#else
		mPart3 = 0;
		mPart2 = 0;
		mPart1 = 0;
		mPart0 = value;
#endif
	}
}
// Construct from a signed 16 bit value (sign-extended).
// Negative inputs are built from their magnitude and then negated.
// Note: -value is computed after integer promotion to int, so it is well
// defined even for INT16_MIN.
int128_t::int128_t(int16_t value)
{
	if(value < 0)
	{
		*this = int128_t((uint16_t)-value);
		TwosComplement();
	}
	else
	{
#if EA_INT128_USE_INT64
		mPart1 = 0;
		mPart0 = value;
#else
		mPart3 = 0;
		mPart2 = 0;
		mPart1 = 0;
		mPart0 = value;
#endif
	}
}
// Construct from a signed 32 bit value (sign-extended).
// Negative inputs are built from their magnitude and then negated.
int128_t::int128_t(int32_t value)
{
	if(value < 0)
	{
		// BUGFIX: compute the magnitude in unsigned arithmetic. The previous
		// code used (uint32_t)-value, and negating INT32_MIN in signed
		// arithmetic is undefined behavior. (0 - (uint32_t)value) yields the
		// correct magnitude for every negative input, including INT32_MIN.
		*this = int128_t((uint32_t)0 - (uint32_t)value);
		TwosComplement();
	}
	else
	{
#if EA_INT128_USE_INT64
		mPart1 = 0;
		mPart0 = value;
#else
		mPart3 = 0;
		mPart2 = 0;
		mPart1 = 0;
		mPart0 = (uint32_t)value;
#endif
	}
}
// Optional overload for a platform-specific signed integer type,
// forwarded through the int64_t constructor.
#if defined(INT128_INT_TYPE)
int128_t::int128_t(INT128_INT_TYPE value)
{
	operator=(int128_t((int64_t)value));
}
#endif
// Construct from a signed 64 bit value (sign-extended).
// Negative inputs are built from their magnitude and then negated.
int128_t::int128_t(int64_t value)
{
	if(value < 0)
	{
		// BUGFIX: compute the magnitude in unsigned arithmetic. The previous
		// code did int128_t((int64_t)-value), which is undefined behavior for
		// INT64_MIN (signed negation overflow) and re-entered this same
		// constructor needlessly. (0 - (uint64_t)value) is well defined for
		// all negative inputs and selects the uint64_t constructor directly.
		*this = int128_t((uint64_t)0 - (uint64_t)value);
		TwosComplement();
	}
	else
	{
#if EA_INT128_USE_INT64
		mPart1 = 0;
		mPart0 = (uint64_t) (value);
#else
		mPart3 = 0;
		mPart2 = 0;
		mPart1 = (uint32_t) ((value >> 32) & 0xffffffff);
		mPart0 = (uint32_t) (value & 0xffffffff);
#endif
	}
}
// Copy constructor: delegate to the base member-wise copy.
int128_t::int128_t(const int128_t& value)
	: int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Not defined because doing so would make the compiler unable to
// decide how to choose binary functions involving int128/uint128.
//int128_t::int128_t(const uint128_t& value)
// : int128_t_base(value) // OK for EA_INT128_USE_INT64
//{
//}
// Construct from a float: store the magnitude, then negate if the input
// was negative. Precision limited to 64 integer bits (see DoubleToUint128).
int128_t::int128_t(const float value)
{
	// OK for EA_INT128_USE_INT64
	DoubleToUint128(value);
	if(value < 0)
		Negate();
}
// Construct from a double: store the magnitude, then negate if the input
// was negative. Precision limited to 64 integer bits (see DoubleToUint128).
int128_t::int128_t(const double value)
{
	// OK for EA_INT128_USE_INT64
	DoubleToUint128(value);
	if(value < 0)
		Negate();
}
// Construct by parsing a narrow string in the given base via StrToInt128
// (declared elsewhere in EAStdC); the end-of-parse pointer is not needed.
int128_t::int128_t(const char* pValue, int nBase){
	// OK for EA_INT128_USE_INT64
	const int128_t value(StrToInt128(pValue, NULL, nBase));
	operator=(value);
}
// Construct by parsing a wide string in the given base via StrToInt128.
// The end-of-parse pointer is received but intentionally discarded.
int128_t::int128_t(const wchar_t* pValue, int nBase){
	// OK for EA_INT128_USE_INT64
	wchar_t* pTextEnd(NULL);
	const int128_t value(StrToInt128(pValue, &pTextEnd, nBase));
	operator=(value);
}
// Assignment from the storage base class.
int128_t& int128_t::operator=(const int128_t_base& value)
{
	// C++ requires operator= to be subclassed, even if the subclassed
	// implementation is identical to the base implementation.
	// OK for EA_INT128_USE_INT64
	int128_t_base::operator=(value);
	return *this;
}
int128_t int128_t::operator-() const
{
// OK for EA_INT128_USE_INT64
int128_t returnValue(*this);
returnValue.Negate();
return returnValue;
}
int128_t& int128_t::operator++()
{
// OK for EA_INT128_USE_INT64
int128_t_base one((uint32_t)1);
operatorPlus(*this, one, *this);
return *this;
}
int128_t& int128_t::operator--()
{
// OK for EA_INT128_USE_INT64
int128_t_base one((uint32_t)1);
operatorMinus(*this, one, *this);
return *this;
}
// Postfix increment: advances *this by one and returns the value held
// before the increment. The previous implementation neither modified *this
// nor returned the old value (it returned *this + 1 and left *this alone).
int128_t int128_t::operator++(int)
{
    // OK for EA_INT128_USE_INT64
    const int128_t prev(*this);
    int128_t_base one((uint32_t)1);
    operatorPlus(*this, one, *this);
    return prev;
}
// Postfix decrement: decrements *this by one and returns the value held
// before the decrement. The previous implementation neither modified *this
// nor returned the old value (it returned *this - 1 and left *this alone).
int128_t int128_t::operator--(int)
{
    // OK for EA_INT128_USE_INT64
    const int128_t prev(*this);
    int128_t_base one((uint32_t)1);
    operatorMinus(*this, one, *this);
    return prev;
}
// Unary plus: identity operation.
int128_t int128_t::operator+() const
{
    // OK for EA_INT128_USE_INT64
    return *this;
}
// Bitwise NOT: inverts every bit of the 128-bit value.
int128_t int128_t::operator~() const
{
#if EA_INT128_USE_INT64
    return int128_t(~mPart0, ~mPart1);
#else
    return int128_t(~mPart0, ~mPart1, ~mPart2, ~mPart3);
#endif
}
// Binary addition: two's-complement add, so one routine serves both signs.
int128_t operator+(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    int128_t::operatorPlus(value1, value2, temp);
    return temp;
}
// Binary subtraction: two's-complement subtract, so one routine serves both signs.
int128_t operator-(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    int128_t::operatorMinus(value1, value2, temp);
    return temp;
}
///////////////////////////////////////////////////////////////////////////////
// operator *
//
// Signed multiply: the underlying operatorMul works on unsigned magnitudes,
// so negative operands are negated first and the product's sign is fixed up
// afterwards (negative exactly when the operand signs differ).
//
int128_t operator*(const int128_t& value1, const int128_t& value2)
{
    int128_t lhs(value1);
    int128_t rhs(value2);
    // Record each operand's sign, then reduce it to its magnitude.
    const bool bLhsNegative = lhs.IsNegative();
    const bool bRhsNegative = rhs.IsNegative();
    if(bLhsNegative)
        lhs.Negate();
    if(bRhsNegative)
        rhs.Negate();
    int128_t product;
    int128_t_base::operatorMul(lhs, rhs, product);
    // Restore the sign: a product of differing signs is negative.
    if(bLhsNegative != bRhsNegative)
        product.Negate();
    return product;
}
// Division and modulus both delegate to Modulus(), which computes quotient
// and remainder in a single pass.
int128_t operator/(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t remainder;
    int128_t quotient;
    value1.Modulus(value2, quotient, remainder);
    return quotient;
}
int128_t operator%(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t remainder;
    int128_t quotient;
    value1.Modulus(value2, quotient, remainder);
    return remainder;
}
// Compound arithmetic assignments: each applies the corresponding binary
// operation in place and returns *this.
int128_t& int128_t::operator+=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorPlus(*this, value, *this);
    return *this;
}
int128_t& int128_t::operator-=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorMinus(*this, value, *this);
    return *this;
}
int128_t& int128_t::operator*=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    *this = *this * value;
    return *this;
}
int128_t& int128_t::operator/=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    *this = *this / value;
    return *this;
}
int128_t& int128_t::operator%=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    *this = *this % value;
    return *this;
}
// With rightward shifts of negative numbers, shift in zero from the left side
// (i.e. a logical rather than arithmetic shift).
int128_t int128_t::operator>>(int nShift) const
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    operatorShiftRight(*this, nShift, temp);
    return temp;
}
// Left shift; vacated low bits are filled with zero.
int128_t int128_t::operator<<(int nShift) const
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    operatorShiftLeft(*this, nShift, temp);
    return temp;
}
// In-place right shift (logical; see operator>> above).
int128_t& int128_t::operator>>=(int nShift)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    operatorShiftRight(*this, nShift, temp);
    *this = temp;
    return *this;
}
// In-place left shift.
int128_t& int128_t::operator<<=(int nShift)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    operatorShiftLeft(*this, nShift, temp);
    *this = temp;
    return *this;
}
// Bitwise XOR of two 128-bit values.
int128_t operator^(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    int128_t::operatorXOR(value1, value2, temp);
    return temp;
}
// Bitwise OR of two 128-bit values.
int128_t operator|(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    int128_t::operatorOR(value1, value2, temp);
    return temp;
}
// Bitwise AND of two 128-bit values.
int128_t operator&(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    int128_t temp;
    int128_t::operatorAND(value1, value2, temp);
    return temp;
}
// In-place bitwise compound assignments.
int128_t& int128_t::operator^=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorXOR(*this, value, *this);
    return *this;
}
int128_t& int128_t::operator|=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorOR(*this, value, *this);
    return *this;
}
int128_t& int128_t::operator&=(const int128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorAND(*this, value, *this);
    return *this;
}
// This function forms the basis of all logical comparison functions.
// If value1 < value2, the return value is -1.
// If value1 == value2, the return value is 0.
// If value1 > value2, the return value is 1.
int compare(const int128_t& value1, const int128_t& value2)
{
    // Cache some values. Positive means >= 0. Negative means < 0 and thus means '!positive'.
    const bool bValue1IsPositive(value1.IsPositive());
    const bool bValue2IsPositive(value2.IsPositive());
    // Do positive/negative tests.
    if(bValue1IsPositive != bValue2IsPositive)
        return bValue1IsPositive ? 1 : -1;
    // Compare individual parts. At this point, the two numbers have the same sign,
    // and for same-sign two's-complement values an unsigned comparison of the
    // raw parts (highest part first) yields the correct ordering.
#if EA_INT128_USE_INT64
    if(value1.mPart1 == value2.mPart1)
    {
        if(value1.mPart0 == value2.mPart0)
            return 0;
        else if(value1.mPart0 > value2.mPart0)
            return 1;
        // return -1; //Just fall through to the end.
    }
    else if(value1.mPart1 > value2.mPart1)
        return 1;
    return -1;
#else
    if(value1.mPart3 == value2.mPart3)
    {
        if(value1.mPart2 == value2.mPart2)
        {
            if(value1.mPart1 == value2.mPart1)
            {
                if(value1.mPart0 == value2.mPart0)
                    return 0;
                else if(value1.mPart0 > value2.mPart0)
                    return 1;
                // return -1; //Just fall through to the end.
            }
            else if(value1.mPart1 > value2.mPart1)
                return 1;
            // return -1; //Just fall through to the end.
        }
        else if(value1.mPart2 > value2.mPart2)
            return 1;
        // return -1; //Just fall through to the end.
    }
    else if(value1.mPart3 > value2.mPart3)
        return 1;
    return -1;
#endif
}
// Equality: all parts must match.
bool operator==(const int128_t& value1, const int128_t& value2)
{
#if EA_INT128_USE_INT64
    return (value1.mPart0 == value2.mPart0) && // Check mPart0 first as this will likely yield faster execution.
           (value1.mPart1 == value2.mPart1);
#else
    return (value1.mPart0 == value2.mPart0) && // Check mPart0 first as this will likely yield faster execution.
           (value1.mPart1 == value2.mPart1) &&
           (value1.mPart2 == value2.mPart2) &&
           (value1.mPart3 == value2.mPart3);
#endif
}
// Inequality: any differing part suffices.
bool operator!=(const int128_t& value1, const int128_t& value2)
{
#if EA_INT128_USE_INT64
    return (value1.mPart0 != value2.mPart0) || // Check mPart0 first as this will likely yield faster execution.
           (value1.mPart1 != value2.mPart1);
#else
    return (value1.mPart0 != value2.mPart0) || // Check mPart0 first as this will likely yield faster execution.
           (value1.mPart1 != value2.mPart1) ||
           (value1.mPart2 != value2.mPart2) ||
           (value1.mPart3 != value2.mPart3);
#endif
}
// Relational operators: all are expressed via the three-way compare() above.
bool operator>(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    return (compare(value1, value2) > 0);
}
bool operator>=(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    return (compare(value1, value2) >= 0);
}
bool operator<(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    return (compare(value1, value2) < 0);
}
bool operator<=(const int128_t& value1, const int128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    return (compare(value1, value2) <= 0);
}
// Narrowing conversions: negative values are converted via their magnitude
// and re-negated so the sign survives the truncation to the low part.
int8_t int128_t::AsInt8() const
{
    // OK for EA_INT128_USE_INT64
    if(IsNegative())
    {
        int128_t t(*this);
        t.Negate();
        return (int8_t)-t.AsInt8();
    }
    return (int8_t) mPart0;
}
int16_t int128_t::AsInt16() const
{
    // OK for EA_INT128_USE_INT64
    if(IsNegative())
    {
        int128_t t(*this);
        t.Negate();
        return (int16_t)-t.AsInt16();
    }
    return (int16_t) mPart0;
}
int32_t int128_t::AsInt32() const
{
    // OK for EA_INT128_USE_INT64
    if(IsNegative())
    {
        int128_t t(*this);
        t.Negate();
        return -t.AsInt32();
    }
    return (int32_t) mPart0;
}
// Truncating conversion to int64_t; negative values go through the unsigned
// magnitude so the result follows modulo-2^64 semantics.
int64_t int128_t::AsInt64() const
{
    if(IsNegative())
    {
        int128_t t(*this);
        t.Negate();
        return -t.AsUint64(); // ensure mod2 behaviour
    }
#if EA_INT128_USE_INT64
    return (int64_t) mPart0;
#else
    return (((int64_t) mPart1) << 32) + mPart0;
#endif
}
// I am not convinced that this is a reliable method of conversion.
// Converts by scaling each part by its positional weight:
// the literal constants below are 2^96, 2^64 and 2^32 respectively.
float int128_t::AsFloat() const
{
    if(IsNegative())
    {
        int128_t t(*this);
        t.Negate();
        return -t.AsFloat();
    }
    float fReturnValue(0);
#if EA_INT128_USE_INT64
    if(mPart1)
        fReturnValue += (mPart1 * 18446744073709551616.f); // 2^64
    if(mPart0)
        fReturnValue += (float)mPart0;
#else
    if(mPart3)
        fReturnValue += (mPart3 * 79228162514264337593543950336.f); // 2^96
    if(mPart2)
        fReturnValue += (mPart2 * 18446744073709551616.f); // 2^64
    if(mPart1)
        fReturnValue += (mPart1 * 4294967296.f); // 2^32
    if(mPart0)
        fReturnValue += (float)mPart0;
#endif
    return fReturnValue;
}
// I am not convinced that this is a reliable method of conversion.
// Same positional-weight scheme as AsFloat, in double precision.
double int128_t::AsDouble() const
{
    if(IsNegative())
    {
        int128_t t(*this);
        t.Negate();
        return -t.AsDouble();
    }
    double fReturnValue(0);
#if EA_INT128_USE_INT64
    if(mPart1)
        fReturnValue += (mPart1 * 18446744073709551616.0); // 2^64
    if(mPart0)
        fReturnValue += (double)mPart0;
#else
    if(mPart3)
        fReturnValue += (mPart3 * 79228162514264337593543950336.0); // 2^96
    if(mPart2)
        fReturnValue += (mPart2 * 18446744073709551616.0); // 2^64
    if(mPart1)
        fReturnValue += (mPart1 * 4294967296.0); // 2^32
    if(mPart0)
        fReturnValue += (double)mPart0;
#endif
    return fReturnValue;
}
// Arithmetic negation: picks the correct two's-complement direction based on
// the current sign.
void int128_t::Negate()
{
    // OK for EA_INT128_USE_INT64
    if(IsPositive())
        TwosComplement();
    else
        InverseTwosComplement();
}
// The sign lives in the top bit of the highest part.
bool int128_t::IsNegative() const
{   // True if value < 0
#if EA_INT128_USE_INT64
    return ((mPart1 & UINT64_C(0x8000000000000000)) != 0);
#else
    return ((mPart3 & 0x80000000) != 0);
#endif
}
bool int128_t::IsPositive() const
{   // True of value >= 0
#if EA_INT128_USE_INT64
    return ((mPart1 & UINT64_C(0x8000000000000000)) == 0);
#else
    return ((mPart3 & 0x80000000) == 0);
#endif
}
///////////////////////////////////////////////////////////////////////////////
// Modulus
//
// This is a generic function that does both division modulus calculations.
//
void int128_t::Modulus(const int128_t& divisor, int128_t& quotient, int128_t& remainder) const
{
// OK for EA_INT128_USE_INT64
int128_t tempDividend(*this);
int128_t tempDivisor(divisor);
bool bDividendNegative = false;
bool bDivisorNegative = false;
if(tempDividend.IsNegative())
{
bDividendNegative = true;
tempDividend.Negate();
}
if(tempDivisor.IsNegative())
{
bDivisorNegative = true;
tempDivisor.Negate();
}
// Handle the special cases
if(tempDivisor.IsZero())
{
// Force a divide by zero exception.
// We know that tempDivisor.mPart0 is zero.
quotient.mPart0 /= tempDivisor.mPart0;
}
else if(tempDividend.IsZero())
{
quotient = int128_t((uint32_t)0);
remainder = int128_t((uint32_t)0);
}
else
{
remainder.SetZero();
for(int i(0); i < 128; i++)
{
remainder += (uint32_t)tempDividend.GetBit(127 - i);
const bool bBit(remainder >= tempDivisor);
quotient.SetBit(127 - i, bBit);
if(bBit)
remainder -= tempDivisor;
if((i != 127) && !remainder.IsZero())
remainder <<= 1;
}
}
if((bDividendNegative && !bDivisorNegative) || (!bDividendNegative && bDivisorNegative))
{
// Ensure the following formula applies for negative dividends
// dividend = divisor * quotient + remainder
quotient.Negate();
}
}
///////////////////////////////////////////////////////////////////////////////
// StrToInt128
//
// Same as C runtime strtol function but for int128_t.
// This is probably the most general and useful of the C atoi family of functions.
//
// Skips leading whitespace, accepts an optional sign, auto-detects base 2/8/10/16
// when nBase is 0, and on return optionally reports one-past-the-last digit via
// ppEnd. Note that only bases 2, 10 and 16 are actually converted below.
int128_t int128_t::StrToInt128(const char* pValue, char** ppEnd, int nBase)
{
    int128_t value((uint32_t)0);        // Current value
    const char* p = pValue;             // Current position
    const char* pBegin = NULL;          // Where the digits start.
    const char* pEnd = NULL;            // Where the digits end. One-past the last digit.
    char chSign('+');                   // One of either '+' or '-'
    // Skip leading whitespace
    while(isspace((unsigned char)*p))
        ++p;
    // Check for sign.
    if((*p == '-') || (*p == '+'))
        chSign = *p++;
    // Do checks on 'nBase'.
    if((nBase < 0) || (nBase == 1) || (nBase > 36)){
        // Invalid base: report no characters consumed and return zero.
        if(ppEnd)
            *ppEnd = (char*)pValue;
        return value;
    }
    else if(nBase == 0){
        // Auto detect one of base 2, 8, 10, or 16.
        if(*p != '0')
            nBase = 10;
        else if((p[1] == 'x') || (p[1] == 'X')) // It's safe to read p[1] because p[0] is known to be '0'.
            nBase = 16;
        else if((p[1] == 'b') || (p[1] == 'B'))
            nBase = 2;
        else
            nBase = 8;
    }
    if(nBase == 16){
        // If there is a leading '0x', then skip past it.
        if((*p == '0') && ((p[1] == 'x') || (p[1] == 'X')))
            p += 2;
    }
    else if(nBase == 2){
        // If there is a leading '0b', then skip past it.
        if((*p == '0') && ((p[1] == 'b') || (p[1] == 'B')))
            p += 2;
    }
    // Save the position where the digits start.
    pBegin = p;
    if(nBase == 2) // Binary
    {
        while((*p == '0') || (*p == '1'))
            p++;
        pEnd = p;
        if(pEnd > pBegin + 128) // There can be at most 128 binary digits in the string.
        {
            pEnd = pBegin + 128;
            p = pEnd;
        }
        // Walk the digits backwards, setting bit i for each '1'.
        for(int i(0); p > pBegin; ++i)
        {
            --p;
            if(*p == '1')
                value.SetBit(i, true);
        }
    }
    else if(nBase == 10) // Decimal
    {
        while(isdigit((unsigned char)*p))
            ++p;
        pEnd = p;
        if(pEnd > pBegin + 39) // With base 10, it is not enough to simply check against 39 digits,
        {                      // as you can have 39 '9's and overflow. But 39 is the most you could have.
            pEnd = pBegin + 39;
            p = pEnd;
        }
        // Accumulate digits from least to most significant, scaling the
        // multiplier by ten each step.
        int128_t multiplier((uint32_t)1);
        for(int i(0); p > pBegin; ++i)
        {
            const uint32_t c = (uint32_t)(*(--p) - '0');
            if(c)
            {
                // This can be optimized for faster speed by doing the smaller orders
                // of ten on value.mPart0 with an int multiplier instead of on value
                // and a int128_t multiplier.
                value += (multiplier * c);
            }
            multiplier *= (uint32_t)10;
        }
    }
    else if(nBase == 16) // Hexadecimal
    {
        while(isxdigit((unsigned char)*p))
            p++;
        pEnd = p;
        if(pEnd > pBegin + 32) // There can be at most 32 hexadecimal digits in the string.
        {
            pEnd = pBegin + 32;
            p = pEnd;
        }
        // There can be as many as 32 characters.
        // Each nibble is OR'd directly into the part it belongs to.
        for(int i(0); p > pBegin; i++)
        {
#if EA_INT128_USE_INT64
            const int nPart = (int)((pEnd - p) / 16);
            uint64_t c = *(--p); // c is an integer in the range of [0,15].
#else
            const int nPart = (int)((pEnd - p) / 8);
            uint32_t c = *(--p); // c is an integer in the range of [0,15].
#endif
            if(c >= '0' && c <= '9')
                c = (c - '0');
            else if(c >= 'a' && c <= 'f')
                c = 10 + (c - 'a');
            else
                c = 10 + (c - 'A');
            if(c)
            {
#if EA_INT128_USE_INT64
                c <<= ((i % 16) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
#else
                c <<= ((i % 8) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
                else if(nPart == 2)
                    value.mPart2 |= c;
                else if(nPart == 3)
                    value.mPart3 |= c;
#endif
            }
        }
    }
    else
    {
        // EA_ASSERT(false); // For the time being, we handle only the above bases. But that's all that's required by the standard.
    }
    if(chSign == '-')
        value.Negate();
    if(ppEnd)
        *ppEnd = (char*)pEnd;
    return value;
}
///////////////////////////////////////////////////////////////////////////////
// StrToInt128
//
// Same as C runtime strtol function but for int128_t.
// This is probably the most general and useful of the C atoi family of functions.
//
int128_t int128_t::StrToInt128(const wchar_t* pValue, wchar_t** ppEnd, int nBase)
{
    // This is simply a copy and paste of the char version of StrToInt128, with minor
    // modifications for wchar_t.
    // To consider: Make an alternative implementation of this which converts the wchar_t
    // buffer to char and uses the char version. Doing this properly would involve more
    // than a trivial number of lines of code, and so for the time being we do the copy/paste.
    int128_t value((uint32_t)0);        // Current value
    const wchar_t* p = pValue;          // Current position
    const wchar_t* pBegin = NULL;       // Where the digits start.
    const wchar_t* pEnd = NULL;         // Where the digits end. One-past the last digit.
    wchar_t chSign('+');                // One of either '+' or '-'
    // Skip leading whitespace
    while((*p > 0) && (*p < 127) && isspace((uint8_t)*p)) // Compare to < 127 because ctype functions will crash for higher values.
        ++p;
    // Check for sign.
    if((*p == '-') || (*p == '+'))
        chSign = *p++;
    // Do checks on 'nBase'.
    if((nBase < 0) || (nBase == 1) || (nBase > 36)){
        // Invalid base: report no characters consumed and return zero.
        if(ppEnd)
            *ppEnd = (wchar_t*)pValue;
        return value;
    }
    else if(nBase == 0){
        // Auto detect one of base 2, 8, 10, or 16.
        if(*p != '0')
            nBase = 10;
        else if((p[1] == 'x') || (p[1] == 'X'))
            nBase = 16;
        else if((p[1] == 'b') || (p[1] == 'B'))
            nBase = 2;
        else
            nBase = 8;
    }
    if(nBase == 16){
        // If there is a leading '0x', then skip past it.
        if((*p == '0') && ((p[1] == 'x') || (p[1] == 'X')))
            p += 2;
    }
    else if(nBase == 2){
        // If there is a leading '0b', then skip past it.
        if((*p == '0') && ((p[1] == 'b') || (p[1] == 'B')))
            p += 2;
    }
    // Save the position where the digits start.
    pBegin = p;
    if(nBase == 2) // Binary
    {
        while((*p == '0') || (*p == '1'))
            p++;
        pEnd = p;
        if(pEnd > pBegin + 128) // There can be at most 128 binary digits in the string.
        {
            pEnd = pBegin + 128;
            p = pEnd;
        }
        // Walk the digits backwards, setting bit i for each '1'.
        for(int i(0); p > pBegin; ++i)
        {
            --p;
            if(*p == '1')
                value.SetBit(i, true);
        }
    }
    else if(nBase == 10) // Decimal
    {
        while((*p > 0) && (*p < 127) && isdigit((uint8_t)*p)) // Compare to < 127 because ctype functions will crash for higher values.
            ++p;
        pEnd = p;
        if(pEnd > pBegin + 39) // With base 10, it is not enough to simply check against 39 digits,
        {                      // as you can have 39 '9's and overflow. But 39 is the most you could have.
            pEnd = pBegin + 39;
            p = pEnd;
        }
        // Accumulate digits from least to most significant, scaling the
        // multiplier by ten each step.
        int128_t multiplier((uint32_t)1);
        for(int i(0); p > pBegin; ++i)
        {
            const uint32_t c = (uint32_t)(*(--p) - '0');
            if(c)
            {
                // This can be optimized for faster speed by doing the smaller orders
                // of ten on value.mPart0 with an int multiplier instead of on value
                // and a int128_t multiplier.
                value += (multiplier * c);
            }
            multiplier *= (uint32_t)10;
        }
    }
    else if(nBase == 16) // Hexadecimal
    {
        while((*p > 0) && (*p < 127) && isxdigit(*p)) // Compare to < 127 because ctype functions will crash for higher values.
            p++;
        pEnd = p;
        if(pEnd > pBegin + 32) // There can be at most 32 hexadecimal digits in the string.
        {
            pEnd = pBegin + 32;
            p = pEnd;
        }
        // There can be as many as 32 characters.
        // Each nibble is OR'd directly into the part it belongs to.
        for(int i(0); p > pBegin; i++)
        {
#if EA_INT128_USE_INT64
            const int nPart = (int)((pEnd - p) / 16);
            uint64_t c = *(--p); // c is an integer in the range of [0,15].
#else
            const int nPart = (int)((pEnd - p) / 8);
            uint32_t c = *(--p); // c is an integer in the range of [0,15].
#endif
            if(c >= '0' && c <= '9')
                c = (c - '0');
            else if(c >= 'a' && c <= 'f')
                c = 10 + (c - 'a');
            else
                c = 10 + (c - 'A');
            if(c)
            {
#if EA_INT128_USE_INT64
                c <<= ((i % 16) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
#else
                c <<= ((i % 8) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
                else if(nPart == 2)
                    value.mPart2 |= c;
                else if(nPart == 3)
                    value.mPart3 |= c;
#endif
            }
        }
    }
    else
    {
        // EA_ASSERT(false); // For the time being, we handle only the above bases. But that's all that's required by the standard.
    }
    if(chSign == '-')
        value.Negate();
    if(ppEnd)
        *ppEnd = (wchar_t*)pEnd;
    return value;
}
///////////////////////////////////////////////////////////////////////////////
// Int128ToStr
//
// Returned string has a NULL appended to it.
// Upon return, ppEnd points to the terminating NULL.
// Thus, ppEnd - pValue => string length.
//
// bPrefix applies only to base 2 (0b) and base 16 (0x).
//
// Only bases 2, 10 and 16 are supported; any other base triggers EA_FAIL.
// The caller's buffer must be large enough for the worst case of the chosen
// base (base 2 with prefix and leading zeroes needs 131 characters).
void int128_t::Int128ToStr(char* pValue, char** ppEnd, int nBase, LeadingZeroes lz, Prefix prefix) const
{
    if(nBase == 2)
    {
        bool bLeadingZeros = (lz == kLZEnable);        // By default leading zeroes are disabled.
        bool bPrefix       = (prefix == kPrefixEnable); // By default prefix is disabled.
        if(bPrefix)
        {
            *pValue++ = '0';
            *pValue++ = 'b';
        }
        if(IsZero())
        {
            if(bLeadingZeros)
            {
                for(int i(0); i < 128; i++)
                    *pValue++ = '0';
            }
            else
                *pValue++ = '0'; // This is all we need to write.
        }
        else
        {
            // Print out the text.
            // Walk from the top bit down, suppressing zeroes until the first
            // set bit unless leading zeroes were requested.
            bool bNonZeroFound(false);
            for(int i(127); i >= 0; --i)
            {
                const int bBitIsSet(GetBit(i));
                if(bBitIsSet)
                    bNonZeroFound = true;
                if(bLeadingZeros || bNonZeroFound)
                    *pValue++ = (bBitIsSet ? '1' : '0');
            }
        }
    }
    else if(nBase == 10)
    {
        // To do: Support leading zeroes and prefix for base 10.
        if(*this == EASTDC_INT128_MIN)
        {
            // This code has a special pathway because negating EASTDC_INT128_MIN results
            // in EASTDC_INT128_MIN and thus the code below can't work.
            static const char* pINT128_MIN = "-170141183460469231731687303715884105728";
            for(const char* pCurrent = pINT128_MIN; *pCurrent; ++pCurrent, ++pValue)
                *pValue = *pCurrent;
        }
        else
        {
            int128_t value(*this);
            char* pValueInitial = pValue;
            const bool bNegative(IsNegative());
            if(bNegative)
            {
                value.Negate();
                *pValue++ = '-';
            }
            // This part here isn't particularly fast.
            // Emit digits least-significant first, then reverse them below.
            const int128_t ten((uint32_t)10);
            while (value >= ten)
            {
                const int128_t remainder = (value % ten);
                *pValue++ = (char)('0' + remainder.mPart0);
                value /= (uint32_t)10;
            }
            *pValue++ = (char)('0' + value.mPart0);
            // Reverse the string.
            char* pEnd = pValue - 1;
            if(bNegative)
                ++pValueInitial; // Leave the '-' sign in place.
            while(pValueInitial < pEnd)
            {
                char temp = *pValueInitial;
                *pValueInitial = *pEnd;
                *pEnd = temp;
                ++pValueInitial;
                --pEnd;
            }
        }
    }
    else if(nBase == 16)
    {
        bool bLeadingZeros = (lz != kLZDisable);         // By default leading zeroes are enabled.
        bool bPrefix       = (prefix != kPrefixDisable); // By default prefix is enabled.
        static const char* const pHexCharTable = "0123456789abcdef";
        if(bPrefix)
        {
            *pValue++ = '0';
            *pValue++ = 'x';
        }
        if(IsZero())
        {
            if(bLeadingZeros)
            {
                for(int i(0); i < 32; i++) // 32 is equal to (128 / 16)
                    *pValue++ = '0';
            }
            else
                *pValue++ = '0'; // This is all we need to write.
        }
        else
        {
            // Print out the text.
            bool bNonZeroFound(false);
            // Work on each part in turn, starting with the high part.
#if EA_INT128_USE_INT64
            for(int i(1); i >= 0; --i)
            {
                const uint64_t* pCurrent;
                if(i == 1)
                    pCurrent = &mPart1;
                else
                    pCurrent = &mPart0;
                // Work on each sub-part (4 bits) or the current part (64 bits), starting with the high sub-part.
                for(int j(60); j >= 0; j -= 4)
                {
                    const char c = pHexCharTable[(*pCurrent >> j) & 0x0F];
                    if(c != '0')
                        bNonZeroFound = true;
                    if(bLeadingZeros || bNonZeroFound)
                        *pValue++ = c;
                }
            }
#else
            for(int i(3); i >= 0; --i)
            {
                const uint32_t* pCurrent;
                if(i == 3)
                    pCurrent = &mPart3;
                else if(i == 2)
                    pCurrent = &mPart2;
                else if(i == 1)
                    pCurrent = &mPart1;
                else
                    pCurrent = &mPart0;
                // Work on each sub-part (4 bits) or the current part (32 bits), starting with the high sub-part.
                for(int j(28); j >= 0; j -= 4)
                {
                    const char c = pHexCharTable[(*pCurrent >> j) & 0x0F];
                    if(c != '0')
                        bNonZeroFound = true;
                    if(bLeadingZeros || bNonZeroFound)
                        *pValue++ = c;
                }
            }
#endif
        }
    }
    else
    {
        // To do: Implement this in a generic way.
        EA_FAIL(); // Base not supported.
    }
    if(ppEnd)
        *ppEnd = pValue;
    *pValue = 0;
}
// Wide-character Int128ToStr: formats into a narrow scratch buffer, then
// widens the result one character at a time (digits are all ASCII).
void int128_t::Int128ToStr(wchar_t* pValue, wchar_t** ppEnd, int nBase, LeadingZeroes lz, Prefix prefix) const
{
    // Worst case is base 2 with prefix and leading zeroes:
    // "0b" + 128 digits + terminating NUL = 131 characters. The previous
    // 130-byte buffer overflowed by one in that case.
    char str8[132];
    char* pEnd = str8;
    Int128ToStr(str8, &pEnd, nBase, lz, prefix);
    for(char* p = str8; p < pEnd;)
        *pValue++ = (wchar_t)(uint8_t)*p++;
    if(ppEnd)
        *ppEnd = pValue;
    *pValue = 0;
}
///////////////////////////////////////////////////////////////////////////////
// uint128_t
///////////////////////////////////////////////////////////////////////////////
// Default constructor: zero-initializes all 128 bits.
uint128_t::uint128_t()
#if EA_INT128_USE_INT64
    : int128_t_base(0, 0)
#else
    : int128_t_base(0, 0, 0, 0)
#endif
{
}
// Construct from four explicit 32-bit parts (nPart0 is least significant).
uint128_t::uint128_t(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3)
    : int128_t_base(nPart0, nPart1, nPart2, nPart3) // OK for EA_INT128_USE_INT64
{
}
// Construct from two explicit 64-bit parts (nPart0 is least significant).
uint128_t::uint128_t(uint64_t nPart0, uint64_t nPart1)
    : int128_t_base(nPart0, nPart1) // OK for EA_INT128_USE_INT64
{
}
// Zero-extending constructors from smaller unsigned types.
uint128_t::uint128_t(uint8_t value)
    : int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
uint128_t::uint128_t(uint16_t value)
    : int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
uint128_t::uint128_t(uint32_t value)
    : int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
#if defined(INT128_UINT_TYPE)
// Construct from the platform's native unsigned type via uint64_t.
uint128_t::uint128_t(INT128_UINT_TYPE value)
    : int128_t_base((uint64_t)value) // OK for EA_INT128_USE_INT64
{
}
#endif
uint128_t::uint128_t(uint64_t value)
    : int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Sign-extending constructors from smaller signed types: store the magnitude,
// then apply two's complement if the source was negative.
uint128_t::uint128_t(int8_t value)
{
    if(value < 0)
    {
        // -value is computed after integer promotion to int, so it cannot overflow.
        *this = uint128_t((uint8_t)-value);
        TwosComplement();
    }
    else
    {
#if EA_INT128_USE_INT64
        mPart1 = 0;
        mPart0 = value;
#else
        mPart3 = 0;
        mPart2 = 0;
        mPart1 = 0;
        mPart0 = value;
#endif
    }
}
uint128_t::uint128_t(int16_t value)
{
    if(value < 0)
    {
        *this = uint128_t((uint16_t)-value);
        TwosComplement();
    }
    else
    {
#if EA_INT128_USE_INT64
        mPart1 = 0;
        mPart0 = value;
#else
        mPart3 = 0;
        mPart2 = 0;
        mPart1 = 0;
        mPart0 = value;
#endif
    }
}
uint128_t::uint128_t(int32_t value)
{
    if(value < 0)
    {
        // NOTE(review): for INT32_MIN, -value overflows in signed arithmetic
        // before the cast; presumably relies on wraparound — confirm.
        *this = uint128_t((uint32_t)-value);
        TwosComplement();
    }
    else
    {
#if EA_INT128_USE_INT64
        mPart1 = 0;
        mPart0 = value;
#else
        mPart3 = 0;
        mPart2 = 0;
        mPart1 = 0;
        mPart0 = (uint32_t)value;
#endif
    }
}
#if defined(INT128_INT_TYPE)
// Construct from the platform's native signed type via int64_t.
uint128_t::uint128_t(INT128_INT_TYPE value)
{
    operator=(uint128_t((int64_t)value));
}
#endif
// Construct from a signed 64-bit value: store the magnitude, then apply
// two's complement if the source was negative.
uint128_t::uint128_t(int64_t value)
{
    if(value < 0)
    {
        // Compute the magnitude in unsigned arithmetic: the previous
        // (uint64_t)-value negated in signed arithmetic first, which is
        // undefined behavior for INT64_MIN.
        *this = uint128_t((uint64_t)0 - (uint64_t)value);
        TwosComplement();
    }
    else
    {
#if EA_INT128_USE_INT64
        mPart1 = 0;
        mPart0 = (uint64_t) (value);
#else
        mPart3 = 0;
        mPart2 = 0;
        mPart1 = (uint32_t) ((value >> 32) & 0xffffffff);
        mPart0 = (uint32_t) (value & 0xffffffff);
#endif
    }
}
// Construct from floating-point values via the shared conversion helper.
uint128_t::uint128_t(const float value)
{
    DoubleToUint128(value); // OK for EA_INT128_USE_INT64
}
uint128_t::uint128_t(const double value)
{
    DoubleToUint128(value); // OK for EA_INT128_USE_INT64
}
// Construct from the signed 128-bit type: the bit pattern is copied as-is.
uint128_t::uint128_t(const int128_t& value)
    : int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Copy constructor.
uint128_t::uint128_t(const uint128_t& value)
    : int128_t_base(value) // OK for EA_INT128_USE_INT64
{
}
// Construct by parsing a numeric string; see StrToInt128 for accepted formats.
uint128_t::uint128_t(const char* pValue, int nBase){
    // OK for EA_INT128_USE_INT64
    const uint128_t value(StrToInt128(pValue, NULL, nBase));
    operator=(value);
}
uint128_t::uint128_t(const wchar_t* pValue, int nBase){
    // OK for EA_INT128_USE_INT64
    wchar_t* pTextEnd(NULL);
    const uint128_t value(StrToInt128(pValue, &pTextEnd, nBase));
    operator=(value);
}
uint128_t& uint128_t::operator=(const int128_t_base& value)
{
    // C++ requires operator= to be subclassed, even if the subclassed
    // implementation is identical to the base implementation.
    // OK for EA_INT128_USE_INT64
    int128_t_base::operator=(value);
    return *this;
}
// Unary negation: two's-complement negation (modulo 2^128 for unsigned).
uint128_t uint128_t::operator-() const
{
    // OK for EA_INT128_USE_INT64
    uint128_t returnValue(*this);
    returnValue.Negate();
    return returnValue;
}
// Prefix increment: adds one in place and returns *this.
uint128_t& uint128_t::operator++()
{
    // OK for EA_INT128_USE_INT64
    int128_t_base one((uint32_t)1);
    operatorPlus(*this, one, *this);
    return *this;
}
// Prefix decrement: subtracts one in place and returns *this.
uint128_t& uint128_t::operator--()
{
    // OK for EA_INT128_USE_INT64
    int128_t_base one((uint32_t)1);
    operatorMinus(*this, one, *this);
    return *this;
}
// Postfix increment: advances *this by one and returns the value held
// before the increment. The previous implementation neither modified *this
// nor returned the old value (it returned *this + 1 and left *this alone).
uint128_t uint128_t::operator++(int)
{
    // OK for EA_INT128_USE_INT64
    const uint128_t prev(*this);
    int128_t_base one((uint32_t)1);
    operatorPlus(*this, one, *this);
    return prev;
}
// Postfix decrement: decrements *this by one and returns the value held
// before the decrement. The previous implementation neither modified *this
// nor returned the old value (it returned *this - 1 and left *this alone).
uint128_t uint128_t::operator--(int)
{
    // OK for EA_INT128_USE_INT64
    const uint128_t prev(*this);
    int128_t_base one((uint32_t)1);
    operatorMinus(*this, one, *this);
    return prev;
}
// Unary plus: identity operation.
uint128_t uint128_t::operator+() const
{
    // OK for EA_INT128_USE_INT64
    return *this;
}
// Bitwise NOT: inverts every bit of the 128-bit value.
uint128_t uint128_t::operator~() const
{
#if EA_INT128_USE_INT64
    return uint128_t(~mPart0, ~mPart1);
#else
    return uint128_t(~mPart0, ~mPart1, ~mPart2, ~mPart3);
#endif
}
// Binary addition (modulo 2^128).
uint128_t operator+(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    uint128_t::operatorPlus(value1, value2, temp);
    return temp;
}
// Binary subtraction (modulo 2^128).
uint128_t operator-(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    uint128_t::operatorMinus(value1, value2, temp);
    return temp;
}
///////////////////////////////////////////////////////////////////////////////
// operator *
//
// Unsigned multiply (modulo 2^128). Unlike the signed version, no sign
// normalization or fix-up is required; the base-class multiply is used directly.
//
uint128_t operator*(const uint128_t& value1, const uint128_t& value2)
{
    uint128_t product;
    int128_t_base::operatorMul(value1, value2, product);
    return product;
}
// Division and modulus both delegate to Modulus(), which computes quotient
// and remainder in a single pass.
uint128_t operator/(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t remainder;
    uint128_t quotient;
    value1.Modulus(value2, quotient, remainder);
    return quotient;
}
uint128_t operator%(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t remainder;
    uint128_t quotient;
    value1.Modulus(value2, quotient, remainder);
    return remainder;
}
// Compound arithmetic assignments: each applies the corresponding binary
// operation in place and returns *this.
uint128_t& uint128_t::operator+=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorPlus(*this, value, *this);
    return *this;
}
uint128_t& uint128_t::operator-=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorMinus(*this, value, *this);
    return *this;
}
uint128_t& uint128_t::operator*=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    *this = *this * value;
    return *this;
}
uint128_t& uint128_t::operator/=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    *this = *this / value;
    return *this;
}
uint128_t& uint128_t::operator%=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    *this = *this % value;
    return *this;
}
// Logical right shift: zero is shifted in from the left side.
uint128_t uint128_t::operator>>(int nShift) const
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    operatorShiftRight(*this, nShift, temp);
    return temp;
}
// Left shift; vacated low bits are filled with zero.
uint128_t uint128_t::operator<<(int nShift) const
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    operatorShiftLeft(*this, nShift, temp);
    return temp;
}
// In-place right shift (logical; see operator>> above).
uint128_t& uint128_t::operator>>=(int nShift)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    operatorShiftRight(*this, nShift, temp);
    *this = temp;
    return *this;
}
// In-place left shift.
uint128_t& uint128_t::operator<<=(int nShift)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    operatorShiftLeft(*this, nShift, temp);
    *this = temp;
    return *this;
}
// Bitwise XOR of two 128-bit values.
uint128_t operator^(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    uint128_t::operatorXOR(value1, value2, temp);
    return temp;
}
// Bitwise OR of two 128-bit values.
uint128_t operator|(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    uint128_t::operatorOR(value1, value2, temp);
    return temp;
}
// Bitwise AND of two 128-bit values.
uint128_t operator&(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64
    uint128_t temp;
    uint128_t::operatorAND(value1, value2, temp);
    return temp;
}
// In-place bitwise compound assignments.
uint128_t& uint128_t::operator^=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorXOR(*this, value, *this);
    return *this;
}
uint128_t& uint128_t::operator|=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorOR(*this, value, *this);
    return *this;
}
uint128_t& uint128_t::operator&=(const uint128_t& value)
{
    // OK for EA_INT128_USE_INT64
    operatorAND(*this, value, *this);
    return *this;
}
// This function forms the basis of all logical comparison functions.
// If value1 < value2, the return value is -1.
// If value1 == value2, the return value is 0.
// If value1 > value2, the return value is 1.
int compare(const uint128_t& value1, const uint128_t& value2)
{
    // Unsigned values: compare the individual parts directly, highest part first.
#if EA_INT128_USE_INT64
    if(value1.mPart1 == value2.mPart1)
    {
        if(value1.mPart0 == value2.mPart0)
            return 0;
        else if(value1.mPart0 > value2.mPart0)
            return 1;
        // return -1; //Just fall through to the end.
    }
    else if(value1.mPart1 > value2.mPart1)
        return 1;
    return -1;
#else
    if(value1.mPart3 == value2.mPart3)
    {
        if(value1.mPart2 == value2.mPart2)
        {
            if(value1.mPart1 == value2.mPart1)
            {
                if(value1.mPart0 == value2.mPart0)
                    return 0;
                else if(value1.mPart0 > value2.mPart0)
                    return 1;
                // return -1; //Just fall through to the end.
            }
            else if(value1.mPart1 > value2.mPart1)
                return 1;
            // return -1; //Just fall through to the end.
        }
        else if(value1.mPart2 > value2.mPart2)
            return 1;
        // return -1; //Just fall through to the end.
    }
    else if(value1.mPart3 > value2.mPart3)
        return 1;
    return -1;
#endif
}
bool operator==(const uint128_t& value1, const uint128_t& value2)
{
    // Equal only if every part matches. mPart0 is tested first, as low
    // parts are the most likely to differ and so short-circuit fastest.
#if EA_INT128_USE_INT64
    return (value1.mPart0 == value2.mPart0) && (value1.mPart1 == value2.mPart1);
#else
    return (value1.mPart0 == value2.mPart0) && (value1.mPart1 == value2.mPart1) &&
           (value1.mPart2 == value2.mPart2) && (value1.mPart3 == value2.mPart3);
#endif
}
bool operator!=(const uint128_t& value1, const uint128_t& value2)
{
    // Logical negation of operator==; correct for both the 64-bit and
    // 32-bit part representations.
    return !(value1 == value2);
}
bool operator>(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64. compare() returns exactly -1, 0, or 1.
    return compare(value1, value2) == 1;
}
bool operator>=(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64. compare() returns exactly -1, 0, or 1.
    return compare(value1, value2) != -1;
}
bool operator<(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64. compare() returns exactly -1, 0, or 1.
    return compare(value1, value2) == -1;
}
bool operator<=(const uint128_t& value1, const uint128_t& value2)
{
    // OK for EA_INT128_USE_INT64. compare() returns exactly -1, 0, or 1.
    return compare(value1, value2) != 1;
}
int8_t uint128_t::AsInt8() const
{
    // Truncate to the low 8 bits. OK for EA_INT128_USE_INT64.
    // Per C++ Standard 4.7p3, converting an unsigned value that does not fit
    // into a signed type yields an implementation-defined result.
    return static_cast<int8_t>(mPart0);
}
int16_t uint128_t::AsInt16() const
{
    // Truncate to the low 16 bits. OK for EA_INT128_USE_INT64.
    // Per C++ Standard 4.7p3, converting an unsigned value that does not fit
    // into a signed type yields an implementation-defined result.
    return static_cast<int16_t>(mPart0);
}
int32_t uint128_t::AsInt32() const
{
    // Truncate to the low 32 bits. OK for EA_INT128_USE_INT64.
    // Per C++ Standard 4.7p3, converting an unsigned value that does not fit
    // into a signed type yields an implementation-defined result.
    return static_cast<int32_t>(mPart0);
}
int64_t uint128_t::AsInt64() const
{
    // Truncate to the low 64 bits.
#if EA_INT128_USE_INT64
    return static_cast<int64_t>(mPart0);
#else
    // Recombine the two low 32-bit parts into one 64-bit value.
    return (static_cast<int64_t>(mPart1) << 32) + mPart0;
#endif
}
// I am not convinced that this is a reliable method of conversion.
float uint128_t::AsFloat() const
{
    // Sum each nonzero part scaled by its positional weight (2^64 for the
    // 64-bit layout; 2^96 / 2^64 / 2^32 for the 32-bit layout). Precision is
    // necessarily lost: float carries only 24 mantissa bits.
    float result(0);
#if EA_INT128_USE_INT64
    if(mPart1)
        result += (mPart1 * 18446744073709551616.f); // 2^64
    if(mPart0)
        result += (float)mPart0;
#else
    if(mPart3)
        result += (mPart3 * 79228162514264337593543950336.f); // 2^96
    if(mPart2)
        result += (mPart2 * 18446744073709551616.f); // 2^64
    if(mPart1)
        result += (mPart1 * 4294967296.f); // 2^32
    if(mPart0)
        result += (float)mPart0;
#endif
    return result;
}
// I am not convinced that this is a reliable method of conversion.
double uint128_t::AsDouble() const
{
    // Sum each nonzero part scaled by its positional weight (2^64 for the
    // 64-bit layout; 2^96 / 2^64 / 2^32 for the 32-bit layout). Precision is
    // necessarily lost: double carries only 53 mantissa bits.
    double result(0);
#if EA_INT128_USE_INT64
    if(mPart1)
        result += (mPart1 * 18446744073709551616.0); // 2^64
    if(mPart0)
        result += (double)mPart0;
#else
    if(mPart3)
        result += (mPart3 * 79228162514264337593543950336.0); // 2^96
    if(mPart2)
        result += (mPart2 * 18446744073709551616.0); // 2^64
    if(mPart1)
        result += (mPart1 * 4294967296.0); // 2^32
    if(mPart0)
        result += (double)mPart0;
#endif
    return result;
}
void uint128_t::Negate()
{
    // For an unsigned value, arithmetic negation is exactly the two's
    // complement. OK for EA_INT128_USE_INT64.
    TwosComplement();
}
bool uint128_t::IsNegative() const
{
    // True if value < 0 — an unsigned value never is.
    // OK for EA_INT128_USE_INT64.
    return false;
}
bool uint128_t::IsPositive() const
{
    // True if value >= 0 — an unsigned value always is.
    // OK for EA_INT128_USE_INT64
    return true;
}
///////////////////////////////////////////////////////////////////////////////
// Modulus
//
// This is a generic function that does both division modulus calculations.
//
// Implements classic binary (shift-and-subtract) long division: each of the
// 128 dividend bits, from most- to least-significant, is shifted into a
// running remainder; whenever the remainder reaches the divisor it is
// reduced and the corresponding quotient bit is set.
//
void uint128_t::Modulus(const uint128_t& divisor, uint128_t& quotient, uint128_t& remainder) const
{
    // OK for EA_INT128_USE_INT64
    uint128_t tempDividend(*this);
    uint128_t tempDivisor(divisor);
    if(tempDivisor.IsZero())
    {
        // Force a divide by zero exception.
        // We know that tempDivisor.mPart0 is zero.
        quotient.mPart0 /= tempDivisor.mPart0;
    }
    else if(tempDividend.IsZero())
    {
        // 0 / anything -> quotient 0, remainder 0.
        quotient = uint128_t((uint32_t)0);
        remainder = uint128_t((uint32_t)0);
    }
    else
    {
        // Note: the quotient needs no prior clearing — every one of its 128
        // bits is explicitly written by SetBit() in the loop below.
        remainder.SetZero();
        for(int i(0); i < 128; i++)
        {
            // Bring the next (high-to-low) dividend bit into the remainder.
            remainder += (uint32_t)tempDividend.GetBit(127 - i);
            const bool bBit(remainder >= tempDivisor);
            quotient.SetBit(127 - i, bBit);
            if(bBit)
                remainder -= tempDivisor;
            // Make room for the next bit — except after the final iteration,
            // where the remainder must be left as the true result.
            if((i != 127) && !remainder.IsZero())
                remainder <<= 1;
        }
    }
}
///////////////////////////////////////////////////////////////////////////////
// StrToInt128
//
// Same as C runtime strtol function but for uint128_t.
// This is probably the most general and useful of the C atoi family of functions.
//
// Bases 2, 10, and 16 are implemented (plus nBase == 0 auto-detection).
// A leading '-' produces the two's complement of the parsed magnitude.
//
uint128_t uint128_t::StrToInt128(const char* pValue, char** ppEnd, int nBase)
{
    uint128_t value((uint32_t)0);   // Current value
    const char* p = pValue;         // Current position
    const char* pBegin = NULL;      // Where the digits start.
    const char* pEnd = NULL;        // Where the digits end. One-past the last digit.
    char chSign('+');               // One of either '+' or '-'
    // Skip leading whitespace
    while(isspace((unsigned char)*p))
        ++p;
    // Check for sign.
    if((*p == '-') || (*p == '+'))
        chSign = *p++;
    // Do checks on 'nBase'.
    if((nBase < 0) || (nBase == 1) || (nBase > 36)){
        // Invalid base: report no characters consumed and return zero.
        if(ppEnd)
            *ppEnd = (char*)pValue;
        return value;
    }
    else if(nBase == 0){
        // Auto detect one of base 2, 8, 10, or 16.
        if(*p != '0')
            nBase = 10;
        else if((p[1] == 'x') || (p[1] == 'X'))
            nBase = 16;
        else if((p[1] == 'b') || (p[1] == 'B'))
            nBase = 2;
        else
            nBase = 8;
    }
    if(nBase == 16){
        // If there is a leading '0x', then skip past it.
        if((*p == '0') && ((p[1] == 'x') || (p[1] == 'X')))
            p += 2;
    }
    else if(nBase == 2){
        // If there is a leading '0b', then skip past it.
        if((*p == '0') && ((p[1] == 'b') || (p[1] == 'B')))
            p += 2;
    }
    // Save the position where the digits start.
    pBegin = p;
    if(nBase == 2) // Binary
    {
        while((*p == '0') || (*p == '1'))
            p++;
        pEnd = p;
        if(pEnd > pBegin + 128) // There can be at most 128 binary digits in the string.
        {
            pEnd = pBegin + 128;
            p = pEnd;
        }
        // Walk backward from the least significant digit; digit i maps
        // directly to bit i of the result.
        for(int i(0); p > pBegin; ++i)
        {
            --p;
            if(*p == '1')
                value.SetBit(i, true);
        }
    }
    else if(nBase == 10) // Decimal
    {
        while(isdigit((unsigned char)*p))
            ++p;
        pEnd = p;
        if(pEnd > pBegin + 39) // With base 10, it is not enough to simply check against 39 digits,
        {                      // as you can have 39 '9's and overflow. But 39 is the most you could have.
            pEnd = pBegin + 39;
            p = pEnd;
        }
        // Accumulate digits from least significant upward, scaling the
        // positional multiplier by ten each step.
        uint128_t multiplier((uint32_t)1);
        for(int i(0); p > pBegin; ++i)
        {
            const uint32_t c = *(--p) - (uint32_t)'0';
            if(c)
            {
                // This can be optimized for faster speed by doing the smaller orders
                // of ten on value.mPart0 with an int multiplier instead of on value
                // and a uint128_t multiplier.
                value += (multiplier * c);
            }
            multiplier *= (uint32_t)10;
        }
    }
    else if(nBase == 16) // Hexadecimal
    {
        while(isxdigit((unsigned char)*p))
            p++;
        pEnd = p;
        if(pEnd > pBegin + 32) // There can be at most 32 hexadecimal digits in the string.
        {
            pEnd = pBegin + 32;
            p = pEnd;
        }
        // There can be as many as 32 characters.
        // Each hex digit supplies 4 bits; nPart selects the mPartN word the
        // digit lands in, and the shift positions it within that word.
        for(int i(0); p > pBegin; i++)
        {
#if EA_INT128_USE_INT64
            const int nPart = (int)((pEnd - p) / 16); // 16 hex digits per 64-bit part.
            uint64_t c = *(--p);
#else
            const int nPart = (int)((pEnd - p) / 8); // 8 hex digits per 32-bit part.
            uint32_t c = *(--p);
#endif
            // Map the character to its numeric value; the isxdigit() scan
            // above guarantees it is one of 0-9, a-f, A-F.
            if(c >= '0' && c <= '9')
                c = (c - '0');
            else if(c >= 'a' && c <= 'f')
                c = 10 + (c - 'a');
            else
                c = 10 + (c - 'A');
            if(c)
            {
#if EA_INT128_USE_INT64
                c <<= ((i % 16) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
#else
                c <<= ((i % 8) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
                else if(nPart == 2)
                    value.mPart2 |= c;
                else if(nPart == 3)
                    value.mPart3 |= c;
#endif
            }
        }
    }
    else
    {
        // EA_ASSERT(false); // For the time being, we handle only the above bases.
        // NOTE(review): base 8 (and other bases in 3..36) is detected but not
        // implemented; in this path pEnd is never assigned, so *ppEnd below is
        // set to NULL rather than pValue — confirm whether that is intended.
    }
    if(chSign == '-')
        value.Negate(); // Two's complement of the magnitude, as strtoul does.
    if(ppEnd)
        *ppEnd = (char*)pEnd;
    return value;
}
///////////////////////////////////////////////////////////////////////////////
// StrToInt128
//
// Same as C runtime strtol function but for uint128_t.
// This is probably the most general and useful of the C atoi family of functions.
//
// Bases 2, 10, and 16 are implemented (plus nBase == 0 auto-detection).
// A leading '-' produces the two's complement of the parsed magnitude.
//
uint128_t uint128_t::StrToInt128(const wchar_t* pValue, wchar_t** ppEnd, int nBase)
{
    // This is simply a copy and paste of the char version of StrToInt128, with minor
    // modifications for wchar_t.
    uint128_t value((uint32_t)0);   // Current value
    const wchar_t* p = pValue;      // Current position
    const wchar_t* pBegin = NULL;   // Where the digits start.
    const wchar_t* pEnd = NULL;     // Where the digits end. One-past the last digit.
    wchar_t chSign('+');            // One of either '+' or '-'
    // Skip leading whitespace
    while((*p > 0) && (*p < 127) && isspace((uint8_t)*p)) // Compare to < 127 because ctype functions will crash for higher values.
        ++p;
    // Check for sign.
    if((*p == '-') || (*p == '+'))
        chSign = *p++;
    // Do checks on 'nBase'.
    if((nBase < 0) || (nBase == 1) || (nBase > 36)){
        // Invalid base: report no characters consumed and return zero.
        if(ppEnd)
            *ppEnd = (wchar_t*)pValue;
        return value;
    }
    else if(nBase == 0){
        // Auto detect one of base 2, 8, 10, or 16.
        if(*p != '0')
            nBase = 10;
        else if((p[1] == 'x') || (p[1] == 'X'))
            nBase = 16;
        else if((p[1] == 'b') || (p[1] == 'B'))
            nBase = 2;
        else
            nBase = 8;
    }
    if(nBase == 16){
        // If there is a leading '0x', then skip past it.
        if((*p == '0') && ((p[1] == 'x') || (p[1] == 'X')))
            p += 2;
    }
    else if(nBase == 2){
        // If there is a leading '0b', then skip past it.
        if((*p == '0') && ((p[1] == 'b') || (p[1] == 'B')))
            p += 2;
    }
    // Save the position where the digits start.
    pBegin = p;
    if(nBase == 2) // Binary
    {
        while((*p == '0') || (*p == '1'))
            p++;
        pEnd = p;
        if(pEnd > pBegin + 128) // There can be at most 128 binary digits in the string.
        {
            pEnd = pBegin + 128;
            p = pEnd;
        }
        // Walk backward from the least significant digit; digit i maps
        // directly to bit i of the result.
        for(int i(0); p > pBegin; ++i)
        {
            --p;
            if(*p == '1')
                value.SetBit(i, true);
        }
    }
    else if(nBase == 10) // Decimal
    {
        while((*p > 0) && (*p < 127) && isdigit((uint8_t)*p)) // Compare to < 127 because ctype functions will crash for higher values.
            ++p;
        pEnd = p;
        if(pEnd > pBegin + 39) // With base 10, it is not enough to simply check against 39 digits,
        {                      // as you can have 39 '9's and overflow. But 39 is the most you could have.
            pEnd = pBegin + 39;
            p = pEnd;
        }
        // Accumulate digits from least significant upward, scaling the
        // positional multiplier by ten each step.
        uint128_t multiplier((uint32_t)1);
        for(int i(0); p > pBegin; ++i)
        {
            const uint32_t c = *(--p) - (uint32_t)'0';
            if(c)
            {
                // This can be optimized for faster speed by doing the smaller orders
                // of ten on value.mPart0 with an int multiplier instead of on value
                // and a uint128_t multiplier.
                value += (multiplier * c);
            }
            multiplier *= (uint32_t)10;
        }
    }
    else if(nBase == 16) // Hexadecimal
    {
        while((*p > 0) && (*p < 127) && isxdigit((uint8_t)*p)) // Compare to < 127 because ctype functions will crash for higher values.
            p++;
        pEnd = p;
        if(pEnd > pBegin + 32) // There can be at most 32 hexadecimal digits in the string.
        {
            pEnd = pBegin + 32;
            p = pEnd;
        }
        // There can be as many as 32 characters.
        // Each hex digit supplies 4 bits; nPart selects the mPartN word the
        // digit lands in, and the shift positions it within that word.
        for(int i(0); p > pBegin; i++)
        {
#if EA_INT128_USE_INT64
            const int nPart = (int)((pEnd - p) / 16); // 16 hex digits per 64-bit part.
            uint64_t c = *(--p);
#else
            const int nPart = (int)((pEnd - p) / 8); // 8 hex digits per 32-bit part.
            uint32_t c = *(--p);
#endif
            // Map the character to its numeric value; the isxdigit() scan
            // above guarantees it is one of 0-9, a-f, A-F.
            if(c >= '0' && c <= '9')
                c = (c - '0');
            else if(c >= 'a' && c <= 'f')
                c = 10 + (c - 'a');
            else
                c = 10 + (c - 'A');
            if(c)
            {
#if EA_INT128_USE_INT64
                c <<= ((i % 16) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
#else
                c <<= ((i % 8) * 4);
                if(nPart == 0)
                    value.mPart0 |= c;
                else if(nPart == 1)
                    value.mPart1 |= c;
                else if(nPart == 2)
                    value.mPart2 |= c;
                else if(nPart == 3)
                    value.mPart3 |= c;
#endif
            }
        }
    }
    else
    {
        // EA_ASSERT(false); // For the time being, we handle only the above bases.
        // NOTE(review): base 8 (and other bases in 3..36) is detected but not
        // implemented; in this path pEnd is never assigned, so *ppEnd below is
        // set to NULL rather than pValue — confirm whether that is intended.
    }
    if(chSign == '-')
        value.Negate(); // Two's complement of the magnitude, as strtoul does.
    if(ppEnd)
        *ppEnd = (wchar_t*)pEnd;
    return value;
}
///////////////////////////////////////////////////////////////////////////////
// Int128ToStr
//
// Returned string has a NULL appended to it.
// Upon return, ppEnd points to the terminating NULL.
// Thus, ppEnd - pValue => string length.
//
// bPrefix applies only to base 2 (0b) and base 16 (0x).
//
// The caller supplies the buffer. Worst case output is base 2 with prefix
// and leading zeroes: 2 ("0b") + 128 digits + terminating NULL = 131 chars.
//
void uint128_t::Int128ToStr(char* pValue, char** ppEnd, int nBase, LeadingZeroes lz, Prefix prefix) const
{
    if(nBase == 2)
    {
        bool bLeadingZeros = (lz == kLZEnable);     // By default leading zeroes are disabled.
        bool bPrefix = (prefix == kPrefixEnable);   // By default prefix is disabled.
        if(bPrefix)
        {
            *pValue++ = '0';
            *pValue++ = 'b';
        }
        if(IsZero())
        {
            if(bLeadingZeros)
            {
                for(int i(0); i < 128; i++)
                    *pValue++ = '0';
            }
            else
                *pValue++ = '0'; // This is all we need to write.
        }
        else
        {
            // Print out the text, high bit first. Digits are suppressed until
            // the first set bit unless leading zeroes were requested.
            bool bNonZeroFound(false);
            for(int i(127); i >= 0; --i)
            {
                const int bBitIsSet(GetBit(i));
                if(bBitIsSet)
                    bNonZeroFound = true;
                if(bLeadingZeros || bNonZeroFound)
                    *pValue++ = (bBitIsSet ? '1' : '0');
            }
        }
    }
    else if(nBase == 10)
    {
        // To do: Support leading zeroes and prefix for base 10.
        uint128_t value(*this);
        char* pValueInitial = pValue;
        // Emit decimal digits least-significant-first via repeated
        // divide-by-ten, then reverse in place below.
        // This part here isn't particularly fast.
        const uint128_t ten((uint32_t)10);
        while (value >= ten)
        {
            const uint128_t remainder = (value % ten);
            *pValue++ = (char)('0' + remainder.mPart0);
            value /= (uint32_t)10;
        }
        *pValue++ = (char)('0' + value.mPart0);
        // Reverse the string.
        char* pEnd = pValue - 1;
        while(pValueInitial < pEnd)
        {
            char temp = *pValueInitial;
            *pValueInitial = *pEnd;
            *pEnd = temp;
            ++pValueInitial;
            --pEnd;
        }
    }
    else if(nBase == 16)
    {
        bool bLeadingZeros = (lz != kLZDisable);        // By default leading zeroes are enabled.
        bool bPrefix = (prefix != kPrefixDisable);      // By default prefix is enabled.
        static const char* const pHexCharTable = "0123456789abcdef";
        if(bPrefix)
        {
            *pValue++ = '0';
            *pValue++ = 'x';
        }
        if(IsZero())
        {
            if(bLeadingZeros)
            {
                for(int i(0); i < 32; i++) // 32 is equal to (128 / 16)
                    *pValue++ = '0';
            }
            else
                *pValue++ = '0'; // This is all we need to write.
        }
        else
        {
            // Print out the text. Digits are suppressed until the first
            // nonzero nibble unless leading zeroes were requested.
            bool bNonZeroFound(false);
            // Work on each part in turn, starting with the high part.
#if EA_INT128_USE_INT64
            for(int i(1); i >= 0; --i)
            {
                const uint64_t* pCurrent;
                if(i == 1)
                    pCurrent = &mPart1;
                else
                    pCurrent = &mPart0;
                // Work on each sub-part (4 bits) or the current part (64 bits), starting with the high sub-part.
                for(int j(60); j >= 0; j -= 4)
                {
                    const char c = pHexCharTable[(*pCurrent >> j) & 0x0F];
                    if(c != '0')
                        bNonZeroFound = true;
                    if(bLeadingZeros || bNonZeroFound)
                        *pValue++ = c;
                }
            }
#else
            for(int i(3); i >= 0; --i)
            {
                const uint32_t* pCurrent;
                if(i == 3)
                    pCurrent = &mPart3;
                else if(i == 2)
                    pCurrent = &mPart2;
                else if(i == 1)
                    pCurrent = &mPart1;
                else
                    pCurrent = &mPart0;
                // Work on each sub-part (4 bits) or the current part (32 bits), starting with the high sub-part.
                for(int j(28); j >= 0; j -= 4)
                {
                    const char c = pHexCharTable[(*pCurrent >> j) & 0x0F];
                    if(c != '0')
                        bNonZeroFound = true;
                    if(bLeadingZeros || bNonZeroFound)
                        *pValue++ = c;
                }
            }
#endif
        }
    }
    else
    {
        // To do: Implement this in a generic way.
        EA_FAIL(); // Base not supported.
    }
    // Report the position of the terminator, then write it.
    if(ppEnd)
        *ppEnd = pValue;
    *pValue++ = 0;
}
void uint128_t::Int128ToStr(wchar_t* pValue, wchar_t** ppEnd, int nBase, LeadingZeroes lz, Prefix prefix) const
{
    // Format via the 8-bit version, then widen each char to wchar_t.
    //
    // Buffer sizing fix: the worst-case 8-bit output is base 2 with the "0b"
    // prefix and leading zeroes enabled: 2 + 128 digits + terminating NULL =
    // 131 chars. The previous size of 130 overflowed by one byte in that case.
    char str8[132];
    char* pEnd = str8;
    Int128ToStr(str8, &pEnd, nBase, lz, prefix);
    // Widen through uint8_t so high-bit chars don't sign-extend.
    for(char* p = str8; p < pEnd;)
        *pValue++ = (wchar_t)(uint8_t)*p++;
    if(ppEnd)
        *ppEnd = pValue;  // ppEnd points at the terminating NULL, as documented.
    *pValue = 0;
}
} // namespace StdC
} // namespace EA
#ifdef _MSC_VER
#pragma warning(pop)
#endif
|
#include "envoy/api/v2/cds.pb.h"
#include "envoy/api/v2/discovery.pb.h"
#include "envoy/grpc/status.h"
#include "envoy/stats/scope.h"
#include "common/config/protobuf_link_hacks.h"
#include "common/config/resources.h"
#include "common/protobuf/protobuf.h"
#include "common/protobuf/utility.h"
#include "test/common/grpc/grpc_client_integration.h"
#include "test/integration/http_integration.h"
#include "test/integration/utility.h"
#include "test/mocks/server/mocks.h"
#include "test/test_common/network_utility.h"
#include "test/test_common/simulated_time_system.h"
#include "test/test_common/utility.h"
#include "absl/synchronization/notification.h"
#include "gtest/gtest.h"
using testing::AssertionFailure;
using testing::AssertionResult;
using testing::AssertionSuccess;
using testing::IsSubstring;
namespace Envoy {
namespace {
// Names of the two clusters delivered dynamically via CDS, and the
// fake_upstreams_ indices that back them (index 0 is the fake CDS server
// itself — see CdsIntegrationTest::initialize()).
const char ClusterName1[] = "cluster_1";
const char ClusterName2[] = "cluster_2";
const int UpstreamIndex1 = 1;
const int UpstreamIndex2 = 2;
// Fixture that boots Envoy with a single static cluster — the CDS-over-gRPC
// server itself — and then adds/removes cluster_1 and cluster_2 dynamically
// by sending DiscoveryResponses from a fake xDS server.
class CdsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest {
public:
  CdsIntegrationTest()
      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(),
                            ConfigHelper::discoveredClustersBootstrap("GRPC")) {}
  // Used by DeltaCdsIntegrationTest to substitute a DELTA_GRPC bootstrap.
  CdsIntegrationTest(Network::Address::IpVersion ip_version, const std::string& config)
      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ip_version, config) {}
  void TearDown() override {
    // Close the xDS stream/connection before tearing down the server and
    // upstreams so shutdown doesn't race an in-flight discovery exchange.
    cleanUpXdsConnection();
    test_server_.reset();
    fake_upstreams_.clear();
  }
  // Overridden to insert this stuff into the initialize() at the very beginning of
  // HttpIntegrationTest::testRouterHeaderOnlyRequestAndResponse().
  void initialize() override {
    // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in
    // BaseIntegrationTest::createUpstreams() (which is part of initialize()).
    // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap
    // config that you use!
    setUpstreamCount(1);                                  // the CDS cluster
    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2.
    // HttpIntegrationTest::initialize() does many things:
    // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount().
    // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening
    //    on (since you're supposed to leave them as 0).
    // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual
    //    Envoy used in the tests.
    // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in
    //    the bootstrap config have come up, and registering them in a port map (see lookupPort()).
    //    However, this test needs to defer all of that to later.
    defer_listener_finalization_ = true;
    HttpIntegrationTest::initialize();
    // Create the regular (i.e. not an xDS server) upstreams. We create them manually here after
    // initialize() because finalize() expects all fake_upstreams_ to correspond to a static
    // cluster in the bootstrap config - which we don't want since we're testing dynamic CDS!
    fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_,
                                                  timeSystem(), enable_half_close_));
    fake_upstreams_[UpstreamIndex1]->set_allow_unexpected_disconnects(false);
    fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_,
                                                  timeSystem(), enable_half_close_));
    fake_upstreams_[UpstreamIndex2]->set_allow_unexpected_disconnects(false);
    // Build the Cluster protos that will later be pushed over CDS, pointing
    // at the ports the fake upstreams actually bound.
    cluster1_ = ConfigHelper::buildCluster(
        ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(),
        Network::Test::getLoopbackAddressString(ipVersion()));
    cluster2_ = ConfigHelper::buildCluster(
        ClusterName2, fake_upstreams_[UpstreamIndex2]->localAddress()->ip()->port(),
        Network::Test::getLoopbackAddressString(ipVersion()));
    // Let Envoy establish its connection to the CDS server.
    acceptXdsConnection();
    // Do the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_1.
    // Split out into its own function so that DeltaCdsIntegrationTest can override it.
    giveInitialCluster();
    // We can continue the test once we're sure that Envoy's ClusterManager has made use of
    // the DiscoveryResponse describing cluster_1 that we sent.
    // 2 because the statically specified CDS server itself counts as a cluster.
    test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2);
    // Wait for our statically specified listener to become ready, and register its port in the
    // test framework's downstream listener port map.
    test_server_->waitUntilListenersReady();
    registerTestServerPorts({"http"});
  }
  // Accepts the xDS connection from Envoy on fake upstream 0 and opens the
  // gRPC stream used for all subsequent discovery exchanges.
  void acceptXdsConnection() {
    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.
        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);
    RELEASE_ASSERT(result, result.message());
    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);
    RELEASE_ASSERT(result, result.message());
    xds_stream_->startGrpcStream();
    fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
  }
  // Does the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_1.
  // Split out into its own function so that DeltaCdsIntegrationTest can override it.
  virtual void giveInitialCluster() {
    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}));
    sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {cluster1_},
                                                   "55");
  }
  // Cluster protos served over CDS; populated in initialize().
  envoy::api::v2::Cluster cluster1_;
  envoy::api::v2::Cluster cluster2_;
};
// Run every CdsIntegrationTest case across IP versions and gRPC client types.
INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, CdsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS);
// 1) Envoy starts up with no static clusters (other than the CDS-over-gRPC server).
// 2) Envoy is told of a cluster via CDS.
// 3) We send Envoy a request, which we verify is properly proxied to and served by that cluster.
// 4) Envoy is told that cluster is gone.
// 5) We send Envoy a request, which should 503.
// 6) Envoy is told that the cluster is back.
// 7) We send Envoy a request, which we verify is properly proxied to and served by that cluster.
// The "55"/"42"/"413" strings are arbitrary version_info tokens; each
// compareDiscoveryRequest() verifies Envoy ACKed the previously sent version.
TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) {
  // Calls our initialize(), which includes establishing a listener, route, and cluster.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  // Tell Envoy that cluster_1 is gone.
  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}));
  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {}, "42");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse that says cluster_1 is gone.
  test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1);
  // Now that cluster_1 is gone, the listener (with its routing to cluster_1) should 503.
  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(
      lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com");
  ASSERT_TRUE(response->complete());
  EXPECT_STREQ("503", response->headers().Status()->value().c_str());
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_1 is back.
  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}));
  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {cluster1_},
                                                 "413");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse describing cluster_1 that we sent. Again, 2 includes CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2);
  // Does *not* call our initialize().
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
}
// Tests adding a cluster, adding another, then removing the first.
// State-of-the-world semantics: each sendDiscoveryResponse lists the FULL set
// of clusters that should exist after the update.
TEST_P(CdsIntegrationTest, TwoClusters) {
  // Calls our initialize(), which includes establishing a listener, route, and cluster.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_2 is here.
  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}));
  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster,
                                                 {cluster1_, cluster2_}, "42");
  // The '3' includes the fake CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
  // A request for cluster_2 should be fine.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_1 is gone.
  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}));
  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {cluster2_}, "42");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse that says cluster_1 is gone.
  test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1);
  // Even with cluster_1 gone, a request for cluster_2 should be fine.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_1 is back.
  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}));
  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster,
                                                 {cluster1_, cluster2_}, "413");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse describing cluster_1 that we sent. Again, 3 includes CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
  // Does *not* call our initialize().
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
}
// Variant of CdsIntegrationTest that speaks the incremental (delta) xDS
// protocol: responses carry only added/removed resources rather than the
// full state of the world.
class DeltaCdsIntegrationTest : public CdsIntegrationTest {
public:
  DeltaCdsIntegrationTest()
      : CdsIntegrationTest(ipVersion(), ConfigHelper::discoveredClustersBootstrap("DELTA_GRPC")) {}
  void TearDown() override {
    // Same teardown ordering as the base class: close xDS first.
    cleanUpXdsConnection();
    test_server_.reset();
    fake_upstreams_.clear();
  }
  // Does the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_1.
  // Split out into its own function so that DeltaCdsIntegrationTest can override it.
  void giveInitialCluster() override {
    EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {}));
    sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({cluster1_}, {}, "55");
  }
};
// Run every DeltaCdsIntegrationTest case across IP versions and gRPC client
// types. Uses INSTANTIATE_TEST_SUITE_P, consistent with the CdsIntegrationTest
// instantiation above; INSTANTIATE_TEST_CASE_P is the deprecated pre-1.10
// googletest spelling of the same macro.
INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, DeltaCdsIntegrationTest,
                         GRPC_CLIENT_INTEGRATION_PARAMS);
// 1) Envoy starts up with no static clusters (other than the CDS-over-gRPC server).
// 2) Envoy is told of a cluster via CDS.
// 3) We send Envoy a request, which we verify is properly proxied to and served by that cluster.
// 4) Envoy is told that cluster is gone.
// 5) We send Envoy a request, which should 503.
// 6) Envoy is told that the cluster is back.
// 7) We send Envoy a request, which we verify is properly proxied to and served by that cluster.
// Delta protocol: each response lists only the resources added/removed, not
// the full cluster set.
TEST_P(DeltaCdsIntegrationTest, CdsClusterUpDownUp) {
  // Calls CdsIntegrationTest::initialize(), which includes establishing a listener, route, and
  // cluster.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  // Tell Envoy that cluster_1 is gone.
  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {}));
  sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({}, {ClusterName1}, "42");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse that says cluster_1 is gone.
  test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1);
  // Now that cluster_1 is gone, the listener (with its routing to cluster_1) should 503.
  BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(
      lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com");
  ASSERT_TRUE(response->complete());
  EXPECT_STREQ("503", response->headers().Status()->value().c_str());
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_1 is back.
  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {}));
  sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({cluster1_}, {}, "413");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse describing cluster_1 that we sent. Again, 2 includes CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2);
  // Does *not* call our initialize().
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
}
// Tests adding a cluster, adding another, then removing the first.
// Delta protocol: responses list only the added/removed resources.
TEST_P(DeltaCdsIntegrationTest, TwoClusters) {
  // Calls CdsIntegrationTest::initialize(), which includes establishing a listener, route, and
  // cluster.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_2 is here.
  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {}));
  sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({cluster2_}, {}, "42");
  // The '3' includes the fake CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
  // A request for cluster_2 should be fine.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_1 is gone.
  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {}));
  sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({}, {ClusterName1}, "42");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse that says cluster_1 is gone.
  test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1);
  // Even with cluster_1 gone, a request for cluster_2 should be fine.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Tell Envoy that cluster_1 is back.
  EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {}));
  sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({cluster1_}, {}, "413");
  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
  // the DiscoveryResponse describing cluster_1 that we sent. Again, 3 includes CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
  // Does *not* call our initialize().
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
}
// Tests that when Envoy's xDS gRPC stream dis/reconnects, Envoy can inform the server of the
// resources it already has: the reconnected stream need not start with a state-of-the-world update.
TEST_P(DeltaCdsIntegrationTest, VersionsRememberedAfterReconnect) {
  // Calls our initialize(), which includes establishing a listener, route, and cluster.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // Close the connection carrying Envoy's xDS gRPC stream...
  AssertionResult result = xds_connection_->close();
  RELEASE_ASSERT(result, result.message());
  result = xds_connection_->waitForDisconnect();
  RELEASE_ASSERT(result, result.message());
  xds_connection_.reset();
  // ...and reconnect it.
  acceptXdsConnection();
  // Upon reconnecting, the Envoy should tell us its current resource versions.
  // "55" is the version giveInitialCluster() delivered for cluster_1.
  envoy::api::v2::DeltaDiscoveryRequest request;
  result = xds_stream_->waitForGrpcMessage(*dispatcher_, request);
  RELEASE_ASSERT(result, result.message());
  const auto& initial_resource_versions = request.initial_resource_versions();
  EXPECT_EQ("55", initial_resource_versions.at(std::string(ClusterName1)));
  EXPECT_EQ(1, initial_resource_versions.size());
  // Tell Envoy that cluster_2 is here. This update does *not* need to include cluster_1,
  // which Envoy should already know about despite the disconnect.
  sendDeltaDiscoveryResponse<envoy::api::v2::Cluster>({cluster2_}, {}, "42");
  // The '3' includes the fake CDS server.
  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
  // A request for cluster_1 should be fine.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
  // A request for cluster_2 should be fine.
  testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2");
  cleanupUpstreamAndDownstream();
  codec_client_->waitForDisconnect();
}
} // namespace
} // namespace Envoy
|
#include "mocks.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using testing::_;
using testing::Assign;
using testing::DoAll;
using testing::Invoke;
using testing::NiceMock;
using testing::Return;
using testing::ReturnNew;
using testing::ReturnPointee;
using testing::SaveArg;
namespace Envoy {
namespace Event {
// Delegates to the named constructor with a default thread name, for tests
// that don't care about the dispatcher's identity.
MockDispatcher::MockDispatcher() : MockDispatcher("test_thread") {}
// Installs gmock default actions so a (Nice)MockDispatcher is usable without
// per-test EXPECT_CALLs: deferred deletion really drops objects, timers come
// back as nice mock timers, posted callbacks run inline, and created buffers
// are real WatermarkBuffers.
MockDispatcher::MockDispatcher(const std::string& name) : name_(name) {
  ON_CALL(*this, initializeStats(_, _)).WillByDefault(Return());
  // Deferred deletion is emulated by simply dropping the held objects.
  ON_CALL(*this, clearDeferredDeleteList()).WillByDefault(Invoke([this]() -> void {
    to_delete_.clear();
  }));
  ON_CALL(*this, createTimer_(_)).WillByDefault(ReturnNew<NiceMock<Event::MockTimer>>());
  ON_CALL(*this, createScaledTimer_(_, _)).WillByDefault(ReturnNew<NiceMock<Event::MockTimer>>());
  ON_CALL(*this, createScaledTypedTimer_(_, _))
      .WillByDefault(ReturnNew<NiceMock<Event::MockTimer>>());
  // post() executes the callback synchronously, which keeps tests deterministic.
  ON_CALL(*this, post(_)).WillByDefault(Invoke([](PostCb cb) -> void { cb(); }));
  // Buffers created through the mock are real watermark buffers so code under
  // test observes genuine buffering/watermark behavior.
  ON_CALL(buffer_factory_, createBuffer_(_, _, _))
      .WillByDefault(Invoke([](std::function<void()> below_low, std::function<void()> above_high,
                               std::function<void()> above_overflow) -> Buffer::Instance* {
        return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow);
      }));
  ON_CALL(*this, isThreadSafe()).WillByDefault(Return(true));
}
// Out-of-line definition of the defaulted destructor.
MockDispatcher::~MockDispatcher() = default;
// Default-constructed timers track their enabled state and the scope object
// passed to enableTimer() so tests can assert on both.
MockTimer::MockTimer() {
  ON_CALL(*this, enableTimer(_, _))
      .WillByDefault(Invoke([&](const std::chrono::milliseconds&, const ScopeTrackedObject* scope) {
        enabled_ = true;
        scope_ = scope;
      }));
  ON_CALL(*this, disableTimer()).WillByDefault(Assign(&enabled_, false));
  ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_));
}
// A timer created through a specific dispatcher: registers itself as the next
// timer that dispatcher's createTimer_() returns, capturing the timer callback
// so tests can fire it manually.
//
// The enableTimer/disableTimer/enabled default actions are installed by the
// delegated default constructor; re-registering identical actions here (as the
// code previously did) was redundant, so the duplication has been removed.
MockTimer::MockTimer(MockDispatcher* dispatcher) : MockTimer() {
  dispatcher_ = dispatcher;
  EXPECT_CALL(*dispatcher, createTimer_(_))
      .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this)))
      .RetiresOnSaturation();
}
// On destruction, reports through the optional out-flag so tests can verify
// that the timer object was actually destroyed.
MockTimer::~MockTimer() {
  if (!timer_destroyed_) {
    return;
  }
  *timer_destroyed_ = true;
}
// Out-of-line definition of the defaulted destructor.
MockSchedulableCallback::~MockSchedulableCallback() = default;
// Registers this object as the next schedulable callback the dispatcher
// creates, capturing the wrapped callback so tests can invoke it directly.
// Default actions keep enabled_ in sync with schedule/cancel calls.
MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher)
    : dispatcher_(dispatcher) {
  EXPECT_CALL(*dispatcher, createSchedulableCallback_(_))
      .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this)))
      .RetiresOnSaturation();
  ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true));
  ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true));
  ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false));
  ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_));
}
// Captures the signal callback registered with the dispatcher so tests can
// deliver a "signal" by invoking callback_ directly.
MockSignalEvent::MockSignalEvent(MockDispatcher* dispatcher) {
  EXPECT_CALL(*dispatcher, listenForSignal_(_, _))
      .WillOnce(DoAll(SaveArg<1>(&callback_), Return(this)))
      .RetiresOnSaturation();
}
MockSignalEvent::~MockSignalEvent() = default;
// MockFileEvent has no default actions; tests drive it entirely via
// EXPECT_CALL/ON_CALL at the call site.
MockFileEvent::MockFileEvent() = default;
MockFileEvent::~MockFileEvent() = default;
} // namespace Event
} // namespace Envoy
|
#include "../utility/String.hpp"
#include "RETypeDB.hpp"
#include "RETypeDefinition.hpp"
#include "MurmurHash.hpp"
namespace sdk::murmur_hash {
// Lazily resolves and caches the via.murmur_hash type definition.
sdk::RETypeDefinition* type() {
    static sdk::RETypeDefinition* const cached_type =
        sdk::find_type_definition("via.murmur_hash");
    return cached_type;
}
// Computes the engine's 32-bit murmur hash of |str| by invoking
// via.murmur_hash.calc32 through the game's reflection layer.
// NOTE(review): assumes type() and get_method("calc32") never return null —
// verify how the SDK behaves when the type/method is missing.
uint32_t calc32(std::wstring_view str) {
    // The method lookup is cached across calls.
    static auto calc_method = type()->get_method("calc32");
    return calc_method->call<uint32_t>(sdk::get_thread_context(), sdk::VM::create_managed_string(str));
}
// Narrow-string convenience overload: widens the input and defers to the
// wide-string implementation.
uint32_t calc32(std::string_view str) {
    const auto widened = utility::widen(str);
    return calc32(widened);
}
}
|
#ifndef NEPOMUK_TIMETABLE_STATION_TABLE_FACTORY_HPP_
#define NEPOMUK_TIMETABLE_STATION_TABLE_FACTORY_HPP_
#include "gtfs/stop.hpp"
#include "timetable/station_table.hpp"
#include <vector>
namespace nepomuk
{
namespace timetable
{
// Builds a StationTable lookup structure from a range of GTFS stops.
class StationTableFactory
{
  public:
    // Construct the station table from the stops in [begin, end).
    static StationTable produce(std::vector<gtfs::Stop>::iterator const begin,
                                std::vector<gtfs::Stop>::iterator const end);
};
} // namespace timetable
} // namespace nepomuk
#endif // NEPOMUK_TIMETABLE_STATION_TABLE_FACTORY_HPP_
|
// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/chromeos/policy/cached_policy_key_loader_chromeos.h"
#include <stddef.h>
#include <utility>
#include "base/bind.h"
#include "base/callback.h"
#include "base/files/file_util.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/stringprintf.h"
#include "base/task_runner_util.h"
#include "chromeos/cryptohome/cryptohome_parameters.h"
#include "chromeos/dbus/cryptohome/cryptohome_client.h"
namespace policy {
namespace {
// Path within |user_policy_key_dir_| that contains the policy key.
// "%s" must be substituted with the sanitized username.
const base::FilePath::CharType kPolicyKeyFile[] =
    FILE_PATH_LITERAL("%s/policy.pub");
// Maximum key size that will be loaded, in bytes. Guards against reading an
// unexpectedly huge (likely corrupt) file into memory.
const size_t kKeySizeLimit = 16 * 1024;
// Failures that can happen when loading the policy key.
// This enum is used to define the buckets for an enumerated UMA histogram.
// Hence,
// (a) existing enumerated constants should never be deleted or reordered, and
// (b) new constants should only be appended at the end of the enumeration.
enum class ValidationFailure {
  DBUS,
  LOAD_KEY,
  // Number of histogram buckets. Has to be the last element.
  MAX_VALUE,
};
// Records one policy-key load failure to UMA.
void SampleValidationFailure(ValidationFailure sample) {
  UMA_HISTOGRAM_ENUMERATION("Enterprise.UserPolicyValidationFailure", sample,
                            ValidationFailure::MAX_VALUE);
}
}  // namespace
// |cryptohome_client| is stored unowned (the caller must keep it alive for
// this object's lifetime). Blocking key-file reads are posted to
// |task_runner|; |user_policy_key_dir| is the directory under which the
// per-user policy key lives.
CachedPolicyKeyLoaderChromeOS::CachedPolicyKeyLoaderChromeOS(
    chromeos::CryptohomeClient* cryptohome_client,
    scoped_refptr<base::SequencedTaskRunner> task_runner,
    const AccountId& account_id,
    const base::FilePath& user_policy_key_dir)
    : task_runner_(task_runner),
      cryptohome_client_(cryptohome_client),
      account_id_(account_id),
      user_policy_key_dir_(user_policy_key_dir) {}
// Prefer "= default" over an empty body for the out-of-line destructor
// (identical behavior; standard Chromium idiom).
CachedPolicyKeyLoaderChromeOS::~CachedPolicyKeyLoaderChromeOS() = default;
// Ensures the cached policy key is loaded, invoking |callback| when done.
// If the key is already loaded, |callback| runs synchronously. Otherwise the
// callback is queued and, unless a load is already in flight, an asynchronous
// load is started.
void CachedPolicyKeyLoaderChromeOS::EnsurePolicyKeyLoaded(
    base::OnceClosure callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (key_loaded_) {
    std::move(callback).Run();
    return;
  }
  key_loaded_callbacks_.push_back(std::move(callback));
  // If a key load is in progress, the callback will be called once it finishes.
  // No need to trigger another one.
  if (key_load_in_progress_)
    return;
  key_load_in_progress_ = true;
  // Get the hashed username that's part of the key's path, to determine
  // |cached_policy_key_path_|.
  cryptohome_client_->GetSanitizedUsername(
      cryptohome::CreateAccountIdentifierFromAccountId(account_id_),
      base::BindOnce(&CachedPolicyKeyLoaderChromeOS::OnGetSanitizedUsername,
                     weak_factory_.GetWeakPtr()));
}
// Synchronous variant: blocks on D-Bus for the sanitized username and then
// reads the key file on the current sequence. Returns false if the username
// lookup failed; in that case no state is modified.
bool CachedPolicyKeyLoaderChromeOS::LoadPolicyKeyImmediately() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  const std::string sanitized_username =
      cryptohome_client_->BlockingGetSanitizedUsername(
          cryptohome::CreateAccountIdentifierFromAccountId(account_id_));
  if (sanitized_username.empty())
    return false;
  cached_policy_key_path_ = user_policy_key_dir_.Append(
      base::StringPrintf(kPolicyKeyFile, sanitized_username.c_str()));
  cached_policy_key_ = LoadPolicyKey(cached_policy_key_path_);
  key_loaded_ = true;
  return true;
}
// Forces a fresh asynchronous load of the policy key, invoking |callback|
// when it completes. Any load already in flight is cancelled first; its
// queued callbacks remain and are notified when the new load finishes.
void CachedPolicyKeyLoaderChromeOS::ReloadPolicyKey(
    base::OnceClosure callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  key_loaded_callbacks_.push_back(std::move(callback));
  if (key_load_in_progress_) {
    // When a load is in progress, cancel it by invalidating the weak pointers
    // bound into its pending replies before starting a new load.
    weak_factory_.InvalidateWeakPtrs();
  }
  key_load_in_progress_ = true;
  if (cached_policy_key_path_.empty()) {
    // Get the hashed username that's part of the key's path, to determine
    // |cached_policy_key_path_|.
    cryptohome_client_->GetSanitizedUsername(
        cryptohome::CreateAccountIdentifierFromAccountId(account_id_),
        base::BindOnce(&CachedPolicyKeyLoaderChromeOS::OnGetSanitizedUsername,
                       weak_factory_.GetWeakPtr()));
  } else {
    TriggerLoadPolicyKey();
  }
}
// static
// Reads the policy key from |path|, returning an empty string on failure.
// An absent file is expected on first policy fetch and is not recorded as a
// failure; an empty, unreadable, or oversized (> kKeySizeLimit) file is.
std::string CachedPolicyKeyLoaderChromeOS::LoadPolicyKey(
    const base::FilePath& path) {
  std::string key;
  if (!base::PathExists(path)) {
    // There is no policy key the first time that a user fetches policy. If
    // |path| does not exist then that is the most likely scenario, so there's
    // no need to sample a failure.
    VLOG(1) << "No key at " << path.value();
    return key;
  }
  const bool read_success =
      base::ReadFileToStringWithMaxSize(path, &key, kKeySizeLimit);
  // If the read succeeded but the file was empty, or the read failed because
  // the file exceeds |kKeySizeLimit| (in which case |key| was filled with
  // exactly |kKeySizeLimit| bytes), log an error and discard the data.
  if ((read_success && key.length() == 0) ||
      (!read_success && key.length() == kKeySizeLimit)) {
    LOG(ERROR) << "Key at " << path.value()
               << (read_success ? " is empty." : " exceeds size limit");
    key.clear();
  } else if (!read_success) {
    LOG(ERROR) << "Failed to read key at " << path.value();
  }
  // Any path that ends with an empty key (other than the missing-file case
  // above) counts as a load failure for the UMA histogram.
  if (key.empty())
    SampleValidationFailure(ValidationFailure::LOAD_KEY);
  return key;
}
// Posts the blocking file read to |task_runner_| and routes the result to
// OnPolicyKeyLoaded() on this sequence. The reply is bound to a weak pointer
// so an in-flight load can be cancelled via InvalidateWeakPtrs().
void CachedPolicyKeyLoaderChromeOS::TriggerLoadPolicyKey() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  base::PostTaskAndReplyWithResult(
      task_runner_.get(), FROM_HERE,
      base::BindOnce(&CachedPolicyKeyLoaderChromeOS::LoadPolicyKey,
                     cached_policy_key_path_),
      base::BindOnce(&CachedPolicyKeyLoaderChromeOS::OnPolicyKeyLoaded,
                     weak_factory_.GetWeakPtr()));
}
// Completion handler for the asynchronous file read: caches the key, marks
// the load as finished, and notifies all waiting callbacks.
void CachedPolicyKeyLoaderChromeOS::OnPolicyKeyLoaded(const std::string& key) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  key_load_in_progress_ = false;
  key_loaded_ = true;
  cached_policy_key_ = key;
  NotifyAndClearCallbacks();
}
// D-Bus reply handler: derives |cached_policy_key_path_| from the sanitized
// username and kicks off the key-file read. If the username is unavailable,
// the load is abandoned and waiters are notified immediately.
void CachedPolicyKeyLoaderChromeOS::OnGetSanitizedUsername(
    base::Optional<std::string> sanitized_username) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!sanitized_username || sanitized_username->empty()) {
    SampleValidationFailure(ValidationFailure::DBUS);
    // Don't bother trying to load a key if we don't know where it is - just
    // signal that the load attempt has finished.
    key_load_in_progress_ = false;
    NotifyAndClearCallbacks();
    return;
  }
  cached_policy_key_path_ = user_policy_key_dir_.Append(
      base::StringPrintf(kPolicyKeyFile, sanitized_username->c_str()));
  TriggerLoadPolicyKey();
}
void CachedPolicyKeyLoaderChromeOS::NotifyAndClearCallbacks() {
std::vector<base::OnceClosure> callbacks = std::move(key_loaded_callbacks_);
key_loaded_callbacks_.clear();
for (auto& callback : callbacks)
std::move(callback).Run();
}
} // namespace policy
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "integrations/tensorflow/compiler/dialect/tf_strings/conversion/convert_tf_to_tf_strings.h"
#include <cstddef>
#include "integrations/tensorflow/compiler/dialect/tf_strings/ir/dialect.h"
#include "integrations/tensorflow/compiler/dialect/tf_strings/ir/ops.h"
#include "integrations/tensorflow/compiler/dialect/tf_strings/ir/types.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace iree_compiler {
namespace tf_strings {
namespace {
#include "integrations/tensorflow/compiler/dialect/tf_strings/conversion/convert_tf_to_tf_strings.inc"
// Type converter mapping TensorFlow string types onto the tf_strings dialect:
// !tf.string -> !tf_strings.string, tensor<...x!tf.string> becomes a tensor
// of !tf_strings.string, and rank-0 string tensors collapse to a scalar
// !tf_strings.string. All other types pass through unchanged.
class StringTypeConverter : public TypeConverter {
 public:
  StringTypeConverter() {
    // Required to convert any unknown or already converted types.
    addConversion([](Type type) { return type; });
    addConversion([](RankedTensorType type) -> Type {
      if (type.getElementType().isa<TF::StringType>()) {
        auto elementType = tf_strings::StringType::get(type.getContext());
        // TODO(suderman): Find a better way to identify tensor<!tf.string> and
        // !tf.string.
        // Tensorflow only operates on tensors, so "scalar" strings are actually
        // rank-0 tensors of strings. For now separate operating on tensors of
        // strings and scalar strings by forcing all rank-0 tensors of strings
        // to strings.
        if (type.getRank() == 0) {
          return tf_strings::StringType::get(type.getContext());
        }
        return RankedTensorType::get(type.getShape(), elementType);
      }
      return type;
    });
    addConversion([](TF::StringType type) {
      return tf_strings::StringType::get(type.getContext());
    });
  }
};
// Lowers tf.StringFormat to tf_strings.string_tensor_to_string. Only the
// single-operand form is handled; variadic formatting does not match.
struct StringFormatOpLowering : public OpRewritePattern<TF::StringFormatOp> {
  using OpRewritePattern<TF::StringFormatOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(TF::StringFormatOp op,
                                PatternRewriter &rewriter) const override {
    auto operands = op.inputs();
    // TODO(suderman): Implement a variadic version. For now assume one input.
    if (operands.size() != 1) {
      return rewriter.notifyMatchFailure(op,
                                         "Variadic StringFormat unsupported.");
    }
    rewriter.replaceOpWithNewOp<tf_strings::StringTensorToStringOp>(
        op, operands[0]);
    return success();
  }
};
// Pass that rewrites TensorFlow string ops and string-typed values into the
// tf_strings dialect across an entire module.
//
// Fixes over the previous revision: the result-type fixup walk shadowed the
// outer conversion-status variable with its loop variable, and called
// convertType() twice per result; the loop variables are now named distinctly
// and the converted type is computed once and reused.
class LowerTensorflowToStringsPass
    : public PassWrapper<LowerTensorflowToStringsPass,
                         OperationPass<ModuleOp>> {
 public:
  void runOnOperation() override {
    if (failed(run())) {
      signalPassFailure();
    }
  }

  LogicalResult run() {
    auto module = getOperation();
    OpBuilder builder(module.getContext());
    OwningRewritePatternList patterns;
    StringTypeConverter typeConverter;

    // Lower to the standard string operations.
    ConversionTarget target(getContext());
    target.addIllegalOp<TF::AsStringOp>();
    target.addIllegalOp<TF::PrintV2Op>();
    target.addLegalDialect<tf_strings::TFStringsDialect>();
    // Functions, returns, and calls are legal only once none of their
    // signature/operand/result types mention TF strings.
    target.addDynamicallyLegalOp<FuncOp>([](FuncOp op) {
      StringTypeConverter typeConverter;
      return typeConverter.isSignatureLegal(op.getType()) &&
             typeConverter.isLegal(&op.getBody());
    });
    target.addDynamicallyLegalOp<ReturnOp>([](ReturnOp op) {
      StringTypeConverter typeConverter;
      auto func = [&](Type type) { return typeConverter.isLegal(type); };
      return llvm::all_of(op.getOperandTypes(), func);
    });
    target.addDynamicallyLegalOp<CallOp>([](CallOp op) {
      StringTypeConverter typeConverter;
      auto func = [&](Type type) { return typeConverter.isLegal(type); };
      return llvm::all_of(op.getOperandTypes(), func) &&
             llvm::all_of(op.getResultTypes(), func);
    });
    populateFuncOpTypeConversionPattern(patterns, &getContext(), typeConverter);
    populateCallOpTypeConversionPattern(patterns, &getContext(), typeConverter);
    populateTFToTFStringsPatterns(&getContext(), patterns);
    auto conversionResult =
        applyPartialConversion(module.getOperation(), target, patterns);

    // Partial conversion doesn't include return types. Update in a separate
    // walk.
    module.walk([&](Operation *op) {
      for (auto opResult : op->getResults()) {
        auto newType = typeConverter.convertType(opResult.getType());
        if (newType) {
          opResult.setType(newType);
        }
      }
    });
    return conversionResult;
  }
};
} // namespace
// Appends the table-generated (DRR) patterns and the manual StringFormat
// lowering to |patterns|.
void populateTFToTFStringsPatterns(MLIRContext *ctx,
                                   OwningRewritePatternList &patterns) {
  populateWithGenerated(ctx, &patterns);
  patterns.insert<StringFormatOpLowering>(ctx);
}
// Factory used when building pass pipelines programmatically.
std::unique_ptr<OperationPass<ModuleOp>> createConvertTfToTfStrings() {
  return std::make_unique<LowerTensorflowToStringsPass>();
}
// Registers the pass under -convert-tensorflow-to-tf-strings for mlir-opt.
static PassRegistration<LowerTensorflowToStringsPass> pass(
    "convert-tensorflow-to-tf-strings", "Lower tensorflow to tf-strings.");
} // namespace tf_strings
} // namespace iree_compiler
} // namespace mlir
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.