text
stringlengths 5
1.04M
|
|---|
/*
Name:
Copyright:
Author:
Date: 14/01/16 12:33
Description: woj1289
*/
#include<iostream>
#include<stdio.h>
#include<string>
using namespace std;
// woj1289: for each pair of DNA strings, count positions that "match" under
// the problem's rule: the two bases are identical, or one is a purine-pair
// base {A,T} while the other is a pyrimidine-pair base {C,G}.
// Input ends at a line starting with "#".
int main()
{
    //freopen("in.txt","r",stdin);
    //freopen("out.txt","w",stdout);
    std::string str1, str2;
    while (std::cin >> str1 >> str2 && str1 != "#")
    {
        // Removed an unused 'char map[]' lookup table from the original.
        int matched = 0;
        // Guard with the shorter length: the original indexed str2[i] with
        // str1's length, which is undefined behavior if str2 is shorter.
        std::string::size_type len = str1.length();
        if (str2.length() < len)
            len = str2.length();
        for (std::string::size_type i = 0; i < len; ++i)
        {
            const char a = str1[i];
            const char b = str2[i];
            if (a == b)
                ++matched;
            else if ((a == 'A' || a == 'T') && (b == 'C' || b == 'G'))
                ++matched;
            else if ((a == 'C' || a == 'G') && (b == 'A' || b == 'T'))
                ++matched;
        }
        std::cout << matched << std::endl;
    }
    return 0;
}
|
// Copyright (c) 2021-2022 The MetabaseNet developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "mode/basic_config.h"
#include "mode/config_macro.h"
namespace metabasenet
{
namespace po = boost::program_options;

// Assemble the "Basic" option group: shared options first, then whatever the
// concrete subclass contributes through its AddOptions() override.
CBasicConfig::CBasicConfig()
{
    po::options_description basicOptions("Basic");
    CBasicConfigOption::AddOptionsImpl(basicOptions);
    AddOptions(basicOptions);
}

CBasicConfig::~CBasicConfig() = default;

// Post-parse fixups: pick the testnet data subdirectory and the network
// magic number matching the selected network. Always succeeds.
bool CBasicConfig::PostLoad()
{
    if (fHelp)
    {
        return true;
    }
    if (fTestNet)
    {
        // Testnet data lives in its own subdirectory and uses a distinct
        // magic so mainnet and testnet peers never interoperate.
        pathData /= "testnet";
        nMagicNum = TESTNET_MAGICNUM;
    }
    else
    {
        nMagicNum = MAINNET_MAGICNUM;
    }
    return true;
}

// Human-readable dump of the effective configuration.
std::string CBasicConfig::ListConfig() const
{
    std::ostringstream stream;
    stream << CBasicConfigOption::ListConfigImpl()
           << "magicNum: " << nMagicNum << "\n";
    return stream.str();
}

// Usage text for the basic option group.
std::string CBasicConfig::Help() const
{
    return CBasicConfigOption::HelpImpl();
}
} // namespace metabasenet
|
#include "meta.h"
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/control/magma_zmscale.cpp, normal z -> s, Tue Aug 30 09:38:49 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define RTOLERANCE lapackf77_slamch( "E" )
#define ATOLERANCE lapackf77_slamch( "E" )
/**
Purpose
-------
Scales a matrix.
Arguments
---------
@param[in,out]
A magma_s_matrix*
input/output matrix
@param[in]
scaling magma_scale_t
scaling type (unit rownorm / unit diagonal)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_smscale(
    magma_s_matrix *A,
    magma_scale_t scaling,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    // Per-row (or per-diagonal-entry) scaling factors.
    float *tmp=NULL;

    // Host staging copies used when A is not already CPU-resident CSRCOO.
    magma_s_matrix hA={Magma_CSR}, CSRA={Magma_CSR};

    // Symmetric two-sided scaling only makes sense for square matrices;
    // degrade gracefully to no scaling rather than fail.
    if( A->num_rows != A->num_cols && scaling != Magma_NOSCALE ){
        printf("%% warning: non-square matrix.\n");
        printf("%% Fallback: no scaling.\n");
        scaling = Magma_NOSCALE;
    }
    if ( A->memory_location == Magma_CPU && A->storage_type == Magma_CSRCOO ) {
        if ( scaling == Magma_NOSCALE ) {
            // no scale
            ;
        }
        else if( A->num_rows == A->num_cols ){
            if ( scaling == Magma_UNITROW ) {
                // scale to unit rownorm
                // tmp[z] = 1/||row z||_2, then A <- D*A*D using the COO
                // (rowidx, col) pair available in CSRCOO storage.
                CHECK( magma_smalloc_cpu( &tmp, A->num_rows ));
                for( magma_int_t z=0; z<A->num_rows; z++ ) {
                    float s = MAGMA_S_MAKE( 0.0, 0.0 );
                    for( magma_int_t f=A->row[z]; f<A->row[z+1]; f++ )
                        s+= MAGMA_S_REAL(A->val[f])*MAGMA_S_REAL(A->val[f]);
                    tmp[z] = MAGMA_S_MAKE( 1.0/sqrt( MAGMA_S_REAL( s ) ), 0.0 );
                }
                for( magma_int_t z=0; z<A->nnz; z++ ) {
                    A->val[z] = A->val[z] * tmp[A->col[z]] * tmp[A->rowidx[z]];
                }
            }
            else if (scaling == Magma_UNITDIAG ) {
                // scale to unit diagonal
                // tmp[z] = 1/sqrt(A[z][z]); D*A*D then has unit diagonal.
                CHECK( magma_smalloc_cpu( &tmp, A->num_rows ));
                for( magma_int_t z=0; z<A->num_rows; z++ ) {
                    float s = MAGMA_S_MAKE( 0.0, 0.0 );
                    for( magma_int_t f=A->row[z]; f<A->row[z+1]; f++ ) {
                        if ( A->col[f]== z ) {
                            // add some identity matrix
                            //A->val[f] = A->val[f] + MAGMA_S_MAKE( 100000.0, 0.0 );
                            s = A->val[f];
                        }
                    }
                    // NOTE(review): on a zero diagonal entry info is set but
                    // execution continues and the next line divides by
                    // sqrt(0), producing inf — confirm whether an early
                    // goto cleanup is intended here.
                    if ( s == MAGMA_S_MAKE( 0.0, 0.0 ) ){
                        printf("%%error: zero diagonal element.\n");
                        info = MAGMA_ERR;
                    }
                    tmp[z] = MAGMA_S_MAKE( 1.0/sqrt( MAGMA_S_REAL( s ) ), 0.0 );
                }
                for( magma_int_t z=0; z<A->nnz; z++ ) {
                    A->val[z] = A->val[z] * tmp[A->col[z]] * tmp[A->rowidx[z]];
                }
            }
            else {
                printf( "%%error: scaling not supported.\n" );
                info = MAGMA_ERR_NOT_SUPPORTED;
            }
        }
        else {
            printf( "%%error: scaling not supported.\n" );
            info = MAGMA_ERR_NOT_SUPPORTED;
        }
    }
    else {
        // Not CPU/CSRCOO: round-trip through a host CSRCOO copy, recurse,
        // and convert/transfer the scaled matrix back to the original
        // storage type and memory location.
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        CHECK( magma_smtransfer( *A, &hA, A->memory_location, Magma_CPU, queue ));
        CHECK( magma_smconvert( hA, &CSRA, hA.storage_type, Magma_CSRCOO, queue ));
        CHECK( magma_smscale( &CSRA, scaling, queue ));
        magma_smfree( &hA, queue );
        magma_smfree( A, queue );
        CHECK( magma_smconvert( CSRA, &hA, Magma_CSRCOO, A_storage, queue ));
        CHECK( magma_smtransfer( hA, A, Magma_CPU, A_location, queue ));
    }
cleanup:
    // CHECK() jumps here on failure; releases are safe on NULL/empty.
    magma_free_cpu( tmp );
    magma_smfree( &hA, queue );
    magma_smfree( &CSRA, queue );
    return info;
}
/**
Purpose
-------
Adds a multiple of the Identity matrix to a matrix: A = A+add * I
Arguments
---------
@param[in,out]
A magma_s_matrix*
input/output matrix
@param[in]
add float
scaling for the identity matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_smdiagadd(
    magma_s_matrix *A,
    float add,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    // Host staging copies used when A is not already CPU-resident CSRCOO.
    magma_s_matrix hA={Magma_CSR}, CSRA={Magma_CSR};

    if ( A->memory_location == Magma_CPU && A->storage_type == Magma_CSRCOO ) {
        // CSRCOO keeps an explicit rowidx per nonzero, so diagonal entries
        // are exactly those with col == rowidx. Note: only existing stored
        // diagonal entries are updated; missing diagonals are not inserted.
        for( magma_int_t z=0; z<A->nnz; z++ ) {
            if ( A->col[z]== A->rowidx[z] ) {
                // add some identity matrix
                A->val[z] = A->val[z] + add;
            }
        }
    }
    else {
        // Round-trip through a host CSRCOO copy, recurse, then restore the
        // original storage type and memory location.
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        CHECK( magma_smtransfer( *A, &hA, A->memory_location, Magma_CPU, queue ));
        CHECK( magma_smconvert( hA, &CSRA, hA.storage_type, Magma_CSRCOO, queue ));
        CHECK( magma_smdiagadd( &CSRA, add, queue ));
        magma_smfree( &hA, queue );
        magma_smfree( A, queue );
        CHECK( magma_smconvert( CSRA, &hA, Magma_CSRCOO, A_storage, queue ));
        CHECK( magma_smtransfer( hA, A, Magma_CPU, A_location, queue ));
    }
cleanup:
    // CHECK() jumps here on failure; releases are safe on empty matrices.
    magma_smfree( &hA, queue );
    magma_smfree( &CSRA, queue );
    return info;
}
|
#include <Arduino.h>
#include <unity.h>
#define LED_BUILTIN (13) // LED is connected to IO13
// Modem serial port
#define SerialAT Serial1
// Modem pinning
#define MODEM_RST (5)
#define MODEM_PWKEY (4)
#define MODEM_POWER_ON (23)
#define MODEM_TX (27)
#define MODEM_RX (26)
// Configure TinyGSM library
#define TINY_GSM_MODEM_SIM800 // Modem is SIM800
#define TINY_GSM_RX_BUFFER (1024) // Set RX buffer to 1Kb
#include <TinyGsmClient.h>
#include <TinyGsmCommon.h>
// void setUp(void) {
// // set stuff up here
// }
// void tearDown(void) {
// // clean stuff up here
// }
// Sanity check: the board mapping above must place the builtin LED on IO13.
void test_led_builtin_pin_number(void)
{
    const int expected_pin = 13;
    TEST_ASSERT_EQUAL(LED_BUILTIN, expected_pin);
}
// Drive the builtin LED high and verify the pin reads back HIGH.
void test_led_state_high(void)
{
    const int level = HIGH;
    digitalWrite(LED_BUILTIN, level);
    TEST_ASSERT_EQUAL(digitalRead(LED_BUILTIN), level);
}
// Drive the builtin LED low and verify the pin reads back LOW.
void test_led_state_low(void)
{
    const int level = LOW;
    digitalWrite(LED_BUILTIN, level);
    TEST_ASSERT_EQUAL(digitalRead(LED_BUILTIN), level);
}
// Global modem driver bound to the hardware serial port defined above.
TinyGsm modem(SerialAT);
// End-to-end modem test: power the SIM800 on, attach GPRS, and query the
// network for date/time and cell-based location. The pin toggles and delays
// follow the module's documented power-on sequence — order matters.
void test_modem(void)
{
    // reset modem, just for the lulz
    Serial.println("[ INIT ] modem power-on");
    pinMode(MODEM_PWKEY, OUTPUT);
    pinMode(MODEM_RST, OUTPUT);
    pinMode(MODEM_POWER_ON, OUTPUT);
    // PWKEY low + RST high + POWER_ON high, with settling delays; the final
    // 6 s wait gives the modem time to boot and register.
    digitalWrite(MODEM_PWKEY, LOW);
    delay(1000);
    digitalWrite(MODEM_RST, HIGH);
    delay(1000);
    digitalWrite(MODEM_POWER_ON, HIGH);
    delay(6000);
    // prepare modem for test
    Serial.println("[ INIT ] init serial ifc to modem...");
    SerialAT.begin(115200, SERIAL_8N1, MODEM_RX, MODEM_TX);
    // APN-only GPRS attach (no user/password on the 1NCE IoT SIM).
    bool connect = modem.gprsConnect("iot.1nce.net");
    TEST_ASSERT_TRUE_MESSAGE(connect, "connect GPRS");
    String datetime = modem.getGSMDateTime(DATE_TIME); // DATE_FULL = 0, DATE_TIME = 1, DATE_DATE = 2
    Serial.print("[ TEST ] GSM Date Time: "); Serial.println(datetime);
    // Raw AT: CIPGSMLOC=2,1 requests network time (mode 2) on context 1.
    modem.sendAT(GF("+CIPGSMLOC=2,1"));
    int code = modem.waitResponse(2000L, "+CIPGSMLOC: ");
    Serial.print("[ TEST ] response code of CIPGSMLOC: "); Serial.println(code);
    TEST_ASSERT_GREATER_OR_EQUAL_INT_MESSAGE(1, code, "woke clock orange");
    String res = modem.stream.readString();
    Serial.print("[ TEST ] response: "); Serial.println(res);
    // NOTE(review): res is an Arduino String object, not a pointer —
    // TEST_ASSERT_NOT_NULL_MESSAGE likely checks the wrong thing here;
    // an emptiness check may be intended.
    TEST_ASSERT_NOT_NULL_MESSAGE(res, "read failed");
    modem.waitResponse(); // wait for the OK
    // does not return proper time
    // modem.sendAT(GF("+CCLK?"));
    // code = modem.waitResponse(5000L, "+CCLK: ");
    // Serial.print("[ TEST ] response code of CCLK: ");
    // Serial.println(code);
    // TEST_ASSERT_GREATER_OR_EQUAL_INT_MESSAGE(1, code, "woke clock orange");
    // res = modem.stream.readString();
    // Serial.print("[ TEST ] 2nd string: "); Serial.println(res);
    // TEST_ASSERT_NOT_NULL_MESSAGE(res, "2nd read failed");
    // modem.waitResponse(); // wait for the OK
}
// Unity runner entry point — executes once at boot. Test order matters: the
// LED pin must be configured as OUTPUT before the read-back tests run.
void setup()
{
    UNITY_BEGIN();
    RUN_TEST(test_led_builtin_pin_number);
    // prepare for I/O test
    pinMode(LED_BUILTIN, OUTPUT);
    RUN_TEST(test_led_state_high);
    RUN_TEST(test_led_state_low);
    // modem related stuff
    RUN_TEST(test_modem);
    UNITY_END();
}
// Post-test heartbeat: blink the builtin LED at 2 Hz forever.
void loop()
{
    const unsigned long half_period_ms = 250;
    digitalWrite(LED_BUILTIN, HIGH);
    delay(half_period_ms);
    digitalWrite(LED_BUILTIN, LOW);
    delay(half_period_ms);
}
|
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "key.h"
#include "arith_uint256.h"
#include "crypto/common.h"
#include "crypto/hmac_sha512.h"
#include "pubkey.h"
#include "random.h"
#include <secp256k1.h>
#include <secp256k1_recovery.h>
static secp256k1_context *secp256k1_context_sign = nullptr;
/** These functions are taken from the libsecp256k1 distribution and are very
* ugly. */
/* Parse a DER-encoded ECPrivateKey and copy the raw secret into out32
 * (right-aligned, zero-padded to 32 bytes). Returns 1 on success, 0 on
 * malformed input or an out-of-range key; out32 is zeroed on failure so no
 * partial secret leaks to the caller. */
static int ec_privkey_import_der(const secp256k1_context *ctx, uint8_t *out32,
                                 const uint8_t *privkey, size_t privkeylen) {
    const uint8_t *end = privkey + privkeylen;
    int lenb = 0;
    int len = 0;
    memset(out32, 0, 32);
    /* sequence header */
    if (end < privkey + 1 || *privkey != 0x30) {
        return 0;
    }
    privkey++;
    /* sequence length constructor */
    // Only long-form lengths (high bit set) are accepted.
    if (end < privkey + 1 || !(*privkey & 0x80)) {
        return 0;
    }
    lenb = *privkey & ~0x80;
    privkey++;
    // Length-of-length must be 1 or 2 bytes (sequence length <= 65535).
    if (lenb < 1 || lenb > 2) {
        return 0;
    }
    if (end < privkey + lenb) {
        return 0;
    }
    /* sequence length */
    len = privkey[lenb - 1] | (lenb > 1 ? privkey[lenb - 2] << 8 : 0);
    privkey += lenb;
    if (end < privkey + len) {
        return 0;
    }
    /* sequence element 0: version number (=1) */
    if (end < privkey + 3 || privkey[0] != 0x02 || privkey[1] != 0x01 ||
        privkey[2] != 0x01) {
        return 0;
    }
    privkey += 3;
    /* sequence element 1: octet string, up to 32 bytes */
    if (end < privkey + 2 || privkey[0] != 0x04 || privkey[1] > 0x20 ||
        end < privkey + 2 + privkey[1]) {
        return 0;
    }
    // Right-align the octets so shorter encodings keep their numeric value.
    memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
    // Reject zero or >= the secp256k1 group order.
    if (!secp256k1_ec_seckey_verify(ctx, out32)) {
        memset(out32, 0, 32);
        return 0;
    }
    return 1;
}
/* Serialize key32 as a DER ECPrivateKey with explicit secp256k1 curve
 * parameters. The 'begin'/'middle' byte tables are fixed DER scaffolding
 * (header, curve prime, generator, group order) with slots for the 32-byte
 * secret and the serialized public key; only those two variable parts are
 * written at runtime. On success *privkeylen receives the encoded length
 * and 1 is returned; on pubkey-derivation failure *privkeylen is 0. */
static int ec_privkey_export_der(const secp256k1_context *ctx, uint8_t *privkey,
                                 size_t *privkeylen, const uint8_t *key32,
                                 int compressed) {
    secp256k1_pubkey pubkey;
    size_t pubkeylen = 0;
    if (!secp256k1_ec_pubkey_create(ctx, &pubkey, key32)) {
        *privkeylen = 0;
        return 0;
    }
    if (compressed) {
        // Compressed variant: 33-byte public key appended after 'middle'.
        static const uint8_t begin[] = {0x30, 0x81, 0xD3, 0x02,
                                        0x01, 0x01, 0x04, 0x20};
        static const uint8_t middle[] = {
            0xA0, 0x81, 0x85, 0x30, 0x81, 0x82, 0x02, 0x01, 0x01, 0x30, 0x2C,
            0x06, 0x07, 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x01, 0x01, 0x02, 0x21,
            0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F,
            0x30, 0x06, 0x04, 0x01, 0x00, 0x04, 0x01, 0x07, 0x04, 0x21, 0x02,
            0x79, 0xBE, 0x66, 0x7E, 0xF9, 0xDC, 0xBB, 0xAC, 0x55, 0xA0, 0x62,
            0x95, 0xCE, 0x87, 0x0B, 0x07, 0x02, 0x9B, 0xFC, 0xDB, 0x2D, 0xCE,
            0x28, 0xD9, 0x59, 0xF2, 0x81, 0x5B, 0x16, 0xF8, 0x17, 0x98, 0x02,
            0x21, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC, 0xE6,
            0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41,
            0x41, 0x02, 0x01, 0x01, 0xA1, 0x24, 0x03, 0x22, 0x00};
        uint8_t *ptr = privkey;
        memcpy(ptr, begin, sizeof(begin));
        ptr += sizeof(begin);
        memcpy(ptr, key32, 32);
        ptr += 32;
        memcpy(ptr, middle, sizeof(middle));
        ptr += sizeof(middle);
        pubkeylen = 33;
        secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey,
                                      SECP256K1_EC_COMPRESSED);
        ptr += pubkeylen;
        *privkeylen = ptr - privkey;
    } else {
        // Uncompressed variant: 65-byte public key appended after 'middle'.
        static const uint8_t begin[] = {0x30, 0x82, 0x01, 0x13, 0x02,
                                        0x01, 0x01, 0x04, 0x20};
        static const uint8_t middle[] = {
            0xA0, 0x81, 0xA5, 0x30, 0x81, 0xA2, 0x02, 0x01, 0x01, 0x30, 0x2C,
            0x06, 0x07, 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x01, 0x01, 0x02, 0x21,
            0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F,
            0x30, 0x06, 0x04, 0x01, 0x00, 0x04, 0x01, 0x07, 0x04, 0x41, 0x04,
            0x79, 0xBE, 0x66, 0x7E, 0xF9, 0xDC, 0xBB, 0xAC, 0x55, 0xA0, 0x62,
            0x95, 0xCE, 0x87, 0x0B, 0x07, 0x02, 0x9B, 0xFC, 0xDB, 0x2D, 0xCE,
            0x28, 0xD9, 0x59, 0xF2, 0x81, 0x5B, 0x16, 0xF8, 0x17, 0x98, 0x48,
            0x3A, 0xDA, 0x77, 0x26, 0xA3, 0xC4, 0x65, 0x5D, 0xA4, 0xFB, 0xFC,
            0x0E, 0x11, 0x08, 0xA8, 0xFD, 0x17, 0xB4, 0x48, 0xA6, 0x85, 0x54,
            0x19, 0x9C, 0x47, 0xD0, 0x8F, 0xFB, 0x10, 0xD4, 0xB8, 0x02, 0x21,
            0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC, 0xE6, 0xAF,
            0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x41,
            0x02, 0x01, 0x01, 0xA1, 0x44, 0x03, 0x42, 0x00};
        uint8_t *ptr = privkey;
        memcpy(ptr, begin, sizeof(begin));
        ptr += sizeof(begin);
        memcpy(ptr, key32, 32);
        ptr += 32;
        memcpy(ptr, middle, sizeof(middle));
        ptr += sizeof(middle);
        pubkeylen = 65;
        secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey,
                                      SECP256K1_EC_UNCOMPRESSED);
        ptr += pubkeylen;
        *privkeylen = ptr - privkey;
    }
    return 1;
}
// A 32-byte secret is usable only if it is a valid secp256k1 scalar
// (non-zero and below the group order).
bool CKey::Check(const uint8_t *vch) {
    const int valid = secp256k1_ec_seckey_verify(secp256k1_context_sign, vch);
    return valid != 0;
}
// Generate a fresh random private key. Rejection-samples strong entropy
// until it forms a valid secp256k1 secret (retry probability ~2^-128).
void CKey::MakeNewKey(bool fCompressedIn) {
    bool usable = false;
    while (!usable) {
        GetStrongRandBytes(keydata.data(), keydata.size());
        usable = Check(keydata.data());
    }
    fValid = true;
    fCompressed = fCompressedIn;
}
// Load this key from a DER-encoded private key. Returns false (leaving the
// key flags untouched) if the encoding is rejected.
bool CKey::SetPrivKey(const CPrivKey &privkey, bool fCompressedIn) {
    const int imported = ec_privkey_import_der(
        secp256k1_context_sign, (uint8_t *)begin(), &privkey[0],
        privkey.size());
    if (!imported) {
        return false;
    }
    fCompressed = fCompressedIn;
    fValid = true;
    return true;
}
// Export the secret as a DER-encoded ECPrivateKey. 279 bytes is an upper
// bound for the uncompressed encoding; the buffer is shrunk to the actual
// length written by the serializer.
CPrivKey CKey::GetPrivKey() const {
    assert(fValid);
    CPrivKey privkey;
    int ret;
    size_t privkeylen;
    privkey.resize(279);
    privkeylen = 279;
    ret = ec_privkey_export_der(
        secp256k1_context_sign, (uint8_t *)&privkey[0], &privkeylen, begin(),
        fCompressed ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED);
    // Export only fails if the key is invalid, which fValid rules out.
    assert(ret);
    privkey.resize(privkeylen);
    return privkey;
}
// Derive the public key for this secret. The serializer updates clen to the
// actual size (33 compressed / 65 uncompressed); CPubKey infers its own
// length from the first serialized byte, which the asserts cross-check.
CPubKey CKey::GetPubKey() const {
    assert(fValid);
    secp256k1_pubkey pubkey;
    size_t clen = 65;
    CPubKey result;
    int ret =
        secp256k1_ec_pubkey_create(secp256k1_context_sign, &pubkey, begin());
    assert(ret);
    secp256k1_ec_pubkey_serialize(
        secp256k1_context_sign, (uint8_t *)result.begin(), &clen, &pubkey,
        fCompressed ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED);
    assert(result.size() == clen);
    assert(result.IsValid());
    return result;
}
// Produce a DER-encoded ECDSA signature over 'hash'. Nonces are
// deterministic (RFC 6979); a non-zero 'test_case' mixes extra entropy into
// the nonce so tests can obtain distinct signatures for the same hash.
// Returns false only when the key is invalid.
bool CKey::Sign(const uint256 &hash, std::vector<uint8_t> &vchSig,
                uint32_t test_case) const {
    if (!fValid) return false;
    // 72 bytes is the maximum DER signature size; shrunk after encoding.
    vchSig.resize(72);
    size_t nSigLen = 72;
    uint8_t extra_entropy[32] = {0};
    WriteLE32(extra_entropy, test_case);
    secp256k1_ecdsa_signature sig;
    int ret = secp256k1_ecdsa_sign(secp256k1_context_sign, &sig, hash.begin(),
                                   begin(), secp256k1_nonce_function_rfc6979,
                                   test_case ? extra_entropy : nullptr);
    assert(ret);
    secp256k1_ecdsa_signature_serialize_der(
        secp256k1_context_sign, (uint8_t *)&vchSig[0], &nSigLen, &sig);
    vchSig.resize(nSigLen);
    return true;
}
bool CKey::VerifyPubKey(const CPubKey &pubkey) const {
if (pubkey.IsCompressed() != fCompressed) {
return false;
}
uint8_t rnd[8];
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd, sizeof(rnd));
uint256 hash;
CHash256()
.Write((uint8_t *)str.data(), str.size())
.Write(rnd, sizeof(rnd))
.Finalize(hash.begin());
std::vector<uint8_t> vchSig;
Sign(hash, vchSig);
return pubkey.Verify(hash, vchSig);
}
// Produce a 65-byte compact recoverable signature: a header byte encoding
// the recovery id (and compression flag), followed by the 64-byte (r,s)
// pair. Returns false only when the key is invalid.
bool CKey::SignCompact(const uint256 &hash,
                       std::vector<uint8_t> &vchSig) const {
    if (!fValid) return false;
    vchSig.resize(65);
    int rec = -1;
    secp256k1_ecdsa_recoverable_signature sig;
    int ret = secp256k1_ecdsa_sign_recoverable(
        secp256k1_context_sign, &sig, hash.begin(), begin(),
        secp256k1_nonce_function_rfc6979, nullptr);
    assert(ret);
    // Fix: capture the serializer's return value. Previously it was
    // discarded and the following assert re-checked the stale signing
    // result, so serialization failures went undetected.
    ret = secp256k1_ecdsa_recoverable_signature_serialize_compact(
        secp256k1_context_sign, (uint8_t *)&vchSig[1], &rec, &sig);
    assert(ret);
    assert(rec != -1);
    // Header byte: 27 + recovery id, +4 if the pubkey is compressed.
    vchSig[0] = 27 + rec + (fCompressed ? 4 : 0);
    return true;
}
// Restore a key from its DER encoding. When fSkipCheck is false, the key is
// additionally validated against vchPubKey via a sign/verify round-trip.
// NOTE(review): the '= false' default on this *definition* duplicates (or
// conflicts with) whatever the declaration in the header specifies —
// confirm against key.h; defaults normally live on the declaration only.
bool CKey::Load(CPrivKey &privkey, CPubKey &vchPubKey,
                bool fSkipCheck = false) {
    if (!ec_privkey_import_der(secp256k1_context_sign, (uint8_t *)begin(),
                               &privkey[0], privkey.size()))
        return false;
    // Compression preference is taken from the stored public key.
    fCompressed = vchPubKey.IsCompressed();
    fValid = true;
    if (fSkipCheck) return true;
    return VerifyPubKey(vchPubKey);
}
// BIP32 child key derivation. For non-hardened children (nChild < 2^31) the
// parent *public* key is hashed; hardened children hash the parent private
// key, so they cannot be derived from the extended public key alone.
// Returns false (invalid child) with negligible probability.
bool CKey::Derive(CKey &keyChild, ChainCode &ccChild, unsigned int nChild,
                  const ChainCode &cc) const {
    assert(IsValid());
    assert(IsCompressed());
    // vout: 64-byte HMAC-SHA512 output — left half is the scalar tweak,
    // right half becomes the child chain code.
    std::vector<uint8_t, secure_allocator<uint8_t>> vout(64);
    if ((nChild >> 31) == 0) {
        CPubKey pubkey = GetPubKey();
        assert(pubkey.begin() + 33 == pubkey.end());
        BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin() + 1, vout.data());
    } else {
        assert(begin() + 32 == end());
        // 0x00 prefix byte pads the private key to the pubkey's width.
        BIP32Hash(cc, nChild, 0, begin(), vout.data());
    }
    memcpy(ccChild.begin(), vout.data() + 32, 32);
    memcpy((uint8_t *)keyChild.begin(), begin(), 32);
    // child = (parent + tweak) mod n; fails if the result is not a valid
    // secret key.
    bool ret = secp256k1_ec_privkey_tweak_add(
        secp256k1_context_sign, (uint8_t *)keyChild.begin(), vout.data());
    keyChild.fCompressed = true;
    keyChild.fValid = ret;
    return ret;
}
// Derive a BIP32 child extended key: one level deeper, fingerprint taken
// from the first 4 bytes of the parent key ID, then CKD on the key pair.
bool CExtKey::Derive(CExtKey &out, unsigned int _nChild) const {
    out.nDepth = nDepth + 1;
    const CKeyID parentId = key.GetPubKey().GetID();
    memcpy(&out.vchFingerprint[0], &parentId, 4);
    out.nChild = _nChild;
    return key.Derive(out.key, out.chaincode, _nChild, chaincode);
}
// Initialize a BIP32 master key from a seed: I = HMAC-SHA512(key="Bitcoin
// seed", data=seed); the left 32 bytes become the master secret key and the
// right 32 bytes the master chain code. Depth/child/fingerprint are zeroed
// as required for a root key.
void CExtKey::SetMaster(const uint8_t *seed, unsigned int nSeedLen) {
    static const uint8_t hashkey[] = {'B', 'i', 't', 'c', 'o', 'i',
                                      'n', ' ', 's', 'e', 'e', 'd'};
    std::vector<uint8_t, secure_allocator<uint8_t>> vout(64);
    CHMAC_SHA512(hashkey, sizeof(hashkey))
        .Write(seed, nSeedLen)
        .Finalize(vout.data());
    key.Set(&vout[0], &vout[32], true);
    memcpy(chaincode.begin(), &vout[32], 32);
    nDepth = 0;
    nChild = 0;
    memset(vchFingerprint, 0, sizeof(vchFingerprint));
}
// Produce the extended *public* key: every metadata field is copied and the
// private key is replaced by its public key, yielding a key that can derive
// non-hardened children but cannot sign.
CExtPubKey CExtKey::Neuter() const {
    CExtPubKey neutered;
    neutered.nDepth = nDepth;
    memcpy(&neutered.vchFingerprint[0], &vchFingerprint[0], 4);
    neutered.nChild = nChild;
    neutered.pubkey = key.GetPubKey();
    neutered.chaincode = chaincode;
    return neutered;
}
// Serialize to the BIP32 extended-private-key payload:
// [0] depth, [1..4] parent fingerprint, [5..8] child number (big-endian),
// [9..40] chain code, [41] 0x00 marker (distinguishes private from public
// serialization), [42..] 32-byte secret key.
void CExtKey::Encode(uint8_t code[BIP32_EXTKEY_SIZE]) const {
    code[0] = nDepth;
    memcpy(code + 1, vchFingerprint, 4);
    code[5] = (nChild >> 24) & 0xFF;
    code[6] = (nChild >> 16) & 0xFF;
    code[7] = (nChild >> 8) & 0xFF;
    code[8] = (nChild >> 0) & 0xFF;
    memcpy(code + 9, chaincode.begin(), 32);
    code[41] = 0;
    assert(key.size() == 32);
    memcpy(code + 42, key.begin(), 32);
}
// Inverse of Encode(): depth, fingerprint, big-endian child number, chain
// code, then the 32-byte secret (byte 41 is the 0x00 private-key marker).
void CExtKey::Decode(const uint8_t code[BIP32_EXTKEY_SIZE]) {
    nDepth = code[0];
    memcpy(vchFingerprint, code + 1, 4);
    nChild = 0;
    for (int i = 5; i < 9; ++i) {
        nChild = (nChild << 8) | code[i];
    }
    memcpy(chaincode.begin(), code + 9, 32);
    key.Set(code + 42, code + BIP32_EXTKEY_SIZE, true);
}
bool ECC_InitSanityCheck() {
CKey key;
key.MakeNewKey(true);
CPubKey pubkey = key.GetPubKey();
return key.VerifyPubKey(pubkey);
}
// Create and install the process-wide signing context. Must be called once
// before any key operations; asserts if a context is already installed.
void ECC_Start() {
    assert(secp256k1_context_sign == nullptr);
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
    assert(ctx != nullptr);
    {
        // Pass in a random blinding seed to the secp256k1 context.
        // (Blinding hardens scalar multiplication against side channels.)
        std::vector<uint8_t, secure_allocator<uint8_t>> vseed(32);
        GetRandBytes(vseed.data(), 32);
        bool ret = secp256k1_context_randomize(ctx, vseed.data());
        assert(ret);
    }
    // Publish only after the context is fully initialized.
    secp256k1_context_sign = ctx;
}
void ECC_Stop() {
secp256k1_context *ctx = secp256k1_context_sign;
secp256k1_context_sign = nullptr;
if (ctx) {
secp256k1_context_destroy(ctx);
}
}
|
#include <cstdio>
#include <cstring>
#include <cmath>
#include <algorithm>
#define MAX_PRIME 2147483647
using namespace std;
int cnt;
long long sum;
// Reads integers n until 0. For each n it appears to sum, over the distinct
// prime factors p of n, the full prime power p^k that divides n, with two
// special cases handled below. NOTE(review): exact problem semantics
// inferred from the code — confirm against the original judge statement.
int main()
{
    int curCase = 1;
    int n, i, least;
    while (scanf("%d", &n), n)
    {
        sum = 0;
        // sqrt(n + 1): the +1 guards against the floating sqrt landing just
        // below an exact integer root; trial division only needs sqrt(n).
        least = sqrt(n + 1);
        printf("Case %d: ", curCase++);
        for (i = 2, cnt = 0; i <= least; i ++)
        {
            if (n % i == 0)
            {
                // Accumulate i^k, the full power of this prime dividing n.
                long long tmp = 1;
                cnt ++;
                while (n % i == 0)
                {
                    tmp *= i;
                    n /= i;
                }
                sum += tmp;
            }
        }
        // Leftover n > 1 is a prime factor above sqrt; the cnt == 0 clause
        // also adds n itself when no factor was found (covers n == 1 and
        // primes below the loop bound edge case).
        if (n != 1 || cnt == 0)
        {
            cnt ++;
            sum += n;
        }
        // With exactly one prime factor, the divisor 1 is counted as well.
        if (cnt == 1) sum ++;
        printf("%lld\n", sum);
    }
    return 0;
}
|
#define GLM_FORCE_MESSAGES
#include <glm/vec3.hpp>
#include <cstdio>
// Report which compiler GLM detected (GLM_COMPILER is defined by GLM's
// platform headers). Returns the number of detection failures: an
// unrecognized version inside the VC/GCC/Intel families counts as a
// failure, but an unrecognized Clang version deliberately does not
// (see the Clang default branch below), and CUDA has no version check.
int test_compiler()
{
    int Error(0);
    if(GLM_COMPILER & GLM_COMPILER_VC)
    {
        switch(GLM_COMPILER)
        {
        case GLM_COMPILER_VC10:
            std::printf("Visual C++ 10 - 2010\n");
            break;
        case GLM_COMPILER_VC11:
            std::printf("Visual C++ 11 - 2012\n");
            break;
        case GLM_COMPILER_VC12:
            std::printf("Visual C++ 12 - 2013\n");
            break;
        case GLM_COMPILER_VC14:
            std::printf("Visual C++ 14 - 2015\n");
            break;
        case GLM_COMPILER_VC15:
            std::printf("Visual C++ 15 - 201X\n");
            break;
        default:
            std::printf("Visual C++ version not detected\n");
            Error += 1;
            break;
        }
    }
    else if(GLM_COMPILER & GLM_COMPILER_GCC)
    {
        switch(GLM_COMPILER)
        {
        case GLM_COMPILER_GCC44:
            std::printf("GCC 4.4\n");
            break;
        case GLM_COMPILER_GCC45:
            std::printf("GCC 4.5\n");
            break;
        case GLM_COMPILER_GCC46:
            std::printf("GCC 4.6\n");
            break;
        case GLM_COMPILER_GCC47:
            std::printf("GCC 4.7\n");
            break;
        case GLM_COMPILER_GCC48:
            std::printf("GCC 4.8\n");
            break;
        case GLM_COMPILER_GCC49:
            std::printf("GCC 4.9\n");
            break;
        case GLM_COMPILER_GCC50:
            std::printf("GCC 5.0\n");
            break;
        case GLM_COMPILER_GCC51:
            std::printf("GCC 5.1\n");
            break;
        case GLM_COMPILER_GCC52:
            std::printf("GCC 5.2\n");
            break;
        case GLM_COMPILER_GCC53:
            std::printf("GCC 5.3\n");
            break;
        case GLM_COMPILER_GCC54:
            std::printf("GCC 5.4\n");
            break;
        case GLM_COMPILER_GCC60:
            std::printf("GCC 6.0\n");
            break;
        case GLM_COMPILER_GCC61:
            std::printf("GCC 6.1\n");
            break;
        case GLM_COMPILER_GCC62:
            std::printf("GCC 6.2\n");
            break;
        case GLM_COMPILER_GCC70:
            std::printf("GCC 7.0\n");
            break;
        case GLM_COMPILER_GCC71:
            std::printf("GCC 7.1\n");
            break;
        case GLM_COMPILER_GCC72:
            std::printf("GCC 7.2\n");
            break;
        case GLM_COMPILER_GCC80:
            std::printf("GCC 8.0\n");
            break;
        default:
            std::printf("GCC version not detected\n");
            Error += 1;
            break;
        }
    }
    else if(GLM_COMPILER & GLM_COMPILER_CUDA)
    {
        std::printf("CUDA\n");
    }
    else if(GLM_COMPILER & GLM_COMPILER_CLANG)
    {
        switch(GLM_COMPILER)
        {
        case GLM_COMPILER_CLANG32:
            std::printf("Clang 3.2\n");
            break;
        case GLM_COMPILER_CLANG33:
            std::printf("Clang 3.3\n");
            break;
        case GLM_COMPILER_CLANG34:
            std::printf("Clang 3.4\n");
            break;
        case GLM_COMPILER_CLANG35:
            std::printf("Clang 3.5\n");
            break;
        case GLM_COMPILER_CLANG36:
            std::printf("Clang 3.6\n");
            break;
        case GLM_COMPILER_CLANG37:
            std::printf("Clang 3.7\n");
            break;
        case GLM_COMPILER_CLANG38:
            std::printf("Clang 3.8\n");
            break;
        case GLM_COMPILER_CLANG39:
            std::printf("Clang 3.9\n");
            break;
        case GLM_COMPILER_CLANG40:
            std::printf("Clang 4.0\n");
            break;
        case GLM_COMPILER_CLANG41:
            std::printf("Clang 4.1\n");
            break;
        case GLM_COMPILER_CLANG42:
            std::printf("Clang 4.2\n");
            break;
        default:
            // Unlike the other families, unknown Clang versions are
            // tolerated: Error is not incremented here.
            std::printf("LLVM version not detected\n");
            break;
        }
    }
    else if(GLM_COMPILER & GLM_COMPILER_INTEL)
    {
        switch(GLM_COMPILER)
        {
        case GLM_COMPILER_INTEL12:
            std::printf("ICC 12\n");
            break;
        case GLM_COMPILER_INTEL12_1:
            std::printf("ICC 12.1\n");
            break;
        case GLM_COMPILER_INTEL13:
            std::printf("ICC 13\n");
            break;
        case GLM_COMPILER_INTEL14:
            std::printf("ICC 14\n");
            break;
        case GLM_COMPILER_INTEL15:
            std::printf("ICC 15\n");
            break;
        case GLM_COMPILER_INTEL16:
            std::printf("ICC 16\n");
            break;
        default:
            std::printf("Intel compiler version not detected\n");
            Error += 1;
            break;
        }
    }
    else
    {
        std::printf("Undetected compiler\n");
        Error += 1;
    }
    return Error;
}
// Verify that GLM's pointer-model macro agrees with the actual pointer
// width, and print which model is active. Returns 1 on mismatch, else 0.
int test_model()
{
    int Error = 0;
    const bool matches32 = (sizeof(void*) == 4) && (GLM_MODEL == GLM_MODEL_32);
    const bool matches64 = (sizeof(void*) == 8) && (GLM_MODEL == GLM_MODEL_64);
    if(!(matches32 || matches64))
        Error += 1;
    if(GLM_MODEL == GLM_MODEL_32)
        std::printf("GLM_MODEL_32\n");
    else if(GLM_MODEL == GLM_MODEL_64)
        std::printf("GLM_MODEL_64\n");
    return Error;
}
// Print the SIMD/ISA feature bits GLM was configured with. Purely
// informational — always returns 0.
// NOTE(review): AVX/AVX2 are tested via GLM_ARCH_AVX*/AVX2 while the other
// features use *_BIT macros; confirm both spellings exist in this GLM
// revision.
int test_instruction_set()
{
    int Error = 0;
    std::printf("GLM_ARCH: ");
    if(GLM_ARCH == GLM_ARCH_PURE)
        std::printf("GLM_ARCH_PURE ");
    if(GLM_ARCH & GLM_ARCH_ARM_BIT)
        std::printf("ARM ");
    if(GLM_ARCH & GLM_ARCH_NEON_BIT)
        std::printf("NEON ");
    if(GLM_ARCH & GLM_ARCH_AVX2)
        std::printf("AVX2 ");
    if(GLM_ARCH & GLM_ARCH_AVX)
        std::printf("AVX ");
    if(GLM_ARCH & GLM_ARCH_SSE42_BIT)
        std::printf("SSE4.2 ");
    if(GLM_ARCH & GLM_ARCH_SSE41_BIT)
        std::printf("SSE4.1 ");
    if(GLM_ARCH & GLM_ARCH_SSSE3_BIT)
        std::printf("SSSE3 ");
    if(GLM_ARCH & GLM_ARCH_SSE3_BIT)
        std::printf("SSE3 ");
    if(GLM_ARCH & GLM_ARCH_SSE2_BIT)
        std::printf("SSE2 ");
    std::printf("\n");
    return Error;
}
// Report the active C++ standard version macro. Always returns 0.
int test_cpp_version()
{
    // Fix: __cplusplus has type 'long' (e.g. 201703L); printing it with
    // %lld without a cast is undefined behavior where long is 32-bit.
    // Cast explicitly so the argument matches the conversion specifier.
    std::printf("__cplusplus: %lld\n", static_cast<long long>(__cplusplus));
    return 0;
}
int test_operators()
{
glm::vec3 A(1.0f);
glm::vec3 B(1.0f);
bool R = A != B;
bool S = A == B;
return (S && !R) ? 0 : 1;
}
// Minimal template machinery checking that the compiler accepts a partial
// specialization over a template template parameter (see the commented-out
// instantiation in main). No runtime behavior.
template <typename T>
struct vec
{
};
// Primary template taking a template template parameter.
template <template <typename> class C, typename T>
struct Class
{
};
// Partial specialization for the 'vec' template.
template <typename T>
struct Class<vec, T>
{
};
// Run every diagnostic; the process exit code is the total failure count.
int main()
{
    //Class<vec, float> C;
    int Failures = 0;
    Failures += test_cpp_version();
    Failures += test_compiler();
    Failures += test_model();
    Failures += test_instruction_set();
    Failures += test_operators();
    return Failures;
}
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/nn/adam_weight_decay_gpu_kernel.h"
namespace mindspore {
namespace kernel {
// Bind the AdamWeightDecay primitive to the GPU kernel for each supported
// dtype combination. FP32 variant: all nine inputs and three outputs are
// float32.
MS_REG_GPU_KERNEL_ONE(AdamWeightDecay,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      AdamWeightDecayGpuKernel, float)
// Mixed-precision variant: params/moments/gradient in float16, the scalar
// hyper-parameters (inputs 4-8) stay float32; outputs are float16.
MS_REG_GPU_KERNEL_ONE(AdamWeightDecay,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeFloat16)
                        .AddInputAttr(kNumberTypeFloat16)
                        .AddInputAttr(kNumberTypeFloat16)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat16)
                        .AddOutputAttr(kNumberTypeFloat16)
                        .AddOutputAttr(kNumberTypeFloat16)
                        .AddOutputAttr(kNumberTypeFloat16),
                      AdamWeightDecayGpuKernel, half)
}  // namespace kernel
}  // namespace mindspore
|
/*************************************************************************/
/* Xania (M)ulti(U)ser(D)ungeon server source code */
/* (C) 1995-2000 Xania Development Team */
/* See the header to file: merc.h for original code copyrights */
/* */
/* Note.cpp: notes system */
/* */
/*************************************************************************/
#include "Note.hpp"
#include "Char.hpp"
#include "CommFlag.hpp"
#include "DescriptorList.hpp"
#include "TimeInfoData.hpp"
#include "common/BitOps.hpp"
#include "common/Configuration.hpp"
#include "db.h"
#include "interp.h"
#include "string_utils.hpp"
#include <fmt/format.h>
#include <range/v3/algorithm/count_if.hpp>
#include <range/v3/algorithm/find_if.hpp>
#include <range/v3/view/filter.hpp>
#include <functional>
// A note is addressed to 'ch' if they wrote it, if it is addressed to
// "all", if it is addressed to "immortal" and they are immortal, or if
// their name appears on the to-list.
bool Note::is_to(const Char &ch) const {
    return matches(ch.name, sender_) || is_name("all", to_list_)
           || (ch.is_immortal() && is_name("immortal", to_list_))
           || is_name(ch.name, to_list_);
}
bool Note::sent_by(const Char &ch) const { return matches(ch.name, sender_); }
// Persist this note in the keyword/'~'-terminated format that
// Note::from_file() parses; the two must stay in sync.
void Note::save(FILE *file) const {
    fmt::print(file, "Sender {}~\n", sender_);
    fmt::print(file, "Date {}~\n", date_);
    // Machine-readable timestamp alongside the display date string.
    fmt::print(file, "Stamp {}\n", Clock::to_time_t(date_stamp_));
    fmt::print(file, "To {}~\n", to_list_);
    fmt::print(file, "Subject {}~\n", subject_);
    fmt::print(file, "Text\n{}~\n", text_);
}
void Notes::erase(const Note ¬e) {
auto it = ranges::find_if(notes_, [&](const Note &data) { return &data == ¬e; });
if (it != notes_.end())
notes_.erase(it);
}
Note &Notes::add(Note note) { return notes_.emplace_back(std::move(note)); }
// Count notes addressed to 'ch', not written by them, and newer than the
// last note they read.
int Notes::num_unread(const Char &ch) const {
    int unread = 0;
    for (const auto &candidate : notes_) {
        if (candidate.is_to(ch) && !candidate.sent_by(ch) && candidate.date_stamp() > ch.last_note)
            unread++;
    }
    return unread;
}
// Return the index-th note (0-based) addressed to 'ch', or nullptr if
// there are fewer than index+1 such notes.
Note *Notes::lookup(int index, const Char &ch) {
    int remaining = index;
    for (auto &candidate : notes_) {
        if (!candidate.is_to(ch))
            continue;
        if (remaining-- <= 0)
            return &candidate;
    }
    return nullptr;
}
// Find the first note addressed to 'ch' (and not written by them) that is
// strictly newer than 'date'. Returns the note and its position among such
// notes, or {nullptr, 0} when everything has been read.
std::pair<Note *, int> Notes::lookup(Time date, const Char &ch) {
    int position = 0;
    for (auto &candidate : notes_) {
        if (!candidate.is_to(ch) || candidate.sent_by(ch))
            continue;
        if (date < candidate.date_stamp())
            return {&candidate, position};
        position++;
    }
    return {nullptr, 0};
}
// Persist every note, in storage order, to an open file handle.
void Notes::save(FILE *file) const {
    for (const auto &stored : notes_) {
        stored.save(file);
    }
}
// Return the character's in-progress note, creating a fresh one (with the
// character as sender) if none exists yet.
static Note &ensure_note(Char &ch) {
    if (!ch.pnote) {
        ch.pnote = std::make_unique<Note>(ch.name);
    }
    return *ch.pnote;
}
// Deserialize one note from an open notes file. Each field is introduced by
// a keyword ("Sender", "Date", ...) and string fields use the
// '~'-terminated fread_string convention — the mirror of Note::save().
// Throws std::runtime_error on an unexpected keyword.
Note Note::from_file(FILE *fp) {
    // Consume one word and fail loudly if it is not the expected keyword
    // (matches() is case-insensitive per its use elsewhere in this file).
    auto expect = [&](std::string_view expected) {
        const auto word = fread_word(fp);
        if (!matches(word, expected))
            throw std::runtime_error(fmt::format("Expected '{}'", expected));
    };
    // Expect a keyword, then read its '~'-terminated string payload.
    auto expect_str = [&](std::string_view expected) {
        expect(expected);
        return fread_string(fp);
    };
    Note note(expect_str("sender"));
    note.date_ = expect_str("date");
    expect("stamp");
    note.date_stamp_ = Clock::from_time_t(fread_number(fp));
    note.to_list_ = expect_str("to");
    note.subject_ = expect_str("subject");
    note.text_ = expect_str("text");
    return note;
}
// Set the subject line of an in-progress note.
void Note::subject(std::string_view subject) { subject_ = subject; }
// Record both the human-readable date string and the machine timestamp.
void Note::date(Time point) {
    date_ = formatted_time(point);
    date_stamp_ = point;
}
// Set the recipient list (space-separated names / "all" / "immortal").
void Note::to_list(std::string_view to_list) { to_list_ = to_list; }
// Append one line of body text (MUD line ending "\n\r").
void Note::add_line(std::string_view line) { text_ += fmt::format("{}\n\r", line); }
// Drop the most recently added body line.
void Note::remove_line() { text_ = remove_last_line(text_); }
// Lazily-constructed global note handler. The on_change callback rewrites
// the entire notes file whenever the collection changes; a failed fopen is
// reported via perror but otherwise ignored (notes stay in memory).
NoteHandler &NoteHandler::singleton() {
    static auto on_change = [](NoteHandler &handler) {
        const auto notes_file = Configuration::singleton().notes_file();
        if (auto *file = fopen(notes_file.c_str(), "w")) {
            handler.write_to(file);
            fclose(file);
        } else {
            perror(notes_file.c_str());
        }
    };
    static NoteHandler singleton(on_change);
    return singleton;
}
// Boot-time load: read the persisted notes file (if present) into the
// singleton handler. A missing file is not an error.
void note_initialise() {
    const auto path = Configuration::singleton().notes_file();
    auto &handler = NoteHandler::singleton();
    FILE *fp = fopen(path.c_str(), "r");
    if (fp) {
        handler.read_from(fp);
        fclose(fp);
    }
}
// Entry point for the player 'note' command. NPCs are silently ignored;
// tildes are stripped because '~' is the file-format string terminator.
void do_note(Char *ch, std::string_view args) {
    if (ch->is_npc())
        return;
    auto cleaned = smash_tilde(args);
    NoteHandler::singleton().on_command(*ch, ArgParser(cleaned));
}
// Register the note sub-commands. "send" is an alias for "post"; "delete"
// is restricted to near-implementor trust (MAX_LEVEL - 2). The on_change
// callback is invoked whenever the note collection is modified.
NoteHandler::NoteHandler(OnChangeFunc on_change_func) : on_change_func_(std::move(on_change_func)) {
    sub_commands.add("read", &NoteHandler::read);
    sub_commands.add("list", &NoteHandler::list);
    sub_commands.add("+", &NoteHandler::add_line);
    sub_commands.add("-", &NoteHandler::remove_line);
    sub_commands.add("subject", &NoteHandler::subject);
    sub_commands.add("to", &NoteHandler::to);
    sub_commands.add("clear", &NoteHandler::clear);
    sub_commands.add("show", &NoteHandler::show);
    sub_commands.add("post", &NoteHandler::post);
    sub_commands.add("send", &NoteHandler::post);
    sub_commands.add("remove", &NoteHandler::remove);
    sub_commands.add("delete", &NoteHandler::delet, MAX_LEVEL - 2);
}
void NoteHandler::write_to(FILE *fp) { notes_.save(fp); }
// Load notes from an open file until EOF. Leading whitespace is consumed
// manually so EOF *between* notes exits cleanly; the peeked character is
// pushed back before handing the stream to the parser. Notes older than
// two weeks are discarded on load.
void NoteHandler::read_from(FILE *fp) {
    for (;;) {
        int letter;
        do {
            letter = getc(fp);
            if (feof(fp) || ferror(fp))
                return;
        } while (isspace(letter));
        ungetc(letter, fp);
        auto note = Note::from_file(fp);
        // Expire stale notes rather than carrying them forever.
        if (note.date_stamp() < current_time - date::weeks(2))
            continue;
        notes_.add(std::move(note));
    }
}
// Dispatches a "note ..." sub-command; a bare "note" defaults to "read".
// Unknown sub-commands (or ones above the character's trust) get a usage hint.
void NoteHandler::on_command(Char &ch, ArgParser args) {
    const auto sub = args.shift();
    const auto note_fn = sub_commands.get(sub.empty() ? "read" : sub, ch.get_trust());
    if (!note_fn.has_value()) {
        ch.send_line("Huh? Type 'help note' for usage.");
        return;
    }
    (this->*note_fn.value())(ch, std::move(args));
}
// "note read [n|next]": shows a specific note by number, or (with no argument
// or "next") the next note the character hasn't read yet. Displaying a note
// advances the character's last_note stamp, so repeated "note read" walks
// forward through unread notes.
void NoteHandler::read(Char &ch, ArgParser args) {
    auto show_note = [&ch](int note_index, const Note &note) {
        ch.send_line("[{:3}] {}|w: {}|w", note_index, note.sender(), note.subject());
        ch.send_line("{}|w", note.date());
        ch.send_line("To: {}|w", note.to_list());
        if (!note.text().empty())
            ch.page_to(note.text());
        ch.send_line("|w");
        // Remember the newest note this character has seen.
        ch.last_note = std::max(ch.last_note, note.date_stamp());
    };
    if (auto num = args.try_shift_number()) {
        // Explicit note number.
        if (auto *note = notes_.lookup(*num, ch)) {
            show_note(*num, *note);
        } else {
            ch.send_line("No such note.");
        }
    } else if (args.empty() || matches_start(args.shift(), "next")) {
        // No number given: look up the first note newer than last_note.
        // This lookup overload returns a (note pointer, index) pair.
        if (auto note_pair = notes_.lookup(ch.last_note, ch); note_pair.first) {
            show_note(note_pair.second, *note_pair.first);
        } else {
            ch.send_line("You have no unread notes.");
        }
    } else {
        ch.send_line("Note read which number?");
    }
}
// "note list": one line per note addressed to this character; unread notes
// not written by the character are flagged with 'N'.
void NoteHandler::list(Char &ch, [[maybe_unused]] ArgParser) {
    auto index = 0;
    for (auto &note : notes_.notes()) {
        if (!note.is_to(ch))
            continue;
        const bool is_new = note.date_stamp() > ch.last_note && !note.sent_by(ch);
        ch.send_line("[{:3}{}] {}: {}|w", index, is_new ? "N" : " ", note.sender(), note.subject());
        ++index;
    }
}
// "note post"/"note send": validates the in-progress draft (needs recipients
// and a subject), files it, persists the note list, then announces it to
// eligible online recipients.
void NoteHandler::post(Char &ch, [[maybe_unused]] ArgParser) {
    if (!ch.pnote) {
        ch.send_line("You have no note in progress.");
        return;
    }
    if (ch.pnote->to_list().empty()) {
        ch.send_line("You need to provide a recipient (name, all, or immortal).");
        return;
    }
    if (ch.pnote->subject().empty()) {
        ch.send_line("You need to provide a subject.");
        return;
    }
    // Stamp with the posting time, move the draft into the shared list, and
    // clear the draft before notifying anyone.
    ch.pnote->date(current_time);
    auto &note = notes_.add(std::move(*ch.pnote));
    ch.pnote.reset();
    on_change_func_(*this);
    // Announce to every connected recipient except the poster, unless they
    // have announcements or all chatter suppressed.
    for (auto &chtarg : descriptors().all_but(ch) | DescriptorFilter::to_person()) {
        if (!check_enum_bit(chtarg.comm, CommFlag::NoAnnounce) && !check_enum_bit(chtarg.comm, CommFlag::Quiet)
            && note.is_to(chtarg)) {
            chtarg.send_line("The Spirit of Hermes announces the arrival of a new note.");
        }
    }
    ch.send_line("Ok.");
}
// "note + <text>": appends a line to the draft, creating the draft if needed.
void NoteHandler::add_line(Char &ch, ArgParser args) {
    auto &draft = ensure_note(ch);
    draft.add_line(args.remaining());
    ch.send_line("Ok.");
}
// "note -": removes the last line of the draft's body, if there is one.
void NoteHandler::remove_line(Char &ch, [[maybe_unused]] ArgParser) {
    const bool has_text = ch.pnote && !ch.pnote->text().empty();
    if (!has_text) {
        ch.send_line("There is no text to delete.");
        return;
    }
    ch.pnote->remove_line();
    ch.send_line("Ok.");
}
// "note subject <text>": sets the draft's subject line.
void NoteHandler::subject(Char &ch, ArgParser args) {
    auto &draft = ensure_note(ch);
    draft.subject(args.remaining());
    ch.send_line("Ok.");
}
// "note to <names>": sets the draft's recipient list.
void NoteHandler::to(Char &ch, ArgParser args) {
    auto &draft = ensure_note(ch);
    draft.to_list(args.remaining());
    ch.send_line("Ok.");
}
// "note clear": throws away the draft note, if any.
void NoteHandler::clear(Char &ch, [[maybe_unused]] ArgParser) {
    ch.pnote.reset();
    ch.send_line("Ok.");
}
// "note show": previews the in-progress draft without posting it, with
// placeholders for any fields not yet filled in.
void NoteHandler::show(Char &ch, [[maybe_unused]] ArgParser) {
    if (!ch.pnote) {
        ch.send_line("You have no note in progress.");
        return;
    }
    const auto &draft = *ch.pnote;
    ch.send_line("{}|w: {}|w", draft.sender(), draft.subject().empty() ? "(No subject)" : draft.subject());
    ch.send_line("To: {}|w", draft.to_list().empty() ? "(No recipients)" : draft.to_list());
    if (!draft.text().empty())
        ch.send_to(draft.text());
    else
        ch.send_line("(No message body)");
    ch.send_line("|w");
}
// "note remove <n>": removes the character from note n (or destroys it if
// they sent it) — see the Note& overload for the details.
void NoteHandler::remove(Char &ch, ArgParser args) {
    const auto which = args.try_shift_number();
    if (!which) {
        ch.send_line("Note remove which number?");
        return;
    }
    auto *found = notes_.lookup(*which, ch);
    if (!found) {
        ch.send_line("No such note.");
        return;
    }
    remove(ch, *found);
    ch.send_line("Ok.");
}
// Removes `ch` from `note`. If ch sent the note, it is destroyed outright.
// Otherwise ch is struck from the recipient list, and the note is destroyed
// only when no recipients remain. The note list is persisted in every path.
void NoteHandler::remove(Char &ch, Note &note) {
    if (note.sent_by(ch)) {
        notes_.erase(note);
        on_change_func_(*this);
        return;
    }
    // Build a new to_list. Strip out this recipient.
    std::string to_list;
    for (auto recip : ArgParser(note.to_list())) {
        if (matches(ch.name, recip))
            continue;
        if (!to_list.empty())
            to_list += " ";
        to_list += recip;
    }
    // Destroy completely only if there are no recipients left.
    if (!to_list.empty()) {
        note.to_list(to_list);
    } else {
        notes_.erase(note);
    }
    on_change_func_(*this);
}
// "note delete <n>": unconditionally erases a note regardless of sender.
// (Access is gated at registration time to high-trust characters.)
void NoteHandler::delet(Char &ch, ArgParser args) {
    const auto which = args.try_shift_number();
    if (!which) {
        ch.send_line("Note delete which number?");
        return;
    }
    auto *found = notes_.lookup(*which, ch);
    if (!found) {
        ch.send_line("No such note.");
        return;
    }
    notes_.erase(*found);
    on_change_func_(*this);
    ch.send_line("Ok.");
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef GEODE_INTEGRATION_TEST_THINCLIENTSECURITYHELPER_H_
#define GEODE_INTEGRATION_TEST_THINCLIENTSECURITYHELPER_H_
#include <boost/process.hpp>
#include <boost/lexical_cast.hpp>
#include "fw_dunit.hpp"
#include "ThinClientHelper.hpp"
#include "hacks/AceThreadId.h"
namespace { // NOLINT(google-build-namespaces)
using apache::geode::client::CacheableBoolean;
using apache::geode::client::Exception;
using apache::geode::client::testframework::security::CredentialGenerator;
using apache::geode::client::testframework::security::OP_CONTAINS_KEY;
using apache::geode::client::testframework::security::OP_CREATE;
using apache::geode::client::testframework::security::OP_DESTROY;
using apache::geode::client::testframework::security::OP_EXECUTE_FUNCTION;
using apache::geode::client::testframework::security::OP_GET;
using apache::geode::client::testframework::security::OP_GETALL;
using apache::geode::client::testframework::security::OP_INVALIDATE;
using apache::geode::client::testframework::security::OP_KEY_SET;
using apache::geode::client::testframework::security::OP_PUTALL;
using apache::geode::client::testframework::security::OP_QUERY;
using apache::geode::client::testframework::security::OP_REGION_CLEAR;
using apache::geode::client::testframework::security::OP_REGISTER_CQ;
using apache::geode::client::testframework::security::OP_REGISTER_INTEREST;
using apache::geode::client::testframework::security::OP_UNREGISTER_INTEREST;
using apache::geode::client::testframework::security::OP_UPDATE;
using apache::geode::client::testframework::security::opCodeList;
// Locator/server role flags passed to CacheHelper::getLocatorHostPort
// (presumably set by it — confirm against CacheHelper).
bool isLocator = false;
bool isLocalServer = false;
// "host:port" of the locator shared by all clients in these tests.
const std::string locHostPort =
    CacheHelper::getLocatorHostPort(isLocator, isLocalServer, 1);
// Region name(s) used by the security tests.
const char* regionNamesAuth[] = {"DistRegionAck"};
// Credential generator chosen by initCredentialGenerator(); shared globally.
std::shared_ptr<CredentialGenerator> credentialGeneratorHandler;
// Selects the credential generator for the current test iteration. Successive
// calls rotate DUMMY -> LDAP -> PKCS; from the third call onward PKCS is
// always chosen (the `default:` label shares case 3's body).
void initCredentialGenerator() {
  static int loopNum = 1;
  switch (loopNum) {
    case 1: {
      credentialGeneratorHandler = CredentialGenerator::create("DUMMY");
      break;
    }
    case 2: {
      credentialGeneratorHandler = CredentialGenerator::create("LDAP");
      break;
    }
    default:
    case 3: {
      credentialGeneratorHandler = CredentialGenerator::create("PKCS");
      break;
    }
  }
  if (credentialGeneratorHandler == nullptr) {
    FAIL("credentialGeneratorHandler is nullptr");
  }
  loopNum++;
}
// Operations a read-only client is allowed to perform (see initClientAuth).
opCodeList::value_type tmpRArr[] = {OP_GET,
                                    OP_GETALL,
                                    OP_REGISTER_INTEREST,
                                    OP_UNREGISTER_INTEREST,
                                    OP_KEY_SET,
                                    OP_CONTAINS_KEY,
                                    OP_QUERY,
                                    OP_REGISTER_CQ};
// Operations a write-only client is allowed to perform.
opCodeList::value_type tmpWArr[] = {OP_CREATE, OP_UPDATE, OP_PUTALL,
                                    OP_DESTROY, OP_INVALIDATE, OP_REGION_CLEAR};
// Operations an "admin" client is allowed to perform (mixed reads/writes).
opCodeList::value_type tmpAArr[] = {OP_CREATE, OP_UPDATE,
                                    OP_DESTROY, OP_INVALIDATE,
                                    OP_REGION_CLEAR, OP_REGISTER_INTEREST,
                                    OP_GET};
// Must directly follow a try-block: a NotAuthorizedException counts as
// success; any other Geode exception fails the test with its stack trace.
#define HANDLE_NOT_AUTHORIZED_EXCEPTION                          \
  catch (const apache::geode::client::NotAuthorizedException&) { \
    LOG("NotAuthorizedException Caught");                        \
    LOG("Success");                                              \
  }                                                              \
  catch (const apache::geode::client::Exception& other) {        \
    LOG(other.getStackTrace().c_str());                          \
    FAIL(other.what());                                          \
  }
// Must directly follow a try-block: a CacheWriterException counts as success.
#define HANDLE_CACHEWRITER_EXCEPTION                           \
  catch (const apache::geode::client::CacheWriterException&) { \
    LOG("CacheWriterException Caught");                        \
    LOG("Success");                                            \
  }
// Client role tags accepted by initClientAuth().
#define TYPE_ADMIN_CLIENT 'A'
#define TYPE_WRITER_CLIENT 'W'
#define TYPE_READER_CLIENT 'R'
#define TYPE_USER_CLIENT 'U'
void initClientAuth(char UserType) {
auto config = Properties::create();
opCodeList wr(tmpWArr, tmpWArr + sizeof tmpWArr / sizeof *tmpWArr);
opCodeList rt(tmpRArr, tmpRArr + sizeof tmpRArr / sizeof *tmpRArr);
opCodeList ad(tmpAArr, tmpAArr + sizeof tmpAArr / sizeof *tmpAArr);
credentialGeneratorHandler->getAuthInit(config);
switch (UserType) {
case 'W':
credentialGeneratorHandler->getAllowedCredentialsForOps(wr, config,
nullptr);
printf("User is %s Pass is %s ",
config->find("security-username")->value().c_str(),
(config->find("security-password") != nullptr
? config->find("security-password")->value().c_str()
: " not set"));
break;
case 'R':
credentialGeneratorHandler->getAllowedCredentialsForOps(rt, config,
nullptr);
printf("User is %s Pass is %s ",
config->find("security-username")->value().c_str(),
(config->find("security-password") != nullptr
? config->find("security-password")->value().c_str()
: " not set"));
break;
case 'A':
credentialGeneratorHandler->getAllowedCredentialsForOps(ad, config,
nullptr);
printf("User is %s Pass is %s ",
config->find("security-username")->value().c_str(),
(config->find("security-password") != nullptr
? config->find("security-password")->value().c_str()
: " not set"));
break;
default:
break;
}
try {
initClient(true, config);
} catch (...) {
throw;
}
}
// This putThread class is used in
// testThinClientTracking,testThinClientTicket304, testThinClientTicket317
// Worker task that hammers a region from one or more ACE threads; used in
// testThinClientTracking, testThinClientTicket304, testThinClientTicket317.
class putThread : public ACE_Task_Base {
 public:
  // r: region to operate on; regInt: register interest in all keys before
  // working; waitTime: seconds each worker sleeps before starting.
  explicit putThread(std::shared_ptr<Region> r, bool regInt = false,
                     int waitTime = 0) {
    m_reg = r;
    m_regInt = regInt;
    m_numthreads = 1;
    m_numops = 0;
    m_isCallBack = false;
    m_sameKey = false;
    m_waitTime = waitTime;
  }
  // Configures the workload before start(); see svc() for opcode meanings.
  void setParams(int opcode, int numofops, int numthreads,
                 bool isCallBack = false, bool sameKey = false,
                 int waitTime = 0) { //
    m_opcode = opcode;
    m_numops = numofops;
    m_numthreads = numthreads;
    m_isCallBack = isCallBack;
    m_sameKey = sameKey;
    m_waitTime = waitTime;
  }
  // Spawns m_numthreads joinable ACE threads, each running svc().
  void start() {
    m_run = true;
    activate(THR_NEW_LWP | THR_JOINABLE, m_numthreads);
  }
  // Joins the worker threads. NOTE(review): m_run is only initialised by
  // start(); calling stop() first reads an indeterminate bool — confirm
  // callers always start() before stop().
  void stop() {
    if (m_run) {
      m_run = false;
      wait();
    }
  }
  // Thread body. Opcodes: 0 = put, 1 = get, 5 = collect keys then
  // registerKeys on the final op, 6 = registerRegex, anything else = destroy
  // (destroy failures are logged rather than failing the test).
  int svc() override {
    int ops = 0;
    std::string key_str;
    std::shared_ptr<CacheableKey> key;
    std::shared_ptr<CacheableString> value;
    std::vector<std::shared_ptr<CacheableKey>> keys0;
    auto pid = boost::this_process::get_id();
    if (m_regInt) {
      m_reg->registerAllKeys(false, true);
    }
    if (m_waitTime != 0) {
      std::this_thread::sleep_for(std::chrono::seconds{m_waitTime});
    }
    while (ops++ < m_numops) {
      // Either hammer a single key or generate "key-<n>" per operation.
      if (m_sameKey) {
        key_str = "key-1";
      } else {
        key_str = "key-" + std::to_string(ops);
      }
      key = CacheableKey::create(key_str);
      if (m_opcode == 0) {
        std::string value_str;
        if (m_isCallBack) {
          auto boolptr = CacheableBoolean::create("true");
          value_str = "client1-value" + std::to_string(ops);
          value = CacheableString::create(value_str);
          m_reg->put(key, value, boolptr);
        } else {
          value_str = "client2-value" + std::to_string(ops);
          value = CacheableString::create(value_str);
          m_reg->put(key, value);
        }
      } else if (m_opcode == 1) {
        m_reg->get(key);
      } else if (m_opcode == 5) {
        keys0.push_back(key);
        if (ops == m_numops) {
          m_reg->registerKeys(keys0, false, true);
        }
      } else if (m_opcode == 6) {
        m_reg->registerRegex("key-[1-3]", false, true);
      } else {
        try {
          if (m_isCallBack) {
            auto boolptr = CacheableBoolean::create("true");
            m_reg->destroy(key, boolptr);
          } else {
            m_reg->destroy(key);
          }
        } catch (Exception& ex) {
          auto tid = boost::lexical_cast<std::string>(std::this_thread::get_id());
          printf("%d: %s exception got and exception message = %s\n",
                 pid, tid.c_str(), ex.what());
        }
      }
    }
    return 0;
  }
  std::shared_ptr<Region> m_reg;  // target region
  bool m_run;                     // set by start(), cleared by stop()
  int m_opcode;                   // operation selector (see svc())
  int m_numops;                   // operations per thread
  int m_numthreads;               // worker thread count
  bool m_isCallBack;              // pass a callback arg to put/destroy
  bool m_sameKey;                 // reuse "key-1" for every operation
  bool m_regInt;                  // register interest in all keys first
  int m_waitTime;                 // per-thread startup delay, seconds
};
} // namespace
#endif // GEODE_INTEGRATION_TEST_THINCLIENTSECURITYHELPER_H_
|
/**
* Copyright (C) 2016 Turi
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#ifndef GRAPHLAB_SGRAPH_SGRAPH_COMPUTE_VERTEX_BLOCK_HPP
#define GRAPHLAB_SGRAPH_SGRAPH_COMPUTE_VERTEX_BLOCK_HPP
#include <map>
#include <vector>
#include <algorithm>
#include <parallel/mutex.hpp>
#include <sframe/sframe.hpp>
#include <flexible_type/flexible_type.hpp>
namespace graphlab {
namespace sgraph_compute {
/**
 * Represents a partition of vertices which is held in memory.
 *
 * SIterableType is an SFrame/SArray-like type; its rows are materialised
 * into m_vertices and can be flushed back out after mutation.
 */
template <typename SIterableType>
class vertex_block {
 public:
  /**
   * Loads an SFrame/SArray into memory (accessible directly via m_vertices)
   * if not already loaded.
   */
  void load_if_not_loaded(const SIterableType& sf) {
    if (!m_loaded) {
      load_impl(sf);
      m_loaded = true;
    }
  }
  /**
   * Loads an SFrame/SArray into memory (accessible directly via m_vertices)
   * reloading it if it has already been loaded.
   */
  void load(const SIterableType& sf) {
    load_impl(sf);
    m_loaded = true;
  }
  /**
   * Writes every in-memory row to the output and closes it.
   * Note: does not clear the modified flag — callers manage that.
   */
  void flush(SIterableType& outputsf) {
    std::copy(m_vertices.begin(), m_vertices.end(),
              outputsf.get_output_iterator(0));
    outputsf.close();
  }
  /**
   * Writes only the columns listed in mutated_field_index (in that order)
   * for every in-memory row, then closes the output.
   */
  void flush(SIterableType& outputsf, const std::vector<size_t>& mutated_field_index) {
    auto out = outputsf.get_output_iterator(0);
    std::vector<flexible_type> temp(mutated_field_index.size());
    for (const auto& value: m_vertices) {
      for (size_t i = 0; i < mutated_field_index.size(); ++i) {
        temp[i] = value[mutated_field_index[i]];
      }
      *out = temp;
      ++out;
    }
    outputsf.close();
  }
  /**
   * Unloads the loaded data, releasing all memory used.
   * A modified block also drops its cached reader, forcing a re-open on the
   * next load.
   */
  void unload() {
    m_loaded = false;
    m_vertices.clear();
    m_vertices.shrink_to_fit();
    if (is_modified()) {
      m_reader.reset();
    }
    clear_modified_flag();
  }
  /**
   * Returns true if the SFrame is loaded. False otherwise.
   */
  bool is_loaded() {
    return m_loaded;
  }
  /**
   * Returns true if the SFrame is modified. False otherwise.
   */
  bool is_modified() {
    return m_modified;
  }
  /**
   * Sets the modified flag
   */
  void set_modified_flag() {
    m_modified = true;
  }
  /**
   * Clears the modified flag
   */
  void clear_modified_flag() {
    m_modified = false;
  }
  /// Unchecked access to the i-th in-memory row.
  typename SIterableType::value_type& operator[](size_t i) {
    return m_vertices[i];
  }
  const typename SIterableType::value_type& operator[](size_t i) const {
    return m_vertices[i];
  }
  /// The loaded data
  std::vector<typename SIterableType::value_type> m_vertices;
 private:
  /// Internal load implementation. The reader is cached and reused as long
  /// as the source's index file is unchanged.
  void load_impl(const SIterableType& sf) {
    if (m_last_index_file != sf.get_index_file() || !m_reader) {
      m_last_index_file = sf.get_index_file();
      m_reader = std::move(sf.get_reader());
    }
    m_vertices.reserve(sf.size());
    m_reader->read_rows(0, m_reader->size(), m_vertices);
  }
  /// Flag denoting if the data has been loaded
  bool m_loaded = false;
  /// Flag denoting modification
  bool m_modified = false;
  // cache the reader
  std::string m_last_index_file;
  std::unique_ptr<typename SIterableType::reader_type> m_reader;
};
} // sgraph_compute
} // graphlab
#endif
|
#ifndef DUNE_AX1_ACME0_TOPERATOR_HH
#define DUNE_AX1_ACME0_TOPERATOR_HH
#include<dune/geometry/referenceelements.hh>
#include<dune/geometry/quadraturerules.hh>
#include<dune/pdelab/common/geometrywrapper.hh>
#include<dune/pdelab/localoperator/defaultimp.hh>
#include<dune/pdelab/localoperator/pattern.hh>
#include<dune/pdelab/localoperator/flags.hh>
#include<dune/pdelab/localoperator/idefault.hh>
#include <dune/ax1/common/constants.hh>
/**
 * Instationary local operator for the time-derivative (mass) term of the
 * Nernst-Planck system: for each ion species it accumulates
 * integral(u * phi_i), or integral(u * exp(u) * phi_i) when the solver runs
 * on log-scaled concentrations. Jacobians are obtained numerically via the
 * PDELab NumericalJacobian* base classes.
 */
template<typename PHYSICS>
class NernstPlanckTimeLocalOperator: public Dune::PDELab::NumericalJacobianApplyVolume<
    NernstPlanckTimeLocalOperator<PHYSICS> >,
  public Dune::PDELab::NumericalJacobianVolume<NernstPlanckTimeLocalOperator<PHYSICS> >,
  public Dune::PDELab::FullVolumePattern,
  public Dune::PDELab::LocalOperatorDefaultFlags,
  public Dune::PDELab::InstationaryLocalOperatorDefaultMethods<double>
{
  public:
    // pattern assembly flags
    enum
    {
      doPatternVolume = true
    };
    // residual assembly flags
    enum
    {
      doAlphaVolume = true
    };
    // constructor remembers parameters (intorder_ = quadrature order)
    NernstPlanckTimeLocalOperator(PHYSICS& physics_, unsigned int intorder_ = 2) :
      physics(physics_),
      intorder(intorder_)
    {
    }
    // volume integral depending on test and ansatz functions
    template<typename EG, typename LFSU, typename X, typename LFSV, typename R>
    void alpha_volume(const EG& eg, const LFSU& lfsu, const X& x,
        const LFSV& lfsv, R& r) const
    {
      // select only concentration component
      //typedef typename LFSU::template Child<0>::Type PLFSU_CON;
      //const PLFSU_CON& plfsuCon = lfsu.template getChild<0>();
      typedef typename LFSU::template Child<0>::Type LFSU_SINGLE_CON;
      // Numer of local power function space must equal number of ion species
      //assert(NUMBER_OF_SPECIES == physics.numOfSpecies());
      // domain and range field type
      typedef typename LFSU_SINGLE_CON::Traits::FiniteElementType::Traits::LocalBasisType::Traits::DomainFieldType
        DF;
      typedef typename LFSU_SINGLE_CON::Traits::FiniteElementType::Traits::LocalBasisType::Traits::RangeFieldType
        RF;
      typedef typename LFSU_SINGLE_CON::Traits::FiniteElementType::Traits::LocalBasisType::Traits::JacobianType
        JacobianType;
      typedef typename LFSU_SINGLE_CON::Traits::FiniteElementType::Traits::LocalBasisType::Traits::RangeType
        RangeType;
      typedef typename LFSU::Traits::SizeType size_type;
      // dimensions
      const int dim = EG::Geometry::dimension;
      // select quadrature rule of the requested order for this cell type
      Dune::GeometryType gt = eg.geometry().type();
      const Dune::QuadratureRule<DF, dim>& rule =
          Dune::QuadratureRules<DF, dim>::rule(gt, intorder);
      bool useLogScaling = physics.getParams().useLogScaling();
      // loop over quadrature points
      for (typename Dune::QuadratureRule<DF, dim>::const_iterator it =
          rule.begin(); it != rule.end(); ++it)
      {
        // quadrature weight times geometric integration element
        RF factor = it->weight() * eg.geometry().integrationElement(
            it->position());
        // *************** Concentration part ************************
        for (int j = 0; j < NUMBER_OF_SPECIES; ++j)
        {
          const LFSU_SINGLE_CON& lfsuCon = lfsu.child(j);
          // evaluate basis functions on reference element
          // NOTE(review): sized with lfsu.size() (whole tree) rather than
          // lfsuCon.size(); evaluateFunction typically resizes the vector
          // itself — confirm intended.
          std::vector<RangeType> phiCon(lfsu.size());
          lfsuCon.finiteElement().localBasis().evaluateFunction(it->position(),
              phiCon);
          // compute con at integration point
          RF con = 0.0;
          for (size_type i = 0; i < lfsuCon.size(); ++i)
            con += x(lfsuCon, i) * phiCon[i];
          // integration: accumulate the mass term for each test function
          for (size_type i = 0; i < lfsuCon.size(); ++i)
          {
            if (not useLogScaling)
            {
              r.accumulate(lfsuCon, i, con * phiCon[i] * factor);
            }
            else
            {
              // con is presumably the log-concentration here, so exp(con)
              // recovers the physical concentration — TODO confirm.
              r.accumulate(lfsuCon, i, con * std::exp( con ) * phiCon[i] * factor);
            }
          }
        }
      }
    }
  private:
    PHYSICS& physics;     // model parameters / physics callbacks
    unsigned int intorder; // quadrature order
};
#endif /* DUNE_AX1_ACME0_TOPERATOR_HH */
|
#include "Composition.h"
// Position the current-part cursor at the start of the (initially empty)
// part list. NOTE(review): addPart's push_back may invalidate this iterator;
// confirm callers reposition the cursor after adding parts.
Composition::Composition()
{
    temp = parts.begin();
}
// Prints one part: the right-hand staff then the left-hand staff, each as a
// space-separated list of MIDI values between '|' bars.
void Composition::writePart(std::vector<Part>::iterator temp) {
    auto print_staff = [](const auto &staff) {
        cout << "|";
        for (auto &note : staff) {
            cout << note->getMidi() << " ";
        }
        cout << "|" << endl;
    };
    print_staff(temp->right);
    print_staff(temp->left);
}
// Prints every part in the composition, right hand then left hand.
// The MIDI value "1" appears to act as a pairing sentinel: the first "1"
// sets `one`, suppressing the trailing space on subsequent notes, and the
// matching "1" clears it and emits a single space — presumably rendering a
// tied/grouped run without separators. TODO confirm against Part's encoding.
void Composition::writeComp()
{
    bool one = false;
    for (auto it : parts) {
        cout << "|";
        for (auto itR : it->right) {
            if (itR->getMidi() != "1" && !one)
                cout << itR->getMidi() << " ";
            else {
                if (!one) one = true; // opening "1": start suppressing spaces
                else {
                    if (itR->getMidi() != "1") {
                        cout << itR->getMidi();
                    }
                    else {
                        one = false; // closing "1": end the group
                        cout << " ";
                    }
                }
            }
        }
        cout << "|" << endl;
        one = false; // each staff starts outside any group
        cout << "|";
        for (auto itL : it->left) {
            if (itL->getMidi() != "1" && !one)
                cout << itL->getMidi() << " ";
            else {
                if (!one) one = true;
                else {
                    if (itL->getMidi() != "1") {
                        cout << itL->getMidi();
                    }
                    else {
                        one = false;
                        cout << " ";
                    }
                }
            }
        }
        cout << "|" << endl << endl;
    }
}
// Advances the part cursor and returns the new position.
// Bug fix: the original `temp = temp++;` assigned the pre-increment copy
// back to `temp`, so the cursor never actually moved.
// NOTE(review): no bounds check — advancing past parts.end() is undefined;
// callers must ensure another part exists.
std::vector<Part*>::iterator Composition::nextPart() {
    ++temp;
    return temp;
}
// Moves the part cursor back one position and returns it.
// Bug fix: the original `temp = temp--;` assigned the pre-decrement copy
// back to `temp`, so the cursor never actually moved.
// NOTE(review): no bounds check — decrementing begin() is undefined; callers
// must ensure a previous part exists.
std::vector<Part*>::iterator Composition::prevPart() {
    --temp;
    return temp;
}
// Appends a part to the composition. The raw pointer is stored as-is
// (ownership not transferred here — TODO confirm with callers).
void Composition::addPart(Part* part) {
    parts.emplace_back(part);
}
|
/***
DEVSIM
Copyright 2013 Devsim LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
***/
#ifndef EQUATION_HH
#define EQUATION_HH
#include "MathEnum.hh"
#include <string>
#include <vector>
#include <complex>
#include <map>
#include <iosfwd>
class PermutationEntry;
template <typename T, typename U> class ScalarData;
class NodeModel;
template<typename T>
using NodeScalarData = ScalarData<NodeModel, T>;
class EdgeModel;
class TriangleEdgeModel;
class TetrahedronEdgeModel;
template<typename T>
using EdgeScalarData = ScalarData<EdgeModel, T>;
template<typename T>
using TriangleEdgeScalarData = ScalarData<TriangleEdgeModel, T>;
template<typename T>
using TetrahedronEdgeScalarData = ScalarData<TetrahedronEdgeModel, T>;
template<typename T>
using NodeScalarList = std::vector<T>;
class Region;
typedef Region *RegionPtr;
typedef const Region *ConstRegionPtr;
class ObjectHolder;
// This would be for getting a stamp
// calculate stamp from first assembly
//typedef std::pair<int, int > RowColEntry;
//typedef std::vector<RowColEntry> RowColEntryVec;
//#include "MatrixEntries.hh"
namespace dsMath {
// (row, column, value) triple used for sparse-matrix assembly.
template <typename T> class RowColVal;
template <typename DoubleType>
using RealRowColVal = RowColVal<DoubleType>;
template <typename DoubleType>
using RealRowColValueVec = std::vector<RealRowColVal<DoubleType>>;
// One RHS contribution: (equation row index, value).
template <typename DoubleType>
using RHSEntry = std::pair<int, DoubleType>;
template <typename DoubleType>
using RHSEntryVec = std::vector<RHSEntry<DoubleType>>;
}
namespace EquationEnum
{
// How Update() treats the solution update for this equation's variable
// (see Equation::DefaultUpdate and the *SolutionUpdate helpers).
enum UpdateType {DEFAULT, LOGDAMP, POSITIVE};
extern const char *UpdateTypeString;
}
// Abstract base class for a per-region PDE equation. Derived classes supply
// DerivedAssemble()/UpdateValues() etc.; this class provides the common
// matrix/RHS assembly helpers, update damping policies, and error tracking.
// Called for assembly; start with specific equation derived classes, then
// work out automatically derived Equations and Models.
template <typename DoubleType>
class Equation {
    public:
        // name: equation name; region it lives on; variable it solves for;
        // utype: damping policy applied when updating the solution.
        Equation(const std::string &, RegionPtr, const std::string &/*variable*/, EquationEnum::UpdateType utype = EquationEnum::DEFAULT/*update type*/);
        virtual ~Equation() = 0;
        // Assembles matrix entries and RHS contributions for this equation
        // by delegating to DerivedAssemble().
        void Assemble(dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad, dsMathEnum::TimeMode);
        const std::string &GetName() const {
            return myname;
        }
        const std::string &GetVariable() const {
            return variable;
        }
        // Applies a Newton update (real / AC / noise variants) to the
        // solution node model.
        void Update(NodeModel &, const std::vector<DoubleType> &);
        void ACUpdate(NodeModel &, const std::vector<std::complex<DoubleType> > &);
        void NoiseUpdate(const std::string &, const std::vector<PermutationEntry> &, const std::vector<std::complex<DoubleType> > &);
        std::string GetNoiseRealName(const std::string &);
        std::string GetNoiseImagName(const std::string &);
        void DefaultNoiseUpdate(const std::string &, const std::vector<PermutationEntry> &, const std::vector<std::complex<DoubleType> > &);
        // Convergence bookkeeping recorded during the last update.
        DoubleType GetAbsError() const;
        DoubleType GetRelError() const;
        size_t GetAbsErrorNodeIndex() const;
        size_t GetRelErrorNodeIndex() const;
        void setMinError(DoubleType);
        DoubleType GetMinError() const;
        const Region &GetRegion() const
        {
            return *myregion;
        }
        void DevsimSerialize(std::ostream &) const;
        void GetCommandOptions(std::map<std::string, ObjectHolder> &) const;
    protected:
        virtual void Serialize(std::ostream &) const = 0;
        virtual void GetCommandOptions_Impl(std::map<std::string, ObjectHolder> &) const = 0;
        // for non negative variable
        void DefaultUpdate(NodeModel &, const std::vector<DoubleType> &);
        void DefaultACUpdate(NodeModel &, const std::vector<std::complex<DoubleType> > &);
        void setAbsError(DoubleType);
        void setRelError(DoubleType);
        void setAbsErrorNodeIndex(size_t);
        void setRelErrorNodeIndex(size_t);
        /// Stuff like potential is symmetric. It's derivative with respect to a node on either side is of opposite sign.
        /// Stuff should already be integrated w.r.t. EdgeCouple
        void EdgeAssembleRHS(dsMath::RHSEntryVec<DoubleType> &, const EdgeScalarData<DoubleType> &/*rhs*/, const DoubleType /*n0_sign*/, const DoubleType /*n1_sign*/);
        void TriangleEdgeAssembleRHS(dsMath::RHSEntryVec<DoubleType> &, const TriangleEdgeScalarData<DoubleType> &/*rhs*/, const DoubleType /*n0_sign*/, const DoubleType /*n1_sign*/);
        void TetrahedronEdgeAssembleRHS(dsMath::RHSEntryVec<DoubleType> &, const TetrahedronEdgeScalarData<DoubleType> &/*rhs*/, const DoubleType /*n0_sign*/, const DoubleType /*n1_sign*/);
        // void SymmetricEdgeAssembleJacobian(dsMath::RealRowColValueVec<DoubleType> &, const EdgeScalarData<DoubleType> &/*der*/, const std::string &/*var*/);
        void UnSymmetricEdgeAssembleJacobian(dsMath::RealRowColValueVec<DoubleType> &, const EdgeScalarData<DoubleType> &/*der0*/, const EdgeScalarData<DoubleType> &/*der1*/, const std::string &/*var*/, const DoubleType /*n0_sign*/, const DoubleType /*n1_sign*/);
        void UnSymmetricTriangleEdgeAssembleJacobian(dsMath::RealRowColValueVec<DoubleType> &, const TriangleEdgeScalarData<DoubleType> &/*der0*/, const TriangleEdgeScalarData<DoubleType> &/*der1*/, const TriangleEdgeScalarData<DoubleType> &/*der2*/, const std::string &/*var*/, const DoubleType /*n0_sign*/, const DoubleType /*n1_sign*/);
        void UnSymmetricTetrahedronEdgeAssembleJacobian(dsMath::RealRowColValueVec<DoubleType> &, const TetrahedronEdgeScalarData<DoubleType> &/*der0*/, const TetrahedronEdgeScalarData<DoubleType> &/*der1*/, const TetrahedronEdgeScalarData<DoubleType> &/*der2*/, const TetrahedronEdgeScalarData<DoubleType> &/*der3*/, const std::string &/*var*/, const DoubleType /*n0_sign*/, const DoubleType /*n1_sign*/);
        /// Stuff should already be integrated w.r.t. NodeVolume
        void NodeAssembleRHS(dsMath::RHSEntryVec<DoubleType> &, const NodeScalarData<DoubleType> &/*rhs*/);
        void NodeAssembleJacobian(dsMath::RealRowColValueVec<DoubleType> &, const NodeScalarData<DoubleType> &/*der*/, const std::string &/*var*/);
        void NodeVolumeAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad);
        void NodeVolumeAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad, const std::string &/*node_volume*/);
        void EdgeCoupleAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad);
        void EdgeNodeVolumeAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad);
        void EdgeCoupleAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad, const std::string &/*edge_couple*/, const DoubleType n0_sign, const DoubleType n1_sign);
        void ElementEdgeCoupleAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad);
        void ElementNodeVolumeAssemble(const std::string &, const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad);
        void TriangleEdgeCoupleAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad, const std::string &, const DoubleType, const DoubleType);
        void TetrahedronEdgeCoupleAssemble(const std::string &, dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad, const std::string &, const DoubleType, const DoubleType);
        std::string GetDerivativeModelName(const std::string &, const std::string &);
    private:
        // Hooks implemented by concrete equation types.
        virtual void DerivedAssemble(dsMath::RealRowColValueVec<DoubleType> &, dsMath::RHSEntryVec<DoubleType> &, dsMathEnum::WhatToLoad, dsMathEnum::TimeMode) = 0;
        virtual void UpdateValues(NodeModel &, const std::vector<DoubleType> &) = 0;
        virtual void ACUpdateValues(NodeModel &, const std::vector<std::complex<DoubleType> > &) = 0;
        virtual void NoiseUpdateValues(const std::string &, const std::vector<PermutationEntry> &, const std::vector<std::complex<DoubleType> > &) = 0;
        // Damping strategies selected via EquationEnum::UpdateType.
        void PositiveSolutionUpdate(const NodeScalarList<DoubleType> &, NodeScalarList<DoubleType> &, NodeScalarList<DoubleType> &);
        void LogSolutionUpdate(const NodeScalarList<DoubleType> &, NodeScalarList<DoubleType> &, NodeScalarList<DoubleType> &);
        void DefaultSolutionUpdate(const NodeScalarList<DoubleType> &, NodeScalarList<DoubleType> &, NodeScalarList<DoubleType> &);
        // Non-copyable, non-default-constructible.
        Equation();
        Equation(const Equation &);
        Equation &operator=(const Equation &);
        const std::string myname;
        RegionPtr myregion;
        const std::string variable;
        DoubleType absError;
        DoubleType relError;
        size_t absErrorNodeIndex;
        size_t relErrorNodeIndex;
        DoubleType minError;
        static const DoubleType defminError;
        EquationEnum::UpdateType updateType;
};
#endif
|
/*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#include <mitkTransform.h>
#include <mitkTestingMacros.h>
#include <mitkStringFromCvMat.h>
#include <mitkCvMatCompare.h>
#include <mitkCvMatFromVnlMatrix.h>
/**Documentation
* test for the class "Transform".
*/
/**Documentation
 * test for the class "Transform".
 * Exercises matrix setters/getters and the round trip
 * rotation matrix -> rotation vector -> rotation matrix via cv::Rodrigues.
 */
int mitkTransformTest(int, char*[])
{
  MITK_TEST_BEGIN("Transform")
  mitk::Transform::Pointer transform = mitk::Transform::New();
  mitk::Transform* t = transform;
  // test with homogeneous matrixes: set a 4x4 identity and check that the
  // cv::Mat view has trace 4 and exactly four non-zero entries.
  vnl_matrix_fixed<mitk::ScalarType, 4, 4> I;
  I.set_identity();
  std::cout << "vnl I: " << std::endl << I << std::endl;
  t->SetMatrix( I );
  cv::Mat cvI = t->GetCvMatrix();
  std::string matAsString;
  mitk::StringFromCvMat _StringFromCvMat( &cvI, &matAsString );
  _StringFromCvMat.Update();
  std::cout << "cv identity matrix: " << matAsString << std::endl;
  MITK_TEST_CONDITION_REQUIRED( trace(cvI)
                                == cv::Scalar(4)
                                , "trace(t->GetCvMatrix()) == cv::Scalar(4)" );
  MITK_TEST_CONDITION_REQUIRED( countNonZero(t->GetCvMatrix()) == 4
                                , "countNonZero(t->GetCvMatrix()) == 4" );
  // test 2: 1. create a rotation matrix,convert to quaternion
  // set as rotation vector
  // get it back as vnl rotation matrix
  // convert to cv matrix
  // convert to quaternion (cv::Rodrigues)
  // compare received quaternion with original one (created from rotation matrix)
  cv::Mat cvRotMat = cv::Mat::ones( 3, 3, cv::DataType<mitk::ScalarType>::type );
  cvRotMat.at<mitk::ScalarType>(0,1) = 2;
  cvRotMat.at<mitk::ScalarType>(0,2) = 3;
  cv::Mat cvRotVec;
  // Rodrigues converts the 3x3 matrix to a compact rotation vector.
  cv::Rodrigues( cvRotMat, cvRotVec );
  t->SetRotationVector( cvRotVec );
  vnl_matrix_fixed<mitk::ScalarType, 3, 3> vnlRotMat
      = t->GetVnlRotationMatrix();
  std::cout << "vnl rotation matrix: "
            << vnlRotMat << std::endl;
  vnl_matrix<mitk::ScalarType> rotMat = vnlRotMat.as_matrix();
  cv::Mat cvRotMatReturned;
  mitk::CvMatFromVnlMatrix<mitk::ScalarType>
      _CvMatFromVnlMatrix( &rotMat, &cvRotMatReturned );
  _CvMatFromVnlMatrix.Update();
  _StringFromCvMat.SetMatrix( &cvRotMatReturned );
  _StringFromCvMat.Update();
  std::cout << "cvRotMatReturned: " << matAsString << std::endl;
  cv::Mat cvRotVecReturned;
  cv::Rodrigues( cvRotMatReturned, cvRotVecReturned );
  _StringFromCvMat.SetMatrix( &cvRotVec );
  _StringFromCvMat.Update();
  std::cout << "cvRotVec: " << matAsString << std::endl;
  _StringFromCvMat.SetMatrix( &cvRotVecReturned );
  _StringFromCvMat.Update();
  std::cout << "cvRotVecReturned: " << matAsString << std::endl;
  // Element-wise comparison of the original and round-tripped rotation
  // vectors, within a small tolerance.
  double epsilon = 0.001;
  bool equals = false;
  mitk::CvMatCompare _CvMatCompare( &cvRotVec, &cvRotVecReturned, &epsilon, &equals );
  _CvMatCompare.Update();
  MITK_TEST_CONDITION( equals,
                       "testing returned rotation vector");
  std::cout << "Transform as string: " << transform << std::endl;
  // always end with this!
  MITK_TEST_END();
}
|
/***************************************************************************
This is a library for the BNO055 orientation sensor
Designed specifically to work with the Adafruit BNO055 Breakout.
Pick one up today in the adafruit shop!
------> http://www.adafruit.com/products
These sensors use I2C to communicate, 2 pins are required to interface.
Adafruit invests time and resources providing this open source code,
please support Adafruit and open-source hardware by purchasing products
from Adafruit!
Written by KTOWN for Adafruit Industries.
MIT license, all text above must be included in any redistribution
***************************************************************************/
#include <math.h>
#include <limits.h>
#include <iostream>
#include <unistd.h> // usleep
#include "third_party/bno055/RPi_BNO055.h"
namespace {
// Gyroscope scale: 16 LSB per deg/s, converted to rad/s per LSB.
constexpr double get_rad_per_lsb_gyro() {
  constexpr double DEGPS_PER_LSB = 1.0 / 16.0;
  constexpr double RADPS_PER_DEGPS = 0.0174533;
  return RADPS_PER_DEGPS * DEGPS_PER_LSB;
}
// Accelerometer scale: 100 LSB per m/s^2.
constexpr double get_mpss_per_lsb_accel() {
  return 1.0 / 100.0;
}
// Magnetometer scale: 16 LSB per microtesla.
constexpr double MICROTESLA_PER_LSB = 1.0 / 16.0;
}  // namespace
/***************************************************************************
CONSTRUCTOR
***************************************************************************/
/**************************************************************************/
/*!
@brief Instantiates a new Adafruit_BNO055 class
*/
/**************************************************************************/
// Binds the driver to an I2C bus path, a caller-chosen sensor id and the
// device's 7-bit address. No I/O happens here; begin() opens the bus.
Adafruit_BNO055::Adafruit_BNO055(const char* i2cBus, int32_t sensorID, uint8_t address) {
  _sensorID = sensorID;
  _address = address;
  _i2cBus = i2cBus;
  _i2cChannel = 1;
  _i2cDevice = {};
}
/***************************************************************************
PUBLIC FUNCTIONS
***************************************************************************/
/**************************************************************************/
/*!
@brief Sets up the HW
*/
/**************************************************************************/
// Opens the I2C bus, verifies the chip id, resets the BNO055 and leaves it
// in `mode` at normal power. Returns false if the bus cannot be opened or
// the chip never reports the expected id. The usleep() delays follow the
// datasheet's mode-switch and power-on timing requirements.
bool Adafruit_BNO055::begin(adafruit_bno055_opmode_t mode) {
  /* Make sure we have the right device */
  _HandleBNO = i2c_open(_i2cBus);
  if (_HandleBNO == -1) {
    return false;
  }
  // Describe the target device for the i2c helper library:
  // 1-byte register addresses, 16-byte write pages.
  _i2cDevice.bus = _HandleBNO;
  _i2cDevice.addr = _address;
  _i2cDevice.iaddr_bytes = 1;
  _i2cDevice.page_bytes = 16;
  uint8_t id = read8(BNO055_CHIP_ID_ADDR);
  if(id != BNO055_ID) {
    // The sensor may still be booting; wait a full second and retry once.
    usleep(1000 * 1000);
    id = read8(BNO055_CHIP_ID_ADDR);
    if (id != BNO055_ID) {
      return false; // still not? ok bail
    }
  }
  /* Switch to config mode (just in case since this is the default) */
  setMode(OPERATION_MODE_CONFIG);
  /* Reset -- We expect the device to go down for about half a second */
  write8(BNO055_SYS_TRIGGER_ADDR, 0x20);
  const int por_time_ms = 10; // Expected reset time, from the documentation
  // Poll until the chip comes back and answers with its id again.
  while (read8(BNO055_CHIP_ID_ADDR) != BNO055_ID) {
    usleep(1000 * por_time_ms);
  }
  usleep(1000*50);
  /* Set to normal power mode */
  write8(BNO055_PWR_MODE_ADDR, POWER_MODE_NORMAL);
  usleep(1000*10);
  // Select register page 0 (the data/control page).
  write8(BNO055_PAGE_ID_ADDR, 0);
  /* Set the output units */
  /*
  uint8_t unitsel = (0 << 7) | // Orientation = Android
                    (0 << 4) | // Temperature = Celsius
                    (0 << 2) | // Euler = Degrees
                    (1 << 1) | // Gyro = Rads
                    (0 << 0);  // Accelerometer = m/s^2
  write8(BNO055_UNIT_SEL_ADDR, unitsel);
  */
  /* Configure axis mapping (see section 3.4) */
  /*
  write8(BNO055_AXIS_MAP_CONFIG_ADDR, REMAP_CONFIG_P2); // P0-P7, Default is P1
  usleep(1000*10);
  write8(BNO055_AXIS_MAP_SIGN_ADDR, REMAP_SIGN_P2); // P0-P7, Default is P1
  usleep(1000*10);
  */
  // Clear SYS_TRIGGER (leaves reset/self-test bits deasserted).
  write8(BNO055_SYS_TRIGGER_ADDR, 0x0);
  usleep(1000*10);
  /* Set the requested operating mode (see section 3.3) */
  setMode(mode);
  usleep(1000*20);
  return true;
}
/**************************************************************************/
/*!
@brief Puts the chip in the specified operating mode
*/
/**************************************************************************/
// Puts the chip into the requested operating mode (datasheet section 3.3)
// and caches it in _mode so other calls can save/restore it.
// Fix: removed a stray empty statement (`;;`) left after the delay.
void Adafruit_BNO055::setMode(adafruit_bno055_opmode_t mode) {
  _mode = mode;
  write8(BNO055_OPR_MODE_ADDR, _mode);
  usleep(1000 * 30); // datasheet allows up to ~19 ms for a mode switch
}
// Reads the operating mode straight from the OPR_MODE register
// (not the cached _mode member).
Adafruit_BNO055::adafruit_bno055_opmode_t Adafruit_BNO055::getMode() {
  const uint8_t raw_mode = read8(BNO055_OPR_MODE_ADDR);
  return static_cast<Adafruit_BNO055::adafruit_bno055_opmode_t>(raw_mode);
}
/**************************************************************************/
/*!
@brief Use the external 32.768KHz crystal
*/
/**************************************************************************/
void Adafruit_BNO055::setExtCrystalUse(bool usextal) {
adafruit_bno055_opmode_t modeback = _mode;
/* Switch to config mode (just in case since this is the default) */
setMode(OPERATION_MODE_CONFIG);
usleep(1000*25);
write8(BNO055_PAGE_ID_ADDR, 0);
if (usextal) {
write8(BNO055_SYS_TRIGGER_ADDR, 0x80);
} else {
write8(BNO055_SYS_TRIGGER_ADDR, 0x00);
}
usleep(1000*10);
/* Set the requested operating mode (see section 3.3) */
setMode(modeback);
usleep(1000*20);
}
/**************************************************************************/
/*!
@brief Gets the latest system status info
*/
/**************************************************************************/
// Reads the system status, self-test result and system error registers.
// Any of the three out-pointers may be null to skip that register.
// Fixes: nullptr instead of 0 for pointer checks; comment typos corrected.
void Adafruit_BNO055::getSystemStatus(uint8_t *system_status, uint8_t *self_test_result, uint8_t *system_error) {
  write8(BNO055_PAGE_ID_ADDR, 0);
  /* System Status (see section 4.3.58)
     ---------------------------------
     0 = Idle
     1 = System Error
     2 = Initializing Peripherals
     3 = System Initialization
     4 = Executing Self-Test
     5 = Sensor fusion algorithm running
     6 = System running without fusion algorithms */
  if (system_status != nullptr)
    *system_status = read8(BNO055_SYS_STAT_ADDR);
  /* Self Test Results
     --------------------------------
     1 = test passed, 0 = test failed
     Bit 0 = Accelerometer self test
     Bit 1 = Magnetometer self test
     Bit 2 = Gyroscope self test
     Bit 3 = MCU self test
     0x0F = all good! */
  if (self_test_result != nullptr)
    *self_test_result = read8(BNO055_SELFTEST_RESULT_ADDR);
  /* System Error (see section 4.3.59)
     ---------------------------------
     0 = No error
     1 = Peripheral initialization error
     2 = System initialization error
     3 = Self test result failed
     4 = Register map value out of range
     5 = Register map address out of range
     6 = Register map write error
     7 = BNO low power mode not available for selected operation mode
     8 = Accelerometer power mode not available
     9 = Fusion algorithm configuration error
     A = Sensor configuration error */
  if (system_error != nullptr)
    *system_error = read8(BNO055_SYS_ERR_ADDR);
  usleep(1000*200);
}
/**************************************************************************/
/*!
@brief Gets the chip revision numbers
*/
/**************************************************************************/
// Fills `info` with the revision ids of every subsystem plus the 16-bit
// software revision (split over an LSB/MSB register pair).
void Adafruit_BNO055::getRevInfo(adafruit_bno055_rev_info_t* info) {
  memset(info, 0, sizeof(adafruit_bno055_rev_info_t));
  info->accel_rev = read8(BNO055_ACCEL_REV_ID_ADDR);
  info->mag_rev = read8(BNO055_MAG_REV_ID_ADDR);
  info->gyro_rev = read8(BNO055_GYRO_REV_ID_ADDR);
  info->bl_rev = read8(BNO055_BL_REV_ID_ADDR);
  // SW revision: low byte first, then high byte, combined into uint16.
  const uint8_t sw_lsb = read8(BNO055_SW_REV_ID_LSB_ADDR);
  const uint8_t sw_msb = read8(BNO055_SW_REV_ID_MSB_ADDR);
  info->sw_rev = static_cast<uint16_t>((static_cast<uint16_t>(sw_msb) << 8) |
                                       static_cast<uint16_t>(sw_lsb));
}
/**************************************************************************/
/*!
@brief Gets current calibration state. Each value should be a uint8_t
pointer and it will be set to 0 if not calibrated and 3 if
fully calibrated.
*/
/**************************************************************************/
// Reports the per-subsystem calibration level (0 = uncalibrated,
// 3 = fully calibrated). CALIB_STAT packs four 2-bit fields:
// sys[7:6], gyro[5:4], accel[3:2], mag[1:0]. Null pointers are skipped.
void Adafruit_BNO055::getCalibration(uint8_t* sys, uint8_t* gyro, uint8_t* accel, uint8_t* mag) {
  const uint8_t calData = read8(BNO055_CALIB_STAT_ADDR);
  auto field = [calData](int shift) -> uint8_t {
    return (calData >> shift) & 0x03;
  };
  if (sys != NULL) {
    *sys = field(6);
  }
  if (gyro != NULL) {
    *gyro = field(4);
  }
  if (accel != NULL) {
    *accel = field(2);
  }
  if (mag != NULL) {
    *mag = field(0);
  }
}
/**************************************************************************/
/*!
@brief Gets the temperature in degrees celsius
*/
/**************************************************************************/
// Returns the chip temperature; the TEMP register is a signed byte
// in degrees Celsius.
int8_t Adafruit_BNO055::getTemp(void) {
  return static_cast<int8_t>(read8(BNO055_TEMP_ADDR));
}
/**************************************************************************/
/*!
@brief Gets a vector reading from the specified source
*/
/**************************************************************************/
// Reads one 3-axis sample (6 bytes, little-endian int16 per axis) from the
// register bank selected by `vector_type` and converts it to engineering
// units (datasheet section 3.6.4).
// Fix (consistency): the magnetometer, accelerometer and linear-accel /
// gravity cases now use the same named scale constants as getVectors()
// instead of repeating raw literals.
imu::Vector<3> Adafruit_BNO055::getVector(adafruit_vector_type_t vector_type) {
  imu::Vector<3> xyz;
  uint8_t buffer[6];
  memset(buffer, 0, 6);
  int16_t x, y, z;
  x = y = z = 0;
  /* Read vector data (6 bytes) */
  readLen((adafruit_bno055_reg_t)vector_type, buffer, 6);
  x = ((int16_t)buffer[0]) | (((int16_t)buffer[1]) << 8);
  y = ((int16_t)buffer[2]) | (((int16_t)buffer[3]) << 8);
  z = ((int16_t)buffer[4]) | (((int16_t)buffer[5]) << 8);
  constexpr double RADPS_PER_LSB = get_rad_per_lsb_gyro();
  constexpr double MPSS_PER_LSB = get_mpss_per_lsb_accel();
  /* Convert the value to an appropriate range (section 3.6.4) */
  /* and assign the value to the Vector type */
  switch (vector_type) {
    case VECTOR_MAGNETOMETER:
      /* 1uT = 16 LSB */
      xyz[0] = static_cast<double>(x) * MICROTESLA_PER_LSB;
      xyz[1] = static_cast<double>(y) * MICROTESLA_PER_LSB;
      xyz[2] = static_cast<double>(z) * MICROTESLA_PER_LSB;
      break;
    case VECTOR_GYROSCOPE:
      xyz[0] = static_cast<double>(x) * RADPS_PER_LSB;
      xyz[1] = static_cast<double>(y) * RADPS_PER_LSB;
      xyz[2] = static_cast<double>(z) * RADPS_PER_LSB;
      break;
    case VECTOR_EULER:
      /* 1 degree = 16 LSB */
      xyz[0] = ((double)x) / 16.0;
      xyz[1] = ((double)y) / 16.0;
      xyz[2] = ((double)z) / 16.0;
      break;
    case VECTOR_ACCELEROMETER:
      xyz[0] = static_cast<double>(x) * MPSS_PER_LSB;
      xyz[1] = static_cast<double>(y) * MPSS_PER_LSB;
      xyz[2] = static_cast<double>(z) * MPSS_PER_LSB;
      break;
    case VECTOR_LINEARACCEL:
    case VECTOR_GRAVITY:
      /* 1m/s^2 = 100 LSB */
      xyz[0] = static_cast<double>(x) * MPSS_PER_LSB;
      xyz[1] = static_cast<double>(y) * MPSS_PER_LSB;
      xyz[2] = static_cast<double>(z) * MPSS_PER_LSB;
      break;
  }
  return xyz;
}
// Burst-reads accelerometer, magnetometer and gyroscope samples in a single
// 18-register I2C transfer and converts them to SI units (m/s^2, rad/s, uT).
// Returns an empty optional if the read fails.
const jcc::Optional<jet::embedded::ImuMeasurements> Adafruit_BNO055::getVectors() {
  // 3 sensors x 3 axes x 2 bytes per axis = 18 consecutive registers.
  constexpr uint8_t NUM_SENSORS = 3;
  constexpr uint8_t NUM_REGISTERS_PER_AXIS = 2;
  constexpr uint8_t NUM_AXES = 3;
  constexpr uint8_t NUM_REGISTERS = NUM_SENSORS * NUM_REGISTERS_PER_AXIS * NUM_AXES;
  uint8_t buffer[NUM_REGISTERS];
  int16_t accel_x, accel_y, accel_z, gyro_x, gyro_y, gyro_z, mag_x, mag_y, mag_z;
  accel_x = accel_y = accel_z = gyro_x = gyro_y = gyro_z = mag_x = mag_y = mag_z = 0;
  jcc::Optional<jet::embedded::ImuMeasurements> measurements;
  // Single burst starting at the accel X LSB register; the decode below
  // assumes the register order accel(6 bytes), mag(6), gyro(6).
  if (!readLen(adafruit_bno055_reg_t::BNO055_ACCEL_DATA_X_LSB_ADDR, buffer, NUM_REGISTERS)) {
    return {};
  }
  // Reassemble little-endian int16 samples (LSB register first).
  accel_x = static_cast<int16_t>((0xFF00 & (buffer[1] << 8)) | (0xFF & buffer[0]));
  accel_y = static_cast<int16_t>((0xFF00 & (buffer[3] << 8)) | (0xFF & buffer[2]));
  accel_z = static_cast<int16_t>((0xFF00 & (buffer[5] << 8)) | (0xFF & buffer[4]));
  mag_x = static_cast<int16_t>((0xFF00 & (buffer[7] << 8)) | (0xFF & buffer[6]));
  mag_y = static_cast<int16_t>((0xFF00 & (buffer[9] << 8)) | (0xFF & buffer[8]));
  mag_z = static_cast<int16_t>((0xFF00 & (buffer[11] << 8)) | (0xFF & buffer[10]));
  gyro_x = static_cast<int16_t>((0xFF00 & (buffer[13] << 8)) | (0xFF & buffer[12]));
  gyro_y = static_cast<int16_t>((0xFF00 & (buffer[15] << 8)) | (0xFF & buffer[14]));
  gyro_z = static_cast<int16_t>((0xFF00 & (buffer[17] << 8)) | (0xFF & buffer[16]));
  constexpr double RADPS_PER_LSB = get_rad_per_lsb_gyro();
  constexpr double MPSS_PER_LSB = get_mpss_per_lsb_accel();
  measurements = jet::embedded::ImuMeasurements();
  // Scale raw counts into SI units per the file-local LSB constants.
  measurements->accel_mpss.x() = static_cast<double>(accel_x) * MPSS_PER_LSB;
  measurements->accel_mpss.y() = static_cast<double>(accel_y) * MPSS_PER_LSB;
  measurements->accel_mpss.z() = static_cast<double>(accel_z) * MPSS_PER_LSB;
  measurements->angvel_radps.x() = static_cast<double>(gyro_x) * RADPS_PER_LSB;
  measurements->angvel_radps.y() = static_cast<double>(gyro_y) * RADPS_PER_LSB;
  measurements->angvel_radps.z() = static_cast<double>(gyro_z) * RADPS_PER_LSB;
  measurements->mag_utesla.x() = static_cast<double>(mag_x) * MICROTESLA_PER_LSB;
  measurements->mag_utesla.y() = static_cast<double>(mag_y) * MICROTESLA_PER_LSB;
  measurements->mag_utesla.z() = static_cast<double>(mag_z) * MICROTESLA_PER_LSB;
  return measurements;
}
/**************************************************************************/
/*!
@brief Gets a quaternion reading from the specified source
*/
/**************************************************************************/
// Reads the fused orientation quaternion. The data is 8 bytes of
// little-endian int16 words in the order w, x, y, z; 1 unit = 2^14 LSB
// (datasheet section 3.6.5.5).
imu::Quaternion Adafruit_BNO055::getQuat(void) {
  uint8_t raw[8] = {0};
  /* Read quat data (8 bytes) */
  readLen(BNO055_QUATERNION_DATA_W_LSB_ADDR, raw, 8);
  // Combine the i-th LSB/MSB register pair into a signed 16-bit word.
  auto word = [&raw](int i) {
    return static_cast<int16_t>((((uint16_t)raw[2 * i + 1]) << 8) |
                                ((uint16_t)raw[2 * i]));
  };
  const int16_t w = word(0);
  const int16_t x = word(1);
  const int16_t y = word(2);
  const int16_t z = word(3);
  const double scale = 1.0 / (1 << 14);
  return imu::Quaternion(scale * w, scale * x, scale * y, scale * z);
}
/**************************************************************************/
/*!
@brief Provides the sensor_t data for this sensor
*/
/**************************************************************************/
// Populates the generic sensor_t descriptor for this driver.
void Adafruit_BNO055::getSensor(sensor_t *sensor) {
  // Zero everything first so unset fields read as 0.
  memset(sensor, 0, sizeof(sensor_t));
  // Copy the name into the fixed-size field, guaranteeing NUL termination.
  strncpy(sensor->name, "BNO055", sizeof(sensor->name) - 1);
  sensor->name[sizeof(sensor->name) - 1] = 0;
  sensor->version = 1;
  sensor->sensor_id = _sensorID;
  sensor->type = SENSOR_TYPE_ORIENTATION;
  sensor->min_delay = 0;
  sensor->min_value = 0.0F;
  sensor->max_value = 0.0F;
  sensor->resolution = 0.01F;
}
/**************************************************************************/
/*!
@brief Reads the sensor and returns the data as a sensors_event_t
*/
/**************************************************************************/
// Stubbed out: the event-based API is disabled in this port (the original
// Arduino implementation is kept below for reference) and the function
// always reports failure. Use getVector()/getVectors() instead.
bool Adafruit_BNO055::getEvent(sensors_event_t *event) {
  // /* Clear the event */
  // memset(event, 0, sizeof(sensors_event_t));
  // event->version = sizeof(sensors_event_t);
  // event->sensor_id = _sensorID;
  // event->type = SENSOR_TYPE_ORIENTATION;
  // event->timestamp = gpioTick()/1000;
  // /* Get a Euler angle sample for orientation */
  // imu::Vector<3> euler = getVector(Adafruit_BNO055::VECTOR_EULER);
  // event->orientation.x = euler.x();
  // event->orientation.y = euler.y();
  // event->orientation.z = euler.z();
  // return true;
  return false;
}
/**************************************************************************/
/*!
@brief Reads the sensor's offset registers into a byte array
*/
/**************************************************************************/
bool Adafruit_BNO055::getSensorOffsets(uint8_t* calibData) {
if (isFullyCalibrated()) {
adafruit_bno055_opmode_t lastMode = _mode;
setMode(OPERATION_MODE_CONFIG);
readLen(ACCEL_OFFSET_X_LSB_ADDR, calibData, NUM_BNO055_OFFSET_REGISTERS);
setMode(lastMode);
return true;
}
return false;
}
/**************************************************************************/
/*!
@brief Reads the sensor's offset registers into an offset struct
*/
/**************************************************************************/
// Reads every calibration offset (and the two radii) into the decoded
// struct. Only valid once the sensor is fully calibrated; returns false
// otherwise.
bool Adafruit_BNO055::getSensorOffsets(adafruit_bno055_offsets_t &offsets_type) {
  if (!isFullyCalibrated()) {
    return false;
  }
  const adafruit_bno055_opmode_t previousMode = _mode;
  // Offset registers are only accessible in CONFIG mode.
  setMode(OPERATION_MODE_CONFIG);
  usleep(1000 * 25);
  // Each value is a 16-bit quantity split over an MSB/LSB register pair.
  auto read16 = [this](adafruit_bno055_reg_t msb, adafruit_bno055_reg_t lsb) {
    return (read8(msb) << 8) | (read8(lsb));
  };
  offsets_type.accel_offset_x = read16(ACCEL_OFFSET_X_MSB_ADDR, ACCEL_OFFSET_X_LSB_ADDR);
  offsets_type.accel_offset_y = read16(ACCEL_OFFSET_Y_MSB_ADDR, ACCEL_OFFSET_Y_LSB_ADDR);
  offsets_type.accel_offset_z = read16(ACCEL_OFFSET_Z_MSB_ADDR, ACCEL_OFFSET_Z_LSB_ADDR);
  offsets_type.gyro_offset_x = read16(GYRO_OFFSET_X_MSB_ADDR, GYRO_OFFSET_X_LSB_ADDR);
  offsets_type.gyro_offset_y = read16(GYRO_OFFSET_Y_MSB_ADDR, GYRO_OFFSET_Y_LSB_ADDR);
  offsets_type.gyro_offset_z = read16(GYRO_OFFSET_Z_MSB_ADDR, GYRO_OFFSET_Z_LSB_ADDR);
  offsets_type.mag_offset_x = read16(MAG_OFFSET_X_MSB_ADDR, MAG_OFFSET_X_LSB_ADDR);
  offsets_type.mag_offset_y = read16(MAG_OFFSET_Y_MSB_ADDR, MAG_OFFSET_Y_LSB_ADDR);
  offsets_type.mag_offset_z = read16(MAG_OFFSET_Z_MSB_ADDR, MAG_OFFSET_Z_LSB_ADDR);
  offsets_type.accel_radius = read16(ACCEL_RADIUS_MSB_ADDR, ACCEL_RADIUS_LSB_ADDR);
  offsets_type.mag_radius = read16(MAG_RADIUS_MSB_ADDR, MAG_RADIUS_LSB_ADDR);
  setMode(previousMode);
  return true;
}
/**************************************************************************/
/*!
@brief Writes an array of calibration values to the sensor's offset registers
*/
/**************************************************************************/
void Adafruit_BNO055::setSensorOffsets(const uint8_t* calibData) {
adafruit_bno055_opmode_t lastMode = _mode;
setMode(OPERATION_MODE_CONFIG);
usleep(1000*25);
/* A writeLen() would make this much cleaner */
write8(ACCEL_OFFSET_X_LSB_ADDR, calibData[0]);
write8(ACCEL_OFFSET_X_MSB_ADDR, calibData[1]);
write8(ACCEL_OFFSET_Y_LSB_ADDR, calibData[2]);
write8(ACCEL_OFFSET_Y_MSB_ADDR, calibData[3]);
write8(ACCEL_OFFSET_Z_LSB_ADDR, calibData[4]);
write8(ACCEL_OFFSET_Z_MSB_ADDR, calibData[5]);
write8(GYRO_OFFSET_X_LSB_ADDR, calibData[6]);
write8(GYRO_OFFSET_X_MSB_ADDR, calibData[7]);
write8(GYRO_OFFSET_Y_LSB_ADDR, calibData[8]);
write8(GYRO_OFFSET_Y_MSB_ADDR, calibData[9]);
write8(GYRO_OFFSET_Z_LSB_ADDR, calibData[10]);
write8(GYRO_OFFSET_Z_MSB_ADDR, calibData[11]);
write8(MAG_OFFSET_X_LSB_ADDR, calibData[12]);
write8(MAG_OFFSET_X_MSB_ADDR, calibData[13]);
write8(MAG_OFFSET_Y_LSB_ADDR, calibData[14]);
write8(MAG_OFFSET_Y_MSB_ADDR, calibData[15]);
write8(MAG_OFFSET_Z_LSB_ADDR, calibData[16]);
write8(MAG_OFFSET_Z_MSB_ADDR, calibData[17]);
write8(ACCEL_RADIUS_LSB_ADDR, calibData[18]);
write8(ACCEL_RADIUS_MSB_ADDR, calibData[19]);
write8(MAG_RADIUS_LSB_ADDR, calibData[20]);
write8(MAG_RADIUS_MSB_ADDR, calibData[21]);
setMode(lastMode);
}
/**************************************************************************/
/*!
@brief Writes to the sensor's offset registers from an offset struct
*/
/**************************************************************************/
void Adafruit_BNO055::setSensorOffsets(const adafruit_bno055_offsets_t &offsets_type) {
adafruit_bno055_opmode_t lastMode = _mode;
setMode(OPERATION_MODE_CONFIG);
usleep(1000*25);
write8(ACCEL_OFFSET_X_LSB_ADDR, (offsets_type.accel_offset_x) & 0x0FF);
write8(ACCEL_OFFSET_X_MSB_ADDR, (offsets_type.accel_offset_x >> 8) & 0x0FF);
write8(ACCEL_OFFSET_Y_LSB_ADDR, (offsets_type.accel_offset_y) & 0x0FF);
write8(ACCEL_OFFSET_Y_MSB_ADDR, (offsets_type.accel_offset_y >> 8) & 0x0FF);
write8(ACCEL_OFFSET_Z_LSB_ADDR, (offsets_type.accel_offset_z) & 0x0FF);
write8(ACCEL_OFFSET_Z_MSB_ADDR, (offsets_type.accel_offset_z >> 8) & 0x0FF);
write8(GYRO_OFFSET_X_LSB_ADDR, (offsets_type.gyro_offset_x) & 0x0FF);
write8(GYRO_OFFSET_X_MSB_ADDR, (offsets_type.gyro_offset_x >> 8) & 0x0FF);
write8(GYRO_OFFSET_Y_LSB_ADDR, (offsets_type.gyro_offset_y) & 0x0FF);
write8(GYRO_OFFSET_Y_MSB_ADDR, (offsets_type.gyro_offset_y >> 8) & 0x0FF);
write8(GYRO_OFFSET_Z_LSB_ADDR, (offsets_type.gyro_offset_z) & 0x0FF);
write8(GYRO_OFFSET_Z_MSB_ADDR, (offsets_type.gyro_offset_z >> 8) & 0x0FF);
write8(MAG_OFFSET_X_LSB_ADDR, (offsets_type.mag_offset_x) & 0x0FF);
write8(MAG_OFFSET_X_MSB_ADDR, (offsets_type.mag_offset_x >> 8) & 0x0FF);
write8(MAG_OFFSET_Y_LSB_ADDR, (offsets_type.mag_offset_y) & 0x0FF);
write8(MAG_OFFSET_Y_MSB_ADDR, (offsets_type.mag_offset_y >> 8) & 0x0FF);
write8(MAG_OFFSET_Z_LSB_ADDR, (offsets_type.mag_offset_z) & 0x0FF);
write8(MAG_OFFSET_Z_MSB_ADDR, (offsets_type.mag_offset_z >> 8) & 0x0FF);
write8(ACCEL_RADIUS_LSB_ADDR, (offsets_type.accel_radius) & 0x0FF);
write8(ACCEL_RADIUS_MSB_ADDR, (offsets_type.accel_radius >> 8) & 0x0FF);
write8(MAG_RADIUS_LSB_ADDR, (offsets_type.mag_radius) & 0x0FF);
write8(MAG_RADIUS_MSB_ADDR, (offsets_type.mag_radius >> 8) & 0x0FF);
setMode(lastMode);
}
// True only when every subsystem (system, gyro, accel, mag) reports the
// maximum calibration level (3).
bool Adafruit_BNO055::isFullyCalibrated(void) {
  uint8_t system, gyro, accel, mag;
  getCalibration(&system, &gyro, &accel, &mag);
  return system >= 3 && gyro >= 3 && accel >= 3 && mag >= 3;
}
// Writes `value` to `address` on register page 1. Note the device is left
// on page 1 afterwards; callers that need page-0 registers must switch back.
void Adafruit_BNO055::configure_page_1(const int address, const uint8_t value) {
  write8(BNO055_PAGE_ID_ADDR, 1);
  // Raw write: `address` is an int, bypassing the page-0 register enum.
  write8_unprotected(address, value);
}
// Reads one byte from `address` on register page 1. Returns 0 if the I2C
// transfer fails. The device is left on page 1 afterwards.
uint8_t Adafruit_BNO055::read_page_1(const int address) {
  write8(BNO055_PAGE_ID_ADDR, 1);
  uint8_t value = 0;
  i2c_read(&_i2cDevice, address, &value, 1);
  return value;
}
/***************************************************************************
PRIVATE FUNCTIONS
***************************************************************************/
// Raw register write taking a plain int address (used for page-1 registers
// that are outside the adafruit_bno055_reg_t enum). Succeeds when exactly
// one byte was transferred.
bool Adafruit_BNO055::write8_unprotected(int reg, uint8_t value) {
  uint8_t payload = value;
  const ssize_t written = i2c_write(&_i2cDevice, reg, &payload, 1);
  return written == 1;
}
/**************************************************************************/
/*!
@brief Writes an 8 bit value over I2C
*/
/**************************************************************************/
// Writes one byte to a page-0 register. Succeeds when exactly one byte
// was transferred over I2C.
bool Adafruit_BNO055::write8(adafruit_bno055_reg_t reg, uint8_t value) {
  uint8_t payload = value;
  const ssize_t written = i2c_write(&_i2cDevice, reg, &payload, 1);
  return written == 1;
}
/**************************************************************************/
/*!
@brief Reads an 8 bit value over I2C
*/
/**************************************************************************/
// Reads one byte from a page-0 register; returns 0 if the transfer fails
// (the read error is not surfaced to the caller).
uint8_t Adafruit_BNO055::read8(adafruit_bno055_reg_t reg ) {
  uint8_t result = 0;
  i2c_read(&_i2cDevice, reg, &result, 1);
  return result;
}
/**************************************************************************/
/*!
@brief Reads the specified number of bytes over I2C
*/
/**************************************************************************/
// Burst-reads `len` consecutive registers starting at `reg` into `buffer`.
// Succeeds only when the full requested length arrived.
bool Adafruit_BNO055::readLen(adafruit_bno055_reg_t reg, uint8_t * buffer, uint8_t len) {
  const int bytes_read = i2c_read(&_i2cDevice, reg, (char*)buffer, len);
  return bytes_read == (int) len;
}
|
/*
* hango-arduino
* Copyright (c) 2020 Golagola
* MIT License
*/
#include "Arduino.h"
#include "Char2Int.h"
// Converts an ASCII digit character ('0'..'9') to its numeric value 0..9.
// Any other input returns 0 (the original fallback behavior).
// Fix: the upper bound was 58 (':'), which wrongly accepted one non-digit
// character and mapped it to 10; digits end at 57 ('9').
int char2int(int data){
  int sensed_position = 0;
  if (data >= '0' && data <= '9') {
    sensed_position = data - '0';
  }
  return sensed_position;
}
|
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "RestBatchHandler.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "GeneralServer/GeneralServer.h"
#include "GeneralServer/GeneralServerFeature.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "Logger/LogMacros.h"
#include "Logger/Logger.h"
#include "Logger/LoggerStream.h"
#include "Rest/HttpRequest.h"
#include "Rest/HttpResponse.h"
#include "Scheduler/SchedulerFeature.h"
#include "Utils/ExecContext.h"
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
// Constructs the handler for one multipart batch request. _errors counts
// subpart responses with HTTP status >= 400 and starts at zero.
RestBatchHandler::RestBatchHandler(GeneralRequest* request, GeneralResponse* response)
    : RestVocbaseBaseHandler(request, response), _errors(0) {}

RestBatchHandler::~RestBatchHandler() {}
////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock JSF_batch_processing
////////////////////////////////////////////////////////////////////////////////
// Entry point: batch processing is only defined for plain HTTP. Every
// other transport (VST included) is rejected via executeVst().
RestStatus RestBatchHandler::execute() {
  if (_response->transportType() == Endpoint::TransportType::HTTP) {
    return executeHttp();
  }
  return executeVst();
}
// Multipart batch bodies are not supported over VST (or any non-HTTP
// transport): respond with 405 Method Not Allowed and finish.
RestStatus RestBatchHandler::executeVst() {
  generateError(rest::ResponseCode::METHOD_NOT_ALLOWED, TRI_ERROR_NO_ERROR,
                "The RestBatchHandler is not supported for this protocol!");
  return RestStatus::DONE;
}
// Continuation invoked after each subpart handler finishes: appends the
// subpart's response to the outer multipart body, then either schedules the
// next subpart or completes the batch response and resumes this handler.
void RestBatchHandler::processSubHandlerResult(RestHandler const& handler) {
  HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response.get());
  HttpResponse* partResponse = dynamic_cast<HttpResponse*>(handler.response());
  if (partResponse == nullptr) {
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "could not create a response for batch part request");
    // Resume the suspended batch handler so the error response is sent.
    continueHandlerExecution();
    return;
  }
  rest::ResponseCode const code = partResponse->responseCode();
  // count everything above 400 as error
  if (int(code) >= 400) {
    ++_errors;
  }
  // append the boundary for this subpart
  httpResponse->body().appendText(_boundary + "\r\nContent-Type: ");
  httpResponse->body().appendText(StaticStrings::BatchContentType);
  // append content-id if it is present
  if (_helper.contentId != nullptr) {
    httpResponse->body().appendText(
        "\r\nContent-Id: " + std::string(_helper.contentId, _helper.contentIdLength));
  }
  httpResponse->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n\r\n"));
  // remove some headers we don't need
  // (blanking the Server header keeps it out of the part's header block)
  partResponse->setHeaderNC(StaticStrings::Server, "");
  // append the part response header
  partResponse->writeHeader(&httpResponse->body());
  // append the part response body
  httpResponse->body().appendText(partResponse->body());
  httpResponse->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
  // we've read the last part
  if (!_helper.containsMore) {
    // complete the handler
    // append final boundary + "--"
    httpResponse->body().appendText(_boundary + "--");
    if (_errors > 0) {
      // advertise the number of failed subparts in a response header
      httpResponse->setHeaderNC(StaticStrings::Errors,
                                StringUtils::itoa(static_cast<uint64_t>(_errors)));
    }
    continueHandlerExecution();
  } else {
    // more parts remain: schedule the next one; on scheduling failure,
    // resume the batch handler with whatever has been produced so far
    if (!executeNextHandler()) {
      continueHandlerExecution();
    }
  }
}
// Extracts the next part from the multipart message, builds an internal
// HttpRequest for it, and queues the matching handler on the scheduler.
// Returns false if extraction, handler creation or queueing fails (the
// caller then resumes the batch handler). On success the subpart completes
// asynchronously via processSubHandlerResult().
bool RestBatchHandler::executeNextHandler() {
  // get authorization header. we will inject this into the subparts
  std::string const& authorization = _request->header(StaticStrings::Authorization);
  // get the next part from the multipart message
  if (!extractPart(_helper)) {
    // error
    generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
                  "invalid multipart message received");
    LOG_TOPIC("3204a", WARN, arangodb::Logger::REPLICATION)
        << "received a corrupted multipart message";
    return false;
  }
  // split part into header & body
  char const* partStart = _helper.foundStart;
  char const* partEnd = partStart + _helper.foundLength;
  size_t const partLength = _helper.foundLength;
  char const* headerStart = partStart;
  char const* bodyStart = nullptr;
  size_t headerLength = 0;
  size_t bodyLength = 0;
  // assume Windows linebreak \r\n\r\n as delimiter
  // NOTE(review): strstr relies on the buffer being NUL-terminated past
  // partEnd — presumably guaranteed by the payload storage; verify.
  char const* p = strstr(headerStart, "\r\n\r\n");
  if (p != nullptr && p + 4 <= partEnd) {
    headerLength = p - partStart;
    bodyStart = p + 4;
    bodyLength = partEnd - bodyStart;
  } else {
    // test Unix linebreak
    p = strstr(headerStart, "\n\n");
    if (p != nullptr && p + 2 <= partEnd) {
      headerLength = p - partStart;
      bodyStart = p + 2;
      bodyLength = partEnd - bodyStart;
    } else {
      // no delimiter found, assume we have only a header
      headerLength = partLength;
    }
  }
  // set up request object for the part
  LOG_TOPIC("910e9", TRACE, arangodb::Logger::REPLICATION)
      << "part header is: " << std::string(headerStart, headerLength);
  std::unique_ptr<HttpRequest> request(
      new HttpRequest(_request->connectionInfo(), headerStart, headerLength, false));
  // inject the request context from the framing (batch) request
  // the "false" means the context is not responsible for resource handling
  request->setRequestContext(_request->requestContext(), false);
  request->setDatabaseName(_request->databaseName());
  if (bodyLength > 0) {
    LOG_TOPIC("63afb", TRACE, arangodb::Logger::REPLICATION)
        << "part body is '" << std::string(bodyStart, bodyLength) << "'";
    request->body().clear();
    request->body().reserve(bodyLength+1);
    request->body().append(bodyStart, bodyLength);
    request->body().push_back('\0');
    request->body().resetTo(bodyLength); // ensure null terminated
  }
  if (!authorization.empty()) {
    // inject Authorization header of multipart message into part message
    request->setHeader(StaticStrings::Authorization.c_str(),
                       StaticStrings::Authorization.size(),
                       authorization.c_str(), authorization.size());
  }
  std::shared_ptr<RestHandler> handler;
  {
    // response starts as 500; the subpart handler overwrites it on success
    auto response = std::make_unique<HttpResponse>(rest::ResponseCode::SERVER_ERROR,
                                                   std::make_unique<StringBuffer>(false));
    handler.reset(
        GeneralServerFeature::HANDLER_FACTORY->createHandler(std::move(request),
                                                             std::move(response)));
    if (handler == nullptr) {
      generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                    "could not create handler for batch part processing");
      return false;
    }
  }
  // assume a bad lane, so the request is definitely executed via the queues
  auto const lane = RequestLane::CLIENT_V8;
  // now schedule the real handler
  // `self` keeps the batch handler alive for the async continuation
  bool ok =
      SchedulerFeature::SCHEDULER->queue(lane, [this, self = shared_from_this(), handler]() {
        // start to work for this handler
        // ignore any errors here, will be handled later by inspecting the response
        try {
          ExecContextScope scope(nullptr);  // workaround because of assertions
          handler->runHandler([this, self](RestHandler* handler) {
            processSubHandlerResult(*handler);
          });
        } catch (...) {
          processSubHandlerResult(*handler);
        }
      });
  if (!ok) {
    generateError(rest::ResponseCode::SERVICE_UNAVAILABLE, TRI_ERROR_QUEUE_FULL);
    return false;
  }
  return true;
}
// HTTP path of the batch handler: validates the request type and the
// multipart boundary, prepares the outer response and the multipart parsing
// state, then kicks off the first subpart. Returns WAITING while subparts
// run asynchronously, DONE on immediate failure or completion.
RestStatus RestBatchHandler::executeHttp() {
  TRI_ASSERT(_response->transportType() == Endpoint::TransportType::HTTP);
  // extract the request type
  auto const type = _request->requestType();
  if (type != rest::RequestType::POST && type != rest::RequestType::PUT) {
    generateError(rest::ResponseCode::METHOD_NOT_ALLOWED, TRI_ERROR_HTTP_METHOD_NOT_ALLOWED);
    return RestStatus::DONE;
  }
  // invalid content-type or boundary sent
  if (!getBoundary(_boundary)) {
    generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
                  "invalid content-type or boundary received");
    return RestStatus::DONE;
  }
  LOG_TOPIC("b03fa", TRACE, arangodb::Logger::REPLICATION)
      << "boundary of multipart-message is '" << _boundary << "'";
  _errors = 0;
  // create the response; the multipart content type of the request is echoed
  resetResponse(rest::ResponseCode::OK);
  _response->setContentType(_request->header(StaticStrings::ContentTypeHeader));
  // http required here
  VPackStringRef bodyStr = _request->rawPayload();
  // setup some auxiliary structures to parse the multipart message
  _multipartMessage =
      MultipartMessage{_boundary.data(), _boundary.size(), bodyStr.data(),
                       bodyStr.data() + bodyStr.size()};
  _helper.message = _multipartMessage;
  _helper.searchStart = _multipartMessage.messageStart;
  // and wait for completion
  return executeNextHandler() ? RestStatus::WAITING : RestStatus::DONE;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief extract the boundary from the body of a multipart message
////////////////////////////////////////////////////////////////////////////////
// Extracts the multipart boundary from the body of a multipart message.
// The boundary is expected to be the first non-whitespace token and to
// start with "--". Returns true and stores the boundary (including the
// leading "--") in `result` on success.
bool RestBatchHandler::getBoundaryBody(std::string& result) {
  TRI_ASSERT(_response->transportType() == Endpoint::TransportType::HTTP);

  VPackStringRef bodyStr = _request->rawPayload();
  char const* p = bodyStr.data();
  char const* e = p + bodyStr.size();

  // skip whitespace. the bounds check is required: an empty or
  // all-whitespace body would otherwise be read past its end
  while (p < e && (*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n')) {
    ++p;
  }

  if (p + 10 > e) {
    // not enough room left for a minimal boundary
    return false;
  }

  if (p[0] != '-' || p[1] != '-') {
    // boundary must start with "--"
    return false;
  }

  // the boundary extends until the first whitespace, linebreak or NUL
  char const* q = p;
  while (q < e && *q && *q != ' ' && *q != '\t' && *q != '\r' && *q != '\n') {
    ++q;
  }

  if ((q - p) < 5) {
    // 3 bytes is min length for boundary (without "--")
    return false;
  }

  result = std::string(p, (q - p));
  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief extract the boundary from the HTTP header of a multipart message
////////////////////////////////////////////////////////////////////////////////
// Extracts the multipart boundary from the HTTP Content-Type header.
// Expects "multipart/form-data; boundary=<value>", optionally with the
// value wrapped in single or double quotes. On success stores "--<value>"
// in `result` and returns true.
bool RestBatchHandler::getBoundaryHeader(std::string& result) {
  // extract content type
  std::string const contentType =
      StringUtils::trim(_request->header(StaticStrings::ContentTypeHeader));

  // content type is expected to contain a boundary like this:
  // "Content-Type: multipart/form-data; boundary=<boundary goes here>"
  std::vector<std::string> parts = StringUtils::split(contentType, ';');
  if (parts.size() != 2 || parts[0] != StaticStrings::MultiPartContentType) {
    // content-type is not formatted as expected
    return false;
  }

  static size_t const boundaryLength = 9; // strlen("boundary=");

  // trim 2nd part and lowercase it
  StringUtils::trimInPlace(parts[1]);
  std::string p = parts[1].substr(0, boundaryLength);
  StringUtils::tolowerInPlace(&p);

  // the "boundary=" prefix is matched case-insensitively
  if (p != "boundary=") {
    return false;
  }

  std::string boundary = parts[1].substr(boundaryLength);

  // strip a matching pair of surrounding double or single quotes
  if ((boundary.length() > 1) && (boundary[0] == '"') &&
      (boundary[boundary.length() - 1] == '"')) {
    StringUtils::trimInPlace(boundary, "\"");
  } else if ((boundary.length() > 1) && (boundary[0] == '\'') &&
             (boundary[boundary.length() - 1] == '\'')) {
    StringUtils::trimInPlace(boundary, "'");
  }

  if (boundary.size() < 3) {
    // 3 bytes is min length for boundary (without "--")
    return false;
  }

  result = "--" + boundary;
  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief extract the boundary of a multipart message
////////////////////////////////////////////////////////////////////////////////
// Determines the multipart boundary for the current request. The
// Content-Type header is the authoritative source; only when no boundary
// can be derived from it is the request body inspected.
bool RestBatchHandler::getBoundary(std::string& result) {
  TRI_ASSERT(_request);
  return getBoundaryHeader(result) || getBoundaryBody(result);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief extract the next part from a multipart message
////////////////////////////////////////////////////////////////////////////////
// Extracts the next part from a multipart message, starting at
// helper.searchStart. On success, helper.foundStart/foundLength delimit the
// part's body, helper.contentId(.Length) holds an optional Content-Id
// header, and helper.containsMore signals whether further parts follow.
// Returns false on malformed input or when no part remains.
bool RestBatchHandler::extractPart(SearchHelper& helper) {
  TRI_ASSERT(helper.searchStart != nullptr);

  // init the response
  helper.foundStart = nullptr;
  helper.foundLength = 0;
  helper.containsMore = false;
  helper.contentId = nullptr;
  helper.contentIdLength = 0;

  char const* searchEnd = helper.message.messageEnd;

  if (helper.searchStart >= searchEnd) {
    // we're at the end already
    return false;
  }

  // search for boundary
  // NOTE(review): strstr/strchr assume the underlying buffer is
  // NUL-terminated; this appears to hold for the request body - confirm
  // before reusing this parser on other buffers.
  char const* found = strstr(helper.searchStart, helper.message.boundary);
  if (found == nullptr) {
    // not contained. this is an error
    return false;
  }

  if (found != helper.searchStart) {
    // boundary not located at beginning. this is an error
    return false;
  }

  found += helper.message.boundaryLength;
  if (found + 1 >= searchEnd) {
    // we're outside the buffer. this is an error
    return false;
  }

  // skip spaces following the boundary
  while (found < searchEnd && *found == ' ') {
    ++found;
  }

  if (found + 2 >= searchEnd) {
    // we're outside the buffer. this is an error
    return false;
  }

  // expect a (CR)LF terminating the boundary line
  if (*found == '\r') {
    ++found;
  }
  if (*found != '\n') {
    // no linebreak found
    return false;
  }
  ++found;

  bool hasTypeHeader = false;
  int breakLength = 1;

  // parse the part's headers until an empty line is reached
  while (found < searchEnd) {
    // fixed: test the bound BEFORE dereferencing. the original evaluated
    // *found first, which could read one byte past the buffer
    while (found < searchEnd && *found == ' ') {
      ++found;
    }

    // try Windows linebreak first
    breakLength = 2;
    char const* eol = strstr(found, "\r\n");

    if (eol == nullptr) {
      breakLength = 1;
      eol = strchr(found, '\n');

      if (eol == found) {
        break;
      }
    } else {
      char const* eol2 = strchr(found, '\n');

      if (eol2 != nullptr && eol2 < eol) {
        breakLength = 1;
        eol = eol2;
      }
    }

    if (eol == nullptr || eol == found) {
      break;
    }

    // split key/value of header
    char const* colon = static_cast<char const*>(memchr(found, (int)':', eol - found));

    if (nullptr == colon) {
      // invalid header, not containing ':'
      return false;
    }

    // set up the key
    std::string key(found, colon - found);
    StringUtils::trimInPlace(key);

    // only "content-type" and "content-id" are relevant; both start with 'c'
    if (key[0] == 'c' || key[0] == 'C') {
      // got an interesting key. now process it
      StringUtils::tolowerInPlace(&key);

      // skip the colon itself
      ++colon;
      // skip any whitespace
      while (*colon == ' ') {
        ++colon;
      }

      if (key == StaticStrings::ContentTypeHeader) {
        // extract the value, too
        std::string value(colon, eol - colon);
        StringUtils::trimInPlace(value);

        if (value == StaticStrings::BatchContentType) {
          hasTypeHeader = true;
        } else {
          LOG_TOPIC("f7836", WARN, arangodb::Logger::REPLICATION)
              << "unexpected content-type '" << value << "' for multipart-message. expected: '"
              << StaticStrings::BatchContentType << "'";
        }
      } else if ("content-id" == key) {
        helper.contentId = colon;
        helper.contentIdLength = eol - colon;
      } else {
        // ignore other headers
      }
    }

    found = eol + breakLength; // plus the \n
  }

  found += breakLength; // for 2nd \n

  if (!hasTypeHeader) {
    // no Content-Type header. this is an error
    return false;
  }

  // we're at the start of the body part. set the return value
  helper.foundStart = found;

  // search for the end of the boundary
  found = strstr(helper.foundStart, helper.message.boundary);

  if (found == nullptr || found >= searchEnd) {
    // did not find the end. this is an error
    return false;
  }

  helper.foundLength = found - helper.foundStart;

  char const* p = found + helper.message.boundaryLength;

  if (p + 2 > searchEnd) {
    // end of boundary is outside the buffer
    return false;
  }

  if (*p != '-' || *(p + 1) != '-') {
    // we've not reached the end yet
    helper.containsMore = true;
    helper.searchStart = found;
  }

  return true;
}
|
/*
MANGO Multimedia Development Platform
Copyright (C) 2012-2020 Twilight Finland 3D Oy Ltd. All rights reserved.
*/
#include <vector>
#include <mango/math/math.hpp>
using namespace mango;
void example1()
{
float32x4 a(1.0f, 2.0f, 3.0f, 4.0f);
float32x4 b = sin(a);
float32x4 c = cross(a, b) * 1.5f - a * 2.0f * dot(a, b * 3.0f);
MANGO_UNREFERENCED(c);
}
// Demonstrates swizzle access (.xxyy / .wwww) and component broadcast
// division by the result's first lane.
float32x4 example2(const float32x4& a, const float32x4& b)
{
    const float32x4 shuffled = a.xxyy * 2.0f;
    const float32x4 weighted = b * b.wwww;
    float32x4 combined = shuffled - weighted;
    return combined / combined.x;
}
float32x4 example3(const float32x4& a, const float32x4& b)
{
    // select() picks between two values per lane using the mask produced
    // by the comparison operator. Computing both candidates and choosing
    // afterwards avoids branching, which is often cheaper than a branch
    // when the predicate (here a > b) varies per component.
    const float32x4 chosen = select(a > b, sin(a), cos(b));
    return chosen;
}
float32x4 example4(const float32x4& a, const float32x4& b)
{
    // Scalar equivalent of example3: process one lane at a time with
    // ordinary control flow instead of select().
    float32x4 out;
    for (int lane = 0; lane < 4; ++lane)
    {
        if (a[lane] > b[lane])
            out[lane] = sin(a[lane]);
        else
            out[lane] = cos(b[lane]);
    }
    return out;
}
float32x3 example5(float32x3 a, float32x3 b, float32x3 c)
{
    // triangle normal from three vertices: the normalized cross product
    // of two edge vectors
    const float32x3 edge1 = a - b;
    const float32x3 edge2 = a - c;
    return normalize(cross(edge1, edge2));
}
float32x4 example6(float32x4 a)
{
    // demonstrates decomposing a vector into two halves and reassembling
    // them in swapped order; equivalent to (but slower than) a.zwxy
    const float32x2 lower = a.xy;
    const float32x2 upper = a.zw;
    return float32x4(upper, lower);
}
// Demonstrates signed point/plane distance queries.
void example7(float32x3 normal, float dist)
{
    const Plane plane(normal, dist);
    const float32x3 point(20.0f, 0.0f, 0.0f);
    const float signedDistance = plane.distance(point);
    if (signedDistance < 0)
    {
        // point is behind the plane
    }
}
void example8(const Plane& plane, float32x3 point0, float32x3 point1)
{
Ray ray(point0, point1);
Intersect is;
if (is.intersect(ray, plane))
{
// compute point of intersection
float32x3 p = ray.origin + ray.direction * is.t0;
MANGO_UNREFERENCED(p);
}
}
void example9(const Sphere& sphere, const Ray& ray)
{
IntersectRange is;
if (is.intersect(ray, sphere))
{
// compute points where ray enters and leaves the sphere
float32x3 enter = ray.origin + ray.direction * is.t0;
float32x3 leave = ray.origin + ray.direction * is.t1;
MANGO_UNREFERENCED(enter);
MANGO_UNREFERENCED(leave);
}
}
void example10(const std::vector<Box>& boxes, const Ray& ray)
{
// FastRay has precomputed data to accelerate intersection computations
// However, it has overhead for doing this computation so it is best used
// when the same ray is intersected to a lot of different geometry
FastRay fast(ray);
for (auto& box : boxes)
{
IntersectRange is;
if (is.intersect(fast, box))
{
float32x3 enter = ray.origin + ray.direction * is.t0;
float32x3 leave = ray.origin + ray.direction * is.t1;
MANGO_UNREFERENCED(enter);
MANGO_UNREFERENCED(leave);
}
}
}
void example11()
{
float32x4 linear(1.0f, 0.5f, 0.5f, 1.0f);
float32x4 nonlinear = linear_to_srgb(linear);
linear = srgb_to_linear(nonlinear);
}
/*
    Portable low-level SIMD abstraction. It uses a simple functional API for
    getting the job done and supports multiple architectures through a common
    interface, exposing only functionality that is efficient and common to
    all of them.
    The higher-level short vector math library is written on top of this low
    level code to be more user-friendly. This abstracts all of the platform
    specific minute details into its own neat compartment for easier
    maintenance, and also makes it easier to add more platforms. We already
    have quite a few targets:
    - Intel (SSE, SSE2, SSE3, SSE 4.1, AVX, AVX2)
    - ARM NEON
    - PPC Altivec / SPU
*/
void example12()
{
simd::f32x4 a = simd::f32x4_set(1.0f, 2.0f, 2.0f, 1.0f);
simd::f32x4 b = simd::f32x4_set(0.0f, 1.0f, 0.5f, 0.5f);
simd::f32x4 c = simd::add(a, b);
simd::f32x4 d = simd::mul(c, b);
simd::mask32x4 mask = simd::compare_gt(a, b);
simd::f32x4 e = simd::select(mask, d, c);
MANGO_UNREFERENCED(e);
}
// previous example using higher-level "Short Vector Math" abstraction:
void example13()
{
float32x4 a(1.0f, 2.0f, 2.0f, 1.0f);
float32x4 b(0.0f, 1.0f, 0.5f, 0.5f);
float32x4 c = a + b;
float32x4 d = c * b;
float32x4 e = select(a > b, d, c);
MANGO_UNREFERENCED(e);
}
int main()
{
    // NOTE: This code is compiled for validation purposes; the examples
    // above are not invoked at runtime.
}
|
/*****************************************************************************/
/* Copyright (c) 2016, Alessandro Pieropan */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* */
/* 1. Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution. */
/* */
/* 3. Neither the name of the copyright holder nor the names of its */
/* contributors may be used to endorse or promote products derived from */
/* this software without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */
/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */
/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */
/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */
/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*****************************************************************************/
#include "mesh.h"

#include <utility>

using namespace std;
namespace rendering{
// Constructs a mesh from geometry data and uploads it to the GPU.
// The vectors are taken by value and moved into the members, avoiding a
// second copy of potentially large vertex/index buffers.
Mesh::Mesh(vector<Vertex> vertices, vector<GLuint> indices, vector<Texture> textures)
{
    vertices_ = std::move(vertices);
    indices_ = std::move(indices);
    textures_ = std::move(textures);

    // Now that we have all the required data, set the vertex buffers and its attribute pointers.
    this->setupMesh();
}
// Render the mesh: binds all textures to consecutive texture units, wires
// the corresponding sampler uniforms, issues the indexed draw call and
// restores default GL texture state afterwards.
void Mesh::draw(Shader shader)
{
    // Bind appropriate textures
    GLuint diffuseNr = 1;
    GLuint specularNr = 1;
    for(GLuint i = 0; i < this->textures_.size(); i++)
    {
        glActiveTexture(GL_TEXTURE0 + i); // Active proper texture unit before binding
        // Retrieve texture number (the N in diffuse_textureN)
        stringstream ss;
        string number;
        string name = this->textures_[i].type;
        if(name == "texture_diffuse")
            ss << diffuseNr++; // Transfer GLuint to stream
        else if(name == "texture_specular")
            ss << specularNr++; // Transfer GLuint to stream
        number = ss.str();
        // Now set the sampler to the correct texture unit
        glUniform1i(glGetUniformLocation(shader.program_id_, (name + number).c_str()), i);
        // And finally bind the texture
        glBindTexture(GL_TEXTURE_2D, this->textures_[i].id);
    }

    // Also set each mesh's shininess property to a default value (if you want
    // you could extend this to another mesh property and possibly change this value)
    glUniform1f(glGetUniformLocation(shader.program_id_, "material.shininess"), 16.0f);

    // Draw mesh
    glBindVertexArray(this->VAO);
    glDrawElements(GL_TRIANGLES, this->indices_.size(), GL_UNSIGNED_INT, 0);
    glBindVertexArray(0);

    // Always good practice to set everything back to defaults once configured.
    for (GLuint i = 0; i < this->textures_.size(); i++)
    {
        glActiveTexture(GL_TEXTURE0 + i);
        glBindTexture(GL_TEXTURE_2D, 0);
    }
}
// Creates the VAO/VBO/EBO, uploads vertex and index data and configures
// the vertex attribute layout (position, normal, texture coordinates).
void Mesh::setupMesh()
{
    // Create buffers/arrays
    glGenVertexArrays(1, &this->VAO);
    glGenBuffers(1, &this->VBO);
    glGenBuffers(1, &this->EBO);

    glBindVertexArray(this->VAO);

    // Load data into vertex buffers
    glBindBuffer(GL_ARRAY_BUFFER, this->VBO);
    // A great thing about structs is that their memory layout is sequential for all its items.
    // The effect is that we can simply pass a pointer to the struct and it translates perfectly to a glm::vec3/2 array which
    // again translates to 3/2 floats which translates to a byte array.
    glBufferData(GL_ARRAY_BUFFER, this->vertices_.size() * sizeof(Vertex), &this->vertices_[0], GL_STATIC_DRAW);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->EBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->indices_.size() * sizeof(GLuint), &this->indices_[0], GL_STATIC_DRAW);

    // Set the vertex attribute pointers
    // Vertex Positions
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)0);
    // Vertex Normals
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)offsetof(Vertex, Normal));
    // Vertex Texture Coords
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)offsetof(Vertex, TexCoords));

    // unbind to avoid accidental modification of this VAO later
    glBindVertexArray(0);
}
} // end namespace
|
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/Expected.h>
#include <folly/Portability.h>
#include <folly/portability/GTest.h>
#include <algorithm>
#include <iomanip>
#include <memory>
#include <string>
#include <type_traits>
#include <vector>
#include <glog/logging.h>
using std::shared_ptr;
using std::unique_ptr;
namespace folly {
// Error enum used throughout these tests, with a stream printer so that
// gtest failure messages are readable.
enum class E { E1, E2 };

std::ostream& operator<<(std::ostream& os, E e) {
  if (e == E::E1) {
    os << "E::E1";
  } else if (e == E::E2) {
    os << "E::E2";
  }
  // unknown enumerators print nothing, matching the original switch
  return os;
}
// Generic printer for Expected: successful values render as
// "Expected(value)", errors as "Unexpected(error)".
template <class V, class E>
std::ostream& operator<<(std::ostream& os, const Expected<V, E>& e) {
  if (!e) {
    return os << "Unexpected(" << e.error() << ')';
  }
  return os << "Expected(" << e.value() << ')';
}
// Helper type without a default constructor; used to verify that
// Expected<NoDefault, Err> is nevertheless default constructible
// (defaulting to the unexpected state).
struct NoDefault {
  NoDefault(int, int) {}
  char a, b, c;
};
// Expected<NoDefault, int> must be default constructible even though
// NoDefault itself is not: it defaults to the unexpected state.
TEST(Expected, NoDefault) {
  static_assert(
      std::is_default_constructible<Expected<NoDefault, int>>::value, "");
  Expected<NoDefault, int> x{in_place, 42, 42};
  EXPECT_TRUE(bool(x));
  x.emplace(4, 5);
  EXPECT_TRUE(bool(x));
  x = makeUnexpected(42);
  EXPECT_FALSE(bool(x));
  EXPECT_EQ(42, x.error());
}

// Basic value semantics with a non-trivial value type.
TEST(Expected, String) {
  Expected<std::string, int> maybeString;
  EXPECT_FALSE(bool(maybeString));
  EXPECT_EQ(0, maybeString.error());
  maybeString = "hello";
  EXPECT_TRUE(bool(maybeString));
  EXPECT_EQ("hello", *maybeString);
}

TEST(Expected, Ambiguous) {
  // Potentially ambiguous and confusing construction and assignment disallowed:
  EXPECT_FALSE((std::is_constructible<Expected<int, int>, int>::value));
  EXPECT_FALSE((std::is_assignable<Expected<int, int>&, int>::value));
}

// Expected with a const value type: emplace and transitions to the error
// state still work; copy assignment must be disabled.
TEST(Expected, Const) {
  { // default construct
    Expected<const int, int> ex;
    EXPECT_FALSE(bool(ex));
    EXPECT_EQ(0, ex.error());
    ex.emplace(4);
    EXPECT_EQ(4, *ex);
    ex.emplace(5);
    EXPECT_EQ(5, *ex);
    ex = makeUnexpected(42);
    EXPECT_FALSE(bool(ex));
    EXPECT_EQ(42, ex.error());
  }
  { // copy-constructed
    const int x = 6;
    Expected<const int, int> ex{in_place, x};
    Expected<const int, int> ex2 = ex;
    EXPECT_EQ(6, *ex2);
  }
  { // move-constructed
    const int x = 7;
    Expected<const int, int> ex{in_place, std::move(x)};
    Expected<const int, int> ex2 = std::move(ex);
    EXPECT_EQ(7, *ex2);
  }
  // no assignment allowed
  EXPECT_FALSE((std::is_copy_assignable<Expected<const int, int>>::value));
}

// Default state, emplace, value_or, and transitions to the error state.
TEST(Expected, Simple) {
  Expected<int, int> ex;
  EXPECT_FALSE(bool(ex));
  EXPECT_EQ(42, ex.value_or(42));
  ex.emplace(4);
  EXPECT_TRUE(bool(ex));
  EXPECT_EQ(4, *ex);
  EXPECT_EQ(4, ex.value_or(42));
  ex = makeUnexpected(-1);
  EXPECT_FALSE(bool(ex));
  EXPECT_EQ(-1, ex.error());
  EXPECT_EQ(42, ex.value_or(42));
}
// Helper that makes moves observable: a moved-from MoveTester compares
// equal to "", so tests can detect whether a copy or a move occurred.
class MoveTester {
 public:
  /* implicit */ MoveTester(const char* s) : s_(s) {}
  MoveTester(const MoveTester&) = default;
  MoveTester(MoveTester&& other) noexcept {
    s_ = std::move(other.s_);
    other.s_ = "";
  }
  MoveTester& operator=(const MoveTester&) = default;
  MoveTester& operator=(MoveTester&& other) noexcept {
    s_ = std::move(other.s_);
    other.s_ = "";
    return *this;
  }

 private:
  friend bool operator==(const MoveTester& o1, const MoveTester& o2);
  std::string s_;
};

bool operator==(const MoveTester& o1, const MoveTester& o2) {
  return o1.s_ == o2.s_;
}
// value_or must copy from an lvalue default and move from an rvalue
// default -- and only actually consume the default when it is used.
TEST(Expected, value_or_rvalue_arg) {
  Expected<MoveTester, int> ex = makeUnexpected(-1);
  MoveTester dflt = "hello";
  EXPECT_EQ("hello", ex.value_or(dflt));
  EXPECT_EQ("hello", dflt);
  EXPECT_EQ("hello", ex.value_or(std::move(dflt)));
  EXPECT_EQ("", dflt);
  EXPECT_EQ("world", ex.value_or("world"));
  dflt = "hello";
  // Make sure that the const overload works on const objects
  const auto& exc = ex;
  EXPECT_EQ("hello", exc.value_or(dflt));
  EXPECT_EQ("hello", dflt);
  EXPECT_EQ("hello", exc.value_or(std::move(dflt)));
  EXPECT_EQ("", dflt);
  EXPECT_EQ("world", exc.value_or("world"));
  dflt = "hello";
  ex = "meow";
  EXPECT_EQ("meow", ex.value_or(dflt));
  EXPECT_EQ("hello", dflt);
  EXPECT_EQ("meow", ex.value_or(std::move(dflt)));
  EXPECT_EQ("hello", dflt); // only moved if used
}

// value_or on an rvalue Expected works with move-only value types.
TEST(Expected, value_or_noncopyable) {
  Expected<std::unique_ptr<int>, int> ex{unexpected, 42};
  std::unique_ptr<int> dflt(new int(42));
  EXPECT_EQ(42, *std::move(ex).value_or(std::move(dflt)));
}

// Deleter that checks the pointee's final value upon destruction; used to
// prove the contained value was moved out (and later mutated) rather than
// copied.
struct ExpectingDeleter {
  explicit ExpectingDeleter(int expected_) : expected(expected_) {}
  int expected;
  void operator()(const int* ptr) {
    EXPECT_EQ(*ptr, expected);
    delete ptr;
  }
};

// value() on an rvalue Expected moves the contained value out.
TEST(Expected, value_move) {
  auto ptr = Expected<std::unique_ptr<int, ExpectingDeleter>, int>(
                 in_place, new int(42), ExpectingDeleter{1337})
                 .value();
  *ptr = 1337;
}

// operator* on an rvalue Expected also moves the contained value out.
TEST(Expected, dereference_move) {
  auto ptr = *Expected<std::unique_ptr<int, ExpectingDeleter>, int>(
      in_place, new int(42), ExpectingDeleter{1337});
  *ptr = 1337;
}
// Copy/move construction propagates the error state.
TEST(Expected, EmptyConstruct) {
  Expected<int, int> ex{unexpected, 42};
  EXPECT_FALSE(bool(ex));
  Expected<int, int> test1(ex);
  EXPECT_FALSE(bool(test1));
  Expected<int, int> test2(std::move(ex));
  EXPECT_FALSE(bool(test2));
  EXPECT_EQ(42, test2.error());
}

// Move-only value type: moving an Expected leaves the source engaged but
// holding a moved-from (null) unique_ptr.
TEST(Expected, Unique) {
  Expected<unique_ptr<int>, int> ex;
  ex = makeUnexpected(-1);
  EXPECT_FALSE(bool(ex));
  // empty->emplaced
  ex.emplace(new int(5));
  EXPECT_TRUE(bool(ex));
  EXPECT_EQ(5, **ex);
  ex = makeUnexpected(-1);
  // empty->moved
  ex = std::make_unique<int>(6);
  EXPECT_EQ(6, **ex);
  // full->moved
  ex = std::make_unique<int>(7);
  EXPECT_EQ(7, **ex);
  // move it out by move construct
  Expected<unique_ptr<int>, int> moved(std::move(ex));
  EXPECT_TRUE(bool(moved));
  EXPECT_TRUE(bool(ex));
  EXPECT_EQ(nullptr, ex->get());
  EXPECT_EQ(7, **moved);
  EXPECT_TRUE(bool(moved));
  ex = std::move(moved); // move it back by move assign
  EXPECT_TRUE(bool(moved));
  EXPECT_EQ(nullptr, moved->get());
  EXPECT_TRUE(bool(ex));
  EXPECT_EQ(7, **ex);
}

// shared_ptr value type: use_count tracks copies into and out of the
// Expected through every state transition.
TEST(Expected, Shared) {
  shared_ptr<int> ptr;
  Expected<shared_ptr<int>, int> ex{unexpected, -1};
  EXPECT_FALSE(bool(ex));
  // empty->emplaced
  ex.emplace(new int(5));
  EXPECT_TRUE(bool(ex));
  ptr = ex.value();
  EXPECT_EQ(ptr.get(), ex->get());
  EXPECT_EQ(2, ptr.use_count());
  ex = makeUnexpected(-1);
  EXPECT_EQ(1, ptr.use_count());
  // full->copied
  ex = ptr;
  EXPECT_EQ(2, ptr.use_count());
  EXPECT_EQ(ptr.get(), ex->get());
  ex = makeUnexpected(-1);
  EXPECT_EQ(1, ptr.use_count());
  // full->moved
  ex = std::move(ptr);
  EXPECT_EQ(1, ex->use_count());
  EXPECT_EQ(nullptr, ptr.get());
  {
    EXPECT_EQ(1, ex->use_count());
    Expected<shared_ptr<int>, int> copied(ex);
    EXPECT_EQ(2, ex->use_count());
    Expected<shared_ptr<int>, int> moved(std::move(ex));
    EXPECT_EQ(2, moved->use_count());
    moved.emplace(new int(6));
    EXPECT_EQ(1, moved->use_count());
    copied = moved;
    EXPECT_EQ(2, moved->use_count());
  }
}
// Ordering: unexpected values sort before any value; values sort among
// themselves by their contained value.
TEST(Expected, Order) {
  std::vector<Expected<int, E>> vect{
      {unexpected, E::E1},
      {3},
      {1},
      {unexpected, E::E1},
      {2},
  };
  std::vector<Expected<int, E>> expected{
      {unexpected, E::E1},
      {unexpected, E::E1},
      {1},
      {2},
      {3},
  };
  std::sort(vect.begin(), vect.end());
  EXPECT_EQ(vect, expected);
}

// The swap() member exchanges both the state and the contained values.
TEST(Expected, SwapMethod) {
  Expected<std::string, E> a;
  Expected<std::string, E> b;
  a.swap(b);
  EXPECT_FALSE(a.hasValue());
  EXPECT_FALSE(b.hasValue());
  a = "hello";
  EXPECT_TRUE(a.hasValue());
  EXPECT_FALSE(b.hasValue());
  EXPECT_EQ("hello", a.value());
  b.swap(a);
  EXPECT_FALSE(a.hasValue());
  EXPECT_TRUE(b.hasValue());
  EXPECT_EQ("hello", b.value());
  a = "bye";
  EXPECT_TRUE(a.hasValue());
  EXPECT_EQ("bye", a.value());
  a.swap(b);
  EXPECT_EQ("hello", a.value());
  EXPECT_EQ("bye", b.value());
}

// Same transitions as SwapMethod, driven through std::swap.
TEST(Expected, StdSwapFunction) {
  Expected<std::string, E> a;
  Expected<std::string, E> b;
  std::swap(a, b);
  EXPECT_FALSE(a.hasValue());
  EXPECT_FALSE(b.hasValue());
  a = "greeting";
  EXPECT_TRUE(a.hasValue());
  EXPECT_FALSE(b.hasValue());
  EXPECT_EQ("greeting", a.value());
  std::swap(a, b);
  EXPECT_FALSE(a.hasValue());
  EXPECT_TRUE(b.hasValue());
  EXPECT_EQ("greeting", b.value());
  a = "goodbye";
  EXPECT_TRUE(a.hasValue());
  EXPECT_EQ("goodbye", a.value());
  std::swap(a, b);
  EXPECT_EQ("greeting", a.value());
  EXPECT_EQ("goodbye", b.value());
}

// Same transitions again, driven through folly::swap.
TEST(Expected, FollySwapFunction) {
  Expected<std::string, E> a;
  Expected<std::string, E> b;
  folly::swap(a, b);
  EXPECT_FALSE(a.hasValue());
  EXPECT_FALSE(b.hasValue());
  a = "salute";
  EXPECT_TRUE(a.hasValue());
  EXPECT_FALSE(b.hasValue());
  EXPECT_EQ("salute", a.value());
  folly::swap(a, b);
  EXPECT_FALSE(a.hasValue());
  EXPECT_TRUE(b.hasValue());
  EXPECT_EQ("salute", b.value());
  a = "adieu";
  EXPECT_TRUE(a.hasValue());
  EXPECT_EQ("adieu", a.value());
  folly::swap(a, b);
  EXPECT_EQ("salute", a.value());
  EXPECT_EQ("adieu", b.value());
}
// Relational operators between two Expected objects of the same type.
TEST(Expected, Comparisons) {
  Expected<int, E> o_;
  Expected<int, E> o1(1);
  Expected<int, E> o2(2);
  EXPECT_TRUE(o_ <= (o_));
  EXPECT_TRUE(o_ == (o_));
  EXPECT_TRUE(o_ >= (o_));
  EXPECT_TRUE(o1 < o2);
  EXPECT_TRUE(o1 <= o2);
  EXPECT_TRUE(o1 <= (o1));
  EXPECT_TRUE(o1 == (o1));
  EXPECT_TRUE(o1 != o2);
  EXPECT_TRUE(o1 >= (o1));
  EXPECT_TRUE(o2 >= o1);
  EXPECT_TRUE(o2 > o1);
  EXPECT_FALSE(o2 < o1);
  EXPECT_FALSE(o2 <= o1);
  EXPECT_FALSE(o2 <= o1);
  EXPECT_FALSE(o2 == o1);
  EXPECT_FALSE(o1 != (o1));
  EXPECT_FALSE(o1 >= o2);
  EXPECT_FALSE(o1 >= o2);
  EXPECT_FALSE(o1 > o2);
  /* folly::Expected explicitly doesn't support comparisons with contained value
  EXPECT_TRUE(1 < o2);
  EXPECT_TRUE(1 <= o2);
  EXPECT_TRUE(1 <= o1);
  EXPECT_TRUE(1 == o1);
  EXPECT_TRUE(2 != o1);
  EXPECT_TRUE(1 >= o1);
  EXPECT_TRUE(2 >= o1);
  EXPECT_TRUE(2 > o1);
  EXPECT_FALSE(o2 < 1);
  EXPECT_FALSE(o2 <= 1);
  EXPECT_FALSE(o2 <= 1);
  EXPECT_FALSE(o2 == 1);
  EXPECT_FALSE(o2 != 2);
  EXPECT_FALSE(o1 >= 2);
  EXPECT_FALSE(o1 >= 2);
  EXPECT_FALSE(o1 > 2);
  */
}

// No implicit conversion to the value type; only explicit operator bool.
TEST(Expected, Conversions) {
  Expected<bool, E> mbool;
  Expected<short, E> mshort;
  Expected<char*, E> mstr;
  Expected<int, E> mint;
  EXPECT_FALSE((std::is_convertible<Expected<bool, E>&, bool>::value));
  EXPECT_FALSE((std::is_convertible<Expected<short, E>&, short>::value));
  EXPECT_FALSE((std::is_convertible<Expected<char*, E>&, char*>::value));
  EXPECT_FALSE((std::is_convertible<Expected<int, E>&, int>::value));
  // intended explicit operator bool, for if (ex).
  bool b(mbool);
  EXPECT_FALSE(b);
  // Truthy tests work and are not ambiguous
  if (mbool && mshort && mstr && mint) { // only checks not-empty
    if (*mbool && *mshort && *mstr && *mint) { // only checks value
      ;
    }
  }
  mbool = false;
  EXPECT_TRUE(bool(mbool));
  EXPECT_FALSE(*mbool);
  mbool = true;
  EXPECT_TRUE(bool(mbool));
  EXPECT_TRUE(*mbool);
  mbool = {unexpected, E::E1};
  EXPECT_FALSE(bool(mbool));
  // No conversion allowed; does not compile
  // mbool == false;
}
// get_pointer returns nullptr in the error state, else a mutable pointer
// to the contained value.
TEST(Expected, Pointee) {
  Expected<int, E> x;
  EXPECT_FALSE(get_pointer(x));
  x = 1;
  EXPECT_TRUE(get_pointer(x));
  *get_pointer(x) = 2;
  EXPECT_TRUE(*x == 2);
  x = {unexpected, E::E1};
  EXPECT_FALSE(get_pointer(x));
}

// makeExpected copies lvalues (the source must remain intact) and moves
// rvalues. NOTE(review): the test name "MakeOptional" appears inherited
// from the Optional test suite; it exercises makeExpected.
TEST(Expected, MakeOptional) {
  // const L-value version
  const std::string s("abc");
  auto exStr = makeExpected<E>(s);
  ASSERT_TRUE(exStr.hasValue());
  EXPECT_EQ(*exStr, "abc");
  *exStr = "cde";
  EXPECT_EQ(s, "abc");
  EXPECT_EQ(*exStr, "cde");
  // L-value version
  std::string s2("abc");
  auto exStr2 = makeExpected<E>(s2);
  ASSERT_TRUE(exStr2.hasValue());
  EXPECT_EQ(*exStr2, "abc");
  *exStr2 = "cde";
  // it's vital to check that s2 wasn't clobbered
  EXPECT_EQ(s2, "abc");
  // L-value reference version
  std::string& s3(s2);
  auto exStr3 = makeExpected<E>(s3);
  ASSERT_TRUE(exStr3.hasValue());
  EXPECT_EQ(*exStr3, "abc");
  *exStr3 = "cde";
  EXPECT_EQ(s3, "abc");
  // R-value ref version
  unique_ptr<int> pInt(new int(3));
  auto exIntPtr = makeExpected<E>(std::move(pInt));
  EXPECT_TRUE(pInt.get() == nullptr);
  ASSERT_TRUE(exIntPtr.hasValue());
  EXPECT_EQ(**exIntPtr, 3);
}
// Self-assignment (copy and move) must leave the value intact; the pragma
// silences clang's -Wself-move for the deliberate `b = std::move(b)`.
#if __CLANG_PREREQ(3, 6)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wself-move"
#endif

TEST(Expected, SelfAssignment) {
  Expected<std::string, E> a = "42";
  a = a;
  ASSERT_TRUE(a.hasValue() && a.value() == "42");
  Expected<std::string, E> b = "23333333";
  b = std::move(b);
  ASSERT_TRUE(b.hasValue() && b.value() == "23333333");
}

#if __CLANG_PREREQ(3, 6)
#pragma clang diagnostic pop
#endif
// Wrapper holding an Expected member; used below to check that classes
// containing an Expected get working copy/move assignment.
class ContainsExpected {
 public:
  ContainsExpected() {}
  explicit ContainsExpected(int x) : ex_(x) {}
  bool hasValue() const {
    return ex_.hasValue();
  }
  int value() const {
    return ex_.value();
  }

  ContainsExpected(const ContainsExpected& other) = default;
  ContainsExpected& operator=(const ContainsExpected& other) = default;
  ContainsExpected(ContainsExpected&& other) = default;
  ContainsExpected& operator=(ContainsExpected&& other) = default;

 private:
  Expected<int, E> ex_;
};

/**
 * Test that a class containing an Expected can be copy and move assigned.
 * This was broken under gcc 4.7 until assignment operators were explicitly
 * defined.
 */
TEST(Expected, AssignmentContained) {
  {
    ContainsExpected source(5), target;
    target = source;
    EXPECT_TRUE(target.hasValue());
    EXPECT_EQ(5, target.value());
  }
  {
    ContainsExpected source(5), target;
    target = std::move(source);
    EXPECT_TRUE(target.hasValue());
    EXPECT_EQ(5, target.value());
    EXPECT_TRUE(source.hasValue());
  }
  {
    ContainsExpected ex_uninit, target(10);
    target = ex_uninit;
    EXPECT_FALSE(target.hasValue());
  }
}
// Accessing value() in the error state throws BadExpectedAccess.
TEST(Expected, Exceptions) {
  Expected<int, E> empty;
  EXPECT_THROW(empty.value(), Unexpected<E>::BadExpectedAccess);
}

// Type whose every special member may throw; used for the noexcept
// propagation checks below.
struct ThrowingBadness {
  ThrowingBadness() noexcept(false);
  ThrowingBadness(const ThrowingBadness&) noexcept(false);
  ThrowingBadness(ThrowingBadness&&) noexcept(false);
  ThrowingBadness& operator=(const ThrowingBadness&) noexcept(false);
  ThrowingBadness& operator=(ThrowingBadness&&) noexcept(false);
};

// Default construction yields the unexpected state, so only a throwing
// *error* type makes default construction potentially throwing.
TEST(Expected, NoThrowDefaultConstructible) {
  EXPECT_TRUE(
      (std::is_nothrow_default_constructible<Expected<bool, E>>::value));
  EXPECT_TRUE(
      (std::is_nothrow_default_constructible<Expected<std::string, E>>::value));
  EXPECT_TRUE((std::is_nothrow_default_constructible<
               Expected<ThrowingBadness, E>>::value));
  EXPECT_FALSE((std::is_nothrow_default_constructible<
                Expected<int, ThrowingBadness>>::value));
}

// noexcept of move construction follows the value type's move.
TEST(Expected, NoThrowMoveConstructible) {
  EXPECT_TRUE((std::is_nothrow_move_constructible<Expected<bool, E>>::value));
  EXPECT_TRUE((std::is_nothrow_move_constructible<
               Expected<std::unique_ptr<int>, E>>::value));
  EXPECT_FALSE((
      std::is_nothrow_move_constructible<Expected<ThrowingBadness, E>>::value));
}

// noexcept of move assignment follows the value type's move.
TEST(Expected, NoThrowMoveAssignable) {
  EXPECT_TRUE((std::is_nothrow_move_assignable<Expected<bool, E>>::value));
  EXPECT_TRUE((std::is_nothrow_move_assignable<
               Expected<std::unique_ptr<int>, E>>::value));
  EXPECT_FALSE(
      (std::is_nothrow_move_assignable<Expected<ThrowingBadness, E>>::value));
}
}
struct NoSelfAssign {
NoSelfAssign() = default;
NoSelfAssign(NoSelfAssign&&) = default;
NoSelfAssign(const NoSelfAssign&) = default;
NoSelfAssign& operator=(NoSelfAssign&& that) {
EXPECT_NE(this, &that);
return *this;
}
NoSelfAssign& operator=(const NoSelfAssign& that) {
EXPECT_NE(this, &that);
return *this;
}
};
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wself-move"
#endif
TEST(Expected, NoSelfAssign) {
folly::Expected<NoSelfAssign, int> e{NoSelfAssign{}};
e = e; // @nolint
e = std::move(e); // @nolint
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// Marker types for the triviality checks below.
struct NoDestructor {};

struct WithDestructor {
  ~WithDestructor();
};

// Expected is trivially destructible iff its value type is.
TEST(Expected, TriviallyDestructible) {
  // These could all be static_asserts but EXPECT_* give much nicer output on
  // failure.
  EXPECT_TRUE(
      (std::is_trivially_destructible<Expected<NoDestructor, E>>::value));
  EXPECT_TRUE((std::is_trivially_destructible<Expected<int, E>>::value));
  EXPECT_FALSE(
      (std::is_trivially_destructible<Expected<WithDestructor, E>>::value));
}

struct NoConstructor {};

struct WithConstructor {
  WithConstructor();
};

// libstdc++ with GCC 4.x doesn't have std::is_trivially_copyable
#if (defined(__clang__) && !defined(_LIBCPP_VERSION)) || \
    !(defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 5)
// Trivial copyability follows the value and error types; a non-trivial
// constructor alone does not break it.
TEST(Expected, TriviallyCopyable) {
  // These could all be static_asserts but EXPECT_* give much nicer output on
  // failure.
  EXPECT_TRUE((is_trivially_copyable<Expected<int, E>>::value));
  EXPECT_TRUE((is_trivially_copyable<Expected<char*, E>>::value));
  EXPECT_TRUE((is_trivially_copyable<Expected<NoDestructor, E>>::value));
  EXPECT_FALSE((is_trivially_copyable<Expected<WithDestructor, E>>::value));
  EXPECT_TRUE((is_trivially_copyable<Expected<NoConstructor, E>>::value));
  EXPECT_FALSE((is_trivially_copyable<Expected<std::string, E>>::value));
  EXPECT_FALSE((is_trivially_copyable<Expected<int, std::string>>::value));
  EXPECT_TRUE((is_trivially_copyable<Expected<WithConstructor, E>>::value));
  EXPECT_TRUE((is_trivially_copyable<Expected<Expected<int, E>, E>>::value));
}
#endif
// Exercises Expected::then: lifting a plain return value into an Expected,
// flattening a returned Expected with the same error type, void-returning
// continuations (yielding Expected<Unit, E>), non-flattening when the inner
// error type differs, short-circuiting on error (the continuation must not
// run), and chaining multiple continuations.
TEST(Expected, Then) {
  // Lifting
  {
    Expected<int, E> ex =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.then(
            [](std::unique_ptr<int> p) { return *p; });
    EXPECT_TRUE(bool(ex));
    EXPECT_EQ(42, *ex);
  }
  // Flattening
  {
    Expected<int, E> ex =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.then(
            [](std::unique_ptr<int> p) { return makeExpected<E>(*p); });
    EXPECT_TRUE(bool(ex));
    EXPECT_EQ(42, *ex);
  }
  // Void
  {
    Expected<Unit, E> ex =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.then(
            [](std::unique_ptr<int>) {});
    EXPECT_TRUE(bool(ex));
  }
  // Non-flattening (different error codes)
  {
    Expected<Expected<int, int>, E> ex =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.then(
            [](std::unique_ptr<int> p) { return makeExpected<int>(*p); });
    EXPECT_TRUE(bool(ex));
    EXPECT_TRUE(bool(*ex));
    EXPECT_EQ(42, **ex);
  }
  {
    // Error case: the continuation must not be invoked (ADD_FAILURE).
    Expected<int, E> ex =
        Expected<std::unique_ptr<int>, E>{unexpected, E::E1}.then(
            [](std::unique_ptr<int> p) -> int {
              ADD_FAILURE();
              return *p;
            });
    EXPECT_FALSE(bool(ex));
    EXPECT_EQ(E::E1, ex.error());
  }
  // Chaining
  {
    Expected<std::string, E> ex =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.then(
            [](std::unique_ptr<int> p) { return makeExpected<E>(*p); },
            [](int i) { return i == 42 ? "yes" : "no"; });
    EXPECT_TRUE(bool(ex));
    EXPECT_EQ("yes", *ex);
  }
  // Chaining with errors: an error produced mid-chain skips the rest.
  {
    Expected<std::string, E> ex =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.then(
            [](std::unique_ptr<int>) {
              return Expected<int, E>(unexpected, E::E1);
            },
            [](int i) { return i == 42 ? "yes" : "no"; });
    EXPECT_FALSE(bool(ex));
    EXPECT_EQ(E::E1, ex.error());
  }
}
// Exercises Expected::thenOrThrow: on success it returns the continuation's
// unwrapped result; on error it throws BadExpectedAccess by default, or the
// exception returned or thrown by the optional second function; a second
// function returning void falls back to BadExpectedAccess.
TEST(Expected, ThenOrThrow) {
  // Success: the continuation runs and its result is returned directly.
  {
    int e =
        Expected<std::unique_ptr<int>, E>{in_place, new int(42)}.thenOrThrow(
            [](std::unique_ptr<int> p) { return *p; });
    EXPECT_EQ(42, e);
  }
  // Error, no throw-function: BadExpectedAccess.
  {
    EXPECT_THROW(
        (Expected<std::unique_ptr<int>, E>{unexpected, E::E1}.thenOrThrow(
            [](std::unique_ptr<int> p) { return *p; })),
        Unexpected<E>::BadExpectedAccess);
  }
  // Error, throw-function *returns* an exception object: it is thrown.
  {
    EXPECT_THROW(
        (Expected<std::unique_ptr<int>, E>{unexpected, E::E1}.thenOrThrow(
            [](std::unique_ptr<int> p) { return *p; },
            [](E) { return std::runtime_error(""); })),
        std::runtime_error);
  }
  // Error, throw-function throws itself: the exception propagates.
  {
    EXPECT_THROW(
        (Expected<std::unique_ptr<int>, E>{unexpected, E::E1}.thenOrThrow(
            [](std::unique_ptr<int> p) { return *p; },
            [](E) { throw std::runtime_error(""); })),
        std::runtime_error);
  }
  // Error, throw-function returns void: falls back to BadExpectedAccess.
  {
    EXPECT_THROW(
        (Expected<std::unique_ptr<int>, E>{unexpected, E::E1}.thenOrThrow(
            [](std::unique_ptr<int> p) { return *p; }, [](E) {})),
        Unexpected<E>::BadExpectedAccess);
  }
}
} // namespace folly
|
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "fldserver/base/sql/sql_features.h"
namespace sql
{
namespace features
{
// Enable WAL (write-ahead logging) mode for all SQLite databases.
// Disabled by default; flipped on experimentally via the feature system.
const base::Feature kEnableWALModeByDefault{"EnableWALModeByDefault",
                                            base::FEATURE_DISABLED_BY_DEFAULT};
} // namespace features
} // namespace sql
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/chromeos/file_system_provider/mount_path_util.h"
#include <memory>
#include <string>
#include "base/files/file.h"
#include "base/memory/ptr_util.h"
#include "chrome/browser/chromeos/file_system_provider/fake_extension_provider.h"
#include "chrome/browser/chromeos/file_system_provider/fake_provided_file_system.h"
#include "chrome/browser/chromeos/file_system_provider/icon_set.h"
#include "chrome/browser/chromeos/file_system_provider/provided_file_system_interface.h"
#include "chrome/browser/chromeos/file_system_provider/service.h"
#include "chrome/browser/chromeos/file_system_provider/service_factory.h"
#include "chrome/browser/chromeos/login/users/fake_chrome_user_manager.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/common/extensions/api/file_system_provider_capabilities/file_system_provider_capabilities_handler.h"
#include "chrome/test/base/testing_browser_process.h"
#include "chrome/test/base/testing_profile.h"
#include "chrome/test/base/testing_profile_manager.h"
#include "components/keyed_service/core/keyed_service.h"
#include "components/user_manager/scoped_user_manager.h"
#include "content/public/browser/browser_context.h"
#include "content/public/test/browser_task_environment.h"
#include "extensions/browser/extension_registry.h"
#include "storage/browser/file_system/external_mount_points.h"
#include "storage/browser/file_system/isolated_context.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace chromeos {
namespace file_system_provider {
namespace util {
namespace {
// Fake provider identity and file system metadata shared by all test cases
// in this file. kFileSystemId deliberately contains '/' characters to
// exercise percent-escaping in mount paths.
const char kExtensionId[] = "mbflcebpggnecokmikipoihdbecnjfoj";
const char kFileSystemId[] = "File/System/Id";
const char kDisplayName[] = "Camera Pictures";
const ProviderId kProviderId = ProviderId::CreateFromExtensionId(kExtensionId);
const ProviderId kNativeProviderId = ProviderId::CreateFromNativeId("native");
// Creates a FileSystemURL for tests.
// Cracks the URL through the system ExternalMountPoints so the result
// carries the mount information. |file_path| must be absolute; its leading
// slash is stripped before it is appended to the mount point's base name.
storage::FileSystemURL CreateFileSystemURL(
    Profile* profile,
    const ProvidedFileSystemInfo& file_system_info,
    const base::FilePath& file_path) {
  // The origin is always the fake test extension, independent of the
  // provider recorded in |file_system_info|.
  const std::string origin = std::string("chrome-extension://") + kExtensionId;
  const base::FilePath mount_path = file_system_info.mount_path();
  const storage::ExternalMountPoints* const mount_points =
      storage::ExternalMountPoints::GetSystemInstance();
  DCHECK(mount_points);
  DCHECK(file_path.IsAbsolute());
  base::FilePath relative_path(file_path.value().substr(1));
  return mount_points->CreateCrackedFileSystemURL(
      url::Origin::Create(GURL(origin)), storage::kFileSystemTypeExternal,
      base::FilePath(mount_path.BaseName().Append(relative_path)));
}
} // namespace
class FileSystemProviderMountPathUtilTest : public testing::Test {
protected:
FileSystemProviderMountPathUtilTest() {}
~FileSystemProviderMountPathUtilTest() override {}
void SetUp() override {
profile_manager_.reset(
new TestingProfileManager(TestingBrowserProcess::GetGlobal()));
ASSERT_TRUE(profile_manager_->SetUp());
profile_ = profile_manager_->CreateTestingProfile("testing-profile");
user_manager_ = new FakeChromeUserManager();
user_manager_enabler_ = std::make_unique<user_manager::ScopedUserManager>(
base::WrapUnique(user_manager_));
user_manager_->AddUser(
AccountId::FromUserEmail(profile_->GetProfileUserName()));
file_system_provider_service_ = Service::Get(profile_);
file_system_provider_service_->RegisterProvider(
FakeExtensionProvider::Create(kExtensionId));
}
content::BrowserTaskEnvironment task_environment_;
std::unique_ptr<TestingProfileManager> profile_manager_;
TestingProfile* profile_; // Owned by TestingProfileManager.
std::unique_ptr<user_manager::ScopedUserManager> user_manager_enabler_;
FakeChromeUserManager* user_manager_;
Service* file_system_provider_service_; // Owned by its factory.
};
// The mount path encodes the provider id (extension id, or '@'-prefixed
// native id), the percent-escaped file system id, and the profile hash.
TEST_F(FileSystemProviderMountPathUtilTest, GetMountPath) {
  const base::FilePath extension_result =
      GetMountPath(profile_, kProviderId, kFileSystemId);
  const std::string extension_expected =
      "/provided/mbflcebpggnecokmikipoihdbecnjfoj:"
      "File%2FSystem%2FId:testing-profile-hash";
  EXPECT_EQ(extension_expected, extension_result.AsUTF8Unsafe());
  const base::FilePath native_result =
      GetMountPath(profile_, kNativeProviderId, kFileSystemId);
  const std::string native_expected =
      "/provided/@native:"
      "File%2FSystem%2FId:testing-profile-hash";
  EXPECT_EQ(native_expected, native_result.AsUTF8Unsafe());
}
// Only a mount path itself and paths below it count as file-system-provider
// local paths; the bare "/provided" root, relative paths, "/" and the empty
// path do not.
TEST_F(FileSystemProviderMountPathUtilTest, IsFileSystemProviderLocalPath) {
  const base::FilePath mount_path =
      GetMountPath(profile_, kProviderId, kFileSystemId);
  const base::FilePath file_path =
      base::FilePath(FILE_PATH_LITERAL("/hello/world.txt"));
  const base::FilePath local_file_path =
      mount_path.Append(base::FilePath(file_path.value().substr(1)));
  EXPECT_TRUE(IsFileSystemProviderLocalPath(mount_path));
  EXPECT_TRUE(IsFileSystemProviderLocalPath(local_file_path));
  EXPECT_FALSE(IsFileSystemProviderLocalPath(
      base::FilePath(FILE_PATH_LITERAL("provided/hello-world/test.txt"))));
  EXPECT_FALSE(IsFileSystemProviderLocalPath(
      base::FilePath(FILE_PATH_LITERAL("/provided"))));
  EXPECT_FALSE(
      IsFileSystemProviderLocalPath(base::FilePath(FILE_PATH_LITERAL("/"))));
  EXPECT_FALSE(IsFileSystemProviderLocalPath(base::FilePath()));
}
// FileSystemURLParser should map a cracked URL on a mounted file system
// back to the providing file system and the in-filesystem path.
TEST_F(FileSystemProviderMountPathUtilTest, Parser) {
  const base::File::Error result =
      file_system_provider_service_->MountFileSystem(
          kProviderId, MountOptions(kFileSystemId, kDisplayName));
  ASSERT_EQ(base::File::FILE_OK, result);
  const ProvidedFileSystemInfo file_system_info =
      file_system_provider_service_
          ->GetProvidedFileSystem(kProviderId, kFileSystemId)
          ->GetFileSystemInfo();
  const base::FilePath kFilePath =
      base::FilePath(FILE_PATH_LITERAL("/hello/world.txt"));
  const storage::FileSystemURL url =
      CreateFileSystemURL(profile_, file_system_info, kFilePath);
  EXPECT_TRUE(url.is_valid());
  FileSystemURLParser parser(url);
  EXPECT_TRUE(parser.Parse());
  ProvidedFileSystemInterface* file_system = parser.file_system();
  ASSERT_TRUE(file_system);
  EXPECT_EQ(kFileSystemId, file_system->GetFileSystemInfo().file_system_id());
  EXPECT_EQ(kFilePath.AsUTF8Unsafe(), parser.file_path().AsUTF8Unsafe());
}
// FileSystemURLParser must also resolve the file system's root path "/".
TEST_F(FileSystemProviderMountPathUtilTest, Parser_RootPath) {
  const base::File::Error result =
      file_system_provider_service_->MountFileSystem(
          kProviderId, MountOptions(kFileSystemId, kDisplayName));
  ASSERT_EQ(base::File::FILE_OK, result);
  const ProvidedFileSystemInfo file_system_info =
      file_system_provider_service_
          ->GetProvidedFileSystem(kProviderId, kFileSystemId)
          ->GetFileSystemInfo();
  const base::FilePath kFilePath = base::FilePath(FILE_PATH_LITERAL("/"));
  const storage::FileSystemURL url =
      CreateFileSystemURL(profile_, file_system_info, kFilePath);
  EXPECT_TRUE(url.is_valid());
  FileSystemURLParser parser(url);
  EXPECT_TRUE(parser.Parse());
  ProvidedFileSystemInterface* file_system = parser.file_system();
  ASSERT_TRUE(file_system);
  EXPECT_EQ(kFileSystemId, file_system->GetFileSystemInfo().file_system_id());
  EXPECT_EQ(kFilePath.AsUTF8Unsafe(), parser.file_path().AsUTF8Unsafe());
}
// Parsing must fail for a URL whose mount point was never actually created
// (the file system info is built by hand, nothing is mounted).
TEST_F(FileSystemProviderMountPathUtilTest, Parser_WrongUrl) {
  const ProvidedFileSystemInfo file_system_info(
      kProviderId, MountOptions(kFileSystemId, kDisplayName),
      GetMountPath(profile_, kProviderId, kFileSystemId),
      false /* configurable */, true /* watchable */, extensions::SOURCE_FILE,
      IconSet());
  const base::FilePath kFilePath = base::FilePath(FILE_PATH_LITERAL("/hello"));
  const storage::FileSystemURL url =
      CreateFileSystemURL(profile_, file_system_info, kFilePath);
  // It is impossible to create a cracked URL for a mount point which doesn't
  // exist, therefore it will always be invalid, and empty.
  EXPECT_FALSE(url.is_valid());
  FileSystemURLParser parser(url);
  EXPECT_FALSE(parser.Parse());
}
// An isolated file system registered on top of a provided file system URL
// must still be traceable back to the original provided file system.
TEST_F(FileSystemProviderMountPathUtilTest, Parser_IsolatedURL) {
  const base::File::Error result =
      file_system_provider_service_->MountFileSystem(
          kProviderId, MountOptions(kFileSystemId, kDisplayName));
  ASSERT_EQ(base::File::FILE_OK, result);
  const ProvidedFileSystemInfo file_system_info =
      file_system_provider_service_
          ->GetProvidedFileSystem(kProviderId, kFileSystemId)
          ->GetFileSystemInfo();
  const base::FilePath kFilePath =
      base::FilePath(FILE_PATH_LITERAL("/hello/world.txt"));
  const storage::FileSystemURL url =
      CreateFileSystemURL(profile_, file_system_info, kFilePath);
  EXPECT_TRUE(url.is_valid());
  // Create an isolated URL for the original one.
  storage::IsolatedContext* const isolated_context =
      storage::IsolatedContext::GetInstance();
  const storage::IsolatedContext::ScopedFSHandle isolated_file_system =
      isolated_context->RegisterFileSystemForPath(
          storage::kFileSystemTypeProvided, url.filesystem_id(), url.path(),
          NULL);
  const base::FilePath isolated_virtual_path =
      isolated_context->CreateVirtualRootPath(isolated_file_system.id())
          .Append(kFilePath.BaseName().value());
  const storage::FileSystemURL isolated_url =
      isolated_context->CreateCrackedFileSystemURL(
          url.origin(),
          storage::kFileSystemTypeIsolated,
          isolated_virtual_path);
  EXPECT_TRUE(isolated_url.is_valid());
  // The parser must see through the isolated layer.
  FileSystemURLParser parser(isolated_url);
  EXPECT_TRUE(parser.Parse());
  ProvidedFileSystemInterface* file_system = parser.file_system();
  ASSERT_TRUE(file_system);
  EXPECT_EQ(kFileSystemId, file_system->GetFileSystemInfo().file_system_id());
  EXPECT_EQ(kFilePath.AsUTF8Unsafe(), parser.file_path().AsUTF8Unsafe());
}
// LocalPathParser should resolve a local (mounted) path back to the
// providing file system and the in-filesystem path.
// Removed a stray debug LOG(ERROR) of the local path that was left in the
// test body.
TEST_F(FileSystemProviderMountPathUtilTest, LocalPathParser) {
  const base::File::Error result =
      file_system_provider_service_->MountFileSystem(
          kProviderId, MountOptions(kFileSystemId, kDisplayName));
  ASSERT_EQ(base::File::FILE_OK, result);
  const ProvidedFileSystemInfo file_system_info =
      file_system_provider_service_
          ->GetProvidedFileSystem(kProviderId, kFileSystemId)
          ->GetFileSystemInfo();
  const base::FilePath kFilePath =
      base::FilePath(FILE_PATH_LITERAL("/hello/world.txt"));
  const base::FilePath kLocalFilePath = file_system_info.mount_path().Append(
      base::FilePath(kFilePath.value().substr(1)));
  LocalPathParser parser(profile_, kLocalFilePath);
  EXPECT_TRUE(parser.Parse());
  ProvidedFileSystemInterface* file_system = parser.file_system();
  ASSERT_TRUE(file_system);
  EXPECT_EQ(kFileSystemId, file_system->GetFileSystemInfo().file_system_id());
  EXPECT_EQ(kFilePath.AsUTF8Unsafe(), parser.file_path().AsUTF8Unsafe());
}
// LocalPathParser must resolve the mount path itself to the file system's
// root path "/".
TEST_F(FileSystemProviderMountPathUtilTest, LocalPathParser_RootPath) {
  const base::File::Error result =
      file_system_provider_service_->MountFileSystem(
          kProviderId, MountOptions(kFileSystemId, kDisplayName));
  ASSERT_EQ(base::File::FILE_OK, result);
  const ProvidedFileSystemInfo file_system_info =
      file_system_provider_service_
          ->GetProvidedFileSystem(kProviderId, kFileSystemId)
          ->GetFileSystemInfo();
  const base::FilePath kFilePath = base::FilePath(FILE_PATH_LITERAL("/"));
  const base::FilePath kLocalFilePath = file_system_info.mount_path();
  LocalPathParser parser(profile_, kLocalFilePath);
  EXPECT_TRUE(parser.Parse());
  ProvidedFileSystemInterface* file_system = parser.file_system();
  ASSERT_TRUE(file_system);
  EXPECT_EQ(kFileSystemId, file_system->GetFileSystemInfo().file_system_id());
  EXPECT_EQ(kFilePath.AsUTF8Unsafe(), parser.file_path().AsUTF8Unsafe());
}
// Paths outside any mount point — an unrelated absolute path, the bare
// "/provided" root, and a relative path — must not parse.
TEST_F(FileSystemProviderMountPathUtilTest, LocalPathParser_WrongPath) {
  {
    const base::FilePath kFilePath =
        base::FilePath(FILE_PATH_LITERAL("/hello"));
    LocalPathParser parser(profile_, kFilePath);
    EXPECT_FALSE(parser.Parse());
  }
  {
    const base::FilePath kFilePath =
        base::FilePath(FILE_PATH_LITERAL("/provided"));
    LocalPathParser parser(profile_, kFilePath);
    EXPECT_FALSE(parser.Parse());
  }
  {
    const base::FilePath kFilePath =
        base::FilePath(FILE_PATH_LITERAL("provided/hello/world"));
    LocalPathParser parser(profile_, kFilePath);
    EXPECT_FALSE(parser.Parse());
  }
}
} // namespace util
} // namespace file_system_provider
} // namespace chromeos
|
/*!
@file
@author Shin'ichiro Nakaoka
*/
#include "SceneGraph.h"
#include "SceneNodeClassRegistry.h"
#include "CloneMap.h"
#include "Exception.h"
#include "UTF8.h"
#include <cnoid/stdx/filesystem>
#include <fmt/format.h>
#include <unordered_map>
#include <typeindex>
#include <mutex>
using namespace std;
using namespace cnoid;
using fmt::format;
namespace filesystem = stdx::filesystem;
namespace {
// Ids to access the corresponding CloneMap flags that globally disable
// cloning of non-node objects / meta-scene nodes for a given clone map.
CloneMap::FlagId DisableNonNodeCloning("SgObjectDisableNonNodeCloning");
CloneMap::FlagId DisableMetaSceneCloning("SgObjectDisableMetaSceneCloning");
// Shared empty box returned by nodes that have no geometry.
const BoundingBox emptyBoundingBox;
}
// Default construction: no attributes set and no valid bounding-box cache.
SgObject::SgObject()
    : attributes_(0),
      hasValidBoundingBoxCache_(false)
{
}
// Copy constructor: attributes and name are copied, the bounding-box cache
// is invalidated, and the URI info (if any) is deep-copied so the copy owns
// its own UriInfo instance.
SgObject::SgObject(const SgObject& org)
    : attributes_(org.attributes_),
      hasValidBoundingBoxCache_(false),
      name_(org.name_)
{
    if(org.uriInfo){
        uriInfo.reset(new UriInfo(*org.uriInfo));
    }
}
// Fallback clone implementation; subclasses override doClone to create an
// instance of their own type.
Referenced* SgObject::doClone(CloneMap*) const
{
    return new SgObject(*this);
}
// Enables / disables cloning of non-node objects for the given clone map.
// Note the flag stores the *disable* state, hence the inversion.
void SgObject::setNonNodeCloning(CloneMap& cloneMap, bool on)
{
    cloneMap.setFlag(DisableNonNodeCloning, !on);
}
// Returns true if non-node objects should be cloned with the given map.
bool SgObject::checkNonNodeCloning(const CloneMap& cloneMap)
{
    return !cloneMap.flag(DisableNonNodeCloning);
}
// Enables / disables cloning of meta-scene nodes for the given clone map.
void SgObject::setMetaSceneCloning(CloneMap& cloneMap, bool on)
{
    cloneMap.setFlag(DisableMetaSceneCloning, !on);
}
// Returns true if meta-scene nodes should be cloned with the given map.
bool SgObject::checkMetaSceneCloning(const CloneMap& cloneMap)
{
    return !cloneMap.flag(DisableMetaSceneCloning);
}
// A plain SgObject has no child objects; container classes override these.
int SgObject::numChildObjects() const
{
    return 0;
}
SgObject* SgObject::childObject(int /* index */)
{
    return nullptr;
}
// Convenience overload: the bounding-box caches are invalidated only when
// the update carries the GeometryModified action.
void SgObject::notifyUpperNodesOfUpdate(SgUpdate& update)
{
    notifyUpperNodesOfUpdate(update, update.hasAction(SgUpdate::GeometryModified));
}
// Propagates the update from this object up through all parents. The update
// path is extended with this object for the duration of the recursion, the
// local bounding-box cache is optionally invalidated, and sigUpdated_ is
// emitted before recursing into the parents.
void SgObject::notifyUpperNodesOfUpdate(SgUpdate& update, bool doInvalidateBoundingBox)
{
    update.pushNode(this);
    if(doInvalidateBoundingBox){
        invalidateBoundingBox();
    }
    sigUpdated_(update);
    for(auto& parent : parents){
        parent->notifyUpperNodesOfUpdate(update, doInvalidateBoundingBox);
    }
    update.popNode();
}
// Registers |parent| as an owner of this object, optionally emitting an
// Added update through the upper nodes. sigGraphConnection_ fires only when
// the first parent appears (the object just joined a graph).
void SgObject::addParent(SgObject* parent, SgUpdateRef update)
{
    parents.insert(parent);
    if(update){
        update->clearPath();
        update->pushNode(this);
        parent->notifyUpperNodesOfUpdate(
            update->withAction(SgUpdate::Added), hasAttribute(Geometry));
    }
    if(parents.size() == 1){
        sigGraphConnection_(true);
    }
}
// Unregisters a parent; sigGraphConnection_ fires when the last parent is
// removed (the object left the graph).
void SgObject::removeParent(SgObject* parent)
{
    parents.erase(parent);
    if(parents.empty()){
        sigGraphConnection_(false);
    }
}
// URI accessors. The UriInfo record is created lazily on first access so
// that a reference can always be returned, even from const objects.
const std::string& SgObject::uri() const
{
    if(!uriInfo){
        uriInfo.reset(new UriInfo);
    }
    return uriInfo->uri;
}
const std::string& SgObject::absoluteUri() const
{
    if(!uriInfo){
        uriInfo.reset(new UriInfo);
    }
    return uriInfo->absoluteUri;
}
const std::string& SgObject::uriFragment() const
{
    if(!uriInfo){
        uriInfo.reset(new UriInfo);
    }
    return uriInfo->fragment;
}
// Sets the URI from |filePath|, resolving a relative path against
// |baseDirectory| (which is itself resolved against the current working
// directory when relative). The absolute URI is stored in file:// form;
// paths are converted from UTF-8 for filesystem use and back for the URI.
void SgObject::setUriByFilePathAndBaseDirectory
(const std::string& filePath, const std::string& baseDirectory)
{
    filesystem::path path(fromUTF8(filePath));
    if(path.is_relative()){
        filesystem::path baseDirPath(fromUTF8(baseDirectory));
        if(baseDirPath.is_relative()){
            baseDirPath = filesystem::current_path() / baseDirPath;
        }
        path = baseDirPath / path;
    }
    setUri(filePath, format("file://{0}", toUTF8(path.generic_string())));
}
// Same as above, but relative paths are resolved against the current
// working directory only.
void SgObject::setUriByFilePathAndCurrentDirectory(const std::string& filePath)
{
    filesystem::path path(fromUTF8(filePath));
    if(path.is_relative()){
        path = filesystem::current_path() / path;
    }
    setUri(filePath, format("file://{0}", toUTF8(path.generic_string())));
}
// Stores the raw URI and the absolute URI; a "file://" scheme prefix is
// prepended to |absoluteUri| when missing.
// NOTE(review): an absolute URI with another scheme (e.g. "http://...")
// would also get a "file://" prefix here — confirm callers only pass file
// paths or already-prefixed file URIs.
void SgObject::setUri(const std::string& uri, const std::string& absoluteUri)
{
    if(!uriInfo){
        uriInfo.reset(new UriInfo);
    }
    uriInfo->uri = uri;
    if(absoluteUri.compare(0, 7, "file://") == 0){
        uriInfo->absoluteUri = absoluteUri;
    } else {
        uriInfo->absoluteUri = format("file://{0}", absoluteUri);
    }
}
// Stores the URI fragment identifier.
void SgObject::setUriFragment(const std::string& fragment)
{
    if(!uriInfo){
        uriInfo.reset(new UriInfo);
    }
    uriInfo->fragment = fragment;
}
// Thin forwarding wrappers around the global SceneNodeClassRegistry.
int SgNode::findSuperClassId(int classId)
{
    return SceneNodeClassRegistry::instance().getSuperClassId(classId);
}
int SgNode::findClassId(const std::type_info& nodeType)
{
    return SceneNodeClassRegistry::instance().getClassId(nodeType);
}
int SgNode::registerNodeType(const std::type_info& nodeType, const std::type_info& superType)
{
    return SceneNodeClassRegistry::instance().registerClassAsTypeInfo(nodeType, superType);
}
// Default constructor: registers as a plain node with the SgNode class id.
SgNode::SgNode()
{
    setAttribute(Node);
    classId_ = findClassId<SgNode>();
    decorationRefCounter = 0;
}
// Constructor for subclasses that pass their own registered class id.
SgNode::SgNode(int classId)
    : classId_(classId)
{
    setAttribute(Node);
    decorationRefCounter = 0;
}
// Copy constructor: the decoration reference counter is not copied.
SgNode::SgNode(const SgNode& org)
    : SgObject(org),
      classId_(org.classId_)
{
    decorationRefCounter = 0;
}
SgNode::~SgNode()
{
}
Referenced* SgNode::doClone(CloneMap*) const
{
    return new SgNode(*this);
}
// Returns the registered class name for this node's class id.
std::string SgNode::className() const
{
    return SceneNodeClassRegistry::instance().getClassName(classId_);
}
// A plain node has no geometry; both bounding boxes are the shared empty box.
const BoundingBox& SgNode::boundingBox() const
{
    return emptyBoundingBox;
}
const BoundingBox& SgNode::untransformedBoundingBox() const
{
    return boundingBox();
}
/**
   \note The current implementation of this function does not seem to return the correct T value
*/
// Depth-first search for a node named |name| in the subtree rooted at
// |node|. The root-to-node path is accumulated in |path| (entries are
// popped again on backtracking, so |path| is left unchanged when the name
// is not found in this subtree). For transform groups the local transform
// is folded into T *before* the name check, which is one reason the
// resulting out_T may not be what callers expect (see the note above).
static bool findNodeSub(SgNode* node, const std::string& name, SgNodePath& path, Affine3 T, Affine3& out_T)
{
    path.push_back(node);
    if(auto group = dynamic_cast<SgGroup*>(node)){
        if(auto transform = dynamic_cast<SgTransform*>(group)){
            Affine3 T0;
            transform->getTransform(T0);
            T = T * T0;
        }
        if(node->name() == name){
            out_T = T;
            return true;
        }
        for(auto& child : *group){
            if(findNodeSub(child, name, path, T, out_T)){
                return true;
            }
        }
    } else {
        if(node->name() == name){
            out_T = T;
            return true;
        }
    }
    path.pop_back();
    return false;
}
// Searches the subtree rooted at this node for a node named |name|.
// Returns the path from this node to the found node (empty when not found)
// and the accumulated transform in out_T (subject to the correctness note
// on findNodeSub).
SgNodePath SgNode::findNode(const std::string& name, Affine3& out_T)
{
    SgNodePath path;
    out_T.setIdentity();
    findNodeSub(this, name, path, out_T, out_T);
    return path;
}
// Default constructor: registers with the SgGroup class id.
SgGroup::SgGroup()
    : SgNode(findClassId<SgGroup>())
{
    setAttribute(GroupNode);
}
// Constructor for subclasses passing their own class id.
SgGroup::SgGroup(int classId)
    : SgNode(classId)
{
    setAttribute(GroupNode);
}
// Copy constructor. With a clone map the children are deep-copied (nodes
// with the MetaScene attribute are skipped when meta-scene cloning is
// disabled on the map); without a map the children are shared (shallow
// copy). A valid bounding-box cache is carried over from the original.
SgGroup::SgGroup(const SgGroup& org, CloneMap* cloneMap)
    : SgNode(org)
{
    children.reserve(org.numChildren());
    if(cloneMap){
        // deep copy
        if(checkMetaSceneCloning(*cloneMap)){
            for(auto& child : org){
                addChild(cloneMap->getClone<SgNode>(child));
            }
        } else {
            for(auto& child : org){
                if(!child->hasAttribute(MetaScene)){
                    addChild(cloneMap->getClone<SgNode>(child));
                }
            }
        }
    } else {
        // shallow copy
        /**
           \todo Stop the shallow copy of the child nodes.
           Only the attributes of this node should be copied when the clone map is not used.
        */
        for(auto& child : org){
            addChild(child);
        }
    }
    if(org.hasValidBoundingBoxCache()){
        bboxCache = org.bboxCache;
        setBoundingBoxCacheReady();
    }
}
// Detach this group from all of its children before destruction so that no
// child keeps a dangling parent entry.
SgGroup::~SgGroup()
{
    for(auto& child : children){
        child->removeParent(this);
    }
}
Referenced* SgGroup::doClone(CloneMap* cloneMap) const
{
    return new SgGroup(*this, cloneMap);
}
// Child-object interface: a group's child objects are its child nodes.
int SgGroup::numChildObjects() const
{
    return children.size();
}
SgObject* SgGroup::childObject(int index)
{
    return children[index].get();
}
// Returns the union of the children's bounding boxes, skipping nodes with
// the Marker attribute. The result is cached until invalidated.
const BoundingBox& SgGroup::boundingBox() const
{
    if(hasValidBoundingBoxCache()){
        return bboxCache;
    }
    bboxCache.clear();
    for(auto& child : *this){
        if(!child->hasAttribute(Marker)){
            bboxCache.expandBy(child->boundingBox());
        }
    }
    setBoundingBoxCacheReady();
    return bboxCache;
}
// Returns true if |node| is a direct child of this group.
bool SgGroup::contains(SgNode* node) const
{
    for(auto& child : children){
        if(child == node){
            return true;
        }
    }
    return false;
}
// Returns the index of |child| among the direct children, or -1 if absent.
int SgGroup::findChildIndex(SgNode* child) const
{
    const int n = static_cast<int>(children.size());
    for(int i = 0; i < n; ++i){
        if(children[i] == child){
            return i;
        }
    }
    return -1;
}
// Appends |node| as the last child and registers this group as its parent;
// a null node is silently ignored.
void SgGroup::addChild(SgNode* node, SgUpdateRef update)
{
    if(node){
        children.push_back(node);
        node->addParent(this, update);
    }
}
// Adds |node| only when it is not already a direct child.
// Returns true if the add was performed.
bool SgGroup::addChildOnce(SgNode* node, SgUpdateRef update)
{
    if(contains(node)){
        return false;
    }
    addChild(node, update);
    return true;
}
// Inserts |node| before position |index|; an index past the end appends.
// NOTE(review): a negative index is not clamped and would be invalid —
// callers are expected to pass index >= 0.
void SgGroup::insertChild(int index, SgNode* node, SgUpdateRef update)
{
    if(node){
        if(index > static_cast<int>(children.size())){
            index = children.size();
        }
        children.insert(children.begin() + index, node);
        node->addParent(this, update);
    }
}
// Inserts |node| just before |nextNode|. When |nextNode| is not a direct
// child, the node is inserted at the front of the child list.
void SgGroup::insertChild(SgNode* nextNode, SgNode* node, SgUpdateRef update)
{
    int index = findChildIndex(nextNode);
    if(index >= 0){
        insertChild(index, node, update);
    } else {
        insertChild(0, node, update);
    }
}
// Makes |node| the only child. All existing children other than |node| are
// removed (iterating from the back so indices stay valid); when |node| was
// already a child it is kept as-is, so no re-add and no Added notification
// occurs for it.
void SgGroup::setSingleChild(SgNode* node, SgUpdateRef update)
{
    int n = numChildren();
    if(n > 0){
        bool found = false;
        for(int i = n - 1; i >= 0; --i){
            if(child(i) == node && !found){
                found = true;
                continue;
            }
            removeChildAt(i, update);
        }
        if(!empty()){
            // |node| survived the sweep; nothing more to do.
            return;
        }
    }
    addChild(node, update);
}
// Removes the child at |childIter| and returns the iterator to the next
// child. When an update is requested, a local smart-pointer holder keeps
// the child alive while the Removed notification is delivered to the upper
// nodes.
SgGroup::iterator SgGroup::removeChild(iterator childIter, SgUpdateRef update)
{
    iterator next;
    SgNode* child = *childIter;
    // Detach the parent link before erasing so graph-connection signals
    // fire with the container already consistent.
    child->removeParent(this);
    if(!update){
        next = children.erase(childIter);
    } else {
        SgNodePtr childHolder = child;
        next = children.erase(childIter);
        update->clearPath();
        update->pushNode(child);
        notifyUpperNodesOfUpdate(
            update->withAction(SgUpdate::Removed), child->hasAttribute(Geometry));
    }
    return next;
}
// Removes every occurrence of |node| from the direct children.
// Returns true if at least one occurrence was removed.
bool SgGroup::removeChild(SgNode* node, SgUpdateRef update)
{
    bool removed = false;
    if(node){
        iterator p = children.begin();
        while(p != children.end()){
            if((*p) == node){
                p = removeChild(p, update);
                removed = true;
            } else {
                ++p;
            }
        }
    }
    return removed;
}
// Removes the child at position |index| (must be a valid index).
void SgGroup::removeChildAt(int index, SgUpdateRef update)
{
    removeChild(children.begin() + index, update);
}
// Removes all children one by one; each removal may emit a Removed update.
void SgGroup::clearChildren(SgUpdateRef update)
{
    iterator p = children.begin();
    while(p != children.end()){
        p = removeChild(p, update);
    }
}
// Adds every child of this group to |group| as well, so the children become
// shared between the two groups.
// NOTE(review): children.size() is re-evaluated every iteration, so calling
// this with group == this would never terminate — confirm callers never do
// that.
void SgGroup::copyChildrenTo(SgGroup* group, SgUpdateRef update)
{
    for(size_t i=0; i < children.size(); ++i){
        group->addChild(child(i), update);
    }
}
// Moves all children of this group into |group|: the children are first
// added to the destination without notification, then removed here, and
// finally an Added update is emitted for each child that arrived in the
// destination.
void SgGroup::moveChildrenTo(SgGroup* group, SgUpdateRef update)
{
    const int destTop = group->children.size();
    for(size_t i=0; i < children.size(); ++i){
        group->addChild(child(i));
    }
    clearChildren(update);
    if(update){
        update->setAction(SgUpdate::Added);
        for(int i=destTop; i < group->numChildren(); ++i){
            update->clearPath();
            group->child(i)->notifyUpdate(*update);
        }
    }
}
// Inserts |group| between this group and its current children: the children
// are moved into |group|, which then becomes the single child.
void SgGroup::insertChainedGroup(SgGroup* group, SgUpdateRef update)
{
    moveChildrenTo(group);
    addChild(group, update);
}
// Returns the next link of a group chain: the single child when it is
// itself an SgGroup, otherwise null.
SgGroup* SgGroup::nextChainedGroup()
{
    if(children.size() != 1){
        return nullptr;
    }
    return dynamic_cast<SgGroup*>(children.front().get());
}
// Removes |group| from the chain of single-child groups starting at this
// group, splicing its children back in, and notifies upper nodes of the
// removal.
// NOTE(review): |parent| is initialized to this and never advanced while
// the chain is walked, so when |group| sits deeper than one level,
// parent->removeChild(group) removes nothing and the children are moved
// into this group rather than into group's real parent — confirm whether
// chains deeper than one level can occur here.
void SgGroup::removeChainedGroup(SgGroup* group, SgUpdateRef update)
{
    SgGroup* parent = this;
    auto next = nextChainedGroup();
    while(next){
        if(next == group){
            parent->removeChild(group);
            group->moveChildrenTo(parent);
            if(update){
                update->clearPath();
                update->pushNode(group);
                notifyUpperNodesOfUpdate(
                    update->withAction(SgUpdate::Removed), group->hasAttribute(Geometry));
            }
            break;
        }
        next = next->nextChainedGroup();
    }
}
// Out-of-line helper that raises the exception used by typed child access.
void SgGroup::throwTypeMismatchError()
{
    throw type_mismatch_error();
}
// SgInvariantGroup: a group node with its own registered class id; adds no
// state beyond SgGroup.
SgInvariantGroup::SgInvariantGroup()
    : SgGroup(findClassId<SgInvariantGroup>())
{
}
SgInvariantGroup::SgInvariantGroup(const SgInvariantGroup& org, CloneMap* cloneMap)
    : SgGroup(org, cloneMap)
{
}
Referenced* SgInvariantGroup::doClone(CloneMap* cloneMap) const
{
    return new SgInvariantGroup(*this, cloneMap);
}
// Abstract base for transform nodes; marked as both a transform node and a
// geometry-affecting node.
SgTransform::SgTransform(int classId)
    : SgGroup(classId)
{
    setAttributes(TransformNode | Geometry);
}
SgTransform::SgTransform(const SgTransform& org, CloneMap* cloneMap)
    : SgGroup(org, cloneMap)
{
    untransformedBboxCache = org.untransformedBboxCache;
}
// Returns the children's bounding box before this node's transform is
// applied. Calling boundingBox() as a side effect refreshes both caches.
const BoundingBox& SgTransform::untransformedBoundingBox() const
{
    if(!hasValidBoundingBoxCache()){
        boundingBox();
    }
    return untransformedBboxCache;
}
// Rigid (position/orientation) transform node; T_ defaults to identity.
SgPosTransform::SgPosTransform(int classId)
    : SgTransform(classId),
      T_(Isometry3::Identity())
{
}
SgPosTransform::SgPosTransform()
    : SgPosTransform(findClassId<SgPosTransform>())
{
}
SgPosTransform::SgPosTransform(const Isometry3& T)
    : SgTransform(findClassId<SgPosTransform>()),
      T_(T)
{
}
// Constructs from a general affine transform; only its matrix is stored
// (the isometry property is assumed by the caller).
SgPosTransform::SgPosTransform(const Affine3& T)
    : SgTransform(findClassId<SgPosTransform>()),
      T_(T.matrix())
{
}
SgPosTransform::SgPosTransform(const SgPosTransform& org, CloneMap* cloneMap)
    : SgTransform(org, cloneMap),
      T_(org.T_)
{
}
Referenced* SgPosTransform::doClone(CloneMap* cloneMap) const
{
    return new SgPosTransform(*this, cloneMap);
}
// Returns the union of the children's bounding boxes transformed by T_,
// skipping marker nodes; also refreshes untransformedBboxCache. The result
// is cached until invalidated.
const BoundingBox& SgPosTransform::boundingBox() const
{
    if(hasValidBoundingBoxCache()){
        return bboxCache;
    }
    bboxCache.clear();
    for(auto& child : *this){
        if(!child->hasAttribute(Marker)){
            bboxCache.expandBy(child->boundingBox());
        }
    }
    untransformedBboxCache = bboxCache;
    bboxCache.transform(T_);
    setBoundingBoxCacheReady();
    return bboxCache;
}
// Exposes the local transform as a general affine matrix.
void SgPosTransform::getTransform(Affine3& out_T) const
{
    out_T = T_;
}
// Axis-aligned scaling node; the scale factors default to (1, 1, 1).
SgScaleTransform::SgScaleTransform(int classId)
    : SgTransform(classId)
{
    scale_.setOnes();
}
SgScaleTransform::SgScaleTransform()
    : SgScaleTransform(findClassId<SgScaleTransform>())
{
}
// Uniform scaling convenience constructor.
SgScaleTransform::SgScaleTransform(double scale)
    : SgTransform(findClassId<SgScaleTransform>()),
      scale_(scale, scale, scale)
{
}
SgScaleTransform::SgScaleTransform(const Vector3& scale)
    : SgTransform(findClassId<SgScaleTransform>()),
      scale_(scale)
{
}
SgScaleTransform::SgScaleTransform(const SgScaleTransform& org, CloneMap* cloneMap)
    : SgTransform(org, cloneMap),
      scale_(org.scale_)
{
}
Referenced* SgScaleTransform::doClone(CloneMap* cloneMap) const
{
    return new SgScaleTransform(*this, cloneMap);
}
// Returns the union of the children's bounding boxes scaled by scale_,
// skipping marker nodes; also refreshes untransformedBboxCache. The result
// is cached until invalidated.
const BoundingBox& SgScaleTransform::boundingBox() const
{
    if(hasValidBoundingBoxCache()){
        return bboxCache;
    }
    bboxCache.clear();
    for(auto& child : *this){
        if(!child->hasAttribute(Marker)){
            bboxCache.expandBy(child->boundingBox());
        }
    }
    untransformedBboxCache = bboxCache;
    bboxCache.transform(Affine3(scale_.asDiagonal()));
    setBoundingBoxCacheReady();
    return bboxCache;
}
// Exposes the scaling as an affine matrix with the scale on the diagonal.
void SgScaleTransform::getTransform(Affine3& out_T) const
{
    out_T = scale_.asDiagonal();
}
// General affine transform node; T_ defaults to identity.
SgAffineTransform::SgAffineTransform(int classId)
    : SgTransform(classId),
      T_(Affine3::Identity())
{
}
SgAffineTransform::SgAffineTransform()
    : SgAffineTransform(findClassId<SgAffineTransform>())
{
}
SgAffineTransform::SgAffineTransform(const Affine3& T)
    : SgTransform(findClassId<SgAffineTransform>()),
      T_(T)
{
}
SgAffineTransform::SgAffineTransform(const SgAffineTransform& org, CloneMap* cloneMap)
    : SgTransform(org, cloneMap),
      T_(org.T_)
{
}
Referenced* SgAffineTransform::doClone(CloneMap* cloneMap) const
{
    return new SgAffineTransform(*this, cloneMap);
}
// Returns the union of the children's bounding boxes transformed by T_,
// skipping marker nodes; also refreshes untransformedBboxCache. The result
// is cached until invalidated.
// Fixed an inconsistency: the loop declared `auto& node = *p;` but then
// expanded with `(*p)->boundingBox()`; the named reference is now used
// consistently, matching the sibling boundingBox() implementations.
const BoundingBox& SgAffineTransform::boundingBox() const
{
    if(hasValidBoundingBoxCache()){
        return bboxCache;
    }
    bboxCache.clear();
    for(const_iterator p = begin(); p != end(); ++p){
        auto& node = *p;
        if(!node->hasAttribute(Marker)){
            bboxCache.expandBy(node->boundingBox());
        }
    }
    untransformedBboxCache = bboxCache;
    bboxCache.transform(T_);
    setBoundingBoxCacheReady();
    return bboxCache;
}
// Exposes the local affine transform.
void SgAffineTransform::getTransform(Affine3& out_T) const
{
    out_T = T_;
}
// Group node carrying a pixel size ratio for its subtree; the ratio
// defaults to 1.0.
SgFixedPixelSizeGroup::SgFixedPixelSizeGroup()
    : SgFixedPixelSizeGroup(1.0)
{
}
SgFixedPixelSizeGroup::SgFixedPixelSizeGroup(double pixelSizeRatio)
    : SgGroup(findClassId<SgFixedPixelSizeGroup>())
{
    pixelSizeRatio_ = pixelSizeRatio;
}
SgFixedPixelSizeGroup::SgFixedPixelSizeGroup(int classId)
    : SgGroup(classId)
{
    pixelSizeRatio_ = 1.0;
}
SgFixedPixelSizeGroup::SgFixedPixelSizeGroup(const SgFixedPixelSizeGroup& org, CloneMap* cloneMap)
    : SgGroup(org, cloneMap)
{
    pixelSizeRatio_ = org.pixelSizeRatio_;
}
Referenced* SgFixedPixelSizeGroup::doClone(CloneMap* cloneMap) const
{
    return new SgFixedPixelSizeGroup(*this, cloneMap);
}
// Boolean switch object that switchable groups can reference
// (see SgSwitchableGroup::setSwitch).
SgSwitch::SgSwitch(bool on)
{
    isTurnedOn_ = on;
}
SgSwitch::SgSwitch(const SgSwitch& org)
    : SgObject(org)
{
    isTurnedOn_ = org.isTurnedOn_;
}
// Creates a copy of this switch. The clone map is not needed because
// SgSwitch holds no child objects to remap; the parameter is left unnamed
// to avoid an unused-parameter warning, matching SgObject::doClone.
Referenced* SgSwitch::doClone(CloneMap*) const
{
    return new SgSwitch(*this);
}
// Changes the switch state; a Modified update is emitted only when the
// state actually changes and an update object is supplied.
void SgSwitch::setTurnedOn(bool on, SgUpdateRef update)
{
    if(on != isTurnedOn_){
        isTurnedOn_ = on;
        if(update){
            notifyUpdate(update->withAction(SgUpdate::Modified));
        }
    }
}
// Group whose subtree can be turned on/off, either via a local flag or via
// a referenced SgSwitch object.
SgSwitchableGroup::SgSwitchableGroup()
    : SgGroup(findClassId<SgSwitchableGroup>())
{
    isTurnedOn_ = true;
}
SgSwitchableGroup::SgSwitchableGroup(SgSwitch* switchObject)
    : SgSwitchableGroup()
{
    setSwitch(switchObject);
}
// Copy constructor: the switch object is deep-copied when a clone map is
// given, otherwise shared with the original.
SgSwitchableGroup::SgSwitchableGroup(const SgSwitchableGroup& org, CloneMap* cloneMap)
    : SgGroup(org, cloneMap)
{
    if(org.switchObject){
        if(cloneMap){
            switchObject = cloneMap->getClone<SgSwitch>(org.switchObject);
        } else {
            switchObject = org.switchObject;
        }
    }
    isTurnedOn_ = org.isTurnedOn_;
}
// Detach from the referenced switch so it does not keep a dangling parent.
SgSwitchableGroup::~SgSwitchableGroup()
{
    if(switchObject){
        switchObject->removeParent(this);
    }
}
// Replaces the referenced switch object (null detaches), maintaining the
// parent links on both the old and the new switch.
void SgSwitchableGroup::setSwitch(SgSwitch* newSwitchObject)
{
    if(switchObject){
        switchObject->removeParent(this);
    }
    switchObject = newSwitchObject;
    if(newSwitchObject){
        newSwitchObject->addParent(this);
    }
}
Referenced* SgSwitchableGroup::doClone(CloneMap* cloneMap) const
{
    return new SgSwitchableGroup(*this, cloneMap);
}
// Turns the subtree on or off. When a switch object is attached, the state
// change (and its notification) is delegated to it; otherwise the local
// flag is updated and a Modified update is emitted on change.
void SgSwitchableGroup::setTurnedOn(bool on, SgUpdateRef update)
{
    if(switchObject){
        switchObject->setTurnedOn(on, update);
    } else if(on != isTurnedOn_){
        isTurnedOn_ = on;
        if(update){
            notifyUpdate(update->withAction(SgUpdate::Modified));
        }
    }
}
// Group node whose children are excluded from picking operations.
SgUnpickableGroup::SgUnpickableGroup()
: SgGroup(findClassId<SgUnpickableGroup>())
{
}
// Copy constructor; children are handled by the SgGroup base.
SgUnpickableGroup::SgUnpickableGroup(const SgUnpickableGroup& org, CloneMap* cloneMap)
: SgGroup(org, cloneMap)
{
}
Referenced* SgUnpickableGroup::doClone(CloneMap* cloneMap) const
{
return new SgUnpickableGroup(*this, cloneMap);
}
// Base node type for nodes that require a preprocessing traversal.
SgPreprocessed::SgPreprocessed(int classId)
: SgNode(classId)
{
}
// Copy constructor; all state lives in the SgNode base.
SgPreprocessed::SgPreprocessed(const SgPreprocessed& org)
: SgNode(org)
{
}
namespace {
// Registers all basic scene node classes with the global class registry.
// The registration runs once, at static-initialization time, via the
// file-local 'registration' object below.
struct NodeClassRegistration {
NodeClassRegistration() {
SceneNodeClassRegistry::instance()
.registerClass<SgNode>("SgNode")
.registerClass<SgGroup, SgNode>("SgGroup")
.registerClass<SgInvariantGroup, SgGroup>("SgInvariantGroup")
.registerClass<SgTransform, SgGroup>("SgTransform")
.registerClass<SgAffineTransform, SgTransform>("SgAffineTransform")
.registerClass<SgPosTransform, SgTransform>("SgPosTransform")
.registerClass<SgScaleTransform, SgTransform>("SgScaleTransform")
.registerClass<SgFixedPixelSizeGroup, SgGroup>("SgFixedPixelSizeGroup")
.registerClass<SgSwitchableGroup, SgGroup>("SgSwitchableGroup")
.registerClass<SgUnpickableGroup, SgGroup>("SgUnpickableGroup")
.registerClass<SgPreprocessed, SgNode>("SgPreprocessed");
}
} registration;
}
|
#ifndef SOUNDDRIVER_HH
#define SOUNDDRIVER_HH
#include <cstdint>
namespace openmsx {
/** Abstract interface for platform sound back-ends. Implementations own
  * the audio device and receive mixed sample data via uploadBuffer().
  */
class SoundDriver
{
public:
virtual ~SoundDriver() = default;
/** Mute the sound system
*/
virtual void mute() = 0;
/** Unmute the sound system
*/
virtual void unmute() = 0;
/** Returns the actual sample frequency. This might be different
* from the requested frequency ('frequency' setting).
*/
virtual unsigned getFrequency() const = 0;
/** Get the number of samples that should be created 'per fragment'.
* This is not the same value as the 'samples setting'.
*/
virtual unsigned getSamples() const = 0;
/** Upload 'len' samples of mixed audio data to the device.
* 'buffer' points to interleaved 16-bit samples.
*/
virtual void uploadBuffer(int16_t* buffer, unsigned len) = 0;
protected:
SoundDriver() = default;
};
} // namespace openmsx
#endif
|
#include<bits/stdc++.h>
#include"../include/input.h"
using namespace std;
// Adjacency list for up to 100 vertices.
vector<int> adj[100];
// Number of vertices in the graph.
int n;
// Passed to the input helper; 0 selects an undirected graph.
bool flag=0;
// DFS bookkeeping: visited marks, entry times (tin) and low-link values.
vector<bool> visited;
vector<int> tin, low;
int timer;
// DFS that prints the articulation points (cut vertices) of the component
// containing v, using entry times (tin) and low-link values (low).
//
// adj : adjacency list, n : vertex count, v : current vertex,
// p : DFS parent of v (-1 for the root).
//
// A non-root v is a cut vertex when some DFS child c has low[c] >= tin[v];
// the root is a cut vertex when it has more than one DFS child.
void dfs(vector<int>adj[], int n, int v, int p = -1) {
    visited[v] = true;
    tin[v] = low[v] = timer++;
    int children = 0;
    // Print v at most once even if several children satisfy the condition.
    bool reported = false;
    for (size_t idx = 0; idx < adj[v].size(); ++idx) {
        int to = adj[v][idx];
        if (to == p) continue;
        if (visited[to]) {
            // Back edge: v can reach an ancestor entered at tin[to].
            low[v] = min(low[v], tin[to]);
        } else {
            dfs(adj, n, to, v);
            low[v] = min(low[v], low[to]);
            if (low[to] >= tin[v] && p != -1 && !reported) {
                cout << v << " ";
                reported = true;
            }
            ++children;
        }
    }
    // Root case: a cut vertex iff it has more than one DFS child.
    if (p == -1 && children > 1)
        cout << v << " ";
}
// Prints every articulation point of the graph to stdout.
// Resets the global DFS bookkeeping, then starts a DFS from each
// still-unvisited vertex so disconnected graphs are fully covered.
void find_cutpoints(vector<int>adj[],int n) {
    timer = 0;
    tin.assign(n, -1);
    low.assign(n, -1);
    visited.assign(n, false);
    for (int v = 0; v < n; ++v) {
        if (!visited[v])
            dfs(adj, n, v);
    }
}
int main()
{
// Read the graph through the shared input helper declared in input.h.
get_graph(adj,n,flag);//flag for undirected or directed
cout<<"OUTPUT: "<<endl;
find_cutpoints(adj,n);
return 0;
}
|
/*
* BigInt Assignment Operators
* (C) 1999-2007 Jack Lloyd
*
* Distributed under the terms of the Botan license
*/
#include <botan/bigint.h>
#include <botan/internal/mp_core.h>
#include <botan/internal/bit_ops.h>
#include <algorithm>
namespace Botan {
/*
* Addition Operator
*/
/*
* Addition Operator
*
* Sign-magnitude addition: equal signs add magnitudes; opposite signs
* subtract the smaller magnitude from the larger and take the sign of
* the larger operand.
*/
BigInt& BigInt::operator+=(const BigInt& y)
   {
   const size_t x_sw = sig_words(), y_sw = y.sig_words();

   const size_t reg_size = std::max(x_sw, y_sw) + 1;
   grow_to(reg_size);

   if(sign() == y.sign())
      bigint_add2(get_reg(), reg_size - 1, y.data(), y_sw);
   else
      {
      s32bit relative_size = bigint_cmp(data(), x_sw, y.data(), y_sw);

      if(relative_size < 0)
         {
         // |y| > |x|: compute |y| - |x| in a temporary, then copy back.
         // (Fixed: '&reg[0]' had been mangled into the '®' character by
         // HTML-entity decoding of '&reg', which does not compile.)
         SecureVector<word> z(reg_size - 1);
         bigint_sub3(z, y.data(), reg_size - 1, data(), x_sw);
         copy_mem(&reg[0], &z[0], z.size());
         set_sign(y.sign());
         }
      else if(relative_size == 0)
         {
         // |x| == |y| with opposite signs: result is canonical zero.
         zeroise(reg);
         set_sign(Positive);
         }
      else if(relative_size > 0)
         bigint_sub2(get_reg(), x_sw, y.data(), y_sw);
      }

   return (*this);
   }
/*
* Subtraction Operator
*/
// Sign-magnitude subtraction; the three branches distinguish |x|<|y|,
// |x|==|y| and |x|>|y|.
BigInt& BigInt::operator-=(const BigInt& y)
{
const size_t x_sw = sig_words(), y_sw = y.sig_words();
s32bit relative_size = bigint_cmp(data(), x_sw, y.data(), y_sw);
const size_t reg_size = std::max(x_sw, y_sw) + 1;
grow_to(reg_size);
if(relative_size < 0)
{
// |x| < |y|: result takes the reversed sign of y.
if(sign() == y.sign())
bigint_sub2_rev(get_reg(), y.data(), y_sw);
else
bigint_add2(get_reg(), reg_size - 1, y.data(), y_sw);
set_sign(y.reverse_sign());
}
else if(relative_size == 0)
{
if(sign() == y.sign())
{
// x == y exactly: canonical zero.
clear();
set_sign(Positive);
}
else
// x == -y: result is 2*x, computed as a left shift by one bit.
bigint_shl1(get_reg(), x_sw, 0, 1);
}
else if(relative_size > 0)
{
if(sign() == y.sign())
bigint_sub2(get_reg(), x_sw, y.data(), y_sw);
else
bigint_add2(get_reg(), reg_size - 1, y.data(), y_sw);
}
return (*this);
}
/*
* Multiplication Operator
*/
// Sign-magnitude multiplication with fast paths for zero and single-word
// operands; the general case uses the full multiprecision multiply.
BigInt& BigInt::operator*=(const BigInt& y)
{
const size_t x_sw = sig_words(), y_sw = y.sig_words();
set_sign((sign() == y.sign()) ? Positive : Negative);
if(x_sw == 0 || y_sw == 0)
{
// Anything times zero is canonical zero.
clear();
set_sign(Positive);
}
else if(x_sw == 1 && y_sw)
{
// Single-word x: linear multiply of y by that word.
grow_to(y_sw + 2);
bigint_linmul3(get_reg(), y.data(), y_sw, word_at(0));
}
else if(y_sw == 1 && x_sw)
{
// Single-word y: in-place linear multiply.
grow_to(x_sw + 2);
bigint_linmul2(get_reg(), x_sw, y.word_at(0));
}
else
{
// General case: copy x aside since the result overwrites this register.
grow_to(size() + y.size());
SecureVector<word> z(data(), x_sw);
SecureVector<word> workspace(size());
bigint_mul(get_reg(), size(), workspace,
z, z.size(), x_sw,
y.data(), y.size(), y_sw);
}
return (*this);
}
/*
* Division Operator
*/
// Division; a single-word power-of-two divisor is reduced to a right shift.
// NOTE(review): the shift fast path assumes shifting the magnitude matches
// operator/'s rounding for negative operands — verify for negative *this.
BigInt& BigInt::operator/=(const BigInt& y)
{
if(y.sig_words() == 1 && power_of_2(y.word_at(0)))
(*this) >>= (y.bits() - 1);
else
(*this) = (*this) / y;
return (*this);
}
/*
* Modulo Operator
*/
// Reduction modulo another BigInt, delegating to the binary operator%.
BigInt& BigInt::operator%=(const BigInt& mod)
{
return (*this = (*this) % mod);
}
/*
* Modulo Operator
*/
/*
* Modulo Operator (single word)
*
* Reduces *this modulo 'mod' and leaves the (always non-negative)
* remainder in *this; also returns it. Throws DivideByZero for mod == 0.
*
* Fix: the old power-of-2 fast path returned the raw low bits and left the
* sign untouched, so a negative value produced a wrong/negative result,
* inconsistent with the general path. Both paths now share the same
* negative-value correction (r -> mod - r) and sign reset.
*/
word BigInt::operator%=(word mod)
   {
   if(mod == 0)
      throw BigInt::DivideByZero();

   word remainder = 0;

   if(power_of_2(mod))
      {
      // Power of 2: the remainder is just the low bits of the magnitude.
      remainder = (word_at(0) & (mod - 1));
      }
   else
      {
      // General case: fold the words from most to least significant.
      for(size_t j = sig_words(); j > 0; --j)
         remainder = bigint_modop(remainder, word_at(j-1), mod);
      }

   // Map a negative value to its positive representative mod 'mod'.
   if(remainder && sign() == BigInt::Negative)
      remainder = mod - remainder;

   clear();
   grow_to(2);
   get_reg()[0] = remainder;
   set_sign(BigInt::Positive);
   return word_at(0);
   }
/*
* Left Shift Operator
*/
// Left shift of the magnitude by 'shift' bits; the sign is unchanged.
BigInt& BigInt::operator<<=(size_t shift)
{
if(shift)
{
// Split the shift into whole words plus remaining bits, and grow the
// register enough to hold the shifted-in words.
const size_t shift_words = shift / MP_WORD_BITS,
shift_bits = shift % MP_WORD_BITS,
words = sig_words();
grow_to(words + shift_words + (shift_bits ? 1 : 0));
bigint_shl1(get_reg(), words, shift_words, shift_bits);
}
return (*this);
}
/*
* Right Shift Operator
*/
// Right shift of the magnitude by 'shift' bits; a result of zero is
// normalized to the canonical positive zero.
BigInt& BigInt::operator>>=(size_t shift)
{
if(shift)
{
const size_t shift_words = shift / MP_WORD_BITS,
shift_bits = shift % MP_WORD_BITS;
bigint_shr1(get_reg(), sig_words(), shift_words, shift_bits);
if(is_zero())
set_sign(Positive);
}
return (*this);
}
}
|
// Tencent is pleased to support the open source community by making sluaunreal available.
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the BSD 3-Clause License (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy of the License at
// https://opensource.org/licenses/BSD-3-Clause
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4018)
#endif
#include "LuaVar.h"
#include "UObject/UObjectGlobals.h"
#include "UObject/Class.h"
#include "UObject/UnrealType.h"
#include "UObject/Stack.h"
#include "Blueprint/WidgetTree.h"
#include "LuaState.h"
namespace NS_SLUA {
// Sentinel meaning "not attached to any LuaState".
const int INVALID_INDEX = -1;
// Default: a nil value bound to no state.
LuaVar::LuaVar()
:stateIndex(INVALID_INDEX)
{
vars = nullptr;
numOfVar = 0;
}
// Integer value.
LuaVar::LuaVar(lua_Integer v)
:LuaVar()
{
set(v);
}
// int/size_t values are widened to lua_Integer.
LuaVar::LuaVar(int v)
:LuaVar()
{
set((lua_Integer)v);
}
LuaVar::LuaVar(size_t v)
:LuaVar()
{
set((lua_Integer)v);
}
// Floating-point value.
LuaVar::LuaVar(lua_Number v)
:LuaVar()
{
set(v);
}
// Boolean value.
LuaVar::LuaVar(bool v)
:LuaVar()
{
set(v);
}
// NUL-terminated string; length is taken via strlen.
LuaVar::LuaVar(const char* v)
:LuaVar()
{
set(v,strlen(v));
}
// String with explicit length (may contain embedded NULs).
LuaVar::LuaVar(const char* v,size_t len)
: LuaVar()
{
set(v, len);
}
// Captures the value at stack index p of state l.
LuaVar::LuaVar(lua_State* l,int p):LuaVar() {
set(l,p);
}
// Re-initializes this LuaVar from the value at stack index p of state l.
// Maps the raw Lua type to the corresponding LV_* tag, then delegates to
// init() which copies or references the value. The stack is not consumed.
void LuaVar::set(lua_State* l,int p) {
free();
int t = lua_type(l,p);
LuaVar::Type type = LV_NIL;
switch(t) {
case LUA_TNUMBER:
{
// Lua 5.3 distinguishes integer and float numbers.
if(lua_isinteger(l,p))
type = LV_INT;
else
type = LV_NUMBER;
}
break;
case LUA_TSTRING:
type = LV_STRING;
break;
case LUA_TFUNCTION:
type = LV_FUNCTION;
break;
case LUA_TTABLE:
type = LV_TABLE;
break;
case LUA_TUSERDATA:
type = LV_USERDATA;
break;
case LUA_TLIGHTUSERDATA:
type = LV_LIGHTUD;
break;
case LUA_TBOOLEAN:
type = LV_BOOL;
break;
case LUA_TNIL:
default:
type = LV_NIL;
break;
}
init(l,p,type);
}
// Captures the value at stack index p, with the LV_* type chosen by the caller.
LuaVar::LuaVar(lua_State* l,int p,LuaVar::Type type):LuaVar() {
init(l,p,type);
}
// used to create number n of tuple
// it used for return value from lua
// don't call it to create n element of tuple
LuaVar::LuaVar(lua_State* l,size_t n):LuaVar() {
init(l,n,LV_TUPLE);
}
// Resolves the lua_State this var is bound to, or nullptr if the state
// has been destroyed since the value was captured.
lua_State* LuaVar::getState() const
{
auto ls = LuaState::get(stateIndex);
return ls ? ls->getLuaState() : nullptr;
}
// Core initializer: copies the value at stack index p (or, for LV_TUPLE,
// the top p values) into this LuaVar. Scalars are stored by value;
// functions/tables/userdata are stored as registry references.
void LuaVar::init(lua_State* l,int p,LuaVar::Type type) {
// Remember which LuaState the captured value belongs to.
auto state = LuaState::get(l);
stateIndex = state->stateIndex();
switch(type) {
case LV_NIL:
break;
case LV_INT:
set(lua_tointeger(l,p));
break;
case LV_NUMBER:
set(lua_tonumber(l,p));
break;
case LV_STRING: {
// Copy the bytes out; lstring allows embedded NULs.
size_t len;
const char* buf = lua_tolstring(l, p, &len);
set(buf,len);
break;
}
case LV_BOOL:
set(!!lua_toboolean(l,p));
break;
case LV_LIGHTUD:
// Light userdata is a bare pointer; no reference is needed.
alloc(1);
vars[0].ptr = lua_touserdata(l,p);
vars[0].luatype = type;
break;
case LV_FUNCTION:
case LV_TABLE:
case LV_USERDATA:
// Duplicate the value on the stack; RefRef pops it into the registry.
alloc(1);
lua_pushvalue(l,p);
vars[0].ref = new RefRef(l);
vars[0].luatype=type;
break;
case LV_TUPLE:
// Here p is a count of stack values, not a stack index.
ensure(p>0 && lua_gettop(l)>=p);
initTuple(l,p);
break;
default:
break;
}
}
// Captures the top n stack values as a tuple (used for multiple return
// values from Lua). The stack itself is left untouched.
void LuaVar::initTuple(lua_State* l,size_t n) {
ensure(lua_gettop(l)>=n);
alloc(n);
// f is the stack index of the first of the top n values.
int f = lua_gettop(l)-n+1;
for(size_t i=0;i<n;i++) {
int p = i+f;
int t = lua_type(l,p);
switch(t) {
case LUA_TBOOLEAN:
vars[i].luatype = LV_BOOL;
vars[i].b = !!lua_toboolean(l, p);
break;
case LUA_TNUMBER:
{
// Preserve the integer/float distinction of Lua 5.3 numbers.
if(lua_isinteger(l,p)) {
vars[i].luatype = LV_INT;
vars[i].i = lua_tointeger(l,p);
}
else {
vars[i].luatype = LV_NUMBER;
vars[i].d = lua_tonumber(l,p);
}
}
break;
case LUA_TSTRING: {
// Copy the string bytes into a ref-counted buffer.
vars[i].luatype = LV_STRING;
size_t len;
const char* buf = lua_tolstring(l, p, &len);
vars[i].s = new RefStr(buf,len);
break;
}
case LUA_TFUNCTION:
// Reference types are pinned in the registry via RefRef.
vars[i].luatype = LV_FUNCTION;
lua_pushvalue(l,p);
vars[i].ref = new RefRef(l);
break;
case LUA_TTABLE:
vars[i].luatype = LV_TABLE;
lua_pushvalue(l,p);
vars[i].ref = new RefRef(l);
break;
case LUA_TUSERDATA:
vars[i].luatype = LV_USERDATA;
lua_pushvalue(l, p);
vars[i].ref = new RefRef(l);
break;
case LUA_TLIGHTUSERDATA:
vars[i].luatype = LV_LIGHTUD;
vars[i].ptr = lua_touserdata(l, p);
break;
case LUA_TNIL:
default:
vars[i].luatype = LV_NIL;
break;
}
}
}
LuaVar::~LuaVar() {
free();
}
// Pops the value on top of the stack into the Lua registry and remembers
// the resulting reference id plus the owning state's index.
LuaVar::RefRef::RefRef(lua_State* l)
:LuaVar::Ref()
{
ref=luaL_ref(l,LUA_REGISTRYINDEX);
stateIndex = LuaState::get(l)->stateIndex();
}
// Releases the registry reference, but only if the owning LuaState is
// still alive (it may have been closed before this object dies).
LuaVar::RefRef::~RefRef() {
if(LuaState::isValid(stateIndex)) {
auto state = LuaState::get(stateIndex);
luaL_unref(state->getLuaState(),LUA_REGISTRYINDEX,ref);
}
}
// Releases all held values and resets this LuaVar to nil.
// NOTE(review): when ref->isValid() is false the RefRef object itself is
// never released here — verify this cannot leak the RefRef allocation.
void LuaVar::free() {
for(size_t n=0;n<numOfVar;n++) {
if( (vars[n].luatype==LV_FUNCTION || vars[n].luatype==LV_TABLE || vars[n].luatype == LV_USERDATA)
&& vars[n].ref->isValid() )
vars[n].ref->release();
else if(vars[n].luatype==LV_STRING)
vars[n].s->release();
}
numOfVar = 0;
delete[] vars;
vars = nullptr;
}
// Allocates storage for n values; a non-positive n leaves the var empty.
void LuaVar::alloc(int n) {
if(n>0) {
vars = new lua_var[n];
numOfVar = n;
}
}
// One step of table iteration in the style of lua_next. Pass a nil 'key'
// to start; on success key/value receive the next pair and true is
// returned; at the end both are freed and false is returned.
bool LuaVar::next(LuaVar& key,LuaVar& value) {
if(!isTable())
return false;
auto L = getState();
push(L);
key.push(L);
// lua_next pops the key and pushes the next key/value pair (if any).
if(lua_next(L,-2)!=0) {
key.set(L,-2);
value.set(L,-1);
// Pop value, key and the table itself.
lua_pop(L,3);
return true;
}
else {
key.free();
value.free();
lua_pop(L,1);
return false;
}
}
// Converts the value to a string via luaL_tolstring (honoring __tostring).
// NOTE(review): the returned pointer refers to a Lua-owned string that has
// already been popped — it may be collected; copy it promptly. Verify.
const char* LuaVar::toString() {
auto L = getState();
push(L);
const char* ret;
ret = luaL_tolstring(L,-1,NULL);
lua_pop(L,2);
return ret;
}
// For tables, the raw length (#t); otherwise the number of tuple elements.
size_t LuaVar::count() const {
if(isTable()) {
auto L = getState();
push(L);
size_t n = lua_rawlen(L,-1);
lua_pop(L,1);
return n;
}
return numOfVar;
}
// Numeric accessors: accept LV_INT or LV_NUMBER and convert; any other
// type yields -1 (integers) or NAN (floating point).
int LuaVar::asInt() const {
ensure(numOfVar==1);
switch(vars[0].luatype) {
case LV_INT:
return vars[0].i;
case LV_NUMBER:
return vars[0].d;
default:
return -1;
}
}
int64 LuaVar::asInt64() const {
ensure(numOfVar==1);
switch(vars[0].luatype) {
case LV_INT:
return vars[0].i;
case LV_NUMBER:
return vars[0].d;
default:
return -1;
}
}
float LuaVar::asFloat() const {
ensure(numOfVar==1);
switch(vars[0].luatype) {
case LV_INT:
return vars[0].i;
case LV_NUMBER:
return vars[0].d;
default:
return NAN;
}
}
double LuaVar::asDouble() const {
ensure(numOfVar==1);
switch(vars[0].luatype) {
case LV_INT:
return vars[0].i;
case LV_NUMBER:
return vars[0].d;
default:
return NAN;
}
}
// String accessor; optionally reports the length (embedded NULs allowed).
const char* LuaVar::asString(size_t* outlen) const {
ensure(numOfVar==1 && vars[0].luatype==LV_STRING);
if(outlen) *outlen = vars[0].s->length;
return vars[0].s->buf;
}
// Returns buffer + length as a LuaLString view of the held string.
LuaLString LuaVar::asLString() const
{
ensure(numOfVar == 1 && vars[0].luatype == LV_STRING);
return { vars[0].s->buf,vars[0].s->length };
}
bool LuaVar::asBool() const {
ensure(numOfVar==1 && vars[0].luatype==LV_BOOL);
return vars[0].b;
}
void* LuaVar::asLightUD() const {
ensure(numOfVar==1 && vars[0].luatype==LV_LIGHTUD);
return vars[0].ptr;
}
// Element access. For a table: t[index] via lua_geti. For a tuple: the
// index-th element (1-based), returned as a cloned LuaVar.
LuaVar LuaVar::getAt(size_t index) const {
auto L = getState();
if(isTable()) {
push(L); // push this table
lua_geti(L,-1,index); // get by index
LuaVar r(L,-1); // construct LuaVar
lua_pop(L,2); // pop table and value
return r;
}
else {
ensure(index>0);
ensure(numOfVar>=index);
LuaVar r;
r.alloc(1);
r.stateIndex = this->stateIndex;
varClone(r.vars[0],vars[index-1]);
return r;
}
}
// Scalar setters: each releases any previously held value, then stores the
// new value with the matching LV_* tag.
void LuaVar::set(lua_Integer v) {
free();
alloc(1);
vars[0].i = v;
vars[0].luatype = LV_INT;
}
void LuaVar::set(int v) {
free();
alloc(1);
vars[0].i = v;
vars[0].luatype = LV_INT;
}
void LuaVar::set(lua_Number v) {
free();
alloc(1);
vars[0].d = v;
vars[0].luatype = LV_NUMBER;
}
// Stores a copy of the byte range [v, v+len) in a ref-counted buffer.
void LuaVar::set(const char* v,size_t len) {
free();
alloc(1);
vars[0].s = new RefStr(v,len);
vars[0].luatype = LV_STRING;
}
void LuaVar::set(const LuaLString & lstr)
{
set(lstr.buf, lstr.len);
}
void LuaVar::set(bool b) {
free();
alloc(1);
vars[0].b = b;
vars[0].luatype = LV_BOOL;
}
// Pushes a single stored value onto the Lua stack; unknown/nil tags push nil.
void LuaVar::pushVar(lua_State* l,const lua_var& ov) const {
switch(ov.luatype) {
case LV_INT:
lua_pushinteger(l,ov.i);
break;
case LV_NUMBER:
lua_pushnumber(l,ov.d);
break;
case LV_BOOL:
lua_pushboolean(l,ov.b);
break;
case LV_STRING:
lua_pushlstring(l,ov.s->buf,ov.s->length);
break;
case LV_FUNCTION:
case LV_TABLE:
case LV_USERDATA:
// Reference types are fetched back from the registry.
ov.ref->push(l);
break;
case LV_LIGHTUD:
lua_pushlightuserdata(l,ov.ptr);
break;
default:
lua_pushnil(l);
break;
}
}
// Pushes the whole value (all tuple elements for a tuple) onto the stack
// of l (defaults to the bound state). Returns the number of pushed values;
// an empty/nil var pushes a single nil.
int LuaVar::push(lua_State* l) const {
if(l==nullptr) l=getState();
if(l==nullptr) return 0;
if(vars==nullptr || numOfVar==0) {
lua_pushnil(l);
return 1;
}
if(numOfVar==1) {
const lua_var& ov = vars[0];
pushVar(l,ov);
return 1;
}
for(size_t n=0;n<numOfVar;n++) {
const lua_var& ov = vars[n];
pushVar(l,ov);
}
return numOfVar;
}
bool LuaVar::isValid() const {
return numOfVar>0 && stateIndex>0 && LuaState::isValid(stateIndex);
}
bool LuaVar::isNil() const {
return vars==nullptr || numOfVar==0;
}
bool LuaVar::isFunction() const {
return numOfVar==1 && vars[0].luatype==LV_FUNCTION;
}
bool LuaVar::isTuple() const {
return numOfVar>1;
}
bool LuaVar::isTable() const {
return numOfVar==1 && vars[0].luatype==LV_TABLE;
}
bool LuaVar::isInt() const {
return numOfVar==1 && vars[0].luatype==LV_INT;
}
bool LuaVar::isNumber() const {
return numOfVar==1 && vars[0].luatype==LV_NUMBER;
}
bool LuaVar::isBool() const {
return numOfVar==1 && vars[0].luatype==LV_BOOL;
}
bool LuaVar::isUserdata(const char* t) const {
if(numOfVar==1 && vars[0].luatype==LV_USERDATA) {
auto L = getState();
push(L);
void* p = luaL_testudata(L, -1, t);
lua_pop(L,1);
return p!=nullptr;
}
return false;
}
bool LuaVar::isLightUserdata() const {
return numOfVar==1 && vars[0].luatype==LV_LIGHTUD;
}
bool LuaVar::isString() const {
return numOfVar==1 && vars[0].luatype==LV_STRING;
}
LuaVar::Type LuaVar::type() const {
if(numOfVar==0)
return LV_NIL;
else if(numOfVar==1)
return vars[0].luatype;
else
return LV_TUPLE;
}
// Calls the held Lua function with the top argn stack values as arguments,
// routing errors through the shared error handler. Returns the number of
// values the call left on the stack.
int LuaVar::docall(int argn) const {
if(!isValid()) {
Log::Error("State of lua function is invalid");
return 0;
}
auto L = getState();
int top = lua_gettop(L);
// 'top' becomes the stack slot where the error handler is inserted,
// i.e. just below the argn arguments.
top=top-argn+1;
LuaState::pushErrorHandler(L);
lua_insert(L,top);
vars[0].ref->push(L);
{
LuaScriptCallGuard g(L);
// Move the function below its arguments, above the error handler.
lua_insert(L, top + 1);
// top is err handler
if (lua_pcallk(L, argn, LUA_MULTRET, top, NULL, NULL))
lua_pop(L, 1);
lua_remove(L, top); // remove err handler;
}
return lua_gettop(L)-top+1;
}
// Pushes the value stored at 'parms' (typed by 'prop') onto the Lua stack.
// Returns the property's element size on success, 0 on failure.
int LuaVar::pushArgByParms(UProperty* prop,uint8* parms) {
auto L = getState();
if (LuaObject::push(L,prop,parms,false))
return prop->ElementSize;
return 0;
}
// Invokes the held Lua function using an Unreal UFunction signature:
// arguments are read from 'parms', the optional pSelf is passed first,
// and Lua return values are written back into the return/out parameters.
bool LuaVar::callByUFunction(UFunction* func,uint8* parms, LuaVar* pSelf, FOutParmRec* OutParms) {
if(!func) return false;
if(!isValid()) {
Log::Error("State of lua function is invalid");
return false;
}
const bool bHasReturnParam = func->ReturnValueOffset != MAX_uint16;
// Fast path: no parameters and no return value at all.
if(func->ParmsSize==0 && !bHasReturnParam) {
int nArg = 0;
if (pSelf) {
pSelf->push();
nArg++;
}
auto L = getState();
int n = docall(nArg);
lua_pop(L, n);
return true;
}
// push self if valid
int n=0;
if (pSelf) {
pSelf->push();
n++;
}
// push arguments to lua state
// Return and pure out parameters are skipped; they are filled from the
// Lua return values after the call instead.
for(TFieldIterator<UProperty> it(func);it && (it->PropertyFlags&CPF_Parm);++it) {
UProperty* prop = *it;
uint64 propflag = prop->GetPropertyFlags();
if((propflag&CPF_ReturnParm) || IsRealOutParam(propflag))
continue;
pushArgByParms(prop,parms+prop->GetOffset_ForInternal());
n++;
}
auto L = getState();
int retCount = docall(n);
int remain = retCount;
// if lua return value
// we only handle first lua return value
if(remain >0 && bHasReturnParam) {
auto prop = func->GetReturnProperty();
auto checkder = prop?LuaObject::getChecker(prop):nullptr;
if (checkder) {
(*checkder)(L, prop, parms+prop->GetOffset_ForInternal(), lua_absindex(L, -remain));
}
remain--;
}
// fill lua return value to blueprint stack if argument is out param
for (TFieldIterator<UProperty> it(func); remain >0 && it && (it->PropertyFlags&CPF_Parm); ++it) {
UProperty* prop = *it;
uint64 propflag = prop->GetPropertyFlags();
if (IsRealOutParam(propflag))
{
auto checkder = prop ? LuaObject::getChecker(prop) : nullptr;
uint8* outPamams = OutParms ? OutParms->PropAddr : parms + prop->GetOffset_ForInternal();
if (checkder) {
(*checkder)(L, prop, outPamams, lua_absindex(L, -remain));
}
if(OutParms) OutParms = OutParms->NextOutParm;
remain--;
}
}
// pop returned value
lua_pop(L, retCount);
return true;
}
// clone luavar
// Copies one stored slot. Ref-counted payloads (strings, registry refs)
// are shared, with their reference count bumped.
void LuaVar::varClone(lua_var& tv,const lua_var& ov) const {
switch(ov.luatype) {
case LV_INT:
tv.i = ov.i;
break;
case LV_BOOL:
tv.b = ov.b;
break;
case LV_NUMBER:
tv.d = ov.d;
break;
case LV_STRING:
tv.s = ov.s;
tv.s->addRef();
break;
case LV_FUNCTION:
case LV_TABLE:
case LV_USERDATA:
tv.ref = ov.ref;
tv.ref->addRef();
break;
case LV_LIGHTUD:
tv.ptr = ov.ptr;
break;
// nil and tuple not need to clone
case LV_NIL:
case LV_TUPLE:
break;
}
tv.luatype = ov.luatype;
}
// Copy-construction helper: duplicates every slot of 'other'.
// NOTE(review): existing contents are not freed first — callers must only
// invoke this on a freshly constructed or freed LuaVar; verify.
void LuaVar::clone(const LuaVar& other) {
stateIndex = other.stateIndex;
numOfVar = other.numOfVar;
if(numOfVar>0 && other.vars) {
vars = new lua_var[numOfVar];
for(size_t n=0;n<numOfVar;n++) {
varClone( vars[n], other.vars[n] );
}
}
}
// Move-construction helper: steals the storage and empties 'other'.
void LuaVar::move(LuaVar&& other) {
stateIndex = other.stateIndex;
numOfVar = other.numOfVar;
vars = other.vars;
other.numOfVar = 0;
other.vars = nullptr;
}
// Writes this var's value into the UProperty storage at 'ptr' using the
// property's registered checker. Returns false when no checker exists.
bool LuaVar::toProperty(UProperty* p,uint8* ptr) {
auto func = LuaObject::getChecker(p);
if(func) {
// push var's value to top of stack
auto L = getState();
push(L);
(*func)(L,p,ptr,lua_absindex(L,-1));
lua_pop(L,1);
return true;
}
return false;
}
}
#ifdef _WIN32
#pragma warning (pop)
#endif
|
/*
* Automatically Generated from Mathematica.
* Fri 5 Nov 2021 16:10:34 GMT-04:00
*/
#ifndef JS_PLEFTTOEBARBCONT_RIGHTSTANCE_HH
#define JS_PLEFTTOEBARBCONT_RIGHTSTANCE_HH
#ifdef MATLAB_MEX_FILE
// No need for external definitions
#else // MATLAB_MEX_FILE
#include "math2mat.hpp"
#include "mdefs.hpp"
namespace RightStance
{
void Js_pleftToeBarBCont_RightStance_raw(double *p_output1, const double *var1);
// Eigen wrapper around the Mathematica-generated raw kernel: validates the
// input/output dimensions, zero-initializes the 2x2 output Jacobian, and
// forwards the raw data pointers.
inline void Js_pleftToeBarBCont_RightStance(Eigen::MatrixXd &p_output1, const Eigen::VectorXd &var1)
{
// Check
// - Inputs
assert_size_matrix(var1, 1, 1);
// - Outputs
assert_size_matrix(p_output1, 2, 2);
// set zero the matrix
p_output1.setZero();
// Call Subroutine with raw data
Js_pleftToeBarBCont_RightStance_raw(p_output1.data(), var1.data());
}
}
#endif // MATLAB_MEX_FILE
#endif // JS_PLEFTTOEBARBCONT_RIGHTSTANCE_HH
|
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise hyperbolic tangent operation.
class NGRAPH_API Tanh : public util::UnaryElementwiseArithmetic
{
public:
// Static type descriptor used by ngraph's RTTI-like mechanism.
static constexpr NodeTypeInfo type_info{"Tanh", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a hyperbolic tangent operation.
///
/// \param arg Node that produces the input tensor.
Tanh(const Output<Node>& arg);
Tanh() = default;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
// Constant-folding/interpreter evaluation of tanh on host tensors.
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;
};
}
using v0::Tanh;
}
}
|
#include <test/jtx/impl/offer.cpp>
#include <test/jtx/impl/owners.cpp>
#include <test/jtx/impl/paths.cpp>
#include <test/jtx/impl/pay.cpp>
#include <test/jtx/impl/quality2.cpp>
#include <test/jtx/impl/rate.cpp>
#include <test/jtx/impl/regkey.cpp>
#include <test/jtx/impl/sendmax.cpp>
#include <test/jtx/impl/seq.cpp>
#include <test/jtx/impl/sig.cpp>
#include <test/jtx/impl/tag.cpp>
#include <test/jtx/impl/ticket.cpp>
#include <test/jtx/impl/trust.cpp>
#include <test/jtx/impl/txflags.cpp>
#include <test/jtx/impl/utility.cpp>
#include <test/jtx/impl/WSClient.cpp>
|
/*
* If not stated otherwise in this file or this component's LICENSE file the
* following copyright and licenses apply:
*
* Copyright 2020 Metrological
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TestAdministrator.h"
namespace WPEFramework {
namespace TestCore {
// Meyers singleton accessor; the instance is created on first use.
/* static */ TestAdministrator& TestAdministrator::Instance()
{
static TestAdministrator _singleton;
return (_singleton);
}
// Registers a test category under its name, guarded by the admin lock.
// ASSERT only fires in debug builds; in release, a duplicate name is
// silently ignored and the existing entry is kept.
void TestAdministrator::Announce(Exchange::ITestController::ICategory* category)
{
ASSERT(category != nullptr);
_adminLock.Lock();
auto found = _testsCategories.find(category->Name());
ASSERT((found == _testsCategories.end()) && "Category already exists!");
if (found == _testsCategories.end()) {
_testsCategories[category->Name()] = category;
}
_adminLock.Unlock();
}
// Unregisters a category by name; erasing a missing name is a no-op.
void TestAdministrator::Revoke(Exchange::ITestController::ICategory* category)
{
ASSERT(category != nullptr);
_adminLock.Lock();
_testsCategories.erase(category->Name());
_adminLock.Unlock();
}
// Creates an iterator over a snapshot of the registered categories.
// The caller takes ownership of the returned interface.
Exchange::ITestController::ICategory::IIterator* TestAdministrator::Categories()
{
_adminLock.Lock();
auto iterator = Core::Service<TestCore::CategoryIterator>::Create<Exchange::ITestController::ICategory::IIterator>(_testsCategories);
_adminLock.Unlock();
return iterator;
}
// Looks up a registered category by name under the admin lock.
// Returns nullptr when no category with that name exists.
Exchange::ITestController::ICategory* TestAdministrator::Category(const string& name)
{
    _adminLock.Lock();
    auto it = _testsCategories.find(name);
    Exchange::ITestController::ICategory* category =
        (it != _testsCategories.end()) ? it->second : nullptr;
    _adminLock.Unlock();
    return category;
}
} // namespace TestCore
} // namespace WPEFramework
|
//==============================================================================
// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef BOOST_SIMD_OPERATOR_FUNCTIONS_SCALAR_DIVIDES_HPP_INCLUDED
#define BOOST_SIMD_OPERATOR_FUNCTIONS_SCALAR_DIVIDES_HPP_INCLUDED
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable: 4723) // potential divide by 0
#endif
namespace boost { namespace simd { namespace ext
{
// Scalar CPU implementation of the divides_ functor for any fundamental
// type; the macro generates the dispatch boilerplate and the call operator
// body simply evaluates a0 / a1.
BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::divides_, tag::cpu_, (A0)
, (scalar_< fundamental_<A0> >)
(scalar_< fundamental_<A0> >)
)
{
typedef A0 result_type;
BOOST_SIMD_FUNCTOR_CALL_REPEAT(2) { return a0/a1; }
};
} } }
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
#endif
|
/*!
* Copyright (c) 2015 by Contributors
* \file sequence_mask.cc
* \brief
* \author Sebastian Bodenstein
*/
#include "./sequence_mask-inl.h"
namespace mshadow {
// Overwrites with 'value' every timestep at or beyond each sequence's
// length. dst has layout [max sequence length, batch, other dims];
// label[b] holds batch element b's length (a DType, truncated to index_t).
template <typename DType>
inline void SequenceMask(const Tensor<cpu, 3, DType> &dst,
const Tensor<cpu, 1, DType> label, DType value) {
for (index_t b = 0; b < dst.size(1); ++b)
for (index_t s = label[b]; s < dst.size(0); ++s)
for (index_t r = 0; r < dst.size(2); ++r)
dst[s][b][r] = value;
}
} // namespace mshadow
namespace mxnet {
namespace op {
// CPU factory: instantiates SequenceMaskOp for the requested real dtype.
template <>
Operator *CreateOp<cpu>(SequenceMaskParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType,
{ op = new SequenceMaskOp<cpu, DType>(param); })
return op;
}
// DO_BIND_DISPATCH comes from operator_common.h
// DO_BIND_DISPATCH comes from operator_common.h
// Validates shapes/types, then dispatches CreateOp for the input dtype.
Operator *SequenceMaskProp::CreateOperatorEx(Context ctx,
std::vector<TShape> *in_shape,
std::vector<int> *in_type) const {
std::vector<TShape> out_shape, aux_shape;
std::vector<int> out_type, aux_type;
CHECK(InferType(in_type, &out_type, &aux_type));
CHECK(InferShape(in_shape, &out_shape, &aux_shape));
// DO_BIND_DISPATCH expands to a return statement selecting the device.
DO_BIND_DISPATCH(CreateOp, param_, (*in_type)[0]);
}
// Registers the operator's parameter struct and its front-end property.
DMLC_REGISTER_PARAMETER(SequenceMaskParam);
MXNET_REGISTER_OP_PROPERTY(SequenceMask, SequenceMaskProp)
.describe(
"Sets all elements outside the sequence to a constant value. Takes an n-dimensional tensor of the "
"form [max sequence length, batchsize, other dims] and returns a tensor of the same "
"shape. This operator takes an optional input tensor sequence_length of positive ints of "
"dimension [batchsize] when the sequence_length option is set to true. This allows the "
"operator to handle variable-length sequences. If sequence_length is false, then each "
"example in the batch is assumed to have the max sequence length, and this operator becomes "
"the identity operator."
)
.add_argument("data", "Symbol",
"n-dimensional input tensor of the form [max sequence "
"length, batchsize, other dims]")
.add_argument("sequence_length", "Symbol",
"vector of sequence lengths of size batchsize")
.add_arguments(SequenceMaskParam::__FIELDS__());
} // namespace op
} // namespace mxnet
|
#ifndef OTHELLO_HEURISTICPLAYER_HPP
#define OTHELLO_HEURISTICPLAYER_HPP
//Othello headers:
#include <othello/game/IPlayer.hpp>
#include <othello/game/Game.hpp>
#include <othello/ai/MoveEvaluator.hpp>
namespace othello
{
namespace ai
{
////////////////////////////////////////////////////////////////
/// \class HeuristicPlayer
///
/// \brief An AI player that plays moves using a heuristic
/// positional table. It is completely deterministic
///
////////////////////////////////////////////////////////////////
class HeuristicPlayer : public game::IPlayer
{
    public:
        ////////////////////////////////////////////////////////////////
        /// \brief Function that is called when the player should make a
        ///        move. This function uses a positional table to pick a
        ///        move
        ///
        /// \param game A const reference to the game to make a move in
        /// \param player The index of this player in the game. (0 is
        ///        player 1, 1 is player 2)
        /// \param possibleMoves A vector of the possible moves
        ///
        /// \return A const pointer to a const move that will be played
        ///         by the current player. This pointer must point to a
        ///         move in possibleMoves, or nullptr when possibleMoves
        ///         is empty
        ///
        ////////////////////////////////////////////////////////////////
        const game::Move* makeMove(const game::Game& game, const uint8_t& player,
                                   const std::vector<game::Move>& possibleMoves) override
        {
            // Defensive: with no legal moves there is nothing to return.
            // The old code would have indexed possibleMoves[0] out of bounds.
            if (possibleMoves.empty())
                return nullptr;

            // Best (index, heuristic value) found so far; INT64_MIN makes
            // the first evaluated move always win the initial comparison.
            std::pair<std::size_t, int64_t> bestMove = {0, INT64_MIN};
            //todo maybe worker thread manager
            game::Board tmpBoard;
            int64_t tmpValue;
            // Iterate over the moves we were given (the contract requires
            // returning a pointer into possibleMoves, so the loop bound now
            // matches that vector rather than the board's own move list,
            // which is assumed to contain the same moves in the same order).
            for (std::size_t i = 0; i < possibleMoves.size(); ++i)
            {
                // Copy the board so the real game state is untouched
                tmpBoard = game.getBoard();
                // Make the move (the pointer must belong to tmpBoard's list)
                tmpBoard.makeMove(&tmpBoard.getPossibleMoves()[i]);
                // Score the resulting position for this player
                tmpValue = ai::MoveEvaluator::evaluate(tmpBoard, player);
                // Keep the highest-scoring move
                if (tmpValue > bestMove.second)
                {
                    bestMove.first = i;
                    bestMove.second = tmpValue;
                }
            }
            return &possibleMoves[bestMove.first];
        }
};
}
}
#endif //OTHELLO_HEURISTICPLAYER_HPP
|
# Displays Pascal's triangle with the number of rows chosen by the user.
# Each row entry is the binomial coefficient C(q, x), built incrementally
# from the previous entry: C(q, x) = C(q, x-1) * (q - x + 1) / x.
print("\t\t\tDisplay pascal Triangle", end='')
print("\n\n\nHow Many Row Do you want to input:", end='')
r = int(input())
print("\nPascal's Triangle:\n", end='')
bin = 1
q = 0
while q < r:
    # Left padding shrinks with each row so the triangle stays centred.
    for p in range(40 - 3 * q, 0, -1):
        print(" ", end='')
    for x in range(0, q + 1, 1):
        if x == 0 or q == 0:
            bin = 1
        else:
            # Integer division (//) keeps the coefficient exact; the old
            # float division accumulated rounding errors for larger rows.
            bin = (bin * (q - x + 1)) // x
        print("   ", end='')
        print(int(bin), end='')
    print("\n\n\n", end='')
    q += 1
# Keep the console window open until the user presses Enter.
input()
|
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Copyright (c) 2014-2019 The Dash Core developers
// Copyright (c) 2020 The roubzi developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/roubzi-config.h"
#endif
#include "fs.h"
#include "intro.h"
#include "ui_intro.h"
#include "guiutil.h"
#include "util.h"
#include <QFileDialog>
#include <QSettings>
#include <QMessageBox>
#include <cmath>
static const uint64_t GB_BYTES = 1000000000LL;
/* Minimum free space (in GB) needed for data directory */
static const uint64_t BLOCK_CHAIN_SIZE = 1;
/* Minimum free space (in GB) needed for data directory when pruned; Does not include prune target */
static const uint64_t CHAIN_STATE_SIZE = 1;
/* Total required space (in GB) depending on user choice (prune, not prune) */
static uint64_t requiredSpace;
/* Check free space asynchronously to prevent hanging the UI thread.
Up to one request to check a path is in flight to this thread; when the check()
function runs, the current path is requested from the associated Intro object.
The reply is sent back through a signal.
This ensures that no queue of checking requests is built up while the user is
still entering the path, and that always the most recently entered path is checked as
soon as the thread becomes available.
*/
// Worker object (moved to a background thread) that checks free disk space
// for the path currently entered in the Intro dialog and reports back via
// the reply() signal.
class FreespaceChecker : public QObject
{
Q_OBJECT
public:
FreespaceChecker(Intro *intro);
enum Status {
ST_OK,
ST_ERROR
};
public Q_SLOTS:
// Performs one free-space check of the Intro's current path.
void check();
Q_SIGNALS:
// Emitted with the check result, a user-facing message and free bytes.
void reply(int status, const QString &message, quint64 available);
private:
// Non-owning pointer back to the dialog that requested the check.
Intro *intro;
};
#include "intro.moc"
// Stores the dialog that requests free-space checks (non-owning).
FreespaceChecker::FreespaceChecker(Intro *_intro)
    : intro(_intro)
{
}
// Runs one free-space check of the dialog's current path and emits reply()
// with a status, a translated message and the available bytes.
void FreespaceChecker::check()
{
QString dataDirStr = intro->getPathToCheck();
fs::path dataDir = GUIUtil::qstringToBoostPath(dataDirStr);
uint64_t freeBytesAvailable = 0;
int replyStatus = ST_OK;
QString replyMessage = tr("A new data directory will be created.");
/* Find first parent that exists, so that fs::space does not fail */
fs::path parentDir = dataDir;
fs::path parentDirOld = fs::path();
while(parentDir.has_parent_path() && !fs::exists(parentDir))
{
parentDir = parentDir.parent_path();
/* Check if we make any progress, break if not to prevent an infinite loop here */
if (parentDirOld == parentDir)
break;
parentDirOld = parentDir;
}
try {
freeBytesAvailable = fs::space(parentDir).available;
if(fs::exists(dataDir))
{
if(fs::is_directory(dataDir))
{
// Existing directory is usable, but hint how to create a fresh one.
QString separator = "<code>" + QDir::toNativeSeparators("/") + tr("name") + "</code>";
replyStatus = ST_OK;
replyMessage = tr("Directory already exists. Add %1 if you intend to create a new directory here.").arg(separator);
} else {
// Path exists but is a file or other non-directory entry.
replyStatus = ST_ERROR;
replyMessage = tr("Path already exists, and is not a directory.");
}
}
} catch (const fs::filesystem_error&)
{
/* Parent directory does not exist or is not accessible */
replyStatus = ST_ERROR;
replyMessage = tr("Cannot create data directory here.");
}
Q_EMIT reply(replyStatus, replyMessage, freeBytesAvailable);
}
/* Build the first-run "choose data directory" dialog: substitute the product
   name/size figures into the static UI texts, compute the global
   requiredSpace (block chain + chain state, reduced when pruning), and start
   the background free-space checker thread. */
Intro::Intro(QWidget *parent) :
    QDialog(parent),
    ui(new Ui::Intro),
    thread(0),
    signalled(false)
{
    ui->setupUi(this);
    ui->welcomeLabel->setText(ui->welcomeLabel->text().arg(tr(PACKAGE_NAME)));
    ui->storageLabel->setText(ui->storageLabel->text().arg(tr(PACKAGE_NAME)));
    ui->lblExplanation1->setText(ui->lblExplanation1->text()
        .arg(tr(PACKAGE_NAME))
        .arg(BLOCK_CHAIN_SIZE)
        .arg(2021)
        .arg("roubzi")
    );
    ui->lblExplanation2->setText(ui->lblExplanation2->text().arg(tr(PACKAGE_NAME)));
    /* -prune is given in MiB; convert the target to whole GB (rounded up) */
    uint64_t pruneTarget = std::max<int64_t>(0, gArgs.GetArg("-prune", 0));
    requiredSpace = BLOCK_CHAIN_SIZE;
    QString storageRequiresMsg = tr("At least %1 GB of data will be stored in this directory, and it will grow over time.");
    if (pruneTarget) {
        uint64_t prunedGBs = std::ceil(pruneTarget * 1024 * 1024.0 / GB_BYTES);
        if (prunedGBs <= requiredSpace) {
            requiredSpace = prunedGBs;
            storageRequiresMsg = tr("Approximately %1 GB of data will be stored in this directory.");
        }
        ui->lblExplanation3->setVisible(true);
    } else {
        ui->lblExplanation3->setVisible(false);
    }
    requiredSpace += CHAIN_STATE_SIZE;
    ui->sizeWarningLabel->setText(
        tr("%1 will download and store a copy of the roubzi block chain.").arg(tr(PACKAGE_NAME)) + " " +
        storageRequiresMsg.arg(requiredSpace) + " " +
        tr("The wallet will also be stored in this directory.")
    );
    /* Spawn the FreespaceChecker worker thread last, once the UI is ready */
    startThread();
}
/* Tear down the dialog. stopThread() asks the worker to deleteLater() and
   the thread to quit(); wait() blocks until the thread has actually exited
   so the QThread is never destroyed while still running. */
Intro::~Intro()
{
    delete ui;
    /* Ensure thread is finished before it is deleted */
    Q_EMIT stopThread();
    thread->wait();
}
/* Return the path currently shown in the data-directory input field. */
QString Intro::getDataDirectory()
{
    const QString chosenDir = ui->dataDirectory->text();
    return chosenDir;
}
/* Put 'dataDir' into the path field and sync the radio buttons: the default
 * directory locks the field and browse button, a custom one unlocks them. */
void Intro::setDataDirectory(const QString &dataDir)
{
    ui->dataDirectory->setText(dataDir);
    const bool usingDefault = (dataDir == getDefaultDataDirectory());
    if (usingDefault) {
        ui->dataDirDefault->setChecked(true);
    } else {
        ui->dataDirCustom->setChecked(true);
    }
    ui->dataDirectory->setEnabled(!usingDefault);
    ui->ellipsisButton->setEnabled(!usingDefault);
}
/* Platform default data directory, converted to a QString. */
QString Intro::getDefaultDataDirectory()
{
    const fs::path defaultPath = GetDefaultDataDir();
    return GUIUtil::boostPathToQString(defaultPath);
}
/* Decide which data directory to use at startup. Returns false only when the
   user cancels the chooser dialog (caller should abort startup). Shows the
   dialog when the remembered directory is missing, -choosedatadir is set, or
   the OS default directory changed since the setting was written. */
bool Intro::pickDataDirectory()
{
    QSettings settings;
    /* If data directory provided on command line, no need to look at settings
       or show a picking dialog */
    if(!gArgs.GetArg("-datadir", "").empty())
        return true;
    /* 1) Default data directory for operating system */
    QString dataDirDefaultCurrent = getDefaultDataDirectory();
    /* 2) Allow QSettings to override default dir */
    QString dataDir = settings.value("strDataDir", dataDirDefaultCurrent).toString();
    /* 3) Check to see if default datadir is the one we expect */
    QString dataDirDefaultSettings = settings.value("strDataDirDefault").toString();
    if(!fs::exists(GUIUtil::qstringToBoostPath(dataDir)) || gArgs.GetBoolArg("-choosedatadir", DEFAULT_CHOOSE_DATADIR) || dataDirDefaultCurrent != dataDirDefaultSettings)
    {
        /* Let the user choose one */
        Intro intro;
        intro.setDataDirectory(dataDirDefaultCurrent);
        intro.setWindowIcon(QIcon(":icons/bitcoin"));
        /* Loop until the chosen directory can be created or the user cancels */
        while(true)
        {
            if(!intro.exec())
            {
                /* Cancel clicked */
                return false;
            }
            dataDir = intro.getDataDirectory();
            try {
                TryCreateDirectories(GUIUtil::qstringToBoostPath(dataDir));
                break;
            } catch (const fs::filesystem_error&) {
                QMessageBox::critical(0, tr(PACKAGE_NAME),
                    tr("Error: Specified data directory \"%1\" cannot be created.").arg(dataDir));
                /* fall through, back to choosing screen */
            }
        }
        /* Persist the choice and the default it was compared against */
        settings.setValue("strDataDir", dataDir);
        settings.setValue("strDataDirDefault", dataDirDefaultCurrent);
    }
    /* Only override -datadir if different from the default, to make it possible to
     * override -datadir in the roubzi.conf file in the default data directory
     * (to be consistent with roubzid behavior)
     */
    if(dataDir != dataDirDefaultCurrent)
        gArgs.SoftSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); // use OS locale for path setting
    return true;
}
/* Slot receiving FreespaceChecker::reply(): update the error text, the
 * free-space label, and enable OK only when the path check succeeded. */
void Intro::setStatus(int status, const QString &message, quint64 bytesAvailable)
{
    const bool hasError = (status == FreespaceChecker::ST_ERROR);
    if (hasError) {
        ui->errorMessage->setText(tr("Error") + ": " + message);
        ui->errorMessage->setStyleSheet("QLabel { color: #800000 }");
    } else {
        ui->errorMessage->setText(message);
        ui->errorMessage->setStyleSheet("");
    }
    /* Indicate number of bytes available */
    if (hasError) {
        ui->freeSpace->setText("");
    } else {
        QString freeString = tr("%1 GB of free space available").arg(bytesAvailable/GB_BYTES);
        const bool tooSmall = (bytesAvailable < requiredSpace * GB_BYTES);
        if (tooSmall) {
            freeString += " " + tr("(of %1 GB needed)").arg(requiredSpace);
            ui->freeSpace->setStyleSheet("QLabel { color: #800000 }");
        } else {
            ui->freeSpace->setStyleSheet("");
        }
        ui->freeSpace->setText(freeString + ".");
    }
    /* Don't allow confirm in ERROR state */
    ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(!hasError);
}
void Intro::on_dataDirectory_textChanged(const QString &dataDirStr)
{
/* Disable OK button until check result comes in */
ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(false);
checkPath(dataDirStr);
}
/* Browse button: open a directory picker seeded with the current path and,
 * if the user chose something, put it into the field (this fires
 * on_dataDirectory_textChanged and thus a re-check). */
void Intro::on_ellipsisButton_clicked()
{
    /* Fix: the dialog caption was a bare literal; wrap it in tr() so it is
     * translatable like every other user-facing string in this dialog. */
    QString dir = QDir::toNativeSeparators(QFileDialog::getExistingDirectory(0, tr("Choose data directory"), ui->dataDirectory->text()));
    if(!dir.isEmpty())
        ui->dataDirectory->setText(dir);
}
/* "Use the default data directory" radio selected: reset the field to the
   OS default (which also disables manual editing via setDataDirectory). */
void Intro::on_dataDirDefault_clicked()
{
    setDataDirectory(getDefaultDataDirectory());
}
/* "Use a custom data directory" radio selected: unlock the path field and
 * the browse button so the user can pick a location. */
void Intro::on_dataDirCustom_clicked()
{
    ui->ellipsisButton->setEnabled(true);
    ui->dataDirectory->setEnabled(true);
}
/* Create the worker thread and the FreespaceChecker living on it, and wire
   the cross-thread signal/slot connections (queued automatically by Qt):
   requestCheck -> check, reply -> setStatus, stopThread -> cleanup. */
void Intro::startThread()
{
    thread = new QThread(this);
    FreespaceChecker *executor = new FreespaceChecker(this);
    /* Move the worker to the thread BEFORE connecting, so its slots run there */
    executor->moveToThread(thread);
    connect(executor, SIGNAL(reply(int,QString,quint64)), this, SLOT(setStatus(int,QString,quint64)));
    connect(this, SIGNAL(requestCheck()), executor, SLOT(check()));
    /* make sure executor object is deleted in its own thread */
    connect(this, SIGNAL(stopThread()), executor, SLOT(deleteLater()));
    connect(this, SIGNAL(stopThread()), thread, SLOT(quit()));
    thread->start();
}
void Intro::checkPath(const QString &dataDir)
{
mutex.lock();
pathToCheck = dataDir;
if(!signalled)
{
signalled = true;
Q_EMIT requestCheck();
}
mutex.unlock();
}
/* Called by FreespaceChecker (worker thread): fetch the most recently
 * entered path and clear the 'signalled' flag so a new request can be
 * queued by checkPath(). */
QString Intro::getPathToCheck()
{
    /* RAII locker instead of manual lock()/unlock() — exception-safe and
     * guarantees release on every return path. */
    QMutexLocker locker(&mutex);
    signalled = false; /* new request can be queued now */
    return pathToCheck;
}
|
#include <betterchain/chain/rate_limiting.hpp>
/* Intentionally empty translation unit: rate_limiting declarations live in
   the included header; no out-of-line definitions are needed yet. */
namespace betterchain { namespace chain {
} } /// betterchain::chain
|
/*
* Copyright 2015 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/io/async/AsyncTimeout.h>
#include <folly/io/async/EventBase.h>
#include <gtest/gtest.h>
namespace folly {
// A timeout built with AsyncTimeout::make() fires once scheduled and the
// event base is looped.
TEST(AsyncTimeout, make) {
  int result = 0;
  int const sentinel = 10;
  EventBase evb;
  auto timeout = AsyncTimeout::make(
      evb,
      [&]() noexcept { result = sentinel; });
  timeout->scheduleTimeout(std::chrono::milliseconds(100));
  evb.loop();
  EXPECT_EQ(sentinel, result);
}
// AsyncTimeout::schedule() creates AND schedules in one call.
TEST(AsyncTimeout, schedule) {
  int result = 0;
  int const sentinel = 10;
  EventBase evb;
  auto timeout = AsyncTimeout::schedule(
      std::chrono::milliseconds(100),
      evb,
      [&]() noexcept { result = sentinel; });
  evb.loop();
  EXPECT_EQ(sentinel, result);
}
// Cancelling a make()-created timeout before the loop runs suppresses the
// callback entirely.
TEST(AsyncTimeout, cancel_make) {
  int result = 0;
  int const sentinel = 10;
  EventBase evb;
  auto timeout = AsyncTimeout::make(
      evb,
      [&]() noexcept { result = sentinel; });
  timeout->scheduleTimeout(std::chrono::milliseconds(100));
  timeout->cancelTimeout();
  evb.loop();
  EXPECT_NE(sentinel, result);
}
// Cancelling a schedule()-created timeout before the loop runs suppresses
// the callback entirely. The returned handle is kept alive on purpose.
TEST(AsyncTimeout, cancel_schedule) {
  int result = 0;
  int const sentinel = 10;
  EventBase evb;
  auto timeout = AsyncTimeout::schedule(
      std::chrono::milliseconds(100),
      evb,
      [&]() noexcept { result = sentinel; });
  timeout->cancelTimeout();
  evb.loop();
  EXPECT_NE(sentinel, result);
}
} // namespace folly {
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "init.h"
#include "util.h"
#include "sync.h"
#include "ui_interface.h"
#include "base58.h"
#include "bitcoinrpc.h"
#include "db.h"
#undef printf
#include <boost/asio.hpp>
#include <boost/asio/ip/v6_only.hpp>
#include <boost/bind.hpp>
#include <boost/filesystem.hpp>
#include <boost/foreach.hpp>
#include <boost/iostreams/concepts.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/shared_ptr.hpp>
#include <list>
#define printf OutputDebugStringF
using namespace std;
using namespace boost;
using namespace boost::asio;
using namespace json_spirit;
/* Forward declaration: main body of the RPC listener thread (defined below) */
void ThreadRPCServer2(void* parg);
/* "user:password" expected in the HTTP Basic auth header; set at startup */
static std::string strRPCUserColonPass;
/* Shared empty JSON object, usable as a default argument */
const Object emptyobj;
/* Forward declaration: per-connection HTTP client thread */
void ThreadRPCServer3(void* parg);
/* Default JSON-RPC port: 34452 on testnet, 10004 on mainnet. */
static inline unsigned short GetDefaultRPCPort()
{
    if (GetBoolArg("-testnet", false))
        return 34452;
    return 10004;
}
/* Build a JSON-RPC error object of the form
 * { "code": <code>, "message": <message> }. */
Object JSONRPCError(int code, const string& message)
{
    Object ret;
    ret.push_back(Pair("code", code));
    ret.push_back(Pair("message", message));
    return ret;
}
/**
 * Validate positional RPC parameters against an expected type list.
 * Missing trailing parameters are accepted (loop breaks early); a present
 * parameter of the wrong type throws a JSON-RPC RPC_TYPE_ERROR. When
 * fAllowNull is set, JSON null is accepted in any position.
 */
void RPCTypeCheck(const Array& params,
                  const list<Value_type>& typesExpected,
                  bool fAllowNull)
{
    unsigned int i = 0;
    BOOST_FOREACH(Value_type t, typesExpected)
    {
        if (params.size() <= i)
            break;
        const Value& v = params[i];
        if (!((v.type() == t) || (fAllowNull && (v.type() == null_type))))
        {
            string err = strprintf("Expected type %s, got %s",
                                   Value_type_name[t], Value_type_name[v.type()]);
            throw JSONRPCError(RPC_TYPE_ERROR, err);
        }
        i++;
    }
}
/**
 * Validate named RPC parameters (a JSON object) against an expected
 * name -> type map. Throws RPC_TYPE_ERROR if a required key is absent
 * (unless fAllowNull) or present with the wrong type.
 */
void RPCTypeCheck(const Object& o,
                  const map<string, Value_type>& typesExpected,
                  bool fAllowNull)
{
    BOOST_FOREACH(const PAIRTYPE(string, Value_type)& t, typesExpected)
    {
        /* find_value returns a null Value when the key is missing */
        const Value& v = find_value(o, t.first);
        if (!fAllowNull && v.type() == null_type)
            throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing %s", t.first.c_str()));
        if (!((v.type() == t.second) || (fAllowNull && (v.type() == null_type))))
        {
            string err = strprintf("Expected type %s for %s, got %s",
                                   Value_type_name[t.second], t.first.c_str(), Value_type_name[v.type()]);
            throw JSONRPCError(RPC_TYPE_ERROR, err);
        }
    }
}
/**
 * Convert a JSON number in coin units into an int64 amount in base units
 * (multiplied by COIN, rounded). Throws RPC_TYPE_ERROR for amounts that are
 * zero/negative, above MAX_MONEY, or outside MoneyRange after conversion.
 * NOTE(review): parsing goes through a double, so amounts near the precision
 * limit of double may round — verify against callers if exactness matters.
 */
int64 AmountFromValue(const Value& value)
{
    double dAmount = value.get_real();
    if (dAmount <= 0.0 || dAmount > MAX_MONEY)
        throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount");
    int64 nAmount = roundint64(dAmount * COIN);
    if (!MoneyRange(nAmount))
        throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount");
    return nAmount;
}
/* Inverse of AmountFromValue: convert a base-unit int64 amount into a JSON
   number in coin units (amount / COIN, as double). */
Value ValueFromAmount(int64 amount)
{
    return (double)amount / (double)COIN;
}
/* Render a 32-bit value (e.g. a compact difficulty target) as hex in network
   (big-endian) byte order: htonl fixes the byte order, the union exposes the
   raw bytes, and HexStr formats them. */
std::string HexBits(unsigned int nBits)
{
    union {
        int32_t nBits;
        char cBits[4];
    } uBits;
    uBits.nBits = htonl((int32_t)nBits);
    return HexStr(BEGIN(uBits.cBits), END(uBits.cBits));
}
///
/// Note: This interface may still be subject to change.
///
/**
 * Assemble the help text for one command (strCommand non-empty) or for all
 * commands. Help text is harvested by invoking each handler with fHelp=true,
 * which by convention throws an exception whose what() is the usage string.
 * setDone de-duplicates handlers registered under multiple names.
 */
string CRPCTable::help(string strCommand) const
{
    string strRet;
    set<rpcfn_type> setDone;
    for (map<string, const CRPCCommand*>::const_iterator mi = mapCommands.begin(); mi != mapCommands.end(); ++mi)
    {
        const CRPCCommand *pcmd = mi->second;
        string strMethod = mi->first;
        // We already filter duplicates, but these deprecated screw up the sort order
        if (strMethod.find("label") != string::npos)
            continue;
        if (strCommand != "" && strMethod != strCommand)
            continue;
        try
        {
            Array params;
            rpcfn_type pfn = pcmd->actor;
            if (setDone.insert(pfn).second)
                (*pfn)(params, true);
        }
        catch (std::exception& e)
        {
            // Help text is returned in an exception
            string strHelp = string(e.what());
            if (strCommand == "")
                /* In the all-commands listing, keep only the first (usage) line */
                if (strHelp.find('\n') != string::npos)
                    strHelp = strHelp.substr(0, strHelp.find('\n'));
            strRet += strHelp + "\n";
        }
    }
    if (strRet == "")
        strRet = strprintf("help: unknown command: %s\n", strCommand.c_str());
    /* Drop the trailing newline */
    strRet = strRet.substr(0,strRet.size()-1);
    return strRet;
}
/* RPC handler: "help [command]" — list all commands, or show the usage text
 * of one command. */
Value help(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "help [command]\n"
            "List commands, or get help for a command.");
    const string strCommand = params.empty() ? string() : params[0].get_str();
    return tableRPC.help(strCommand);
}
/* RPC handler: "stop <detach>" — request a clean server shutdown, optionally
 * overriding the configured database detach behavior for this stop. */
Value stop(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "stop <detach>\n"
            "<detach> is true or false to detach the database or not for this stop only\n"
            "Stop 888diss server (and possibly override the detachdb config value).");
    // Shutdown will take long enough that the response should get back
    if (!params.empty())
        bitdb.SetDetach(params[0].get_bool());
    StartShutdown();
    return "888diss server stopping";
}
//
// Call Table
//
/**
 * Master dispatch table of all JSON-RPC methods. Per the column headers:
 * "safemd" presumably marks commands callable in safe mode and "unlocked"
 * marks commands runnable without holding the app-wide locks — confirm
 * against the CRPCCommand definition before relying on either.
 */
static const CRPCCommand vRPCCommands[] =
{ //  name                      function                 safemd  unlocked
  //  ------------------------  -----------------------  ------  --------
    { "help",                   &help,                   true,   true },
    { "stop",                   &stop,                   true,   true },
    { "getblockcount",          &getblockcount,          true,   false },
    { "getconnectioncount",     &getconnectioncount,     true,   false },
    { "getpeerinfo",            &getpeerinfo,            true,   false },
    { "getdifficulty",          &getdifficulty,          true,   false },
    { "getgenerate",            &getgenerate,            true,   false },
    { "setgenerate",            &setgenerate,            true,   false },
    { "gethashespersec",        &gethashespersec,        true,   false },
    { "getinfo",                &getinfo,                true,   false },
    { "getmininginfo",          &getmininginfo,          true,   false },
    { "getnewaddress",          &getnewaddress,          true,   false },
    { "getnewpubkey",           &getnewpubkey,           true,   false },
    { "getaccountaddress",      &getaccountaddress,      true,   false },
    { "setaccount",             &setaccount,             true,   false },
    { "getaccount",             &getaccount,             false,  false },
    { "getaddressesbyaccount",  &getaddressesbyaccount,  true,   false },
    { "sendtoaddress",          &sendtoaddress,          false,  false },
    { "getreceivedbyaddress",   &getreceivedbyaddress,   false,  false },
    { "getreceivedbyaccount",   &getreceivedbyaccount,   false,  false },
    { "listreceivedbyaddress",  &listreceivedbyaddress,  false,  false },
    { "listreceivedbyaccount",  &listreceivedbyaccount,  false,  false },
    { "backupwallet",           &backupwallet,           true,   false },
    { "keypoolrefill",          &keypoolrefill,          true,   false },
    { "walletpassphrase",       &walletpassphrase,       true,   false },
    { "walletpassphrasechange", &walletpassphrasechange, false,  false },
    { "walletlock",             &walletlock,             true,   false },
    { "encryptwallet",          &encryptwallet,          false,  false },
    { "validateaddress",        &validateaddress,        true,   false },
    { "validatepubkey",         &validatepubkey,         true,   false },
    { "getbalance",             &getbalance,             false,  false },
    { "move",                   &movecmd,                false,  false },
    { "sendfrom",               &sendfrom,               false,  false },
    { "sendmany",               &sendmany,               false,  false },
    { "addmultisigaddress",     &addmultisigaddress,     false,  false },
    { "getrawmempool",          &getrawmempool,          true,   false },
    { "getblock",               &getblock,               false,  false },
    { "getblockbynumber",       &getblockbynumber,       false,  false },
    { "getblockhash",           &getblockhash,           false,  false },
    { "gettransaction",         &gettransaction,         false,  false },
    { "listtransactions",       &listtransactions,       false,  false },
    { "listaddressgroupings",   &listaddressgroupings,   false,  false },
    { "signmessage",            &signmessage,            false,  false },
    { "verifymessage",          &verifymessage,          false,  false },
    { "getwork",                &getwork,                true,   false },
    { "getworkex",              &getworkex,              true,   false },
    { "listaccounts",           &listaccounts,           false,  false },
    { "settxfee",               &settxfee,               false,  false },
    { "getblocktemplate",       &getblocktemplate,       true,   false },
    { "submitblock",            &submitblock,            false,  false },
    { "listsinceblock",         &listsinceblock,         false,  false },
    { "dumpprivkey",            &dumpprivkey,            false,  false },
    { "importprivkey",          &importprivkey,          false,  false },
    { "listunspent",            &listunspent,            false,  false },
    { "getrawtransaction",      &getrawtransaction,      false,  false },
    { "createrawtransaction",   &createrawtransaction,   false,  false },
    { "decoderawtransaction",   &decoderawtransaction,   false,  false },
    { "signrawtransaction",     &signrawtransaction,     false,  false },
    { "sendrawtransaction",     &sendrawtransaction,     false,  false },
    { "getcheckpoint",          &getcheckpoint,          true,   false },
    { "reservebalance",         &reservebalance,         false,  true},
    { "checkwallet",            &checkwallet,            false,  true},
    { "repairwallet",           &repairwallet,           false,  true},
    { "resendtx",               &resendtx,               false,  true},
    { "makekeypair",            &makekeypair,            false,  true},
    { "sendalert",              &sendalert,              false,  false},
};
/* Index the static vRPCCommands array by method name for O(log n) dispatch. */
CRPCTable::CRPCTable()
{
    const unsigned int nCommands = sizeof(vRPCCommands) / sizeof(vRPCCommands[0]);
    for (unsigned int idx = 0; idx < nCommands; ++idx)
    {
        const CRPCCommand *pcmd = &vRPCCommands[idx];
        mapCommands[pcmd->name] = pcmd;
    }
}
/* Look up a command by name; returns NULL when no such method exists. */
const CRPCCommand *CRPCTable::operator[](string name) const
{
    map<string, const CRPCCommand*>::const_iterator it = mapCommands.find(name);
    return (it == mapCommands.end()) ? NULL : it->second;
}
//
// HTTP protocol
//
// This ain't Apache. We're just using HTTP header for the length field
// and to be compatible with other JSON-RPC implementations.
//
/* Serialize an HTTP POST request carrying strMsg as the body, with the
 * standard JSON-RPC client headers plus any caller-supplied extras. */
string HTTPPost(const string& strMsg, const map<string,string>& mapRequestHeaders)
{
    ostringstream oss;
    oss << "POST / HTTP/1.1\r\n"
        << "User-Agent: 888diss-json-rpc/" << FormatFullVersion() << "\r\n"
        << "Host: 127.0.0.1\r\n"
        << "Content-Type: application/json\r\n"
        << "Content-Length: " << strMsg.size() << "\r\n"
        << "Connection: close\r\n"
        << "Accept: application/json\r\n";
    for (map<string,string>::const_iterator it = mapRequestHeaders.begin(); it != mapRequestHeaders.end(); ++it)
        oss << it->first << ": " << it->second << "\r\n";
    oss << "\r\n" << strMsg;
    return oss.str();
}
/* Format the current UTC time as an RFC 1123 date string, e.g.
 * "Sun, 06 Nov 1994 08:49:37 +0000". The LC_TIME locale is forced to the
 * POSIX "C" locale while formatting (so weekday/month names are English)
 * and restored afterwards. */
std::string rfc1123Time()
{
    std::time_t now = std::time(NULL);
    struct tm* now_gmt = std::gmtime(&now);
    const std::string savedLocale(std::setlocale(LC_TIME, NULL));
    std::setlocale(LC_TIME, "C"); // we want POSIX (aka "C") weekday/month strings
    char buffer[64];
    std::strftime(buffer, sizeof(buffer), "%a, %d %b %Y %H:%M:%S +0000", now_gmt);
    std::setlocale(LC_TIME, savedLocale.c_str());
    return std::string(buffer);
}
/**
 * Serialize a complete HTTP response for the JSON-RPC server. 401 responses
 * get a fixed HTML body (Content-Length 296 must match that body exactly);
 * all other statuses wrap strMsg as a JSON body with the appropriate
 * status line and Connection header.
 */
static string HTTPReply(int nStatus, const string& strMsg, bool keepalive)
{
    if (nStatus == HTTP_UNAUTHORIZED)
        return strprintf("HTTP/1.0 401 Authorization Required\r\n"
            "Date: %s\r\n"
            "Server: 888diss-json-rpc/%s\r\n"
            "WWW-Authenticate: Basic realm=\"jsonrpc\"\r\n"
            "Content-Type: text/html\r\n"
            "Content-Length: 296\r\n"
            "\r\n"
            "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\r\n"
            "\"http://www.w3.org/TR/1999/REC-html401-19991224/loose.dtd\">\r\n"
            "<HTML>\r\n"
            "<HEAD>\r\n"
            "<TITLE>Error</TITLE>\r\n"
            "<META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=ISO-8859-1'>\r\n"
            "</HEAD>\r\n"
            "<BODY><H1>401 Unauthorized.</H1></BODY>\r\n"
            "</HTML>\r\n", rfc1123Time().c_str(), FormatFullVersion().c_str());
    /* Map the remaining status codes to reason phrases */
    const char *cStatus;
         if (nStatus == HTTP_OK) cStatus = "OK";
    else if (nStatus == HTTP_BAD_REQUEST) cStatus = "Bad Request";
    else if (nStatus == HTTP_FORBIDDEN) cStatus = "Forbidden";
    else if (nStatus == HTTP_NOT_FOUND) cStatus = "Not Found";
    else if (nStatus == HTTP_INTERNAL_SERVER_ERROR) cStatus = "Internal Server Error";
    else cStatus = "";
    return strprintf(
            "HTTP/1.1 %d %s\r\n"
            "Date: %s\r\n"
            "Connection: %s\r\n"
            "Content-Length: %"PRIszu"\r\n"
            "Content-Type: application/json\r\n"
            "Server: 888diss-json-rpc/%s\r\n"
            "\r\n"
            "%s",
        nStatus,
        cStatus,
        rfc1123Time().c_str(),
        keepalive ? "keep-alive" : "close",
        strMsg.size(),
        FormatFullVersion().c_str(),
        strMsg.c_str());
}
/**
 * Read the first line of an HTTP message ("HTTP/1.x NNN Reason") from the
 * stream. Returns the numeric status code (or HTTP_INTERNAL_SERVER_ERROR on
 * a malformed line) and stores the minor protocol version in 'proto'
 * (0 when no "HTTP/1." token is found).
 */
int ReadHTTPStatus(std::basic_istream<char>& stream, int &proto)
{
    string str;
    getline(stream, str);
    vector<string> vWords;
    boost::split(vWords, str, boost::is_any_of(" "));
    if (vWords.size() < 2)
        return HTTP_INTERNAL_SERVER_ERROR;
    proto = 0;
    const char *ver = strstr(str.c_str(), "HTTP/1.");
    if (ver != NULL)
        proto = atoi(ver+7); /* the digit right after "HTTP/1." */
    return atoi(vWords[1].c_str());
}
/**
 * Read HTTP headers from the stream until the blank line that terminates the
 * header section. Header names are lower-cased into mapHeadersRet. Returns
 * the Content-Length value, or 0 if that header was absent.
 */
int ReadHTTPHeader(std::basic_istream<char>& stream, map<string, string>& mapHeadersRet)
{
    int nLen = 0;
    loop /* project macro: infinite loop, exited via break */
    {
        string str;
        std::getline(stream, str);
        if (str.empty() || str == "\r")
            break; /* end of header section */
        string::size_type nColon = str.find(":");
        if (nColon != string::npos)
        {
            string strHeader = str.substr(0, nColon);
            boost::trim(strHeader);
            boost::to_lower(strHeader);
            string strValue = str.substr(nColon+1);
            boost::trim(strValue);
            mapHeadersRet[strHeader] = strValue;
            if (strHeader == "content-length")
                nLen = atoi(strValue.c_str());
        }
    }
    return nLen;
}
/**
 * Read a full HTTP message (status line, headers, body) from the stream.
 * Returns the HTTP status code; fills mapHeadersRet (lower-cased names) and
 * strMessageRet (body). Bodies larger than MAX_SIZE are rejected with
 * HTTP_INTERNAL_SERVER_ERROR. A missing Connection header is defaulted from
 * the protocol version (keep-alive for HTTP/1.1+, close for 1.0).
 */
int ReadHTTP(std::basic_istream<char>& stream, map<string, string>& mapHeadersRet, string& strMessageRet)
{
    mapHeadersRet.clear();
    strMessageRet = "";
    // Read status
    int nProto = 0;
    int nStatus = ReadHTTPStatus(stream, nProto);
    // Read header
    int nLen = ReadHTTPHeader(stream, mapHeadersRet);
    if (nLen < 0 || nLen > (int)MAX_SIZE)
        return HTTP_INTERNAL_SERVER_ERROR;
    // Read message
    if (nLen > 0)
    {
        vector<char> vch(nLen);
        stream.read(&vch[0], nLen);
        strMessageRet = string(vch.begin(), vch.end());
    }
    string sConHdr = mapHeadersRet["connection"];
    if ((sConHdr != "close") && (sConHdr != "keep-alive"))
    {
        if (nProto >= 1)
            mapHeadersRet["connection"] = "keep-alive";
        else
            mapHeadersRet["connection"] = "close";
    }
    return nStatus;
}
/**
 * Check the HTTP Basic "Authorization" header against the configured
 * rpcuser:rpcpassword credentials. Returns true only on an exact match.
 *
 * Security fix: the credential comparison is constant-time with respect to
 * the attacker-controlled input; the previous string operator== returned at
 * the first differing byte, leaking matched-prefix length via timing.
 */
bool HTTPAuthorized(map<string, string>& mapHeaders)
{
    string strAuth = mapHeaders["authorization"];
    if (strAuth.substr(0,6) != "Basic ")
        return false;
    string strUserPass64 = strAuth.substr(6); boost::trim(strUserPass64);
    string strUserPass = DecodeBase64(strUserPass64);
    /* Constant-time comparison: accumulate differences instead of early-exit */
    unsigned char mismatch = (strUserPass.size() == strRPCUserColonPass.size()) ? 0 : 1;
    for (size_t i = 0; i < strUserPass.size() && i < strRPCUserColonPass.size(); i++)
        mismatch |= (unsigned char)(strUserPass[i] ^ strRPCUserColonPass[i]);
    return mismatch == 0;
}
//
// JSON-RPC protocol. Bitcoin speaks version 1.0 for maximum compatibility,
// but uses JSON-RPC 1.1/2.0 standards for parts of the 1.0 standard that were
// unspecified (HTTP errors and contents of 'error').
//
// 1.0 spec: http://json-rpc.org/wiki/specification
// 1.2 spec: http://groups.google.com/group/json-rpc/web/json-rpc-over-http
// http://www.codeproject.com/KB/recipes/JSON_Spirit.aspx
//
/* Serialize a JSON-RPC 1.0 request: {"method":..., "params":..., "id":...},
 * newline-terminated. */
string JSONRPCRequest(const string& strMethod, const Array& params, const Value& id)
{
    Object req;
    req.push_back(Pair("method", strMethod));
    req.push_back(Pair("params", params));
    req.push_back(Pair("id", id));
    return write_string(Value(req), false) + "\n";
}
/* Build a JSON-RPC reply object. Per the 1.0 spec, "result" is null when an
 * error is present; "error" and "id" are always echoed as given. */
Object JSONRPCReplyObj(const Value& result, const Value& error, const Value& id)
{
    const bool fError = (error.type() != null_type);
    Object reply;
    reply.push_back(Pair("result", fError ? Value::null : result));
    reply.push_back(Pair("error", error));
    reply.push_back(Pair("id", id));
    return reply;
}
/* Serialize a JSON-RPC reply to a newline-terminated string. */
string JSONRPCReply(const Value& result, const Value& error, const Value& id)
{
    return write_string(Value(JSONRPCReplyObj(result, error, id)), false) + "\n";
}
void ErrorReply(std::ostream& stream, const Object& objError, const Value& id)
{
// Send error reply from json-rpc error object
int nStatus = HTTP_INTERNAL_SERVER_ERROR;
int code = find_value(objError, "code").get_int();
if (code == RPC_INVALID_REQUEST) nStatus = HTTP_BAD_REQUEST;
else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND;
string strReply = JSONRPCReply(Value::null, objError, id);
stream << HTTPReply(nStatus, strReply, false) << std::flush;
}
/**
 * Decide whether an RPC connection from 'address' is permitted: loopback
 * addresses are always allowed, everything else must match one of the
 * -rpcallowip wildcard patterns. IPv4-mapped/compatible IPv6 addresses are
 * normalized to IPv4 first so patterns written for IPv4 match them.
 *
 * Fix: removed the unused local 'ipv4addr' (a dead address.to_string() call
 * whose result was never read).
 */
bool ClientAllowed(const boost::asio::ip::address& address)
{
    // Make sure that IPv4-compatible and IPv4-mapped IPv6 addresses are treated as IPv4 addresses
    if (address.is_v6()
     && (address.to_v6().is_v4_compatible()
      || address.to_v6().is_v4_mapped()))
        return ClientAllowed(address.to_v6().to_v4());
    if (address == asio::ip::address_v4::loopback()
     || address == asio::ip::address_v6::loopback()
     || (address.is_v4()
         // Check whether IPv4 addresses match 127.0.0.0/8 (loopback subnet)
      && (address.to_v4().to_ulong() & 0xff000000) == 0x7f000000))
        return true;
    const string strAddress = address.to_string();
    const vector<string>& vAllow = mapMultiArgs["-rpcallowip"];
    BOOST_FOREACH(string strAllow, vAllow)
        if (WildcardMatch(strAddress, strAllow))
            return true;
    return false;
}
//
// IOStream device that speaks SSL but can also speak non-SSL
//
/* Boost.Iostreams bidirectional device wrapping an asio SSL stream. When
   fUseSSL is false all I/O bypasses the TLS layer (next_layer()). The TLS
   handshake is performed lazily on the first read (server role — HTTPS
   servers read first) or first write (client role — HTTPS clients write
   first). */
template <typename Protocol>
class SSLIOStreamDevice : public iostreams::device<iostreams::bidirectional> {
public:
    SSLIOStreamDevice(asio::ssl::stream<typename Protocol::socket> &streamIn, bool fUseSSLIn) : stream(streamIn)
    {
        fUseSSL = fUseSSLIn;
        fNeedHandshake = fUseSSLIn;
    }
    /* Perform the TLS handshake at most once, in the given role */
    void handshake(ssl::stream_base::handshake_type role)
    {
        if (!fNeedHandshake) return;
        fNeedHandshake = false;
        stream.handshake(role);
    }
    std::streamsize read(char* s, std::streamsize n)
    {
        handshake(ssl::stream_base::server); // HTTPS servers read first
        if (fUseSSL) return stream.read_some(asio::buffer(s, n));
        return stream.next_layer().read_some(asio::buffer(s, n));
    }
    std::streamsize write(const char* s, std::streamsize n)
    {
        handshake(ssl::stream_base::client); // HTTPS clients write first
        if (fUseSSL) return asio::write(stream, asio::buffer(s, n));
        return asio::write(stream.next_layer(), asio::buffer(s, n));
    }
    /* Resolve and connect the underlying TCP socket; tries each resolved
       endpoint in turn and returns false if none succeeds */
    bool connect(const std::string& server, const std::string& port)
    {
        ip::tcp::resolver resolver(stream.get_io_service());
        ip::tcp::resolver::query query(server.c_str(), port.c_str());
        ip::tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
        ip::tcp::resolver::iterator end;
        boost::system::error_code error = asio::error::host_not_found;
        while (error && endpoint_iterator != end)
        {
            stream.lowest_layer().close();
            stream.lowest_layer().connect(*endpoint_iterator++, error);
        }
        if (error)
            return false;
        return true;
    }
private:
    bool fNeedHandshake; /* true until handshake() has run once */
    bool fUseSSL;
    asio::ssl::stream<typename Protocol::socket>& stream; /* non-owning */
};
/* Abstract interface for an accepted RPC connection: exposes a std::iostream
   over the socket, the peer address for logging/filtering, and close(). */
class AcceptedConnection
{
public:
    virtual ~AcceptedConnection() {}
    virtual std::iostream& stream() = 0;
    virtual std::string peer_address_to_string() const = 0;
    virtual void close() = 0;
};
/* Concrete accepted connection: owns the SSL stream and adapts it to a
   std::iostream via SSLIOStreamDevice (_d wraps sslStream, _stream wraps _d).
   'peer' is filled in by the acceptor during async_accept. */
template <typename Protocol>
class AcceptedConnectionImpl : public AcceptedConnection
{
public:
    AcceptedConnectionImpl(
            asio::io_service& io_service,
            ssl::context &context,
            bool fUseSSL) :
        sslStream(io_service, context),
        _d(sslStream, fUseSSL),
        _stream(_d)
    {
    }
    virtual std::iostream& stream()
    {
        return _stream;
    }
    virtual std::string peer_address_to_string() const
    {
        return peer.address().to_string();
    }
    virtual void close()
    {
        _stream.close();
    }
    typename Protocol::endpoint peer;
    asio::ssl::stream<typename Protocol::socket> sslStream;
private:
    SSLIOStreamDevice<Protocol> _d;
    iostreams::stream< SSLIOStreamDevice<Protocol> > _stream;
};
/* Thread entry point for the RPC listener: wraps ThreadRPCServer2 with the
   thread-count bookkeeping (vnThreadsRunning) and top-level exception
   logging so an escaping exception never crosses the thread boundary. */
void ThreadRPCServer(void* parg)
{
    // Make this thread recognisable as the RPC listener
    RenameThread("bitcoin-rpclist");
    try
    {
        vnThreadsRunning[THREAD_RPCLISTENER]++;
        ThreadRPCServer2(parg);
        vnThreadsRunning[THREAD_RPCLISTENER]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_RPCLISTENER]--;
        PrintException(&e, "ThreadRPCServer()");
    } catch (...) {
        vnThreadsRunning[THREAD_RPCLISTENER]--;
        PrintException(NULL, "ThreadRPCServer()");
    }
    printf("ThreadRPCServer exited\n");
}
// Forward declaration required for RPCListen
// (RPCListen and RPCAcceptHandler are mutually recursive: each accepted
//  connection re-arms the accept loop.)
template <typename Protocol, typename SocketAcceptorService>
static void RPCAcceptHandler(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
                             ssl::context& context,
                             bool fUseSSL,
                             AcceptedConnection* conn,
                             const boost::system::error_code& error);
/**
 * Sets up I/O resources to accept and handle a new connection.
 * Allocates an AcceptedConnectionImpl (ownership passes to RPCAcceptHandler,
 * which deletes it on error paths or hands it to the client thread) and arms
 * a single async accept on it.
 */
template <typename Protocol, typename SocketAcceptorService>
static void RPCListen(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
                      ssl::context& context,
                      const bool fUseSSL)
{
    // Accept connection
    AcceptedConnectionImpl<Protocol>* conn = new AcceptedConnectionImpl<Protocol>(acceptor->get_io_service(), context, fUseSSL);
    acceptor->async_accept(
            conn->sslStream.lowest_layer(),
            conn->peer,
            boost::bind(&RPCAcceptHandler<Protocol, SocketAcceptorService>,
                acceptor,
                boost::ref(context),
                fUseSSL,
                conn,
                boost::asio::placeholders::error));
}
/**
 * Accept and handle incoming connection.
 * Completion handler for the async accept armed by RPCListen. Re-arms the
 * accept loop first, then either rejects the connection (accept error,
 * disallowed source IP) — deleting 'conn' — or hands ownership of 'conn' to
 * a new ThreadRPCServer3 client thread.
 */
template <typename Protocol, typename SocketAcceptorService>
static void RPCAcceptHandler(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
                             ssl::context& context,
                             const bool fUseSSL,
                             AcceptedConnection* conn,
                             const boost::system::error_code& error)
{
    vnThreadsRunning[THREAD_RPCLISTENER]++;
    // Immediately start accepting new connections, except when we're cancelled or our socket is closed.
    if (error != asio::error::operation_aborted
     && acceptor->is_open())
        RPCListen(acceptor, context, fUseSSL);
    /* Downcast to access the peer endpoint; NULL for non-TCP protocols */
    AcceptedConnectionImpl<ip::tcp>* tcp_conn = dynamic_cast< AcceptedConnectionImpl<ip::tcp>* >(conn);
    // TODO: Actually handle errors
    if (error)
    {
        delete conn;
    }
    // Restrict callers by IP. It is important to
    // do this before starting client thread, to filter out
    // certain DoS and misbehaving clients.
    else if (tcp_conn
     && !ClientAllowed(tcp_conn->peer.address()))
    {
        // Only send a 403 if we're not using SSL to prevent a DoS during the SSL handshake.
        if (!fUseSSL)
            conn->stream() << HTTPReply(HTTP_FORBIDDEN, "", false) << std::flush;
        delete conn;
    }
    // start HTTP client thread
    else if (!NewThread(ThreadRPCServer3, conn)) {
        printf("Failed to create RPC server client thread\n");
        delete conn;
    }
    vnThreadsRunning[THREAD_RPCLISTENER]--;
}
void ThreadRPCServer2(void* parg)
{
printf("ThreadRPCServer started\n");
strRPCUserColonPass = mapArgs["-rpcuser"] + ":" + mapArgs["-rpcpassword"];
if (mapArgs["-rpcpassword"] == "")
{
unsigned char rand_pwd[32];
RAND_bytes(rand_pwd, 32);
string strWhatAmI = "To use 888dissd";
if (mapArgs.count("-server"))
strWhatAmI = strprintf(_("To use the %s option"), "\"-server\"");
else if (mapArgs.count("-daemon"))
strWhatAmI = strprintf(_("To use the %s option"), "\"-daemon\"");
uiInterface.ThreadSafeMessageBox(strprintf(
_("%s, you must set a rpcpassword in the configuration file:\n %s\n"
"It is recommended you use the following random password:\n"
"rpcuser=bitcoinrpc\n"
"rpcpassword=%s\n"
"(you do not need to remember this password)\n"
"If the file does not exist, create it with owner-readable-only file permissions.\n"),
strWhatAmI.c_str(),
GetConfigFile().string().c_str(),
EncodeBase58(&rand_pwd[0],&rand_pwd[0]+32).c_str()),
_("Error"), CClientUIInterface::OK | CClientUIInterface::MODAL);
StartShutdown();
return;
}
const bool fUseSSL = GetBoolArg("-rpcssl");
asio::io_service io_service;
ssl::context context(io_service, ssl::context::sslv23);
if (fUseSSL)
{
context.set_options(ssl::context::no_sslv2);
filesystem::path pathCertFile(GetArg("-rpcsslcertificatechainfile", "server.cert"));
if (!pathCertFile.is_complete()) pathCertFile = filesystem::path(GetDataDir()) / pathCertFile;
if (filesystem::exists(pathCertFile)) context.use_certificate_chain_file(pathCertFile.string());
else printf("ThreadRPCServer ERROR: missing server certificate file %s\n", pathCertFile.string().c_str());
filesystem::path pathPKFile(GetArg("-rpcsslprivatekeyfile", "server.pem"));
if (!pathPKFile.is_complete()) pathPKFile = filesystem::path(GetDataDir()) / pathPKFile;
if (filesystem::exists(pathPKFile)) context.use_private_key_file(pathPKFile.string(), ssl::context::pem);
else printf("ThreadRPCServer ERROR: missing server private key file %s\n", pathPKFile.string().c_str());
string strCiphers = GetArg("-rpcsslciphers", "TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH");
SSL_CTX_set_cipher_list(context.impl(), strCiphers.c_str());
}
// Try a dual IPv6/IPv4 socket, falling back to separate IPv4 and IPv6 sockets
const bool loopback = !mapArgs.count("-rpcallowip");
asio::ip::address bindAddress = loopback ? asio::ip::address_v6::loopback() : asio::ip::address_v6::any();
ip::tcp::endpoint endpoint(bindAddress, GetArg("-rpcport", GetDefaultRPCPort()));
boost::system::error_code v6_only_error;
boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(io_service));
boost::signals2::signal<void ()> StopRequests;
bool fListening = false;
std::string strerr;
try
{
acceptor->open(endpoint.protocol());
acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
// Try making the socket dual IPv6/IPv4 (if listening on the "any" address)
acceptor->set_option(boost::asio::ip::v6_only(loopback), v6_only_error);
acceptor->bind(endpoint);
acceptor->listen(socket_base::max_connections);
RPCListen(acceptor, context, fUseSSL);
// Cancel outstanding listen-requests for this acceptor when shutting down
StopRequests.connect(signals2::slot<void ()>(
static_cast<void (ip::tcp::acceptor::*)()>(&ip::tcp::acceptor::close), acceptor.get())
.track(acceptor));
fListening = true;
}
catch(boost::system::system_error &e)
{
strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s"), endpoint.port(), e.what());
}
try {
// If dual IPv6/IPv4 failed (or we're opening loopback interfaces only), open IPv4 separately
if (!fListening || loopback || v6_only_error)
{
bindAddress = loopback ? asio::ip::address_v4::loopback() : asio::ip::address_v4::any();
endpoint.address(bindAddress);
acceptor.reset(new ip::tcp::acceptor(io_service));
acceptor->open(endpoint.protocol());
acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
acceptor->bind(endpoint);
acceptor->listen(socket_base::max_connections);
RPCListen(acceptor, context, fUseSSL);
// Cancel outstanding listen-requests for this acceptor when shutting down
StopRequests.connect(signals2::slot<void ()>(
static_cast<void (ip::tcp::acceptor::*)()>(&ip::tcp::acceptor::close), acceptor.get())
.track(acceptor));
fListening = true;
}
}
catch(boost::system::system_error &e)
{
strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv4: %s"), endpoint.port(), e.what());
}
if (!fListening) {
uiInterface.ThreadSafeMessageBox(strerr, _("Error"), CClientUIInterface::OK | CClientUIInterface::MODAL);
StartShutdown();
return;
}
vnThreadsRunning[THREAD_RPCLISTENER]--;
while (!fShutdown)
io_service.run_one();
vnThreadsRunning[THREAD_RPCLISTENER]++;
StopRequests();
}
// A single parsed JSON-RPC request: its id, method name and parameter array.
class JSONRequest
{
public:
    Value id;         // "id" field of the request; echoed back in the reply
    string strMethod; // "method" field; must be a JSON string
    Array params;     // "params" field; empty array when absent

    JSONRequest() { id = Value::null; }
    void parse(const Value& valRequest);
};
// Populate this request from a JSON-RPC request object, validating the
// "method" and "params" fields. Throws a JSONRPCError Object on malformed
// input. The "id" is extracted first so later error replies can echo it.
void JSONRequest::parse(const Value& valRequest)
{
    // Parse request
    if (valRequest.type() != obj_type)
        throw JSONRPCError(RPC_INVALID_REQUEST, "Invalid Request object");
    const Object& request = valRequest.get_obj();

    // Parse id now so errors from here on will have the id
    id = find_value(request, "id");

    // Parse method
    Value valMethod = find_value(request, "method");
    if (valMethod.type() == null_type)
        throw JSONRPCError(RPC_INVALID_REQUEST, "Missing method");
    if (valMethod.type() != str_type)
        throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string");
    strMethod = valMethod.get_str();
    // Skip logging for these two methods — presumably because miners poll
    // them at high frequency and would flood the log. TODO confirm.
    if (strMethod != "getwork" && strMethod != "getblocktemplate")
        printf("ThreadRPCServer method=%s\n", strMethod.c_str());

    // Parse params
    Value valParams = find_value(request, "params");
    if (valParams.type() == array_type)
        params = valParams.get_array();
    else if (valParams.type() == null_type)
        // Absent params is treated as an empty parameter list.
        params = Array();
    else
        throw JSONRPCError(RPC_INVALID_REQUEST, "Params must be an array");
}
// Execute one request out of a JSON-RPC batch and wrap the outcome
// (result or error) in a reply object carrying the request's id.
static Object JSONRPCExecOne(const Value& req)
{
    JSONRequest request;
    Object reply;

    try {
        request.parse(req);
        Value result = tableRPC.execute(request.strMethod, request.params);
        reply = JSONRPCReplyObj(result, Value::null, request.id);
    }
    catch (Object& objError)
    {
        // The RPC layer throws pre-built JSON error objects; pass them on.
        reply = JSONRPCReplyObj(Value::null, objError, request.id);
    }
    catch (std::exception& e)
    {
        reply = JSONRPCReplyObj(Value::null,
                                JSONRPCError(RPC_PARSE_ERROR, e.what()), request.id);
    }

    return reply;
}
// Execute every request in a JSON-RPC batch and serialize the array of
// replies, terminated by a newline.
static string JSONRPCExecBatch(const Array& vReq)
{
    Array results;
    for (Array::const_iterator it = vReq.begin(); it != vReq.end(); ++it)
        results.push_back(JSONRPCExecOne(*it));

    return write_string(Value(results), false) + "\n";
}
// Protects the vnThreadsRunning[THREAD_RPCHANDLER] bookkeeping below.
static CCriticalSection cs_THREAD_RPCHANDLER;
// Handles one accepted RPC connection: reads HTTP requests in a loop,
// enforces HTTP basic auth, dispatches JSON-RPC (single object or batch)
// and writes the reply. `parg` is an AcceptedConnection* owned (and freed)
// by this thread.
void ThreadRPCServer3(void* parg)
{
    // Make this thread recognisable as the RPC handler
    RenameThread("bitcoin-rpchand");

    {
        LOCK(cs_THREAD_RPCHANDLER);
        vnThreadsRunning[THREAD_RPCHANDLER]++;
    }
    AcceptedConnection *conn = (AcceptedConnection *) parg;

    bool fRun = true;
    loop {
        if (fShutdown || !fRun)
        {
            conn->close();
            delete conn;
            {
                LOCK(cs_THREAD_RPCHANDLER);
                --vnThreadsRunning[THREAD_RPCHANDLER];
            }
            return;
        }
        map<string, string> mapHeaders;
        string strRequest;

        ReadHTTP(conn->stream(), mapHeaders, strRequest);

        // Check authorization
        if (mapHeaders.count("authorization") == 0)
        {
            conn->stream() << HTTPReply(HTTP_UNAUTHORIZED, "", false) << std::flush;
            break;
        }
        if (!HTTPAuthorized(mapHeaders))
        {
            printf("ThreadRPCServer incorrect password attempt from %s\n", conn->peer_address_to_string().c_str());
            /* Deter brute-forcing short passwords.
               If this results in a DOS the user really
               shouldn't have their RPC port exposed.*/
            if (mapArgs["-rpcpassword"].size() < 20)
                Sleep(250);

            conn->stream() << HTTPReply(HTTP_UNAUTHORIZED, "", false) << std::flush;
            break;
        }
        // HTTP keep-alive unless the client asked to close the connection.
        if (mapHeaders["connection"] == "close")
            fRun = false;

        JSONRequest jreq;
        try
        {
            // Parse request
            Value valRequest;
            if (!read_string(strRequest, valRequest))
                throw JSONRPCError(RPC_PARSE_ERROR, "Parse error");

            string strReply;

            // singleton request
            if (valRequest.type() == obj_type) {
                jreq.parse(valRequest);

                Value result = tableRPC.execute(jreq.strMethod, jreq.params);

                // Send reply
                strReply = JSONRPCReply(result, Value::null, jreq.id);

            // array of requests
            } else if (valRequest.type() == array_type)
                strReply = JSONRPCExecBatch(valRequest.get_array());
            else
                throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error");

            conn->stream() << HTTPReply(HTTP_OK, strReply, fRun) << std::flush;
        }
        catch (Object& objError)
        {
            ErrorReply(conn->stream(), objError, jreq.id);
            break;
        }
        catch (std::exception& e)
        {
            ErrorReply(conn->stream(), JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id);
            break;
        }
    }

    // Reached via `break` (auth failure or error reply). NOTE(review):
    // unlike the shutdown path above, conn->close() is not called before
    // delete here — confirm the destructor closes the socket.
    delete conn;
    {
        LOCK(cs_THREAD_RPCHANDLER);
        vnThreadsRunning[THREAD_RPCHANDLER]--;
    }
}
// Look up and run a registered RPC method. Throws a JSON-RPC error object
// when the method is unknown, blocked by safe mode, or fails during
// execution.
json_spirit::Value CRPCTable::execute(const std::string &strMethod, const json_spirit::Array &params) const
{
    // Find method
    const CRPCCommand *pcmd = tableRPC[strMethod];
    if (!pcmd)
        throw JSONRPCError(RPC_METHOD_NOT_FOUND, "Method not found");

    // Observe safe mode: commands not flagged okSafeMode are refused while
    // a warning condition is active, unless -disablesafemode is set.
    string strWarning = GetWarnings("rpc");
    if (strWarning != "" && !GetBoolArg("-disablesafemode") &&
        !pcmd->okSafeMode)
        throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, string("Safe mode: ") + strWarning);

    try
    {
        // Execute
        Value result;
        {
            if (pcmd->unlocked)
                result = pcmd->actor(params, false);
            else {
                // Serialize against chain state and the wallet.
                LOCK2(cs_main, pwalletMain->cs_wallet);
                result = pcmd->actor(params, false);
            }
        }
        return result;
    }
    catch (std::exception& e)
    {
        // Wrap any C++ exception as a generic JSON-RPC error.
        throw JSONRPCError(RPC_MISC_ERROR, e.what());
    }
}
// Client side: connect to the RPC server over HTTP (optionally SSL), send
// one JSON-RPC request and return the parsed reply object. Throws
// runtime_error on configuration, connection, authentication or parse
// failures.
Object CallRPC(const string& strMethod, const Array& params)
{
    if (mapArgs["-rpcuser"] == "" && mapArgs["-rpcpassword"] == "")
        throw runtime_error(strprintf(
            _("You must set rpcpassword=<password> in the configuration file:\n%s\n"
              "If the file does not exist, create it with owner-readable-only file permissions."),
                GetConfigFile().string().c_str()));

    // Connect to localhost
    bool fUseSSL = GetBoolArg("-rpcssl");
    asio::io_service io_service;
    ssl::context context(io_service, ssl::context::sslv23);
    context.set_options(ssl::context::no_sslv2);
    asio::ssl::stream<asio::ip::tcp::socket> sslStream(io_service, context);
    SSLIOStreamDevice<asio::ip::tcp> d(sslStream, fUseSSL);
    iostreams::stream< SSLIOStreamDevice<asio::ip::tcp> > stream(d);
    if (!d.connect(GetArg("-rpcconnect", "127.0.0.1"), GetArg("-rpcport", itostr(GetDefaultRPCPort()))))
        throw runtime_error("couldn't connect to server");

    // HTTP basic authentication
    string strUserPass64 = EncodeBase64(mapArgs["-rpcuser"] + ":" + mapArgs["-rpcpassword"]);
    map<string, string> mapRequestHeaders;
    mapRequestHeaders["Authorization"] = string("Basic ") + strUserPass64;

    // Send request
    string strRequest = JSONRPCRequest(strMethod, params, 1);
    string strPost = HTTPPost(strRequest, mapRequestHeaders);
    stream << strPost << std::flush;

    // Receive reply
    map<string, string> mapHeaders;
    string strReply;
    int nStatus = ReadHTTP(stream, mapHeaders, strReply);
    // 400/404/500 are allowed through because they still carry a JSON-RPC
    // error body that the caller can interpret.
    if (nStatus == HTTP_UNAUTHORIZED)
        throw runtime_error("incorrect rpcuser or rpcpassword (authorization failed)");
    else if (nStatus >= 400 && nStatus != HTTP_BAD_REQUEST && nStatus != HTTP_NOT_FOUND && nStatus != HTTP_INTERNAL_SERVER_ERROR)
        throw runtime_error(strprintf("server returned HTTP error %d", nStatus));
    else if (strReply.empty())
        throw runtime_error("no response from server");

    // Parse reply
    Value valReply;
    if (!read_string(strReply, valReply))
        throw runtime_error("couldn't parse reply from server");
    const Object& reply = valReply.get_obj();
    if (reply.empty())
        throw runtime_error("expected reply to have result, error and id properties");

    return reply;
}
// Coerce a JSON value to type T. A string value is re-parsed as an
// unquoted JSON literal first (so "3" becomes the number 3), then
// converted recursively. With fAllowNull, null values pass through
// untouched. Throws runtime_error when the string is not valid JSON.
template<typename T>
void ConvertTo(Value& value, bool fAllowNull=false)
{
    if (fAllowNull && value.type() == null_type)
        return;

    if (value.type() != str_type)
    {
        value = value.get_value<T>();
        return;
    }

    // reinterpret string as unquoted json value
    const string strJSON = value.get_str();
    Value parsed;
    if (!read_string(strJSON, parsed))
        throw runtime_error(string("Error parsing JSON:")+strJSON);
    ConvertTo<T>(parsed, fAllowNull);
    value = parsed;
}
// Convert strings to command-specific RPC representation
// Convert positional string parameters into the JSON types the given RPC
// method expects; parameters not listed below stay strings. Conversion
// failures throw (via ConvertTo).
Array RPCConvertValues(const std::string &strMethod, const std::vector<std::string> &strParams)
{
    // Start with every parameter as a plain string value.
    Array params;
    BOOST_FOREACH(const std::string &param, strParams)
        params.push_back(param);

    int n = params.size();

    //
    // Special case non-string parameter types
    //
    if (strMethod == "stop"                   && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "setgenerate"            && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "setgenerate"            && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "sendtoaddress"          && n > 1) ConvertTo<double>(params[1]);
    if (strMethod == "settxfee"               && n > 0) ConvertTo<double>(params[0]);
    if (strMethod == "getreceivedbyaddress"   && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "getreceivedbyaccount"   && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "listreceivedbyaddress"  && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "listreceivedbyaddress"  && n > 1) ConvertTo<bool>(params[1]);
    if (strMethod == "listreceivedbyaccount"  && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "listreceivedbyaccount"  && n > 1) ConvertTo<bool>(params[1]);
    if (strMethod == "getbalance"             && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "getblock"               && n > 1) ConvertTo<bool>(params[1]);
    if (strMethod == "getblockbynumber"       && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "getblockbynumber"       && n > 1) ConvertTo<bool>(params[1]);
    if (strMethod == "getblockhash"           && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "move"                   && n > 2) ConvertTo<double>(params[2]);
    if (strMethod == "move"                   && n > 3) ConvertTo<boost::int64_t>(params[3]);
    if (strMethod == "sendfrom"               && n > 2) ConvertTo<double>(params[2]);
    if (strMethod == "sendfrom"               && n > 3) ConvertTo<boost::int64_t>(params[3]);
    if (strMethod == "listtransactions"       && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "listtransactions"       && n > 2) ConvertTo<boost::int64_t>(params[2]);
    if (strMethod == "listaccounts"           && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "walletpassphrase"       && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "walletpassphrase"       && n > 2) ConvertTo<bool>(params[2]);
    if (strMethod == "getblocktemplate"       && n > 0) ConvertTo<Object>(params[0]);
    if (strMethod == "listsinceblock"         && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "sendmany"               && n > 1) ConvertTo<Object>(params[1]);
    if (strMethod == "sendmany"               && n > 2) ConvertTo<boost::int64_t>(params[2]);
    if (strMethod == "reservebalance"         && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "reservebalance"         && n > 1) ConvertTo<double>(params[1]);
    if (strMethod == "addmultisigaddress"     && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "addmultisigaddress"     && n > 1) ConvertTo<Array>(params[1]);
    if (strMethod == "listunspent"            && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "listunspent"            && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "listunspent"            && n > 2) ConvertTo<Array>(params[2]);
    if (strMethod == "getrawtransaction"      && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "createrawtransaction"   && n > 0) ConvertTo<Array>(params[0]);
    if (strMethod == "createrawtransaction"   && n > 1) ConvertTo<Object>(params[1]);
    // Last two params of signrawtransaction may be JSON null.
    if (strMethod == "signrawtransaction"     && n > 1) ConvertTo<Array>(params[1], true);
    if (strMethod == "signrawtransaction"     && n > 2) ConvertTo<Array>(params[2], true);

    return params;
}
// Command-line RPC client entry point: argv[1] is the method name, the
// remaining arguments are its parameters. Prints the result to stdout (or
// the error to stderr) and returns the process exit code: 0 on success,
// abs(JSON-RPC error code) on an RPC error, 87 on a local exception.
int CommandLineRPC(int argc, char *argv[])
{
    string strPrint;
    int nRet = 0;
    try
    {
        // Skip switches
        while (argc > 1 && IsSwitchChar(argv[1][0]))
        {
            argc--;
            argv++;
        }

        // Method
        if (argc < 2)
            throw runtime_error("too few parameters");
        string strMethod = argv[1];

        // Parameters default to strings
        std::vector<std::string> strParams(&argv[2], &argv[argc]);
        Array params = RPCConvertValues(strMethod, strParams);

        // Execute
        Object reply = CallRPC(strMethod, params);

        // Parse reply
        const Value& result = find_value(reply, "result");
        const Value& error  = find_value(reply, "error");

        if (error.type() != null_type)
        {
            // Error
            strPrint = "error: " + write_string(error, false);
            int code = find_value(error.get_obj(), "code").get_int();
            nRet = abs(code);
        }
        else
        {
            // Result: strings print bare, everything else pretty-printed JSON.
            if (result.type() == null_type)
                strPrint = "";
            else if (result.type() == str_type)
                strPrint = result.get_str();
            else
                strPrint = write_string(result, true);
        }
    }
    catch (std::exception& e)
    {
        strPrint = string("error: ") + e.what();
        nRet = 87;
    }
    catch (...)
    {
        PrintException(NULL, "CommandLineRPC()");
    }

    if (strPrint != "")
    {
        // Successful output goes to stdout, errors to stderr.
        fprintf((nRet == 0 ? stdout : stderr), "%s\n", strPrint.c_str());
    }
    return nRet;
}
#ifdef TEST
// Test-harness entry point (only built when TEST is defined): with
// "-server" runs the RPC server loop, otherwise forwards to CommandLineRPC.
int main(int argc, char *argv[])
{
#ifdef _MSC_VER
    // Turn off Microsoft heap dump noise
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, CreateFile("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0));
#endif
    // Unbuffered stdio so output is visible immediately when testing.
    setbuf(stdin, NULL);
    setbuf(stdout, NULL);
    setbuf(stderr, NULL);

    try
    {
        if (argc >= 2 && string(argv[1]) == "-server")
        {
            printf("server ready\n");
            ThreadRPCServer(NULL);
        }
        else
        {
            return CommandLineRPC(argc, argv);
        }
    }
    catch (std::exception& e) {
        PrintException(&e, "main()");
    } catch (...) {
        PrintException(NULL, "main()");
    }
    return 0;
}
#endif
// Global RPC dispatch table; methods are looked up via tableRPC[strMethod]
// in CRPCTable::execute().
const CRPCTable tableRPC;
|
/* -----------------------------------------------------------------------------
* Rule_key_lifetime.hpp
* -----------------------------------------------------------------------------
*
* Producer : com.parse2.aparse.Parser 2.5
* Produced : Mon Jan 08 13:30:55 CET 2018
*
* -----------------------------------------------------------------------------
*/
#ifndef Rule_key_lifetime_hpp
#define Rule_key_lifetime_hpp
#include <string>
#include <vector>
#include "Rule.hpp"
namespace abnf {
class Visitor;
class ParserContext;
// Parse-tree node for the ABNF rule "key-lifetime".
// Generated by com.parse2.aparse.Parser — do not hand-edit beyond comments.
class Rule_key_lifetime : public Rule
{
public:
    // Construct from the matched spelling and the child rules it contains.
    Rule_key_lifetime(const std::string& spelling, const std::vector<Rule*>& rules);
    Rule_key_lifetime(const Rule_key_lifetime& rule);

    Rule_key_lifetime& operator=(const Rule_key_lifetime& rule);

    // Deep copy of this node.
    virtual Rule* clone(void) const;

    // Try to match this rule at the parser's current position —
    // presumably returns NULL when the rule does not match; TODO confirm
    // against the generated .cpp.
    static Rule_key_lifetime* parse(ParserContext& context);

    // Visitor-pattern dispatch hook.
    virtual void* accept(Visitor& visitor);
};
}
#endif
/* -----------------------------------------------------------------------------
* eof
* -----------------------------------------------------------------------------
*/
|
/*************************************************************************/
/* os_unix.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2019 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2019 Godot Engine contributors (cf. AUTHORS.md) */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "os_unix.h"
#ifdef UNIX_ENABLED
#include "core/os/thread_dummy.h"
#include "core/project_settings.h"
#include "drivers/unix/dir_access_unix.h"
#include "drivers/unix/file_access_unix.h"
#include "drivers/unix/mutex_posix.h"
#include "drivers/unix/net_socket_posix.h"
#include "drivers/unix/rw_lock_posix.h"
#include "drivers/unix/semaphore_posix.h"
#include "drivers/unix/thread_posix.h"
#include "servers/visual_server.h"
#ifdef __APPLE__
#include <mach-o/dyld.h>
#include <mach/mach_time.h>
#endif
#if defined(__FreeBSD__) || defined(__OpenBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#endif
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <poll.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>
/// Clock Setup function (used by get_ticks_usec)
static uint64_t _clock_start = 0;
#if defined(__APPLE__)
static double _clock_scale = 0;
// Record the mach timebase scale and the process start tick so that
// get_ticks_usec() can report microseconds relative to startup.
static void _setup_clock() {
	mach_timebase_info_data_t info;
	kern_return_t ret = mach_timebase_info(&info);
	ERR_FAIL_COND_MSG(ret != 0, "OS CLOCK IS NOT WORKING!");
	// numer/denom converts mach ticks to nanoseconds; /1000.0 yields microseconds.
	_clock_scale = ((double)info.numer / (double)info.denom) / 1000.0;
	_clock_start = mach_absolute_time() * _clock_scale;
}
#else
#if defined(CLOCK_MONOTONIC_RAW) && !defined(JAVASCRIPT_ENABLED) // This is a better clock on Linux.
#define GODOT_CLOCK CLOCK_MONOTONIC_RAW
#else
#define GODOT_CLOCK CLOCK_MONOTONIC
#endif
// Record the process start time in microseconds from the monotonic clock,
// so get_ticks_usec() can report time relative to startup.
static void _setup_clock() {
	struct timespec tv_now = { 0, 0 };
	ERR_FAIL_COND_MSG(clock_gettime(GODOT_CLOCK, &tv_now) != 0, "OS CLOCK IS NOT WORKING!");
	_clock_start = ((uint64_t)tv_now.tv_nsec / 1000L) + (uint64_t)tv_now.tv_sec * 1000000L;
}
#endif
// Deliberately trip an assertion so an attached debugger stops here.
void OS_Unix::debug_break() {
	assert(false);
}
// SIGINT handler: instead of killing the process, ask the script debugger
// to break on the next executed line. No-op when no debugger is attached.
static void handle_interrupt(int sig) {
	if (ScriptDebugger::get_singleton() == NULL)
		return;

	ScriptDebugger::get_singleton()->set_depth(-1);
	ScriptDebugger::get_singleton()->set_lines_left(1);
}
// Install the SIGINT handler above, but only when a script debugger is
// active (otherwise Ctrl-C keeps its default terminate behavior).
void OS_Unix::initialize_debugging() {
	if (ScriptDebugger::get_singleton() != NULL) {
		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = handle_interrupt;
		sigaction(SIGINT, &action, NULL);
	}
}
// No platform-level audio setup is needed in the base Unix implementation;
// always reports success.
int OS_Unix::unix_initialize_audio(int p_audio_driver) {
	return 0;
}
// Install the POSIX (or dummy, for NO_THREADS builds) implementations of
// the core OS abstractions: threading primitives, file/dir access,
// networking, and the tick clock.
void OS_Unix::initialize_core() {
#ifdef NO_THREADS
	ThreadDummy::make_default();
	SemaphoreDummy::make_default();
	MutexDummy::make_default();
	RWLockDummy::make_default();
#else
	ThreadPosix::make_default();
	SemaphorePosix::make_default();
	MutexPosix::make_default();
	RWLockPosix::make_default();
#endif
	// The same Unix file/dir backends serve all three access domains.
	FileAccess::make_default<FileAccessUnix>(FileAccess::ACCESS_RESOURCES);
	FileAccess::make_default<FileAccessUnix>(FileAccess::ACCESS_USERDATA);
	FileAccess::make_default<FileAccessUnix>(FileAccess::ACCESS_FILESYSTEM);
	//FileAccessBufferedFA<FileAccessUnix>::make_default();
	DirAccess::make_default<DirAccessUnix>(DirAccess::ACCESS_RESOURCES);
	DirAccess::make_default<DirAccessUnix>(DirAccess::ACCESS_USERDATA);
	DirAccess::make_default<DirAccessUnix>(DirAccess::ACCESS_FILESYSTEM);

#ifndef NO_NETWORK
	NetSocketPosix::make_default();
	IP_Unix::make_default();
#endif

	// Capture the process start time for get_ticks_usec().
	_setup_clock();
}
// Tear down core services installed by initialize_core().
void OS_Unix::finalize_core() {
	NetSocketPosix::cleanup();
}
// Print an alert message to stderr (no GUI dialog on plain Unix).
// NOTE(review): p_title is ignored by this implementation — confirm whether
// it should be included in the output.
void OS_Unix::alert(const String &p_alert, const String &p_title) {
	fprintf(stderr, "ERROR: %s\n", p_alert.utf8().get_data());
}
// Read a line from stdin when p_block is true (prepending any buffered
// input and clearing the buffer); returns "" in non-blocking mode.
String OS_Unix::get_stdin_string(bool p_block) {
	if (p_block) {
		char buff[1024];
		// fgets() returns NULL on EOF or error; the original code
		// concatenated that NULL pointer, which is undefined behavior.
		String ret = stdin_buf;
		if (fgets(buff, 1024, stdin)) {
			ret += buff;
		}
		stdin_buf = "";
		return ret;
	}

	return "";
}
// Generic fallback name; concrete platforms override this with their own.
String OS_Unix::get_name() const {
	return "Unix";
}
// Seconds since the Unix epoch.
uint64_t OS_Unix::get_unix_time() const {
	return (uint64_t)time(NULL);
}
// Wall-clock time in whole seconds since the epoch.
uint64_t OS_Unix::get_system_time_secs() const {
	struct timeval now;
	gettimeofday(&now, NULL);
	return uint64_t(now.tv_sec);
}
// Wall-clock time in milliseconds since the epoch.
uint64_t OS_Unix::get_system_time_msecs() const {
	struct timeval now;
	gettimeofday(&now, NULL);
	const uint64_t msec = uint64_t(now.tv_sec) * 1000 + uint64_t(now.tv_usec) / 1000;
	return msec;
}
// Current calendar date, in UTC or local time.
// NOTE(review): gmtime()/localtime() can return NULL on failure; the
// result is dereferenced unchecked here.
OS::Date OS_Unix::get_date(bool utc) const {
	time_t t = time(NULL);
	struct tm *lt;
	if (utc)
		lt = gmtime(&t);
	else
		lt = localtime(&t);
	Date ret;
	ret.year = 1900 + lt->tm_year;
	// Index starting at 1 to match OS_Unix::get_date
	// and Windows SYSTEMTIME and tm_mon follows the typical structure
	// of 0-11, noted here: http://www.cplusplus.com/reference/ctime/tm/
	ret.month = (Month)(lt->tm_mon + 1);
	ret.day = lt->tm_mday;
	ret.weekday = (Weekday)lt->tm_wday;
	ret.dst = lt->tm_isdst;

	return ret;
}
// Current time of day (hour/min/sec), in UTC or local time.
OS::Time OS_Unix::get_time(bool utc) const {
	time_t t = time(NULL);
	struct tm *lt;
	if (utc)
		lt = gmtime(&t);
	else
		lt = localtime(&t);
	Time ret;
	ret.hour = lt->tm_hour;
	ret.min = lt->tm_min;
	ret.sec = lt->tm_sec;
	// NOTE(review): the return value of this call is discarded and
	// get_time_zone_info() has no side effects visible in this file —
	// looks like dead code; confirm before removing.
	get_time_zone_info();
	return ret;
}
// Name and UTC offset (in minutes) of the local time zone, derived from
// strftime's "%Z" (abbreviation) and "%z" (±HHMM offset) conversions.
OS::TimeZoneInfo OS_Unix::get_time_zone_info() const {
	time_t t = time(NULL);
	struct tm *lt = localtime(&t);
	char name[16];
	strftime(name, 16, "%Z", lt);
	name[15] = 0;
	TimeZoneInfo ret;
	ret.name = name;

	char bias_buf[16];
	strftime(bias_buf, 16, "%z", lt);
	int bias;
	bias_buf[15] = 0;
	sscanf(bias_buf, "%d", &bias);

	// convert from ISO 8601 (1 minute=1, 1 hour=100) to minutes
	int hour = (int)bias / 100;
	int minutes = bias % 100;
	// For negative offsets both components come out negative, so the
	// minutes term must be subtracted to keep the sign consistent.
	if (bias < 0)
		ret.bias = hour * 60 - minutes;
	else
		ret.bias = hour * 60 + minutes;

	return ret;
}
// Sleep for p_usec microseconds, resuming after signal interruptions until
// the full interval has elapsed.
void OS_Unix::delay_usec(uint32_t p_usec) const {
	struct timespec requested = { static_cast<time_t>(p_usec / 1000000), (static_cast<long>(p_usec) % 1000000) * 1000 };
	struct timespec remaining;
	// nanosleep() signals interruption by returning -1 with errno == EINTR
	// (the original compared the return value to EINTR itself, so the retry
	// loop could never fire); keep sleeping for whatever time remains.
	while (nanosleep(&requested, &remaining) == -1 && errno == EINTR) {
		requested = remaining;
	}
}
// Microseconds elapsed since _setup_clock() ran at process startup.
uint64_t OS_Unix::get_ticks_usec() const {
#if defined(__APPLE__)
	// Mach ticks scaled to microseconds via the factor cached at startup.
	uint64_t longtime = mach_absolute_time() * _clock_scale;
#else
	// Unchecked return. Static analyzers might complain.
	// If _setup_clock() succeeded, we assume clock_gettime() works.
	struct timespec tv_now = { 0, 0 };
	clock_gettime(GODOT_CLOCK, &tv_now);
	uint64_t longtime = ((uint64_t)tv_now.tv_nsec / 1000L) + (uint64_t)tv_now.tv_sec * 1000000L;
#endif
	// Make the value relative to process start.
	longtime -= _clock_start;

	return longtime;
}
// Run an external process.
// - Blocking with r_pipe: run through popen() and append the child's stdout
//   (and stderr if read_stderr) to *r_pipe; exit code goes to *r_exitcode.
// - Otherwise: fork()/execvp(). Blocking waits for the child; non-blocking
//   detaches it into its own session and returns its pid in *r_child_id.
Error OS_Unix::execute(const String &p_path, const List<String> &p_arguments, bool p_blocking, ProcessID *r_child_id, String *r_pipe, int *r_exitcode, bool read_stderr, Mutex *p_pipe_mutex) {
#ifdef __EMSCRIPTEN__
	// Don't compile this code at all to avoid undefined references.
	// Actual virtual call goes to OS_JavaScript.
	ERR_FAIL_V(ERR_BUG);
#else
	if (p_blocking && r_pipe) {
		// Build a shell command line with every argument double-quoted.
		String argss;
		argss = "\"" + p_path + "\"";

		for (int i = 0; i < p_arguments.size(); ++i) {
			argss += String(" \"") + p_arguments[i] + "\"";
		}

		if (read_stderr) {
			argss += " 2>&1"; // Read stderr too
		} else {
			argss += " 2>/dev/null"; //silence stderr
		}

		FILE *f = popen(argss.utf8().get_data(), "r");

		ERR_FAIL_COND_V_MSG(!f, ERR_CANT_OPEN, "Cannot pipe stream from process running with following arguments '" + argss + "'.");

		char buf[65535];

		while (fgets(buf, 65535, f)) {
			// p_pipe_mutex lets another thread read *r_pipe while we append.
			if (p_pipe_mutex) {
				p_pipe_mutex->lock();
			}
			(*r_pipe) += buf;
			if (p_pipe_mutex) {
				p_pipe_mutex->unlock();
			}
		}
		int rv = pclose(f);
		if (r_exitcode)
			*r_exitcode = WEXITSTATUS(rv);

		return OK;
	}

	pid_t pid = fork();
	ERR_FAIL_COND_V(pid < 0, ERR_CANT_FORK);

	if (pid == 0) {
		// is child

		if (!p_blocking) {
			// For non blocking calls, create a new session-ID so parent won't wait for it.
			// This ensures the process won't go zombie at end.
			setsid();
		}

		// Build a NULL-terminated argv for execvp(); the CharString copies
		// keep the converted bytes alive until exec replaces the image.
		auto len = p_arguments.size();

		std::vector<CharString> cs;
		cs.reserve(len + 1);
		cs.push_back(p_path.utf8());
		for (int i = 0; i < len; ++i)
			cs.push_back(p_arguments[i].utf8());

		std::vector<char *> args;
		args.reserve(cs.size() + 1);
		for (auto &&c : cs)
			args.push_back((char *)c.get_data());
		args.push_back(0);

		execvp(p_path.utf8().get_data(), &args[0]);
		// still alive? something failed..
		fprintf(stderr, "**ERROR** OS_Unix::execute - Could not create child process while executing: %s\n", p_path.utf8().get_data());
		abort();
	}

	if (p_blocking) {
		int status;
		waitpid(pid, &status, 0);
		if (r_exitcode)
			*r_exitcode = WEXITSTATUS(status);

	} else {
		if (r_child_id)
			*r_child_id = pid;
	}

	return OK;
#endif
}
// Forcefully terminate a process with SIGKILL and reap it so it does not
// linger as a zombie. Returns ERR_INVALID_PARAMETER when kill() fails.
Error OS_Unix::kill(const ProcessID &p_pid) {
	const int err = ::kill(p_pid, SIGKILL);
	if (err == 0) {
		//avoid zombie process
		int status;
		::waitpid(p_pid, &status, 0);
	}
	return err == 0 ? OK : ERR_INVALID_PARAMETER;
}
// PID of the current process.
int OS_Unix::get_process_id() const {
	return getpid();
}
// Whether the environment variable exists (even if set to an empty string).
bool OS_Unix::has_environment(const String &p_var) const {
	const char *value = getenv(p_var.utf8().get_data());
	return value != NULL;
}
// Locale from the LANG environment variable with any ".ENCODING" suffix
// stripped (e.g. "en_US.UTF-8" -> "en_US"); falls back to "en".
String OS_Unix::get_locale() const {
	if (!has_environment("LANG"))
		return "en";

	String locale = get_environment("LANG");
	const int dot = locale.find(".");
	if (dot != -1)
		locale = locale.substr(0, dot);
	return locale;
}
// dlopen() a shared library, trying in order: the given path (made
// explicitly relative if needed), next to the executable, then in
// ../lib relative to the executable. On success the handle is stored in
// p_library_handle; returns ERR_CANT_OPEN on failure.
Error OS_Unix::open_dynamic_library(const String p_path, void *&p_library_handle, bool p_also_set_library_path) {
	String path = p_path;

	if (FileAccess::exists(path) && path.is_rel_path()) {
		// dlopen expects a slash, in this case a leading ./ for it to be interpreted as a relative path,
		// otherwise it will end up searching various system directories for the lib instead and finally failing.
		path = "./" + path;
	}

	if (!FileAccess::exists(path)) {
		//this code exists so gdnative can load .so files from within the executable path
		path = get_executable_path().get_base_dir().plus_file(p_path.get_file());
	}

	if (!FileAccess::exists(path)) {
		//this code exists so gdnative can load .so files from a standard unix location
		path = get_executable_path().get_base_dir().plus_file("../lib").plus_file(p_path.get_file());
	}

	p_library_handle = dlopen(path.utf8().get_data(), RTLD_NOW);
	ERR_FAIL_COND_V_MSG(!p_library_handle, ERR_CANT_OPEN, "Can't open dynamic library: " + p_path + ". Error: " + dlerror());
	return OK;
}
// dlclose() the library handle; dlclose() returns nonzero on failure.
Error OS_Unix::close_dynamic_library(void *p_library_handle) {
	return dlclose(p_library_handle) == 0 ? OK : FAILED;
}
// Resolve a symbol from an open library via dlsym(). Because NULL is a
// valid symbol value, failure is detected through dlerror() instead of the
// return value. With p_optional set, a missing symbol is not logged as an
// error (but ERR_CANT_RESOLVE is still returned).
Error OS_Unix::get_dynamic_library_symbol_handle(void *p_library_handle, const String p_name, void *&p_symbol_handle, bool p_optional) {
	const char *error;
	dlerror(); // Clear existing errors

	p_symbol_handle = dlsym(p_library_handle, p_name.utf8().get_data());

	error = dlerror();
	if (error != NULL) {
		ERR_FAIL_COND_V_MSG(!p_optional, ERR_CANT_RESOLVE, "Can't resolve symbol " + p_name + ". Error: " + error + ".");

		return ERR_CANT_RESOLVE;
	}
	return OK;
}
// Change the process working directory; ERR_CANT_OPEN when chdir() fails.
Error OS_Unix::set_cwd(const String &p_cwd) {
	const bool ok = chdir(p_cwd.utf8().get_data()) == 0;
	return ok ? OK : ERR_CANT_OPEN;
}
// Value of an environment variable, or "" when it is unset.
String OS_Unix::get_environment(const String &p_var) const {
	// Single getenv() lookup — the original called it twice, doing the
	// hash-table walk redundantly and racing a concurrent environment
	// change between the check and the use.
	const char *value = getenv(p_var.utf8().get_data());
	if (value) {
		return value;
	}
	return "";
}
// Set (or overwrite) an environment variable; true on success.
bool OS_Unix::set_environment(const String &p_var, const String &p_value) const {
	const int err = setenv(p_var.utf8().get_data(), p_value.utf8().get_data(), /* overwrite: */ 1);
	return err == 0;
}
// Number of processors configured on the system (not necessarily all online).
int OS_Unix::get_processor_count() const {
	return (int)sysconf(_SC_NPROCESSORS_CONF);
}
// Per-project user data directory. With a custom dir configured, uses
// <data_path>/<custom_dir> (falling back to the sanitized project name);
// otherwise <data_path>/<godot_dir>/app_userdata/<project_name>. When the
// project has no name, falls back to the resource path.
String OS_Unix::get_user_data_dir() const {
	String appname = get_safe_dir_name(ProjectSettings::get_singleton()->get("application/config/name"));
	if (appname != "") {
		bool use_custom_dir = ProjectSettings::get_singleton()->get("application/config/use_custom_user_dir");
		if (use_custom_dir) {
			String custom_dir = get_safe_dir_name(ProjectSettings::get_singleton()->get("application/config/custom_user_dir_name"), true);
			if (custom_dir == "") {
				custom_dir = appname;
			}
			return get_data_path().plus_file(custom_dir);
		} else {
			return get_data_path().plus_file(get_godot_dir_name()).plus_file("app_userdata").plus_file(appname);
		}
	}

	return ProjectSettings::get_singleton()->get_resource_path();
}
// Absolute path of the running executable, using the platform-specific
// mechanism (/proc on Linux, sysctl on FreeBSD, _NSGetExecutablePath on
// macOS, realpath on OpenBSD). Falls back to the base argv[0]-derived
// implementation when the platform query fails.
String OS_Unix::get_executable_path() const {
#ifdef __linux__
	//fix for running from a symlink
	char buf[256];
	memset(buf, 0, 256);
	ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf));
	String b;
	if (len > 0) {
		b.parse_utf8(buf, len);
	}
	if (b == "") {
		WARN_PRINT("Couldn't get executable path from /proc/self/exe, using argv[0]");
		return OS::get_executable_path();
	}
	return b;
#elif defined(__OpenBSD__)
	char resolved_path[MAXPATHLEN];

	realpath(OS::get_executable_path().utf8().get_data(), resolved_path);

	return String(resolved_path);
#elif defined(__FreeBSD__)
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
	char buf[MAXPATHLEN];
	size_t len = sizeof(buf);
	if (sysctl(mib, 4, buf, &len, NULL, 0) != 0) {
		WARN_PRINT("Couldn't get executable path from sysctl");
		return OS::get_executable_path();
	}
	String b;
	b.parse_utf8(buf);
	return b;
#elif defined(__APPLE__)
	// First call with a too-small buffer just to learn the required size.
	char temp_path[1];
	uint32_t buff_size = 1;
	_NSGetExecutablePath(temp_path, &buff_size);

	char *resolved_path = new char[buff_size + 1];

	if (_NSGetExecutablePath(resolved_path, &buff_size) == 1)
		WARN_PRINT("MAXPATHLEN is too small");

	String path(resolved_path);
	delete[] resolved_path;

	return path;
#else
	ERR_PRINT("Warning, don't know how to obtain executable path on this OS! Please override this function properly.");
	return OS::get_executable_path();
#endif
}
// Print an error/warning to the terminal, color-coded per type with ANSI
// escape sequences (\E[...m): yellow for warnings, magenta for script
// errors, cyan for shader errors, red for everything else. The rationale is
// preferred over the raw code when both are available.
void UnixTerminalLogger::log_error(const char *p_function, const char *p_file, int p_line, const char *p_code, const char *p_rationale, ErrorType p_type) {
	if (!should_log(true)) {
		return;
	}

	const char *err_details;
	if (p_rationale && p_rationale[0])
		err_details = p_rationale;
	else
		err_details = p_code;

	switch (p_type) {
		case ERR_WARNING:
			logf_error("\E[1;33mWARNING: %s: \E[0m\E[1m%s\n", p_function, err_details);
			logf_error("\E[0;33m   At: %s:%i.\E[0m\n", p_file, p_line);
			break;
		case ERR_SCRIPT:
			logf_error("\E[1;35mSCRIPT ERROR: %s: \E[0m\E[1m%s\n", p_function, err_details);
			logf_error("\E[0;35m   At: %s:%i.\E[0m\n", p_file, p_line);
			break;
		case ERR_SHADER:
			logf_error("\E[1;36mSHADER ERROR: %s: \E[0m\E[1m%s\n", p_function, err_details);
			logf_error("\E[0;36m   At: %s:%i.\E[0m\n", p_file, p_line);
			break;
		case ERR_ERROR:
		default:
			logf_error("\E[1;31mERROR: %s: \E[0m\E[1m%s\n", p_function, err_details);
			logf_error("\E[0;31m   At: %s:%i.\E[0m\n", p_file, p_line);
			break;
	}
}
// Nothing to release; defined out of line.
UnixTerminalLogger::~UnixTerminalLogger() {}
// Install the default terminal logger, wrapped in a CompositeLogger so
// further loggers can be added later.
OS_Unix::OS_Unix() {
	std::vector<Logger *> loggers;
	loggers.push_back(memnew(UnixTerminalLogger));
	_set_logger(memnew(CompositeLogger(loggers)));
}
#endif
|
#include "StdInc.h"
#include <ResourceCacheDeviceV2.h>
#include <mmsystem.h>
#include <mutex>
#include <optional>
#include <variant>
#include <tbb/concurrent_unordered_map.h>
#include <HttpClient.h>
#include <ResourceCache.h>
#include <ResourceManager.h>
#include <ICoreGameInit.h>
#include <StreamingEvents.h>
#include <SHA1.h>
#include <VFSError.h>
#include <pplawait.h>
#include <experimental/resumable>
#include <CoreConsole.h>
#include <Error.h>
#include <IteratorView.h>
extern std::unordered_multimap<std::string, std::pair<std::string, std::string>> g_referenceHashList;
namespace resources
{
// Returns the advertised size of the entry backing this stream, as reported
// by the fetcher's entry list (not by the on-disk cache file).
size_t RcdBaseStream::GetLength()
{
return m_fetcher->GetLength(m_fileName);
}
// Lazily binds this stream to a local file: kicks off (or re-joins) the fetch
// for m_fileName and, once the fetch completed, opens the resulting local path
// on the parent VFS device. Returns true when the stream is readable, false
// when a non-blocking fetch is still in flight. Throws on fetch failure when
// non-blocking; calls FatalError for unexpected errors when blocking.
bool RcdBaseStream::EnsureRead()
{
// Already bound to a device/handle? Nothing to do.
if (!m_parentDevice.GetRef() || m_parentHandle == INVALID_DEVICE_HANDLE)
{
try
{
auto task = m_fetcher->FetchEntry(m_fileName);
if (m_fetcher->IsBlocking())
{
// Blocking device: wait for the download/verification to finish.
task.wait();
}
else
{
// Non-blocking device: report "not ready yet" instead of waiting.
if (!task.is_done())
{
return false;
}
}
m_metaData = task.get().metaData;
const auto& localPath = task.get().localPath;
m_parentDevice = vfs::GetDevice(localPath);
assert(m_parentDevice.GetRef());
// OpenFile is the subclass hook (sequential vs. bulk open).
m_parentHandle = OpenFile(localPath);
assert(m_parentHandle != INVALID_DEVICE_HANDLE);
}
catch (const RcdFetchFailedException& e)
{
// Drop the cached (failed) fetch task so a later call can retry.
m_fetcher->UnfetchEntry(m_fileName);
throw;
}
catch (const std::exception& e)
{
// propagate throw for nonblocking
if (!m_fetcher->IsBlocking())
{
m_fetcher->UnfetchEntry(m_fileName);
throw;
}
FatalError("Unable to ensure read in RCD: %s\n\nPlease report this issue, together with the information from 'Save information' down below on https://forum.fivem.net/.", e.what());
return false;
}
}
return true;
}
RcdStream::~RcdStream()
{
	// Release the underlying VFS handle, if one was ever opened by EnsureRead.
	const bool hasOpenHandle = m_parentDevice.GetRef() && m_parentHandle != INVALID_DEVICE_HANDLE;
	if (hasOpenHandle)
	{
		CloseFile();
		m_parentHandle = INVALID_DEVICE_HANDLE;
	}
}
// Reads up to `size` bytes from the fetched local file into outBuffer.
// Returns 0 while a non-blocking fetch is still pending, and size_t(-1)
// when the fetch or read failed (the error is propagated to the fetcher).
size_t RcdStream::Read(void* outBuffer, size_t size)
{
	try
	{
		return EnsureRead() ? m_parentDevice->Read(m_parentHandle, outBuffer, size) : 0;
	}
	catch (const std::exception& e)
	{
		m_fetcher->PropagateError(e.what());
		trace(__FUNCTION__ ": failing read for %s\n", e.what());
		return -1;
	}
}
// Seeks within the fetched local file. Returns size_t(-1) both while a
// non-blocking fetch is pending and when the seek/fetch failed.
size_t RcdStream::Seek(intptr_t off, int at)
{
	try
	{
		return EnsureRead() ? m_parentDevice->Seek(m_parentHandle, off, at) : -1;
	}
	catch (const std::exception& e)
	{
		m_fetcher->PropagateError(e.what());
		trace(__FUNCTION__ ": failing seek for %s\n", e.what());
		return -1;
	}
}
// Sequential-stream hook: close the handle on the parent device.
void RcdStream::CloseFile()
{
m_parentDevice->Close(m_parentHandle);
}
// Sequential-stream hook: open the local file read-only on the parent device.
vfs::Device::THandle RcdStream::OpenFile(const std::string& localPath)
{
return m_parentDevice->Open(localPath, true);
}
RcdBulkStream::~RcdBulkStream()
{
	// Close the bulk handle if EnsureRead ever opened one.
	if (!m_parentDevice.GetRef())
	{
		return;
	}
	if (m_parentHandle != INVALID_DEVICE_HANDLE)
	{
		CloseFile();
		m_parentHandle = INVALID_DEVICE_HANDLE;
	}
}
// Bulk read at absolute offset `ptr`. The 0xFFFFFFFx sizes are in-band control
// requests from the streaming layer rather than real read lengths:
//   0xFFFFFFFC -> existence probe (answered without fetching),
//   0xFFFFFFFE / 0xFFFFFFFD -> probed after the fetch; answered with 2048.
size_t RcdBulkStream::ReadBulk(uint64_t ptr, void* outBuffer, size_t size)
{
// Existence probe: answer from the local cache state only; never downloads.
if (size == 0xFFFFFFFC)
{
return m_fetcher->ExistsOnDisk(m_fileName) ? 2048 : 0;
}
try
{
if (!EnsureRead())
{
return 0;
}
// NOTE(review): the 0xFFFFFFFC case here is unreachable — it already
// returned above. Only 0xFFFFFFFE/0xFFFFFFFD can reach this check.
if (size == 0xFFFFFFFE || size == 0xFFFFFFFD || size == 0xFFFFFFFC)
{
return 2048;
}
// m_parentPtr is the base offset returned by OpenBulk; reads are relative.
return m_parentDevice->ReadBulk(m_parentHandle, m_parentPtr + ptr, outBuffer, size);
}
catch (const std::exception& e)
{
m_fetcher->PropagateError(e.what());
trace(__FUNCTION__ ": failing read for %s\n", e.what());
return -1;
}
}
// Bulk-stream hook: open for bulk I/O; the device stores the file's base
// offset into m_parentPtr, which ReadBulk adds to every request.
vfs::Device::THandle RcdBulkStream::OpenFile(const std::string& localPath)
{
return m_parentDevice->OpenBulk(localPath, &m_parentPtr);
}
// Bulk-stream hook: close the bulk handle on the parent device.
void RcdBulkStream::CloseFile()
{
m_parentDevice->CloseBulk(m_parentHandle);
}
// Creates a sequential stream for a known cache entry. Writing is not
// supported and unknown files yield an empty pointer.
std::shared_ptr<RcdStream> ResourceCacheDeviceV2::OpenStream(const std::string& fileName, bool readOnly)
{
	if (!readOnly || !GetEntryForFileName(fileName))
	{
		return {};
	}
	return std::make_shared<RcdStream>(static_cast<RcdFetcher*>(this), fileName);
}
// File creation is not supported on a cache device; always returns empty.
std::shared_ptr<RcdStream> ResourceCacheDeviceV2::CreateStream(const std::string& fileName)
{
return {};
}
// Creates a bulk (offset-addressed) stream for a known cache entry; the
// caller's base pointer is always zero on this device.
std::shared_ptr<RcdBulkStream> ResourceCacheDeviceV2::OpenBulkStream(const std::string& fileName, uint64_t* ptr)
{
	auto entry = GetEntryForFileName(fileName);
	if (!entry)
	{
		return {};
	}
	*ptr = 0;
	return std::make_shared<RcdBulkStream>(static_cast<RcdFetcher*>(this), fileName);
}
// True when the entry is known, has a cache record, and its local file is
// actually present (a mountable device exists and the file has attributes).
// Never triggers a download.
bool ResourceCacheDeviceV2::ExistsOnDisk(const std::string& fileName)
{
	auto entry = GetEntryForFileName(fileName);
	if (!entry)
	{
		return false;
	}
	auto cacheEntry = m_cache->GetEntryFor(*entry);
	if (!cacheEntry)
	{
		return false;
	}
	const std::string& localPath = cacheEntry->GetLocalPath();
	auto device = vfs::GetDevice(localPath);
	// Short-circuit: only query attributes when a device resolved.
	return device.GetRef() && device->GetAttributes(localPath) != -1;
}
// Returns the (possibly shared) fetch task for a file, keyed by its reference
// hash, so concurrent requests for the same content join one download.
// A default-constructed task is returned for unknown files — callers are
// expected to have validated the name first (see OpenStream/OpenBulkStream).
concurrency::task<RcdFetchResult> ResourceCacheDeviceV2::FetchEntry(const std::string& fileName)
{
auto entry = GetEntryForFileName(fileName);
if (!entry)
{
return {};
}
// ms_lock guards the find-then-insert/replace sequence; ms_entries itself
// is a concurrent map, but the compound update below is not atomic.
std::unique_lock<std::mutex> lock(ms_lock);
const auto& e = entry->get();
const auto& referenceHash = e.referenceHash;
auto it = ms_entries.find(referenceHash);
// Start a new fetch when none exists or the previous one was unfetched
// (reset to nullopt by UnfetchEntry after a failure).
if (it == ms_entries.end() || !it->second)
{
auto retTask = concurrency::create_task(std::bind(&ResourceCacheDeviceV2::DoFetch, this, *entry));
if (it != ms_entries.end())
{
it->second = std::move(retTask);
}
else
{
it = ms_entries.emplace(referenceHash, std::move(retTask)).first;
}
}
return *it->second;
}
// Clears the cached fetch task for a file (by reference hash) so that the
// next FetchEntry call starts a fresh download attempt.
void ResourceCacheDeviceV2::UnfetchEntry(const std::string& fileName)
{
	auto entry = GetEntryForFileName(fileName);
	if (!entry)
	{
		return;
	}
	std::unique_lock<std::mutex> lock(ms_lock);
	const auto& referenceHash = entry->get().referenceHash;
	auto it = ms_entries.find(referenceHash);
	if (it != ms_entries.end())
	{
		it->second = {};
	}
}
// Coroutine that produces a verified local copy of a cache entry.
// Loop per attempt: (1) if the cache has a record, SHA1-hash the local file
// and accept it when it matches the reference hash; (2) otherwise download
// to an "unconfirmed" temp file over HTTP and register it with the cache,
// then re-verify on the next iteration. Gives up after 5 attempts (tries
// 0..4) by throwing RcdFetchFailedException with the last error seen.
concurrency::task<RcdFetchResult> ResourceCacheDeviceV2::DoFetch(const ResourceCacheEntryList::Entry& entryRef)
{
// Copy the entry: the coroutine may outlive the caller's reference.
auto entry = entryRef;
std::optional<RcdFetchResult> result;
auto fillResult = [&result](const ResourceCache::Entry& entry)
{
result = {
entry.GetLocalPath(),
entry.GetMetaData()
};
};
int tries = 0;
std::string lastError = "Unknown error.";
bool downloaded = false;
do
{
auto cacheEntry = m_cache->GetEntryFor(entry);
if (cacheEntry)
{
const std::string& localPath = cacheEntry->GetLocalPath();
auto localStream = GetVerificationStream(entry, *cacheEntry);
if (localStream.GetRef())
{
std::array<uint8_t, 8192> data;
sha1nfo sha1;
size_t numRead;
// initialize context
sha1_init(&sha1);
// read from the stream
while ((numRead = localStream->Read(data.data(), data.size())) > 0)
{
// Read() reports errors as size_t(-1), which also passes the > 0 test.
if (numRead == -1)
{
break;
}
sha1_write(&sha1, reinterpret_cast<char*>(&data[0]), numRead);
}
// get the hash result and convert it to a string
uint8_t* hash = sha1_result(&sha1);
auto hashString = fmt::sprintf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], hash[8], hash[9],
hash[10], hash[11], hash[12], hash[13], hash[14], hash[15], hash[16], hash[17], hash[18], hash[19]);
if (hashString == entry.referenceHash)
{
// Verified: capture local path + metadata and exit the loop.
fillResult(*cacheEntry);
}
else
{
trace(__FUNCTION__ ": %s hash %s does not match %s - redownloading\n",
entry.basename,
hashString,
entry.referenceHash);
lastError = fmt::sprintf("%s hash %s does not match %s",
entry.basename,
hashString,
entry.referenceHash);
}
}
}
else if (downloaded)
{
// We downloaded but AddEntryToCache left no record to verify.
lastError = "Failed to add entry to local storage";
}
if (!result)
{
// Not verified yet: download (or re-download) the file.
using FetchResultT = std::tuple<bool, std::variant<size_t, std::string>>;
concurrency::task_completion_event<FetchResultT> tce;
std::string outFileName = fmt::sprintf("%s/unconfirmed/%s_%08x", m_cache->GetCachePath(), "cache", HashString(entry.referenceHash.c_str()));
// log the request starting
uint32_t initTime = timeGetTime();
console::DPrintf("citizen:resources:client", __FUNCTION__ " downloading %s (hash %s) from %s\n",
entry.basename,
entry.referenceHash.empty() ? "[direct]" : entry.referenceHash.c_str(),
entry.remoteUrl);
HttpRequestOptions options;
// Forward progress to the streaming UI (only when a total is known).
options.progressCallback = [this, entry](const ProgressInfo& info)
{
if (info.downloadTotal != 0)
{
fx::OnCacheDownloadStatus(fmt::sprintf("%s%s/%s", m_pathPrefix, entry.resourceName, entry.basename), info.downloadNow, info.downloadTotal);
}
};
//options.weight = GetWeightForFileName(handleData->entry.basename);
std::string connectionToken;
if (Instance<ICoreGameInit>::Get()->GetData("connectionToken", &connectionToken))
{
options.headers["X-CitizenFX-Token"] = connectionToken;
}
options.addErrorBody = true;
// Completion bridges the HTTP callback into an awaitable task via the TCE.
auto req = Instance<HttpClient>::Get()->DoFileGetRequest(entry.remoteUrl, vfs::GetDevice(outFileName), outFileName, options, [tce, outFileName](bool result, const char* errorData, size_t outSize)
{
if (result)
{
auto device = vfs::GetDevice(outFileName);
outSize = device->GetLength(outFileName);
tce.set({ true, outSize });
}
else
{
tce.set({ false, std::string(errorData, outSize) });
}
});
auto fetchResult = co_await concurrency::task<FetchResultT>{tce};
if (std::get<bool>(fetchResult))
{
auto size = std::get<size_t>(std::get<1>(fetchResult));
// add the file to the resource cache
std::map<std::string, std::string> metaData;
metaData["filename"] = entry.basename;
metaData["resource"] = entry.resourceName;
metaData["from"] = entry.remoteUrl;
metaData["reference"] = entry.referenceHash;
AddEntryToCache(outFileName, metaData, entry);
downloaded = true;
console::DPrintf("citizen:resources:client", "ResourceCacheDevice: downloaded %s in %d msec (size %d)\n", entry.basename, (timeGetTime() - initTime), size);
}
else
{
auto error = std::get<std::string>(std::get<1>(fetchResult));
lastError = fmt::sprintf("Failure downloading %s: %s", entry.basename, error);
trace("^3ResourceCacheDevice reporting failure downloading %s: %s\n", entry.basename, error);
}
}
// Give up after attempt #5 (tries counts 0..4).
if (tries >= 4)
{
throw RcdFetchFailedException(lastError);
}
tries++;
} while (!result);
co_return *result;
}
// Hook used by DoFetch/ExtensionCtl to obtain the byte stream that gets
// SHA1-verified; the base implementation just reads the cached local file.
fwRefContainer<vfs::Stream> ResourceCacheDeviceV2::GetVerificationStream(const ResourceCacheEntryList::Entry& entry, const ResourceCache::Entry& cacheEntry)
{
return vfs::OpenRead(cacheEntry.GetLocalPath());
}
// Hook used by DoFetch to register a freshly-downloaded file with the cache;
// the entry argument is unused here but available to overrides.
void ResourceCacheDeviceV2::AddEntryToCache(const std::string& outFileName, std::map<std::string, std::string>& metaData, const ResourceCacheEntryList::Entry& entry)
{
m_cache->AddEntry(outFileName, metaData);
}
// Resolves a device path ("{prefix}{resource}/{filepath}") to its entry in
// the owning resource's ResourceCacheEntryList.
// Returns std::nullopt when the resource or the entry is unknown.
std::optional<std::reference_wrapper<const ResourceCacheEntryList::Entry>> ResourceCacheDeviceV2::GetEntryForFileName(std::string_view fileName)
{
	// strip the path prefix
	std::string_view relativeName = fileName.substr(m_pathPrefix.length());
	// relative paths are {resource}/{filepath}
	// Use size_t here: find_first_of returns std::string_view::npos, which
	// the previous `int` silently truncated (sign/width mismatch). With npos,
	// substr(0, npos) and substr(npos + 1 == 0) both yield the whole string,
	// matching the old wrapped-int behavior for slash-less names.
	size_t slashOffset = relativeName.find_first_of('/');
	std::string_view resourceName = relativeName.substr(0, slashOffset);
	std::string_view itemName = relativeName.substr(slashOffset + 1);
	// get the relative resource
	fx::ResourceManager* resourceManager = Instance<fx::ResourceManager>::Get();
	fwRefContainer<fx::Resource> resource = resourceManager->GetResource(std::string(resourceName));
	// TODO: handle this some better way
	if (!resource.GetRef())
	{
		return {};
	}
	// get the entry list component
	fwRefContainer<ResourceCacheEntryList> entryList = resource->GetComponent<ResourceCacheEntryList>();
	// get the entry from the component
	auto entry = entryList->GetEntry(itemName);
	if (!entry)
	{
		return {};
	}
	return entry;
}
// ExtensionCtl opcode: query RAGE resource page flags for a file.
#define VFS_GET_RAGE_PAGE_FLAGS 0x20001
// Virtual/physical page flags as stored in the entry's extData
// ("rscPagesVirtual" / "rscPagesPhysical").
struct ResourceFlags
{
uint32_t flag1;
uint32_t flag2;
};
// In/out payload for VFS_GET_RAGE_PAGE_FLAGS (see ExtensionCtl).
struct GetRagePageFlagsExtension
{
const char* fileName; // in
int version;
ResourceFlags flags; // out
};
// ExtensionCtl opcode: build a human-readable debug report for a file.
#define VFS_GET_RCD_DEBUG_INFO 0x30001
// In/out payload for VFS_GET_RCD_DEBUG_INFO (see ExtensionCtl).
struct GetRcdDebugInfoExtension
{
const char* fileName; // in
std::string outData; // out
};
// Device control entry point. Supports three opcodes:
//  - VFS_GET_RAGE_PAGE_FLAGS: decode RSC version/page flags from entry extData;
//  - VFS_GET_DEVICE_LAST_ERROR: report the last propagated error string;
//  - VFS_GET_RCD_DEBUG_INFO: build a debug dump incl. the on-disk SHA1.
// Returns false for unknown opcodes or unknown files.
bool ResourceCacheDeviceV2::ExtensionCtl(int controlIdx, void* controlData, size_t controlSize)
{
if (controlIdx == VFS_GET_RAGE_PAGE_FLAGS)
{
GetRagePageFlagsExtension* data = (GetRagePageFlagsExtension*)controlData;
auto entry = GetEntryForFileName(data->fileName);
if (entry)
{
auto extData = entry->get().extData;
data->version = atoi(extData["rscVersion"].c_str());
data->flags.flag1 = strtoul(extData["rscPagesVirtual"].c_str(), nullptr, 10);
data->flags.flag2 = strtoul(extData["rscPagesPhysical"].c_str(), nullptr, 10);
return true;
}
}
else if (controlIdx == VFS_GET_DEVICE_LAST_ERROR)
{
vfs::GetLastErrorExtension* data = (vfs::GetLastErrorExtension*)controlData;
data->outError = m_lastError;
return true;
}
else if (controlIdx == VFS_GET_RCD_DEBUG_INFO)
{
GetRcdDebugInfoExtension* data = (GetRcdDebugInfoExtension*)controlData;
auto entry = GetEntryForFileName(data->fileName);
if (entry)
{
auto extData = entry->get().extData;
std::string diskHash = "<unknown>";
auto cacheEntry = m_cache->GetEntryFor(*entry);
if (cacheEntry)
{
// NOTE(review): this SHA1-of-stream loop duplicates DoFetch's
// verification code; a shared helper would keep them in sync.
auto localStream = GetVerificationStream(*entry, *cacheEntry);
if (localStream.GetRef())
{
std::array<uint8_t, 8192> data;
sha1nfo sha1;
size_t numRead;
// initialize context
sha1_init(&sha1);
// read from the stream
while ((numRead = localStream->Read(data.data(), data.size())) > 0)
{
if (numRead == -1)
{
break;
}
sha1_write(&sha1, reinterpret_cast<char*>(&data[0]), numRead);
}
// get the hash result and convert it to a string
uint8_t* hash = sha1_result(&sha1);
diskHash = fmt::sprintf("%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], hash[8], hash[9],
hash[10], hash[11], hash[12], hash[13], hash[14], hash[15], hash[16], hash[17], hash[18], hash[19]);
}
}
data->outData = fmt::sprintf("RSC version: %d\nRSC page flags: virt %08x/phys %08x\nResource name: %s\nReference hash: %s\nDisk hash: %s\nFile size: %d\n",
atoi(extData["rscVersion"].c_str()),
strtoul(extData["rscPagesVirtual"].c_str(), nullptr, 10),
strtoul(extData["rscPagesPhysical"].c_str(), nullptr, 10),
entry->get().resourceName,
entry->get().referenceHash,
diskHash,
entry->get().size);
data->outData += "Resources for hash:\n";
// List every resource/file pair that shares this reference hash.
for (auto& views : fx::GetIteratorView(g_referenceHashList.equal_range(entry->get().referenceHash)))
{
data->outData += fmt::sprintf("-> %s/%s\n", views.second.first, views.second.second);
}
return true;
}
}
return false;
}
// Returns the declared size of a known entry, or size_t(-1) when unknown.
size_t ResourceCacheDeviceV2::GetLength(const std::string& fileName)
{
	if (auto entry = GetEntryForFileName(fileName))
	{
		return entry->get().size;
	}
	return -1;
}
// Binds this device to a shared cache. `blocking` selects whether stream
// reads wait for downloads to finish or return immediately while pending
// (see RcdBaseStream::EnsureRead).
ResourceCacheDeviceV2::ResourceCacheDeviceV2(const std::shared_ptr<ResourceCache>& cache, bool blocking)
: m_cache(cache), m_blocking(blocking)
{
}
// Shared across all device instances: one in-flight fetch task per reference
// hash; ms_lock guards the compound find/replace sequences in FetchEntry and
// UnfetchEntry (the map itself is concurrent, the sequences are not).
std::mutex ResourceCacheDeviceV2::ms_lock;
tbb::concurrent_unordered_map<std::string, std::optional<concurrency::task<RcdFetchResult>>> ResourceCacheDeviceV2::ms_entries;
}
// Exposes two devices over the same cache: "cache:/" blocks on fetches,
// "cache_nb:/" returns without waiting while downloads run in the background.
void MountResourceCacheDeviceV2(std::shared_ptr<ResourceCache> cache)
{
	struct MountSpec
	{
		bool blocking;
		const char* prefix;
	};
	for (const MountSpec& spec : { MountSpec{ true, "cache:/" }, MountSpec{ false, "cache_nb:/" } })
	{
		vfs::Mount(new resources::ResourceCacheDeviceV2(cache, spec.blocking), spec.prefix);
	}
}
|
// $Id: Log_Msg_Test.cpp 90802 2010-06-23 14:19:07Z vzykov $
// ============================================================================
//
// = LIBRARY
// tests
//
// = FILENAME
// Log_Msg_Test.cpp
//
// = DESCRIPTION
// This program tests the <ACE_Log_Msg> class in various ways and
// also illustrates many of the features of the <ACE_Log_Msg>. For
// instance, this program tests the <ACE_Log_Msg> abstraction wrt
// writing to stderr and to a file. It also tests writing to user
// defined callback objects.
//
// = AUTHOR
// Douglas C. Schmidt <schmidt@cs.wustl.edu>
//
// ============================================================================
#include "test_config.h"
// FUZZ: disable check_for_streams_include
#include "ace/streams.h"
#include "ace/FILE_Connector.h"
#include "ace/Auto_Ptr.h"
#include "ace/Log_Msg_Callback.h"
#include "ace/Log_Record.h"
#include "ace/OS_NS_fcntl.h"
#include "ace/OS_NS_string.h"
#include "ace/OS_NS_unistd.h"
#include "ace/OS_Memory.h"
#include "ace/OS_NS_sys_time.h"
#include "ace/OS_NS_time.h"
#include "ace/Time_Value.h"
ACE_RCSID(tests, Log_Msg_Test, "$Id: Log_Msg_Test.cpp 90802 2010-06-23 14:19:07Z vzykov $")
// Hook invoked via the "%r" format specifier in test_log_msg_features;
// simply logs that it ran (with the process ID).
static void
cleanup (void)
{
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("cleanup hook (%P)!\n")));
}
// Deliberately sets errno and logs, so the test can verify that
// ACE_Log_Msg::op_status()/errnum() capture the failure state.
static void
cause_error (void)
{
errno = EWOULDBLOCK;
ACE_ERROR ((LM_DEBUG,
ACE_TEXT ("would block\n")));
}
// Callback sink for ACE_Log_Msg used by test_callbacks(); can re-enter the
// logging framework once ("recursive" mode) and optionally reformat records
// verbosely via ACE_Log_Record::format_msg.
class Logger : public ACE_Log_Msg_Callback
{
public:
Logger (bool be_recursive = true);
// Constructor sets whether we're testing "recursive" callback
// logging!
void log (ACE_Log_Record &log_record);
// Logging callback
void verbose (bool be_verbose);
private:
bool verbose_logging_;
// Flag for testing verbose logging.
bool recursive_;
// Flag for testing recursive callback logging.
};
// Toggles verbose reformatting of records in Logger::log.
void
Logger::verbose (bool be_verbose)
{
this->verbose_logging_ = be_verbose;
}
// Initialize both members: without an explicit value, verbose_logging_ would
// be read uninitialized if log() ran before any call to verbose().
Logger::Logger (bool be_recursive)
  : verbose_logging_ (false),
    recursive_ (be_recursive)
{
}
// Callback invoked by ACE_Log_Msg for each record. On the first (outer)
// entry it re-logs through ACE_DEBUG — which re-enters this callback —
// and the inner invocation writes to the test's output file instead.
// recursive_ acts as the re-entry guard and is restored on exit.
void
Logger::log (ACE_Log_Record &log_record)
{
bool use_log_msg = false;
if (this->recursive_)
{
// Outer call: disarm recursion and route through ACE_DEBUG below.
this->recursive_ = false;
use_log_msg = true;
}
if (!this->verbose_logging_)
{
if (use_log_msg)
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("Logger::log->%s\n"),
log_record.msg_data ()));
#if !defined (ACE_LACKS_IOSTREAM_TOTALLY)
else
// Inner (recursive) call: write directly to the log file.
*ace_file_stream::instance ()->output_file ()
<< "Recursive Logger callback = "
<< log_record.msg_data ()
<< endl;
#endif /* ACE_LACKS_IOSTREAM_TOTALLY */
}
else
{
// Verbose mode: reformat the record with host/flags before printing.
ACE_TCHAR verbose_msg[ACE_Log_Record::MAXVERBOSELOGMSGLEN];
int result = log_record.format_msg (ACE_LOG_MSG->local_host (),
ACE_LOG_MSG->flags (),
verbose_msg);
if (result == 0)
{
if (use_log_msg)
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("Logger::log->%s\n"),
verbose_msg));
#if !defined (ACE_LACKS_IOSTREAM_TOTALLY)
else
*ace_file_stream::instance ()->output_file ()
<< "Recursive Logger callback = "
<< log_record.msg_data ()
<< endl;
#endif /* ACE_LACKS_IOSTREAM_TOTALLY */
}
}
// Cleanup on the way out.
if (use_log_msg)
this->recursive_ = true;
}
// Exercises the MSG_CALLBACK flag: messages are routed to a Logger callback
// in various flag combinations (none, callback only, callback+verbose,
// callback+ostream). The numbered messages document where each should appear.
static void
test_callbacks (void)
{
// This message should show up in stderr.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) first message\n")));
ACE_LOG_MSG->clr_flags (ACE_Log_Msg::OSTREAM);
// This message should not show up anywhere.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) second message\n")));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::MSG_CALLBACK);
// This message should not show up anywhere since no callback object
// has been specified.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) third message\n")));
// Create a callback object and make it "verbose".
Logger logger;
logger.verbose (1);
// Set the callback object.
ACE_LOG_MSG->msg_callback (&logger);
// This message should show up via the Logger callback.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) forth message\n")));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::VERBOSE_LITE);
// This message should show up via the Logger callback (somewhat
// verbosely).
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) fifth message\n")));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::VERBOSE);
// This message should show up via the Logger callback (really
// verbosely).
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) sixth message\n")));
logger.verbose (0);
// This message should show up via the Logger callback (not
// verbosely).
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) seventh message\n")));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::OSTREAM);
// This message should show up in stderr and the Logger callback.
// The one from the Logger callback will not be verbose, but the one
// from stderr should be verbose.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%t) eighth message\n")));
// Detach the callback so later tests are unaffected.
ACE_LOG_MSG->msg_callback (0);
}
// Exercises assorted ACE_Log_Msg features: open(), op_status()/errnum(),
// the %n/%x/%C/%r specifiers, oversized-message truncation, OSTREAM flag
// toggling, log_hexdump, and per-process priority masks.
static void
test_log_msg_features (const ACE_TCHAR *program)
{
// Note that the default behavior is to log to STDERR...
int counter = 1 ;
if (ACE_LOG_MSG->open (program) == -1)
ACE_ERROR ((LM_ERROR,
ACE_TEXT ("cannot open logger!!!\n")));
cause_error ();
// Check to see what happened.
if (ACE_LOG_MSG->op_status () == -1
&& ACE_LOG_MSG->errnum () == EWOULDBLOCK)
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("op_status and errnum work!\n")));
else
ACE_ERROR ((LM_ERROR,
ACE_TEXT ("op_status and errnum failed!\n")));
const char *badname = "badname";
// We use the DEBUG messages instead of error messages.  This is to
// help the scripts.  If we print out error messages the scripts
// start catching them as errors.
if (ACE_OS::open (badname,
O_RDONLY) == ACE_INVALID_HANDLE)
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("%n: (%x), can't open %C%r\n"),
10000,
badname,
cleanup));
// Try a log operation that would overflow the logging buffer if not
// properly guarded.
ACE_TCHAR big[ACE_Log_Record::MAXLOGMSGLEN + 1];
size_t i = 0;
static const ACE_TCHAR alphabet[] = ACE_TEXT ("abcdefghijklmnopqrstuvwxyz");
size_t j = ACE_OS::strlen (alphabet);
// Fill the buffer with a repeating alphabet pattern, then terminate.
while (i < ACE_Log_Record::MAXLOGMSGLEN)
{
size_t const index = i++;
big[index] = alphabet[i % j];
}
big[ACE_Log_Record::MAXLOGMSGLEN] = ACE_TEXT ('\0');
ACE_DEBUG ((LM_INFO, ACE_TEXT ("This is too big: %s\n"), big));
// Exercise many different combinations of OSTREAM.
double f = 3.1416 * counter++;
int n = 10000;
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("%10f, %*s%s = %d\n"),
f,
8,
ACE_TEXT (""),
ACE_TEXT ("hello"),
n));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::OSTREAM);
ACE_LOG_MSG->msg_ostream (ace_file_stream::instance ()->output_file ());
f = 3.1416 * counter;
n = 10000 * counter++;
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("%10f, %*s%s = %d\n"),
f,
8,
ACE_TEXT (""),
ACE_TEXT ("world"),
n));
ACE_LOG_MSG->clr_flags (ACE_Log_Msg::OSTREAM);
// The next two messages shouldn't print.
f = 3.1416 * counter;
n = 10000 * counter++;
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("%10f, %*s%s = %d\n"),
f,
8,
ACE_TEXT (""),
ACE_TEXT ("world"),
n));
f = 3.1416 * counter;
n = 10000 * counter++;
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("%10f, %*s%s = %d\n"),
f,
8,
ACE_TEXT (""),
ACE_TEXT ("world"),
n));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::OSTREAM);
f = 3.1416 * counter;
n = 10000 * counter++;
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("%10f, %*s%s = %d\n"),
f,
8,
ACE_TEXT (""),
ACE_TEXT ("world"),
n));
static int array[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048};
// Print out the binary bytes of the array in hex form.
ACE_LOG_MSG->log_hexdump (LM_DEBUG,
(char *) array,
sizeof array);
// Disable the LM_DEBUG and LM_INFO messages.
u_long priority_mask =
ACE_LOG_MSG->priority_mask (ACE_Log_Msg::PROCESS);
ACE_CLR_BITS (priority_mask,
LM_DEBUG | LM_INFO);
ACE_LOG_MSG->priority_mask (priority_mask,
ACE_Log_Msg::PROCESS);
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("This LM_INFO message should not print!\n")));
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("This LM_DEBUG message should not print!\n")));
ACE_SET_BITS (priority_mask,
LM_INFO);
ACE_LOG_MSG->priority_mask (priority_mask,
ACE_Log_Msg::PROCESS);
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("This LM_INFO message should print!\n")));
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("This LM_DEBUG message should not print!\n")));
ACE_CLR_BITS (priority_mask, LM_INFO);
ACE_LOG_MSG->priority_mask (priority_mask,
ACE_Log_Msg::PROCESS);
ACE_DEBUG ((LM_INFO,
ACE_TEXT ("This LM_INFO message should not print!\n")));
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("This LM_DEBUG message should not print!\n")));
}
// Verifies OSTREAM-flag routing: redirects logging to a temporary file,
// reads the file back and re-logs its contents, then restores the original
// stream. Returns 0 on success, nonzero on file errors.
static int
test_ostream (void)
{
// This message should show up in the log file.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("first message\n")));
ACE_LOG_MSG->clr_flags (ACE_Log_Msg::OSTREAM);
// This message should not show up anywhere.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("second message\n")));
ACE_LOG_MSG->set_flags (ACE_Log_Msg::OSTREAM);
#if !defined (ACE_LACKS_IOSTREAM_TOTALLY)
// Create a persistent store.
const ACE_TCHAR *filename = ACE_TEXT ("output");
ofstream myostream (ACE_TEXT_ALWAYS_CHAR (filename), ios::out | ios::trunc);
// Check for errors.
if (myostream.bad ())
return -1;
OFSTREAM *old_stream = ace_file_stream::instance ()->output_file ();
// Set the ostream.
ACE_LOG_MSG->msg_ostream (&myostream);
// This message should show up in the ostream.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("fourth message\n")));
// Set the ostream back to the test's log file.
ACE_LOG_MSG->msg_ostream (old_stream);
// Now close the ostream file and check its contents.
myostream.close ();
ACE_FILE_Connector connector;
ACE_FILE_IO file;
ACE_FILE_Addr file_addr (filename);
// Open up the file.
if (connector.connect (file, file_addr) == -1)
{
ACE_ERROR_RETURN ((LM_ERROR,
ACE_TEXT ("connect failed for %p\n"),
filename),
1);
}
#if !defined (ACE_VXWORKS) && !defined (ACE_HAS_PHARLAP) || (defined(ACE_VXWORKS) && (ACE_VXWORKS > 0x680))
#  define TEST_CAN_UNLINK_IN_ADVANCE
#endif
#if defined (TEST_CAN_UNLINK_IN_ADVANCE)
// Unlink this file right away so that it is automatically removed
// when the process exits. Ignore error returns in case this operation
// is not supported.
ACE_OS::unlink(filename);
#endif
ACE_FILE_Info info;
if (file.get_info (info) == -1)
{
ACE_ERROR_RETURN ((LM_ERROR,
ACE_TEXT ("get_info failed on %p\n"),
filename),
-1);
}
// Allocate the input buffer
char *buffer = 0;
ACE_NEW_RETURN (buffer,
char[info.size_ + 1],
-1);
// Make sure <buffer> is released automagically.
ACE_Auto_Basic_Array_Ptr<char> b (buffer);
// Read the file into the buffer.
ssize_t size = file.recv (buffer,
info.size_);
if (size != info.size_)
{
ACE_ERROR_RETURN ((LM_ERROR,
ACE_TEXT ("Read %d bytes, rather than expected %d bytes\n"),
size,
info.size_),
-1);
}
// Make sure to NUL-terminate this turkey!
buffer[size] = '\0';
// Echo the captured file contents back through the (restored) logger.
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("%C"),
buffer));
#if !defined (TEST_CAN_UNLINK_IN_ADVANCE)
file.close ();
if (file.unlink () == -1)
ACE_ERROR_RETURN ((LM_ERROR,
ACE_TEXT ("unlink failed for %p\n"),
file_addr.get_path_name ()),
1);
#endif
#endif /* ACE_LACKS_IOSTREAM_TOTALLY */
// This message should show up in stderr and the ostream (without
// ACE_LACKS_IOSTREAM_TOTALLY).
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("fifth message\n")));
return 0;
}
// For testing the format specifiers, a class is defined as a callback
// mechanism. It will get the formatted messages and check them for
// correctness. The test_format_specs() function will set the first
// few characters to say which test is being run, and the Log_Spec_Verify
// class will use that to decide how to verify the results.
// Callback that checks formatted log output against expected strings.
// Each test message starts with a tag ("l1:".."l5:") telling log() which
// verification to apply; result() reports pass/fail and the test count.
class Log_Spec_Verify : public ACE_Log_Msg_Callback
{
public:
Log_Spec_Verify (bool be_recursive = true) : fail_ (0), tests_ (0), recursive_ (be_recursive) {};
void log (ACE_Log_Record &log_record);
// Logging callback
int result ();
private:
int fail_;
// Count how many tests failed.
int tests_;
// Count how many tests we run
bool recursive_;
};
// Verifies one formatted record against the expectation implied by its tag:
//   l1:/l2: -> "%l"/"%5l" line-number formatting ("42" / "   42"),
//   l3N1:   -> "%0*l,%.7N" (zero-padded line + truncated file name),
//   l4:     -> "%T" timestamp (length check only; exact value varies),
//   l5:     -> "%.1M" one-letter priority name, derived from the record type.
// Uses the same recursion guard pattern as Logger::log: the outer entry
// verifies, the inner (recursive) entry writes to the output file.
void
Log_Spec_Verify::log (ACE_Log_Record &log_record)
{
bool use_log_msg = false;
if (this->recursive_)
{
this->recursive_ = false;
use_log_msg = true;
}
if (!use_log_msg)
{
#if !defined (ACE_LACKS_IOSTREAM_TOTALLY)
*ace_file_stream::instance ()->output_file ()
<< "Logger callback = "
<< log_record.msg_data ()
<< endl;
#endif /* ACE_LACKS_IOSTREAM_TOTALLY */
}
else
{
const ACE_TCHAR *b = log_record.msg_data ();
const ACE_TCHAR *expect = 0;
++this->tests_;
if (ACE_OS::strncmp (b, ACE_TEXT ("l1:"), 3) == 0)
{
expect = ACE_TEXT ("42");
b += 3;
}
else if (ACE_OS::strncmp (b, ACE_TEXT ("l2:"), 3) == 0)
{
expect = ACE_TEXT ("   42");
b += 3;
}
// Compare all 5 characters of the "l3N1:" prefix; the original compared
// only 4 ("l3N1") while still skipping 5, mismatching the literal length.
else if (ACE_OS::strncmp (b, ACE_TEXT ("l3N1:"), 5) == 0)
{
expect = ACE_TEXT ("0042,Log_Msg");
b += 5;
}
else if (ACE_OS::strncmp (b, ACE_TEXT ("l4:"), 3) == 0)
{
b += 3;
// Check if we have a string, exact length could vary
if (b != log_record.msg_data () && ACE_OS::strlen (b) < 15)
{
ACE_ERROR ((LM_ERROR, ACE_TEXT ("Test %s failed; expected %d\n"),
log_record.msg_data (), ACE_OS::strlen (b)));
++this->fail_;
}
}
else if (ACE_OS::strncmp (b, ACE_TEXT ("l5:"), 3) == 0)
{
b += 3;
// Map the record's priority to the single letter %.1M should emit.
switch (log_record.type())
{
case (LM_SHUTDOWN): expect = ACE_TEXT("S"); break;
case (LM_TRACE): expect = ACE_TEXT("T"); break;
case (LM_DEBUG): expect = ACE_TEXT("D"); break;
case (LM_INFO): expect = ACE_TEXT("I"); break;
case (LM_NOTICE): expect = ACE_TEXT("N"); break;
case (LM_WARNING): expect = ACE_TEXT("W"); break;
case (LM_STARTUP): expect = ACE_TEXT("U"); break;
case (LM_ERROR): expect = ACE_TEXT("E"); break;
case (LM_CRITICAL): expect = ACE_TEXT("C"); break;
case (LM_ALERT): expect = ACE_TEXT("A"); break;
case (LM_EMERGENCY): expect = ACE_TEXT("!"); break;
default: expect = ACE_TEXT("?"); break;
}
}
else
{
ACE_ERROR ((LM_ERROR,
ACE_TEXT ("Log_Spec_Verify, unrecognized test: %s\n"),
b));
++this->fail_;
}
// Exact-match branches (expect != 0) are checked here; l4 checked above.
if (b != log_record.msg_data () && expect && ACE_OS::strcmp (b, expect) != 0)
{
ACE_ERROR ((LM_ERROR, ACE_TEXT ("Test %s failed; expected %s\n"),
log_record.msg_data (), expect));
++this->fail_;
}
}
// Cleanup on the way out.
if (use_log_msg)
this->recursive_ = true;
}
// Reports the outcome: logs pass/fail totals, verifies that exactly 15
// tagged messages were seen, and returns the failure count (0 == success).
int
Log_Spec_Verify::result (void)
{
if (this->fail_ == 0)
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("All logging specifier tests passed.\n")));
else
ACE_ERROR ((LM_ERROR, ACE_TEXT ("%d logging specifier tests failed!\n"),
this->fail_));
// The driver (test_format_specs) emits exactly 15 verifiable messages.
if (this->tests_ != 15)
{
ACE_ERROR ((LM_ERROR, ACE_TEXT ("Expected number of tests run is %d, not 15!\n"),
this->tests_));
++this->fail_;
}
return this->fail_;
}
// Exercises format specifiers in two phases: first prints a spread of
// specifiers (%l, %N, %I indenting, %W/%s/%C with null args, %m/%p, %S,
// %D/%T absolute/relative time) for visual inspection, then installs a
// Log_Spec_Verify callback and replays the tagged l1..l5 messages so the
// output can be checked programmatically. Returns the verifier's failures.
static int
test_format_specs (void)
{
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("l1:%l\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("l2:%5l\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("l3N1:%0*l,%.7N\n"), 4));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%*ISTART INDENTING %{\n"), 4));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%IONE%{\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%ITWO%{\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%ITHREE\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%}%ITWO\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%}%IONE\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%}%IENDINDENTING\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%W\n"), ACE_TEXT_WIDE ("My string test\n")));
// Null string arguments must be handled gracefully by %W/%s/%C/%p.
ACE_TCHAR* nill_string = 0;
char* char_nill_string = 0;
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%W\n"), nill_string));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%s\n"), nill_string));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%C\n"), char_nill_string));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%m %p\n"), nill_string));
errno = ENOENT;
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%m %p\n"), ACE_TEXT("perror")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%S\n"), SIGINT));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%S\n"), ACE_NSIG));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%D\n")));
ACE_Time_Value tv = ACE_OS::gettimeofday ();
tv += ACE_Time_Value (25*60*60);   // + 25 hours
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%#D\n"), &tv));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%T\n")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("%#T\n"), &tv));
// Phase two: route everything through the verifier callback.
Log_Spec_Verify verifier;
ACE_LOG_MSG->msg_callback (&verifier);
ACE_LOG_MSG->clr_flags (ACE_Log_Msg::VERBOSE_LITE);
ACE_LOG_MSG->clr_flags (ACE_Log_Msg::VERBOSE);
// Fix line/file so %l and %N produce the values the verifier expects.
ACE_LOG_MSG->linenum (42);
ACE_LOG_MSG->file ("Log_Msg_Test.cpp");
ACE_LOG_MSG->log (LM_DEBUG, ACE_TEXT ("l1:%l"));
ACE_LOG_MSG->log (LM_DEBUG, ACE_TEXT ("l2:%5l"));
ACE_LOG_MSG->log (LM_DEBUG, ACE_TEXT ("l3N1:%0*l,%.7N"), 4);
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("l4:%T")));
// Enable every priority so each l5 message reaches the verifier.
ACE_LOG_MSG->priority_mask (LM_SHUTDOWN |
LM_TRACE |
LM_DEBUG |
LM_INFO |
LM_NOTICE |
LM_WARNING |
LM_STARTUP |
LM_ERROR |
LM_CRITICAL |
LM_ALERT |
LM_EMERGENCY,
ACE_Log_Msg::PROCESS);
ACE_DEBUG ((LM_SHUTDOWN, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_TRACE, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_INFO, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_NOTICE, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_WARNING, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_STARTUP, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_ERROR, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_CRITICAL, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_ALERT, ACE_TEXT ("l5:%.1M")));
ACE_DEBUG ((LM_EMERGENCY, ACE_TEXT ("l5:%.1M")));
// Restore normal logging before returning the verdict.
ACE_LOG_MSG->msg_ostream (ace_file_stream::instance ()->output_file ());
ACE_LOG_MSG->msg_callback (0);
ACE_LOG_MSG->set_flags (ACE_Log_Msg::OSTREAM);
ACE_LOG_MSG->set_flags (ACE_Log_Msg::VERBOSE_LITE);
return verifier.result ();
}
// Main function.
// Runs the four sub-tests in order (ostream, callback, features, format
// specifiers) and returns nonzero if the format-specifier checks failed.
int
run_main (int argc, ACE_TCHAR *argv[])
{
ACE_START_TEST (ACE_TEXT ("Log_Msg_Test"));
int status = 0;
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("**** running ostream test\n")));
// Test the <ACE_Log_Msg> abstraction wrt writing to stderr and to a
// file.
test_ostream ();
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("%M **** running callback test\n")));
// Test the <ACE_Log_Msg> callback mechanism.
test_callbacks ();
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("**** running features test\n")));
// Test various features of the <ACE_Log_Msg>.
test_log_msg_features ((argc > 0 ? argv[0] : ACE_TEXT ("program")));
// Test the format specifiers
// Restore this mask so diags and the shutdown message will print correctly!
ACE_LOG_MSG->priority_mask (ACE_LOG_MSG->priority_mask () | LM_DEBUG | LM_ERROR,
ACE_Log_Msg::PROCESS);
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("**** running format specifiers test\n")));
// NOTE(review): `status +=` inside the condition accumulates the failure
// count before the truthiness check; status is then forced to 1 below.
if (status += test_format_specs ())
{
ACE_ERROR ((LM_ERROR, ACE_TEXT ("logging specifier tests failed!\n")));
status = 1;
}
ACE_END_TEST;
return status;
}
|
// stdafx.cpp : source file that includes just the standard includes
// Lesson 4.pch will be the pre-compiled header
// stdafx.obj will contain the pre-compiled type information
#include "stdafx.h"
|
/**
* The MIT License (MIT)
*
* Copyright (c) 2013-2020 Winlin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <srs_kernel_buffer.hpp>
using namespace std;
#include <srs_kernel_log.hpp>
#include <srs_kernel_error.hpp>
#include <srs_kernel_utility.hpp>
// Interface constructor: no state to initialize.
ISrsEncoder::ISrsEncoder()
{
}
// Interface destructor: nothing to release.
ISrsEncoder::~ISrsEncoder()
{
}
// Interface constructor: no state to initialize.
ISrsCodec::ISrsCodec()
{
}
// Interface destructor: nothing to release.
ISrsCodec::~ISrsCodec()
{
}
// Wrap an externally owned byte array of nn bytes; the read/write
// cursor starts at the beginning. The buffer is NOT copied or owned.
SrsBuffer::SrsBuffer(char* b, int nn)
{
    bytes = b;
    p = b;
    nb_bytes = nn;
}
// Destructor: the underlying bytes are owned by the caller, so nothing
// is freed here.
SrsBuffer::~SrsBuffer()
{
}
// Create a shallow clone over the same underlying bytes, preserving the
// current cursor position. Caller owns (and must delete) the clone.
SrsBuffer* SrsBuffer::copy()
{
    SrsBuffer* clone = new SrsBuffer(bytes, nb_bytes);
    clone->p = p;
    return clone;
}
// Pointer to the start of the underlying byte array (not a copy).
char* SrsBuffer::data()
{
    return bytes;
}
// Pointer to the current read/write cursor within the buffer.
char* SrsBuffer::head()
{
    return p;
}
// Total capacity of the buffer in bytes.
int SrsBuffer::size()
{
    return nb_bytes;
}
// Override the logical size of the buffer.
// NOTE(review): no validation is done here — the caller must ensure v does
// not exceed the real capacity of the underlying memory, and that the
// cursor is not already beyond v.
void SrsBuffer::set_size(int v)
{
    nb_bytes = v;
}
// Number of bytes already consumed (offset of the cursor from the start).
int SrsBuffer::pos()
{
    return (int)(p - bytes);
}
// Number of bytes remaining between the cursor and the end of the buffer.
int SrsBuffer::left()
{
    int consumed = (int)(p - bytes);
    return nb_bytes - consumed;
}
// True when there is no underlying array, or the cursor has reached
// (or passed) the end of the buffer.
bool SrsBuffer::empty()
{
    if (!bytes) {
        return true;
    }
    return p >= bytes + nb_bytes;
}
// True when at least required_size bytes remain past the cursor.
// required_size must be non-negative.
bool SrsBuffer::require(int required_size)
{
    srs_assert(required_size >= 0);

    int remaining = nb_bytes - (int)(p - bytes);
    return required_size <= remaining;
}
// Move the cursor by size bytes; size may be negative to rewind.
// Asserts that the resulting position stays within [bytes, bytes + nb_bytes].
void SrsBuffer::skip(int size)
{
    srs_assert(p);
    srs_assert(p + size >= bytes);
    srs_assert(p + size <= bytes + nb_bytes);

    p += size;
}
// Read a single byte and advance the cursor. Asserts one byte is available.
int8_t SrsBuffer::read_1bytes()
{
    srs_assert(require(1));

    int8_t v = (int8_t)*p;
    p++;
    return v;
}
int16_t SrsBuffer::read_2bytes()
{
srs_assert(require(2));
int16_t value;
char* pp = (char*)&value;
pp[1] = *p++;
pp[0] = *p++;
return value;
}
int16_t SrsBuffer::read_le2bytes()
{
srs_assert(require(2));
int16_t value;
char* pp = (char*)&value;
pp[0] = *p++;
pp[1] = *p++;
return value;
}
int32_t SrsBuffer::read_3bytes()
{
srs_assert(require(3));
int32_t value = 0x00;
char* pp = (char*)&value;
pp[2] = *p++;
pp[1] = *p++;
pp[0] = *p++;
return value;
}
int32_t SrsBuffer::read_le3bytes()
{
srs_assert(require(3));
int32_t value = 0x00;
char* pp = (char*)&value;
pp[0] = *p++;
pp[1] = *p++;
pp[2] = *p++;
return value;
}
int32_t SrsBuffer::read_4bytes()
{
srs_assert(require(4));
int32_t value;
char* pp = (char*)&value;
pp[3] = *p++;
pp[2] = *p++;
pp[1] = *p++;
pp[0] = *p++;
return value;
}
int32_t SrsBuffer::read_le4bytes()
{
srs_assert(require(4));
int32_t value;
char* pp = (char*)&value;
pp[0] = *p++;
pp[1] = *p++;
pp[2] = *p++;
pp[3] = *p++;
return value;
}
int64_t SrsBuffer::read_8bytes()
{
srs_assert(require(8));
int64_t value;
char* pp = (char*)&value;
pp[7] = *p++;
pp[6] = *p++;
pp[5] = *p++;
pp[4] = *p++;
pp[3] = *p++;
pp[2] = *p++;
pp[1] = *p++;
pp[0] = *p++;
return value;
}
int64_t SrsBuffer::read_le8bytes()
{
srs_assert(require(8));
int64_t value;
char* pp = (char*)&value;
pp[0] = *p++;
pp[1] = *p++;
pp[2] = *p++;
pp[3] = *p++;
pp[4] = *p++;
pp[5] = *p++;
pp[6] = *p++;
pp[7] = *p++;
return value;
}
// Read len raw bytes as a string and advance the cursor.
string SrsBuffer::read_string(int len)
{
    srs_assert(require(len));

    std::string v(p, len);
    p += len;
    return v;
}
// Copy size bytes from the cursor into data and advance the cursor.
// Asserts that size bytes are available; data must hold at least size bytes.
void SrsBuffer::read_bytes(char* data, int size)
{
    srs_assert(require(size));

    memcpy(data, p, size);
    p += size;
}
// Write a single byte at the cursor and advance. Asserts space for one byte.
void SrsBuffer::write_1bytes(int8_t value)
{
    srs_assert(require(1));

    *p = value;
    p++;
}
void SrsBuffer::write_2bytes(int16_t value)
{
srs_assert(require(2));
char* pp = (char*)&value;
*p++ = pp[1];
*p++ = pp[0];
}
void SrsBuffer::write_le2bytes(int16_t value)
{
srs_assert(require(2));
char* pp = (char*)&value;
*p++ = pp[0];
*p++ = pp[1];
}
void SrsBuffer::write_4bytes(int32_t value)
{
srs_assert(require(4));
char* pp = (char*)&value;
*p++ = pp[3];
*p++ = pp[2];
*p++ = pp[1];
*p++ = pp[0];
}
void SrsBuffer::write_le4bytes(int32_t value)
{
srs_assert(require(4));
char* pp = (char*)&value;
*p++ = pp[0];
*p++ = pp[1];
*p++ = pp[2];
*p++ = pp[3];
}
void SrsBuffer::write_3bytes(int32_t value)
{
srs_assert(require(3));
char* pp = (char*)&value;
*p++ = pp[2];
*p++ = pp[1];
*p++ = pp[0];
}
void SrsBuffer::write_le3bytes(int32_t value)
{
srs_assert(require(3));
char* pp = (char*)&value;
*p++ = pp[0];
*p++ = pp[1];
*p++ = pp[2];
}
void SrsBuffer::write_8bytes(int64_t value)
{
srs_assert(require(8));
char* pp = (char*)&value;
*p++ = pp[7];
*p++ = pp[6];
*p++ = pp[5];
*p++ = pp[4];
*p++ = pp[3];
*p++ = pp[2];
*p++ = pp[1];
*p++ = pp[0];
}
void SrsBuffer::write_le8bytes(int64_t value)
{
srs_assert(require(8));
char* pp = (char*)&value;
*p++ = pp[0];
*p++ = pp[1];
*p++ = pp[2];
*p++ = pp[3];
*p++ = pp[4];
*p++ = pp[5];
*p++ = pp[6];
*p++ = pp[7];
}
// Write the raw bytes of value (no length prefix, no terminator).
void SrsBuffer::write_string(string value)
{
    int nn = (int)value.length();
    srs_assert(require(nn));

    memcpy(p, value.data(), nn);
    p += nn;
}
// Copy size bytes from data to the cursor and advance the cursor.
// Asserts that size bytes of space remain.
void SrsBuffer::write_bytes(char* data, int size)
{
    srs_assert(require(size));

    memcpy(p, data, size);
    p += size;
}
// Bit-level reader over an existing SrsBuffer (not owned). The one-byte
// bit cache starts empty.
SrsBitBuffer::SrsBitBuffer(SrsBuffer* b)
{
    stream = b;
    cb = 0;
    cb_left = 0;
}
// Destructor: the wrapped stream is owned by the caller.
SrsBitBuffer::~SrsBitBuffer()
{
}
// True when both the bit cache and the underlying stream are exhausted.
bool SrsBitBuffer::empty() {
    return cb_left ? false : stream->empty();
}
// Read a single bit, MSB-first within each byte. When the 8-bit cache is
// exhausted it is refilled from the underlying stream; asserts the stream
// still has data when a refill is needed.
int8_t SrsBitBuffer::read_bit() {
    if (!cb_left) {
        srs_assert(!stream->empty());
        cb = stream->read_1bytes();
        cb_left = 8;
    }

    // Extract the highest unread bit of the cached byte.
    int8_t v = (cb >> (cb_left - 1)) & 0x01;
    cb_left--;
    return v;
}
|
#include <iostream>
#include "sc2api/sc2_api.h"
#include "bot.h"
// Wrap our agent as a Terran participant for the coordinator.
sc2::PlayerSetup CreateBot(Agent *bot) {
    return sc2::CreateParticipant(sc2::Race::Terran, bot);
}
// code for local testing
#ifndef LADDEREXE
// Local (non-ladder) entry point: launches StarCraft II and pits our
// Terran bot against an easy Zerg computer on InterloperLE.
int main(int argc, char* argv[]) {
    sc2::Coordinator coordinator;
    coordinator.LoadSettings(argc, argv);
    std::cout << "main" << std::endl;

    Bot bot;
    // FIX: `Race::Zerg` and `CreateComputer` were unqualified, but this file
    // has no `using namespace sc2`; qualify them consistently with the
    // participant line above.
    coordinator.SetParticipants({
        sc2::CreateParticipant(sc2::Race::Terran, &bot),
        sc2::CreateComputer(sc2::Race::Zerg, sc2::Easy),
    });
    std::cout << "botted" << std::endl;

    coordinator.LaunchStarcraft();
    std::cout << "launched" << std::endl;

    // This value is VERY SENSITIVE and depends on having the map file in a
    // particular place, otherwise when it launches it will just hang with a
    // black screen.
    // coordinator.StartGame(sc2::kMapBelShirVestigeLE);
    coordinator.StartGame("InterloperLE.SC2Map");
    std::cout << "started" << std::endl;

    while (coordinator.Update()) {
    }
    return 0;
}
/////////////////////////////////////////////////////////////
// code for LadderManager
#else
#include <iostream>
#include "sc2lib/sc2_lib.h"
#include "sc2utils/sc2_arg_parser.h"
// Connection settings supplied by the ladder manager on the command line.
struct ConnectionOptions
{
    int32_t GamePort;           // port of the client to connect to (-g/--GamePort)
    int32_t StartPort;          // first server port (-o/--StartPort)
    std::string ServerAddress;  // ladder server address (-l/--LadderServer)
};
// Parse the -g/-o/-l ladder options into connect_options. Fields whose
// option is absent on the command line are left untouched.
void ParseArguments(int argc, char *argv[], ConnectionOptions &connect_options)
{
    sc2::ArgParser arg_parser(argv[0]);
    arg_parser.AddOptions({
        { "-g", "--GamePort", "Port of client to connect to", false },
        { "-o", "--StartPort", "Starting server port", false },
        { "-l", "--LadderServer", "Ladder server address", false },
    });
    arg_parser.Parse(argc, argv);

    std::string game_port_text;
    if (arg_parser.Get("GamePort", game_port_text)) {
        connect_options.GamePort = atoi(game_port_text.c_str());
    }

    std::string start_port_text;
    if (arg_parser.Get("StartPort", start_port_text)) {
        connect_options.StartPort = atoi(start_port_text.c_str());
    }

    // The server address is written directly into the options struct.
    arg_parser.Get("LadderServer", connect_options.ServerAddress);
}
//*************************************************************************************************
// Ladder entry point: instead of launching the game, connect to an
// already-running client on the port supplied by the ladder manager.
int main(int argc, char* argv[]) {
    ConnectionOptions Options;
    ParseArguments(argc, argv, Options);

    sc2::Coordinator coordinator;
    if (!coordinator.LoadSettings(argc, argv)) {
        return 1;
    }

    // Add the custom bot, it will control the players.
    Bot bot;
    coordinator.SetParticipants({
        CreateBot(&bot),
    });

    // Start the game.

    // Step forward the game simulation.
    std::cout << "Connecting to port " << Options.GamePort << std::endl;
    coordinator.Connect(Options.GamePort);
    coordinator.SetupPorts(2, Options.StartPort, false);

    // Step forward the game simulation.
    coordinator.JoinGame();
    coordinator.SetTimeoutMS(10000);
    std::cout << " Successfully joined game" << std::endl;

    while (coordinator.Update()) {
    }
    return 0;
}
#endif
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*++
Module Name:
virtual.cpp
Abstract:
Implementation of virtual memory management functions.
--*/
#include "pal/dbgmsg.h"
SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL); // some headers have code with asserts, so do this first
#include "pal/thread.hpp"
#include "pal/cs.hpp"
#include "pal/malloc.hpp"
#include "pal/file.hpp"
#include "pal/seh.hpp"
#include "pal/virtual.h"
#include "pal/map.h"
#include "pal/init.h"
#include "pal/utils.h"
#include "common.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#if HAVE_VM_ALLOCATE
#include <mach/vm_map.h>
#include <mach/mach_init.h>
#endif // HAVE_VM_ALLOCATE
using namespace CorUnix;
CRITICAL_SECTION virtual_critsec;
// The first node in our list of allocated blocks.
static PCMI pVirtualMemory;
static size_t s_virtualPageSize = 0;
/* We need MAP_ANON. However on some platforms like HP-UX, it is defined as MAP_ANONYMOUS */
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
#define MAP_ANON MAP_ANONYMOUS
#endif
/*++
Function:
ReserveVirtualMemory()
Helper function that is used by Virtual* APIs and ExecutableMemoryAllocator
to reserve virtual memory from the OS.
--*/
static LPVOID ReserveVirtualMemory(
IN CPalThread *pthrCurrent, /* Currently executing thread */
IN LPVOID lpAddress, /* Region to reserve or commit */
IN SIZE_T dwSize, /* Size of Region */
IN DWORD fAllocationType); /* Allocation Type */
// A memory allocator that allocates memory from a pre-reserved region
// of virtual memory that is located near the CoreCLR library.
static ExecutableMemoryAllocator g_executableMemoryAllocator;
//
//
// Virtual Memory Logging
//
// We maintain a lightweight in-memory circular buffer recording virtual
// memory operations so that we can better diagnose failures and crashes
// caused by one of these operations mishandling memory in some way.
//
//
namespace VirtualMemoryLogging
{
    // Specifies the operation being logged
    enum class VirtualOperation
    {
        Allocate = 0x10,
        Reserve = 0x20,
        Commit = 0x30,
        Decommit = 0x40,
        Release = 0x50,
        Reset = 0x60,
        ReserveFromExecutableMemoryAllocatorWithinRange = 0x70
    };

    // Indicates that the attempted operation has failed
    const DWORD FailedOperationMarker = 0x80000000;

    // An entry in the in-memory log
    struct LogRecord
    {
        LONG RecordId;           // sequence number (monotonic, never wraps back)
        DWORD Operation;         // VirtualOperation, OR'ed with FailedOperationMarker on failure
        LPVOID CurrentThread;    // pthread_self() of the logging thread
        LPVOID RequestedAddress;
        LPVOID ReturnedAddress;
        SIZE_T Size;
        DWORD AllocationType;
        DWORD Protect;
    };

    // Maximum number of records in the in-memory log
    const LONG MaxRecords = 128;

    // Buffer used to store the logged data
    volatile LogRecord logRecords[MaxRecords];

    // Current record number. Use (recordNumber % MaxRecords) to determine
    // the current position in the circular buffer.
    volatile LONG recordNumber = 0;

    // Record an entry in the in-memory log. The slot is claimed atomically
    // via InterlockedIncrement, so concurrent loggers get distinct slots;
    // once the buffer wraps, the oldest record is overwritten. NOTE: the
    // record fields themselves are written without further synchronization,
    // so a record can be torn if the buffer wraps quickly — acceptable for
    // a diagnostic-only log.
    void LogVaOperation(
        IN VirtualOperation operation,
        IN LPVOID requestedAddress,
        IN SIZE_T size,
        IN DWORD flAllocationType,
        IN DWORD flProtect,
        IN LPVOID returnedAddress,
        IN BOOL result)
    {
        LONG i = InterlockedIncrement(&recordNumber) - 1;
        LogRecord* curRec = (LogRecord*)&logRecords[i % MaxRecords];

        curRec->RecordId = i;
        curRec->CurrentThread = (LPVOID)pthread_self();
        curRec->RequestedAddress = requestedAddress;
        curRec->ReturnedAddress = returnedAddress;
        curRec->Size = size;
        curRec->AllocationType = flAllocationType;
        curRec->Protect = flProtect;
        curRec->Operation = static_cast<DWORD>(operation) | (result ? 0 : FailedOperationMarker);
    }
}
/*++
Function:
VIRTUALInitialize()
Initializes this section's critical section.
Return value:
TRUE if initialization succeeded
FALSE otherwise.
--*/
extern "C"
BOOL
VIRTUALInitialize(bool initializeExecutableMemoryAllocator)
{
    // Cache the OS page size; Virtual* bookkeeping is page-granular.
    s_virtualPageSize = getpagesize();

    TRACE("Initializing the Virtual Critical Sections. \n");

    InternalInitializeCriticalSection(&virtual_critsec);

    pVirtualMemory = NULL;

    // The executable allocator is only needed by callers that opt in
    // (processes that will JIT near the CoreCLR library).
    if (initializeExecutableMemoryAllocator)
    {
        g_executableMemoryAllocator.Initialize();
    }

    return TRUE;
}
/***
*
* VIRTUALCleanup()
* Deletes this section's critical section.
*
*/
extern "C"
void VIRTUALCleanup()
{
    PCMI pEntry;
    PCMI pTempEntry;
    CPalThread * pthrCurrent = InternalGetCurrentThread();

    InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);

    // Clean up the allocated memory.
    // NOTE: only the bookkeeping structures are freed here; the mapped
    // regions themselves are not munmap'ed (each leak is WARNed about).
    pEntry = pVirtualMemory;
    while ( pEntry )
    {
        WARN( "The memory at %d was not freed through a call to VirtualFree.\n",
              pEntry->startBoundary );
        free(pEntry->pAllocState);
        free(pEntry->pProtectionState );
        pTempEntry = pEntry;
        pEntry = pEntry->pNext;
        free(pTempEntry );
    }
    pVirtualMemory = NULL;

    InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);

    TRACE( "Deleting the Virtual Critical Sections. \n" );
    DeleteCriticalSection( &virtual_critsec );
}
/***
*
* VIRTUALContainsInvalidProtectionFlags()
* Returns TRUE if an invalid flag is specified. FALSE otherwise.
*/
// Returns TRUE when flProtect contains any bit outside the set of
// protection values supported by the PAL; FALSE otherwise.
static BOOL VIRTUALContainsInvalidProtectionFlags( IN DWORD flProtect )
{
    const DWORD validFlags = PAGE_NOACCESS | PAGE_READONLY |
                             PAGE_READWRITE | PAGE_EXECUTE | PAGE_EXECUTE_READ |
                             PAGE_EXECUTE_READWRITE;

    return ( ( flProtect & ~validFlags ) != 0 ) ? TRUE : FALSE;
}
/****
*
* VIRTUALIsPageCommitted
*
* SIZE_T nBitToRetrieve - Which page to check.
*
* Returns TRUE if committed, FALSE otherwise.
*
*/
// Returns TRUE if the page at index nBitToRetrieve is marked committed in
// the region's allocation-state bitmap, FALSE otherwise (or on NULL input).
static BOOL VIRTUALIsPageCommitted( SIZE_T nBitToRetrieve, CONST PCMI pInformation )
{
    if ( !pInformation )
    {
        ERROR( "pInformation was NULL!\n" );
        return FALSE;
    }

    // One bit per page, little-endian bit order within each byte.
    SIZE_T nByteOffset = nBitToRetrieve / CHAR_BIT;
    UINT nBitOffset = nBitToRetrieve % CHAR_BIT;
    UINT byteMask = 1 << nBitOffset;

    return ( pInformation->pAllocState[ nByteOffset ] & byteMask ) ? TRUE : FALSE;
}
/*********
*
* VIRTUALGetAllocationType
*
* IN SIZE_T Index - The page within the range to retrieve
* the state for.
*
* IN pInformation - The virtual memory object.
*
*/
// Map a page's commit bit to the Win32 allocation state:
// MEM_COMMIT if the page is committed, MEM_RESERVE otherwise.
static INT VIRTUALGetAllocationType( SIZE_T Index, CONST PCMI pInformation )
{
    return VIRTUALIsPageCommitted( Index, pInformation ) ? MEM_COMMIT : MEM_RESERVE;
}
/****
*
* VIRTUALSetPageBits
*
* IN UINT nStatus - Bit set / reset [0: reset, any other value: set].
* IN SIZE_T nStartingBit - The bit to set.
*
* IN SIZE_T nNumberOfBits - The range of bits to set.
* IN BYTE* pBitArray - A pointer the array to be manipulated.
*
* Returns TRUE on success, FALSE otherwise.
* Turn on/off memory status bits.
*
*/
// Set (nStatus != 0) or clear (nStatus == 0) a run of nNumberOfBits bits
// starting at nStartingBit in pBitArray. Partial first/last bytes are
// handled with masks; full interior bytes are memset in one shot.
static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
                          SIZE_T nNumberOfBits, BYTE * pBitArray )
{
    /* byte masks for optimized modification of partial bytes (changing less
       than 8 bits in a single byte). note that bits are treated in little
       endian order : value 1 is bit 0; value 128 is bit 7. in the binary
       representations below, bit 0 is on the right */

    /* start masks : for modifying bits >= n while preserving bits < n.
       example : if nStartignBit%8 is 3, then bits 0, 1, 2 remain unchanged
       while bits 3..7 are changed; startmasks[3] can be used for this. */
    static const BYTE startmasks[8] = {
        0xff, /* start at 0 : 1111 1111 */
        0xfe, /* start at 1 : 1111 1110 */
        0xfc, /* start at 2 : 1111 1100 */
        0xf8, /* start at 3 : 1111 1000 */
        0xf0, /* start at 4 : 1111 0000 */
        0xe0, /* start at 5 : 1110 0000 */
        0xc0, /* start at 6 : 1100 0000 */
        0x80  /* start at 7 : 1000 0000 */
    };

    /* end masks : for modifying bits <= n while preserving bits > n.
       example : if the last bit to change is 5, then bits 6 & 7 stay unchanged
       while bits 1..5 are changed; endmasks[5] can be used for this. */
    static const BYTE endmasks[8] = {
        0x01, /* end at 0 : 0000 0001 */
        0x03, /* end at 1 : 0000 0011 */
        0x07, /* end at 2 : 0000 0111 */
        0x0f, /* end at 3 : 0000 1111 */
        0x1f, /* end at 4 : 0001 1111 */
        0x3f, /* end at 5 : 0011 1111 */
        0x7f, /* end at 6 : 0111 1111 */
        0xff  /* end at 7 : 1111 1111 */
    };
    /* last example : if only the middle of a byte must be changed, both start
       and end masks can be combined (bitwise AND) to obtain the correct mask.
       if we want to change bits 2 to 4 :
       startmasks[2] : 0xfc 1111 1100 (change 2,3,4,5,6,7)
       endmasks[4]:    0x1f 0001 1111 (change 0,1,2,3,4)
       bitwise AND :   0x1c 0001 1100 (change 2,3,4)
    */

    BYTE byte_mask;
    SIZE_T nLastBit;
    SIZE_T nFirstByte;
    SIZE_T nLastByte;
    SIZE_T nFullBytes;

    TRACE( "VIRTUALSetPageBits( nStatus = %d, nStartingBit = %d, "
           "nNumberOfBits = %d, pBitArray = 0x%p )\n",
           nStatus, nStartingBit, nNumberOfBits, pBitArray );

    if ( 0 == nNumberOfBits )
    {
        ERROR( "nNumberOfBits was 0!\n" );
        return FALSE;
    }

    nLastBit = nStartingBit+nNumberOfBits-1;
    nFirstByte = nStartingBit / 8;
    nLastByte = nLastBit / 8;

    /* handle partial first byte (if any) */
    if(0 != (nStartingBit % 8))
    {
        byte_mask = startmasks[nStartingBit % 8];

        /* if 1st byte is the only changing byte, combine endmask to preserve
           trailing bits (see 3rd example above) */
        if( nLastByte == nFirstByte)
        {
            byte_mask &= endmasks[nLastBit % 8];
        }

        /* byte_mask contains 1 for bits to change, 0 for bits to leave alone */
        if(0 == nStatus)
        {
            /* bits to change must be set to 0 : invert byte_mask (giving 0 for
               bits to change), use bitwise AND */
            pBitArray[nFirstByte] &= ~byte_mask;
        }
        else
        {
            /* bits to change must be set to 1 : use bitwise OR */
            pBitArray[nFirstByte] |= byte_mask;
        }

        /* stop right away if only 1 byte is being modified */
        if(nLastByte == nFirstByte)
        {
            return TRUE;
        }

        /* we're done with the 1st byte; skip over it */
        nFirstByte++;
    }

    /* number of bytes to change, excluding the last byte (handled separately)*/
    nFullBytes = nLastByte - nFirstByte;

    if(0 != nFullBytes)
    {
        // Turn off/on dirty bits
        memset( &(pBitArray[nFirstByte]), (0 == nStatus) ? 0 : 0xFF, nFullBytes );
    }

    /* handle last (possibly partial) byte */
    byte_mask = endmasks[nLastBit % 8];

    /* byte_mask contains 1 for bits to change, 0 for bits to leave alone */
    if(0 == nStatus)
    {
        /* bits to change must be set to 0 : invert byte_mask (giving 0 for
           bits to change), use bitwise AND */
        pBitArray[nLastByte] &= ~byte_mask;
    }
    else
    {
        /* bits to change must be set to 1 : use bitwise OR */
        pBitArray[nLastByte] |= byte_mask;
    }

    return TRUE;
}
/****
*
* VIRTUALSetAllocState
*
* IN UINT nAction - Which action to perform.
* IN SIZE_T nStartingBit - The bit to set.
*
* IN SIZE_T nNumberOfBits - The range of bits to set.
* IN PCMI pStateArray - A pointer the array to be manipulated.
*
* Returns TRUE on success, FALSE otherwise.
* Turn bit on to indicate committed, turn bit off to indicate reserved.
*
*/
// Mark a run of pages committed (nAction == MEM_COMMIT sets the bits) or
// reserved (any other action clears them) in the region's allocation bitmap.
static BOOL VIRTUALSetAllocState( UINT nAction, SIZE_T nStartingBit,
                           SIZE_T nNumberOfBits, CONST PCMI pInformation )
{
    TRACE( "VIRTUALSetAllocState( nAction = %d, nStartingBit = %d, "
           "nNumberOfBits = %d, pStateArray = 0x%p )\n",
           nAction, nStartingBit, nNumberOfBits, pInformation );

    if ( !pInformation )
    {
        ERROR( "pInformation was invalid!\n" );
        return FALSE;
    }

    UINT bitValue = ( MEM_COMMIT == nAction ) ? 1 : 0;
    return VIRTUALSetPageBits( bitValue, nStartingBit,
                               nNumberOfBits, pInformation->pAllocState );
}
/****
*
* VIRTUALFindRegionInformation( )
*
* IN UINT_PTR address - The address to look for.
*
* Returns the PCMI if found, NULL otherwise.
*/
// Find the bookkeeping entry whose region contains address. The list is
// sorted by start address, so the walk stops early once past the target.
// Returns NULL when no region contains the address.
static PCMI VIRTUALFindRegionInformation( IN UINT_PTR address )
{
    TRACE( "VIRTUALFindRegionInformation( %#x )\n", address );

    for ( PCMI pCurrent = pVirtualMemory; pCurrent; pCurrent = pCurrent->pNext )
    {
        if ( pCurrent->startBoundary > address )
        {
            /* Gone past the possible location in the list. */
            return NULL;
        }

        if ( pCurrent->startBoundary + pCurrent->memSize > address )
        {
            return pCurrent;
        }
    }

    return NULL;
}
/*++
Function :
VIRTUALReleaseMemory
Removes a PCMI entry from the list.
Returns true on success. FALSE otherwise.
--*/
// Unlink a bookkeeping entry from the doubly-linked region list and free
// it together with its per-page state arrays. Does NOT unmap the region
// itself. Returns TRUE on success, FALSE on a NULL argument.
// NOTE: the caller must own virtual_critsec.
static BOOL VIRTUALReleaseMemory( PCMI pMemoryToBeReleased )
{
    BOOL bRetVal = TRUE;

    if ( !pMemoryToBeReleased )
    {
        ASSERT( "Invalid pointer.\n" );
        return FALSE;
    }

    if ( pMemoryToBeReleased == pVirtualMemory )
    {
        /* This is either the first entry, or the only entry. */
        pVirtualMemory = pMemoryToBeReleased->pNext;
        if ( pMemoryToBeReleased->pNext )
        {
            pMemoryToBeReleased->pNext->pPrevious = NULL;
        }
    }
    else /* Could be anywhere in the list. */
    {
        /* Delete the entry from the linked list. */
        if ( pMemoryToBeReleased->pPrevious )
        {
            pMemoryToBeReleased->pPrevious->pNext = pMemoryToBeReleased->pNext;
        }

        if ( pMemoryToBeReleased->pNext )
        {
            pMemoryToBeReleased->pNext->pPrevious = pMemoryToBeReleased->pPrevious;
        }
    }

    free( pMemoryToBeReleased->pAllocState );
    pMemoryToBeReleased->pAllocState = NULL;

    free( pMemoryToBeReleased->pProtectionState );
    pMemoryToBeReleased->pProtectionState = NULL;

    free( pMemoryToBeReleased );
    pMemoryToBeReleased = NULL;

    return bRetVal;
}
/****
* VIRTUALConvertWinFlags() -
* Converts win32 protection flags to
* internal VIRTUAL flags.
*
*/
// Translate a Win32 page-protection value (low byte of flProtect) to the
// PAL's internal VIRTUAL_* encoding. Unknown values log an error and map
// to 0.
static BYTE VIRTUALConvertWinFlags( IN DWORD flProtect )
{
    switch ( flProtect & 0xff )
    {
    case PAGE_NOACCESS :
        return VIRTUAL_NOACCESS;
    case PAGE_READONLY :
        return VIRTUAL_READONLY;
    case PAGE_READWRITE :
        return VIRTUAL_READWRITE;
    case PAGE_EXECUTE :
        return VIRTUAL_EXECUTE;
    case PAGE_EXECUTE_READ :
        return VIRTUAL_EXECUTE_READ;
    case PAGE_EXECUTE_READWRITE:
        return VIRTUAL_EXECUTE_READWRITE;
    default :
        ERROR( "Incorrect or no protection flags specified.\n" );
        return 0;
    }
}
/****
* VIRTUALConvertVirtualFlags() -
* Converts internal virtual protection
* flags to their win32 counterparts.
*/
// Translate the PAL's internal VIRTUAL_* protection encoding back to its
// Win32 PAGE_* counterpart. Unknown values log an error and map to 0.
static DWORD VIRTUALConvertVirtualFlags( IN BYTE VirtualProtect )
{
    switch ( VirtualProtect )
    {
    case VIRTUAL_READONLY :
        return PAGE_READONLY;
    case VIRTUAL_READWRITE :
        return PAGE_READWRITE;
    case VIRTUAL_EXECUTE_READWRITE :
        return PAGE_EXECUTE_READWRITE;
    case VIRTUAL_EXECUTE_READ :
        return PAGE_EXECUTE_READ;
    case VIRTUAL_EXECUTE :
        return PAGE_EXECUTE;
    case VIRTUAL_NOACCESS :
        return PAGE_NOACCESS;
    default :
        ERROR( "Incorrect or no protection flags specified.\n" );
        return 0;
    }
}
/***
* Displays the linked list.
*
*/
#if defined _DEBUG
// Debug-only: dump every region entry (commit bits, protection bytes,
// links) to the trace channel. No-op unless DLI_TRACE is enabled.
static void VIRTUALDisplayList( void  )
{
    if (!DBG_ENABLED(DLI_TRACE, defdbgchan))
        return;

    PCMI p;
    SIZE_T count;
    SIZE_T index;
    CPalThread * pthrCurrent = InternalGetCurrentThread();

    // Hold the lock so the list cannot change while we walk it.
    InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);

    p = pVirtualMemory;
    count = 0;

    while ( p ) {

        DBGOUT( "Entry %d : \n", count );
        DBGOUT( "\t startBoundary %#x \n", p->startBoundary );
        DBGOUT( "\t memSize %d \n", p->memSize );

        DBGOUT( "\t pAllocState " );
        for ( index = 0; index < p->memSize / GetVirtualPageSize(); index++)
        {
            DBGOUT( "[%d] ", VIRTUALGetAllocationType( index, p ) );
        }
        DBGOUT( "\t pProtectionState " );
        for ( index = 0; index < p->memSize / GetVirtualPageSize(); index++ )
        {
            DBGOUT( "[%d] ", (UINT)p->pProtectionState[ index ] );
        }
        DBGOUT( "\n" );
        DBGOUT( "\t accessProtection %d \n", p->accessProtection );
        DBGOUT( "\t allocationType %d \n", p->allocationType );
        DBGOUT( "\t pNext %p \n", p->pNext );
        DBGOUT( "\t pLast %p \n", p->pPrevious );

        count++;
        p = p->pNext;
    }

    InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
}
#endif
#ifdef DEBUG
// Debug check: the entry to our right (if any) must start at or after
// the end of this entry, i.e. the sorted list has no overlaps.
void VerifyRightEntry(PCMI pEntry)
{
    volatile PCMI pRight = pEntry->pNext;

    if (pRight != nullptr)
    {
        SIZE_T endAddress = ((SIZE_T)pEntry->startBoundary) + pEntry->memSize;
        _ASSERTE(endAddress <= (SIZE_T)pRight->startBoundary);
    }
}
// Debug check: the entry to our left (if any) must end at or before the
// start of this entry, i.e. the sorted list has no overlaps.
void VerifyLeftEntry(PCMI pEntry)
{
    volatile PCMI pLeft = pEntry->pPrevious;

    if (pLeft != NULL)
    {
        SIZE_T endAddress = ((SIZE_T)pLeft->startBoundary) + pLeft->memSize;
        _ASSERTE(endAddress <= (SIZE_T)pEntry->startBoundary);
    }
}
#endif // DEBUG
/****
* VIRTUALStoreAllocationInfo()
*
* Stores the allocation information in the linked list.
* NOTE: The caller must own the critical section.
*/
// Allocate a bookkeeping entry for a new page-aligned region and insert it
// into the address-sorted doubly-linked list. All pages start as
// MEM_RESERVE with the given protection. Returns FALSE (with everything
// freed) on bad size or out-of-memory. Caller must own virtual_critsec.
static BOOL VIRTUALStoreAllocationInfo(
            IN UINT_PTR startBoundary,      /* Start of the region. */
            IN SIZE_T memSize,              /* Size of the region. */
            IN DWORD flAllocationType,      /* Allocation Types. */
            IN DWORD flProtection )         /* Protections flags on the memory. */
{
    PCMI pNewEntry = nullptr;
    PCMI pMemInfo = nullptr;
    SIZE_T nBufferSize = 0;

    if (!IS_ALIGNED(memSize, GetVirtualPageSize()))
    {
        ERROR("The memory size was not a multiple of the page size. \n");
        return FALSE;
    }

    if (!(pNewEntry = (PCMI)InternalMalloc(sizeof(*pNewEntry))))
    {
        ERROR( "Unable to allocate memory for the structure.\n");
        return FALSE;
    }

    pNewEntry->startBoundary = startBoundary;
    pNewEntry->memSize = memSize;
    pNewEntry->allocationType = flAllocationType;
    pNewEntry->accessProtection = flProtection;

    // Commit state is one bit per page, rounded up to whole bytes.
    nBufferSize = memSize / GetVirtualPageSize() / CHAR_BIT;
    if ((memSize / GetVirtualPageSize()) % CHAR_BIT != 0)
    {
        nBufferSize++;
    }

    // Protection state is one byte per page.
    pNewEntry->pAllocState = (BYTE*)InternalMalloc(nBufferSize);
    pNewEntry->pProtectionState = (BYTE*)InternalMalloc((memSize / GetVirtualPageSize()));

    if (pNewEntry->pAllocState && pNewEntry->pProtectionState)
    {
        /* Set the intial allocation state, and initial allocation protection. */
        VIRTUALSetAllocState(MEM_RESERVE, 0, nBufferSize * CHAR_BIT, pNewEntry);
        memset(pNewEntry->pProtectionState,
               VIRTUALConvertWinFlags(flProtection),
               memSize / GetVirtualPageSize());
    }
    else
    {
        // Either per-page array failed to allocate: release everything.
        ERROR( "Unable to allocate memory for the structure.\n");

        if (pNewEntry->pProtectionState) free(pNewEntry->pProtectionState);
        pNewEntry->pProtectionState = nullptr;

        if (pNewEntry->pAllocState) free(pNewEntry->pAllocState);
        pNewEntry->pAllocState = nullptr;

        free(pNewEntry);
        pNewEntry = nullptr;

        return FALSE;
    }

    pMemInfo = pVirtualMemory;

    if (pMemInfo && pMemInfo->startBoundary < startBoundary)
    {
        /* Look for the correct insert point */
        TRACE("Looking for the correct insert location.\n");
        while (pMemInfo->pNext && (pMemInfo->pNext->startBoundary < startBoundary))
        {
            pMemInfo = pMemInfo->pNext;
        }

        pNewEntry->pNext = pMemInfo->pNext;
        pNewEntry->pPrevious = pMemInfo;

        if (pNewEntry->pNext)
        {
            pNewEntry->pNext->pPrevious = pNewEntry;
        }

        pMemInfo->pNext = pNewEntry;
    }
    else
    {
        /* This is the first entry in the list. */
        pNewEntry->pNext = pMemInfo;
        pNewEntry->pPrevious = nullptr;

        if (pNewEntry->pNext)
        {
            pNewEntry->pNext->pPrevious = pNewEntry;
        }

        pVirtualMemory = pNewEntry ;
    }

#ifdef DEBUG
    VerifyRightEntry(pNewEntry);
    VerifyLeftEntry(pNewEntry);
#endif // DEBUG

    return TRUE;
}
/******
*
* VIRTUALResetMemory() - Helper function that resets the memory
*
*
*/
// MEM_RESET: tell the kernel the pages in [lpAddress, lpAddress+dwSize)
// are no longer needed (madvise MADV_FREE when available, else
// MADV_DONTNEED). Returns lpAddress on success, NULL on failure; the
// outcome is recorded in the in-memory VM log either way.
static LPVOID VIRTUALResetMemory(
                IN CPalThread *pthrCurrent, /* Currently executing thread */
                IN LPVOID lpAddress,        /* Region to reserve or commit */
                IN SIZE_T dwSize)           /* Size of Region */
{
    LPVOID pRetVal = NULL;
    UINT_PTR StartBoundary;
    SIZE_T MemSize;

    TRACE( "Resetting the memory now..\n");

    // Expand the request outward to full page boundaries.
    StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize());
    MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary;

    int st;
#if HAVE_MADV_FREE
    // Try to use MADV_FREE if supported. It tells the kernel that the application doesn't
    // need the pages in the range. Freeing the pages can be delayed until a memory pressure
    // occurs.
    st = madvise((LPVOID)StartBoundary, MemSize, MADV_FREE);
    if (st != 0)
#endif
    {
        // In case the MADV_FREE is not supported, use MADV_DONTNEED
        st = madvise((LPVOID)StartBoundary, MemSize, MADV_DONTNEED);
    }

    if (st == 0)
    {
        pRetVal = lpAddress;
    }

    LogVaOperation(
        VirtualMemoryLogging::VirtualOperation::Reset,
        lpAddress,
        dwSize,
        0,
        0,
        pRetVal,
        pRetVal != NULL);

    return pRetVal;
}
/******
*
* VIRTUALReserveMemory() - Helper function that actually reserves the memory.
*
* NOTE: I call SetLastError in here, because many different error states
* exists, and that would be very complicated to work around.
*
*/
// MEM_RESERVE: reserve an address range, first trying the executable
// memory allocator for JIT requests, then falling back to the OS. On
// success the region is recorded in the bookkeeping list; on bookkeeping
// failure the mapping is undone. Sets last error itself. Caller must own
// virtual_critsec (it mutates the region list via VIRTUALStoreAllocationInfo).
static LPVOID VIRTUALReserveMemory(
                IN CPalThread *pthrCurrent, /* Currently executing thread */
                IN LPVOID lpAddress,        /* Region to reserve or commit */
                IN SIZE_T dwSize,           /* Size of Region */
                IN DWORD flAllocationType,  /* Type of allocation */
                IN DWORD flProtect)         /* Type of access protection */
{
    LPVOID pRetVal = NULL;
    UINT_PTR StartBoundary;
    SIZE_T MemSize;

    TRACE( "Reserving the memory now..\n");

    // First, figure out where we're trying to reserve the memory and
    // how much we need. On most systems, requests to mmap must be
    // page-aligned and at multiples of the page size. Unlike on Windows, on
    // Unix, the allocation granularity is the page size, so the memory size to
    // reserve is not aligned to 64 KB. Nor should the start boundary need to
    // to be aligned down to 64 KB, but it is expected that there are other
    // components that rely on this alignment when providing a specific address
    // (note that mmap itself does not make any such guarantees).
    StartBoundary = (UINT_PTR)ALIGN_DOWN(lpAddress, VIRTUAL_64KB);
    MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary;

    // If this is a request for special executable (JIT'ed) memory then, first of all,
    // try to get memory from the executable memory allocator to satisfy the request.
    if (((flAllocationType & MEM_RESERVE_EXECUTABLE) != 0) && (lpAddress == NULL))
    {
        // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
        // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done
        SIZE_T reservationSize = ALIGN_UP(MemSize, VIRTUAL_64KB);
        pRetVal = g_executableMemoryAllocator.AllocateMemory(reservationSize);
        if (pRetVal != nullptr)
        {
            MemSize = reservationSize;
        }
    }

    if (pRetVal == NULL)
    {
        // Try to reserve memory from the OS
        pRetVal = ReserveVirtualMemory(pthrCurrent, (LPVOID)StartBoundary, MemSize, flAllocationType);
    }

    if (pRetVal != NULL)
    {
        if ( !lpAddress )
        {
            /* Compute the real values instead of the null values. */
            StartBoundary = (UINT_PTR) ALIGN_DOWN(pRetVal, GetVirtualPageSize());
            MemSize = ALIGN_UP((UINT_PTR)pRetVal + dwSize, GetVirtualPageSize()) - StartBoundary;
        }

        if ( !VIRTUALStoreAllocationInfo( StartBoundary, MemSize,
                                   flAllocationType, flProtect ) )
        {
            // Bookkeeping failed: give the pages back so we don't leak a mapping.
            ASSERT( "Unable to store the structure in the list.\n");
            pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
            munmap( pRetVal, MemSize );
            pRetVal = NULL;
        }
    }

    LogVaOperation(
        VirtualMemoryLogging::VirtualOperation::Reserve,
        lpAddress,
        dwSize,
        flAllocationType,
        flProtect,
        pRetVal,
        pRetVal != NULL);

    return pRetVal;
}
/******
*
* ReserveVirtualMemory() - Helper function that is used by Virtual* APIs
* and ExecutableMemoryAllocator to reserve virtual memory from the OS.
*
*/
// Low-level reservation: obtain a PROT_NONE anonymous mapping of MemSize
// bytes, at lpAddress when non-NULL. On Mach platforms the range is first
// claimed with vm_allocate and then mapped MAP_FIXED over it. Returns the
// mapped address, or nullptr after setting the thread's last error (any
// partial Mach allocation is released on failure).
static LPVOID ReserveVirtualMemory(
                IN CPalThread *pthrCurrent, /* Currently executing thread */
                IN LPVOID lpAddress,        /* Region to reserve or commit */
                IN SIZE_T dwSize,           /* Size of Region */
                IN DWORD fAllocationType)   /* Allocation type */
{
    UINT_PTR StartBoundary = (UINT_PTR)lpAddress;
    SIZE_T MemSize = dwSize;

    TRACE( "Reserving the memory now.\n");

    // Most platforms will only commit memory if it is dirtied,
    // so this should not consume too much swap space.
    int mmapFlags = 0;

#if HAVE_VM_ALLOCATE
    // Allocate with vm_allocate first, then map at the fixed address.
    int result = vm_allocate(mach_task_self(),
                             &StartBoundary,
                             MemSize,
                             ((LPVOID) StartBoundary != nullptr) ? FALSE : TRUE);

    if (result != KERN_SUCCESS)
    {
        ERROR("vm_allocate failed to allocated the requested region!\n");
        pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS);
        return nullptr;
    }

    mmapFlags |= MAP_FIXED;
#endif // HAVE_VM_ALLOCATE

    if ((fAllocationType & MEM_LARGE_PAGES) != 0)
    {
#if HAVE_MAP_HUGETLB
        mmapFlags |= MAP_HUGETLB;
        TRACE("MAP_HUGETLB flag set\n");
#elif HAVE_VM_FLAGS_SUPERPAGE_SIZE_ANY
        mmapFlags |= VM_FLAGS_SUPERPAGE_SIZE_ANY;
        TRACE("VM_FLAGS_SUPERPAGE_SIZE_ANY flag set\n");
#else
        TRACE("Large Pages requested, but not supported in this PAL configuration\n");
#endif
    }

    mmapFlags |= MAP_ANON | MAP_PRIVATE;

    LPVOID pRetVal = mmap((LPVOID) StartBoundary,
                          MemSize,
                          PROT_NONE,
                          mmapFlags,
                          -1 /* fd */,
                          0  /* offset */);

    if (pRetVal == MAP_FAILED)
    {
        ERROR( "Failed due to insufficient memory.\n" );

#if HAVE_VM_ALLOCATE
        vm_deallocate(mach_task_self(), StartBoundary, MemSize);
#endif // HAVE_VM_ALLOCATE

        pthrCurrent->SetLastError(ERROR_NOT_ENOUGH_MEMORY);
        return nullptr;
    }

    /* Check to see if the region is what we asked for. */
    if (lpAddress != nullptr && StartBoundary != (UINT_PTR)pRetVal)
    {
        ERROR("We did not get the region we asked for from mmap!\n");
        pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS);
        munmap(pRetVal, MemSize);
        return nullptr;
    }

#if MMAP_ANON_IGNORES_PROTECTION
    if (mprotect(pRetVal, MemSize, PROT_NONE) != 0)
    {
        ERROR("mprotect failed to protect the region!\n");
        pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS);
        munmap(pRetVal, MemSize);
        return nullptr;
    }
#endif  // MMAP_ANON_IGNORES_PROTECTION

    return pRetVal;
}
/******
*
* VIRTUALCommitMemory() - Helper function that actually commits the memory.
*
 * NOTE: I call SetLastError in here, because many different error states
 *       exist, and that would be very complicated to work around.
*
*/
static LPVOID
VIRTUALCommitMemory(
                IN CPalThread *pthrCurrent, /* Currently executing thread */
                IN LPVOID lpAddress,        /* Region to reserve or commit */
                IN SIZE_T dwSize,           /* Size of Region */
                IN DWORD flAllocationType,  /* Type of allocation */
                IN DWORD flProtect)         /* Type of access protection */
{
    // Commits the page-aligned range covering [lpAddress, lpAddress+dwSize),
    // reserving the range first if it is not already tracked. Returns the
    // base of the committed range, or NULL with the last error set.
    UINT_PTR StartBoundary = 0;
    SIZE_T MemSize = 0;
    PCMI pInformation = 0;
    LPVOID pRetVal = NULL;
    BOOL IsLocallyReserved = FALSE;
    SIZE_T totalPages;
    INT allocationType, curAllocationType;
    INT protectionState, curProtectionState;
    SIZE_T initialRunStart;
    SIZE_T runStart;
    SIZE_T runLength;
    SIZE_T index;
    INT nProtect;
    INT vProtect;

    // Page-align the request: start rounded down, end rounded up.
    if ( lpAddress )
    {
        StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize());
        MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary;
    }
    else
    {
        MemSize = ALIGN_UP(dwSize, GetVirtualPageSize());
    }

    /* See if we have already reserved this memory. */
    pInformation = VIRTUALFindRegionInformation( StartBoundary );

    if ( !pInformation )
    {
        /* According to the new MSDN docs, if MEM_COMMIT is specified,
        and the memory is not reserved, you reserve and then commit.
        */
        LPVOID pReservedMemory =
            VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize,
                                  flAllocationType, flProtect );

        TRACE( "Reserve and commit the memory!\n " );

        if ( pReservedMemory )
        {
            /* Re-align the addresses and try again to find the memory. */
            StartBoundary = (UINT_PTR) ALIGN_DOWN(pReservedMemory, GetVirtualPageSize());
            MemSize = ALIGN_UP((UINT_PTR)pReservedMemory + dwSize, GetVirtualPageSize()) - StartBoundary;

            pInformation = VIRTUALFindRegionInformation( StartBoundary );

            if ( !pInformation )
            {
                ASSERT( "Unable to locate the region information.\n" );
                pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
                pRetVal = NULL;
                goto done;
            }
            // Remember that this call created the reservation so the error
            // path below can tear it down on failure.
            IsLocallyReserved = TRUE;
        }
        else
        {
            ERROR( "Unable to reserve the memory.\n" );
            /* Don't set last error here, it will already be set. */
            pRetVal = NULL;
            goto done;
        }
    }

    TRACE( "Committing the memory now..\n");

    // Pages that aren't already committed need to be committed. Pages that
    // are committed don't need to be committed, but they might need to have
    // their permissions changed.
    // To get this right, we find runs of pages with similar states and
    // permissions. If a run is not committed, we commit it and then set
    // its permissions. If a run is committed but has different permissions
    // from what we're trying to set, we set its permissions. Finally,
    // if a run is already committed and has the right permissions,
    // we don't need to do anything to it.

    totalPages = MemSize / GetVirtualPageSize();
    runStart = (StartBoundary - pInformation->startBoundary) /
                GetVirtualPageSize(); // Page index
    initialRunStart = runStart;
    allocationType = VIRTUALGetAllocationType(runStart, pInformation);
    protectionState = pInformation->pProtectionState[runStart];
    curAllocationType = allocationType;
    curProtectionState = protectionState;
    runLength = 1;
    nProtect = W32toUnixAccessControl(flProtect);
    vProtect = VIRTUALConvertWinFlags(flProtect);

    if (totalPages > pInformation->memSize / GetVirtualPageSize() - runStart)
    {
        ERROR("Trying to commit beyond the end of the region!\n");
        goto error;
    }

    while(runStart < initialRunStart + totalPages)
    {
        // Find the next run of pages
        for(index = runStart + 1; index < initialRunStart + totalPages;
            index++)
        {
            curAllocationType = VIRTUALGetAllocationType(index, pInformation);
            curProtectionState = pInformation->pProtectionState[index];
            if (curAllocationType != allocationType ||
                curProtectionState != protectionState)
            {
                break;
            }
            runLength++;
        }

        StartBoundary = pInformation->startBoundary + runStart * GetVirtualPageSize();
        pRetVal = (void *)StartBoundary;
        MemSize = runLength * GetVirtualPageSize();

        if (allocationType != MEM_COMMIT)
        {
            // Commit the pages
            if (mprotect((void *) StartBoundary, MemSize, PROT_WRITE | PROT_READ) != 0)
            {
                ERROR("mprotect() failed! Error(%d)=%s\n", errno, strerror(errno));
                goto error;
            }

            VIRTUALSetAllocState(MEM_COMMIT, runStart, runLength, pInformation);

            if (nProtect == (PROT_WRITE | PROT_READ))
            {
                // Handle this case specially so we don't bother
                // mprotect'ing the region.
                memset(pInformation->pProtectionState + runStart,
                       vProtect, runLength);
            }

            // The run was just mprotect'ed read/write above.
            protectionState = VIRTUAL_READWRITE;
        }

        if (protectionState != vProtect)
        {
            // Change permissions.
            if (mprotect((void *) StartBoundary, MemSize, nProtect) != -1)
            {
                memset(pInformation->pProtectionState + runStart,
                       vProtect, runLength);
            }
            else
            {
                ERROR("mprotect() failed! Error(%d)=%s\n",
                      errno, strerror(errno));
                goto error;
            }
        }

        runStart = index;
        runLength = 1;
        allocationType = curAllocationType;
        protectionState = curProtectionState;
    }

    pRetVal = (void *) (pInformation->startBoundary + initialRunStart * GetVirtualPageSize());
    goto done;

error:
    // On failure, release the reservation if this call (or the caller via
    // MEM_RESERVE) was responsible for creating it.
    if ( flAllocationType & MEM_RESERVE || IsLocallyReserved )
    {
        munmap( pRetVal, MemSize );
        if ( VIRTUALReleaseMemory( pInformation ) == FALSE )
        {
            ASSERT( "Unable to remove the PCMI entry from the list.\n" );
            pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
            pRetVal = NULL;
            goto done;
        }
    }

    pInformation = NULL;
    pRetVal = NULL;

done:

    LogVaOperation(
        VirtualMemoryLogging::VirtualOperation::Commit,
        lpAddress,
        dwSize,
        flAllocationType,
        flProtect,
        pRetVal,
        pRetVal != NULL);

    return pRetVal;
}
/*++
Function:
PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange
This function attempts to allocate the requested amount of memory in the specified address range, from the executable memory
allocator. If unable to do so, the function returns nullptr and does not set the last error.
lpBeginAddress - Inclusive beginning of range
lpEndAddress - Exclusive end of range
dwSize - Number of bytes to allocate
--*/
LPVOID
PALAPI
PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(
    IN LPCVOID lpBeginAddress,
    IN LPCVOID lpEndAddress,
    IN SIZE_T dwSize)
{
#ifdef BIT64
    PERF_ENTRY(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange);
    ENTRY(
        "PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(lpBeginAddress = %p, lpEndAddress = %p, dwSize = %Iu)\n",
        lpBeginAddress,
        lpEndAddress,
        dwSize);

    _ASSERTE(lpBeginAddress <= lpEndAddress);

    // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
    // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done
    SIZE_T reservationSize = ALIGN_UP(dwSize, VIRTUAL_64KB);

    // Both the allocator and the region bookkeeping are protected by
    // virtual_critsec; hold it across the allocation and the bookkeeping
    // update so they stay consistent.
    CPalThread *currentThread = InternalGetCurrentThread();
    InternalEnterCriticalSection(currentThread, &virtual_critsec);

    void *address = g_executableMemoryAllocator.AllocateMemoryWithinRange(lpBeginAddress, lpEndAddress, reservationSize);
    if (address != nullptr)
    {
        _ASSERTE(IS_ALIGNED(address, GetVirtualPageSize()));
        // Record the reservation so the Virtual* APIs recognize this range.
        if (!VIRTUALStoreAllocationInfo((UINT_PTR)address, reservationSize, MEM_RESERVE | MEM_RESERVE_EXECUTABLE, PAGE_NOACCESS))
        {
            ASSERT("Unable to store the structure in the list.\n");
            munmap(address, reservationSize);
            address = nullptr;
        }
    }

    LogVaOperation(
        VirtualMemoryLogging::VirtualOperation::ReserveFromExecutableMemoryAllocatorWithinRange,
        nullptr,
        dwSize,
        MEM_RESERVE | MEM_RESERVE_EXECUTABLE,
        PAGE_NOACCESS,
        address,
        TRUE);

    InternalLeaveCriticalSection(currentThread, &virtual_critsec);

    LOGEXIT("PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange returning %p\n", address);
    PERF_EXIT(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange);
    return address;
#else // !BIT64
    return nullptr;
#endif // BIT64
}
/*++
Function:
VirtualAlloc
Note:
MEM_TOP_DOWN, MEM_PHYSICAL, MEM_WRITE_WATCH are not supported.
Unsupported flags are ignored.
Page size on i386 is set to 4k.
See MSDN doc.
--*/
LPVOID
PALAPI
VirtualAlloc(
         IN LPVOID lpAddress,        /* Region to reserve or commit */
         IN SIZE_T dwSize,           /* Size of Region */
         IN DWORD flAllocationType,  /* Type of allocation */
         IN DWORD flProtect)         /* Type of access protection */
{
    // Validates the flags, then dispatches to VIRTUALResetMemory,
    // VIRTUALReserveMemory and/or VIRTUALCommitMemory under virtual_critsec.
    // Returns the base address on success, NULL with the last error set on
    // failure.
    LPVOID pRetVal = NULL;
    CPalThread *pthrCurrent;

    PERF_ENTRY(VirtualAlloc);
    ENTRY("VirtualAlloc(lpAddress=%p, dwSize=%u, flAllocationType=%#x, \
flProtect=%#x)\n", lpAddress, dwSize, flAllocationType, flProtect);

    pthrCurrent = InternalGetCurrentThread();

    // MEM_WRITE_WATCH is explicitly rejected (see GetWriteWatch: write
    // tracking is not implemented in this PAL).
    if ( ( flAllocationType & MEM_WRITE_WATCH ) != 0 )
    {
        pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
        goto done;
    }

    /* Test for un-supported flags. */
    if ( ( flAllocationType & ~( MEM_COMMIT | MEM_RESERVE | MEM_RESET | MEM_TOP_DOWN | MEM_RESERVE_EXECUTABLE | MEM_LARGE_PAGES ) ) != 0 )
    {
        ASSERT( "flAllocationType can be one, or any combination of MEM_COMMIT, \
MEM_RESERVE, MEM_TOP_DOWN, MEM_RESERVE_EXECUTABLE, or MEM_LARGE_PAGES.\n" );
        pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
        goto done;
    }
    if ( VIRTUALContainsInvalidProtectionFlags( flProtect ) )
    {
        ASSERT( "flProtect can be one of PAGE_READONLY, PAGE_READWRITE, or \
PAGE_EXECUTE_READWRITE || PAGE_NOACCESS. \n" );

        pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
        goto done;
    }
    if ( flAllocationType & MEM_TOP_DOWN )
    {
        WARN( "Ignoring the allocation flag MEM_TOP_DOWN.\n" );
    }

    LogVaOperation(
        VirtualMemoryLogging::VirtualOperation::Allocate,
        lpAddress,
        dwSize,
        flAllocationType,
        flProtect,
        NULL,
        TRUE);

    // MEM_RESET must be used alone; when it is, the reserve/commit branches
    // below are not entered (their flag bits are clear).
    if ( flAllocationType & MEM_RESET )
    {
        if ( flAllocationType != MEM_RESET )
        {
            ASSERT( "MEM_RESET cannot be used with any other allocation flags in flAllocationType.\n" );
            pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
            goto done;
        }

        InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
        pRetVal = VIRTUALResetMemory( pthrCurrent, lpAddress, dwSize );
        InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);

        if ( !pRetVal )
        {
            /* Error messages are already displayed, just leave. */
            goto done;
        }
    }

    if ( flAllocationType & MEM_RESERVE )
    {
        InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
        pRetVal = VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize, flAllocationType, flProtect );
        InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);

        if ( !pRetVal )
        {
            /* Error messages are already displayed, just leave. */
            goto done;
        }
    }

    if ( flAllocationType & MEM_COMMIT )
    {
        InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
        if ( pRetVal != NULL )
        {
            /* We are reserving and committing. */
            pRetVal = VIRTUALCommitMemory( pthrCurrent, pRetVal, dwSize,
                                    flAllocationType, flProtect );
        }
        else
        {
            /* Just a commit. */
            pRetVal = VIRTUALCommitMemory( pthrCurrent, lpAddress, dwSize,
                                    flAllocationType, flProtect );
        }
        InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
    }

done:
#if defined _DEBUG
    VIRTUALDisplayList();
#endif
    LOGEXIT("VirtualAlloc returning %p\n ", pRetVal );
    PERF_EXIT(VirtualAlloc);
    return pRetVal;
}
/*++
Function:
VirtualFree
See MSDN doc.
--*/
BOOL
PALAPI
VirtualFree(
            IN LPVOID lpAddress,    /* Address of region. */
            IN SIZE_T dwSize,       /* Size of region. */
            IN DWORD dwFreeType )   /* Operation type. */
{
    // Decommits (MEM_DECOMMIT) or releases (MEM_RELEASE) memory previously
    // allocated by VirtualAlloc. Exactly one of the two flags must be set:
    //  - MEM_DECOMMIT: dwSize must be non-zero; the page-aligned range is
    //    remapped PROT_NONE and flipped back to MEM_RESERVE in the PAL's
    //    region bookkeeping.
    //  - MEM_RELEASE: dwSize must be 0 and lpAddress must be the base
    //    address returned by VirtualAlloc; the whole region is unmapped and
    //    its tracking entry removed.
    // Returns TRUE on success, FALSE with the last error set on failure.
    BOOL bRetVal = TRUE;
    CPalThread *pthrCurrent;

    PERF_ENTRY(VirtualFree);
    ENTRY("VirtualFree(lpAddress=%p, dwSize=%u, dwFreeType=%#x)\n",
          lpAddress, dwSize, dwFreeType);

    pthrCurrent = InternalGetCurrentThread();
    InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);

    /* Sanity Checks. */
    if ( !lpAddress )
    {
        ERROR( "lpAddress cannot be NULL. You must specify the base address of\
 regions to be de-committed. \n" );
        pthrCurrent->SetLastError( ERROR_INVALID_ADDRESS );
        bRetVal = FALSE;
        goto VirtualFreeExit;
    }

    if ( !( dwFreeType & MEM_RELEASE ) && !(dwFreeType & MEM_DECOMMIT ) )
    {
        ERROR( "dwFreeType must contain one of the following: \
 MEM_RELEASE or MEM_DECOMMIT\n" );
        pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
        bRetVal = FALSE;
        goto VirtualFreeExit;
    }
    /* You cannot release and decommit in one call.*/
    if ( dwFreeType & MEM_RELEASE && dwFreeType & MEM_DECOMMIT )
    {
        ERROR( "MEM_RELEASE cannot be combined with MEM_DECOMMIT.\n" );
        // Bug fix: this failure path previously returned FALSE without
        // setting the last error, so callers would observe a stale value
        // from GetLastError(). Report ERROR_INVALID_PARAMETER like the
        // other parameter-validation branches above.
        pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
        bRetVal = FALSE;
        goto VirtualFreeExit;
    }

    if ( dwFreeType & MEM_DECOMMIT )
    {
        UINT_PTR StartBoundary = 0;
        SIZE_T MemSize = 0;

        if ( dwSize == 0 )
        {
            ERROR( "dwSize cannot be 0. \n" );
            pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
            bRetVal = FALSE;
            goto VirtualFreeExit;
        }
        /*
         * A two byte range straddling 2 pages causes both pages to be either
         * released or decommitted. So round the dwSize up to the next page
         * boundary and round the lpAddress down to the next page boundary.
         */
        StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize());
        MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary;

        PCMI pUnCommittedMem;
        pUnCommittedMem = VIRTUALFindRegionInformation( StartBoundary );
        if (!pUnCommittedMem)
        {
            ASSERT( "Unable to locate the region information.\n" );
            pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
            bRetVal = FALSE;
            goto VirtualFreeExit;
        }

        TRACE( "Un-committing the following page(s) %d to %d.\n",
               StartBoundary, MemSize );

        // Explicitly calling mmap instead of mprotect here makes it
        // that much more clear to the operating system that we no
        // longer need these pages.
        if ( mmap( (LPVOID)StartBoundary, MemSize, PROT_NONE,
                   MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0 ) != MAP_FAILED )
        {
#if (MMAP_ANON_IGNORES_PROTECTION)
            // On platforms where anonymous mmap ignores the protection
            // argument, enforce PROT_NONE explicitly.
            if (mprotect((LPVOID) StartBoundary, MemSize, PROT_NONE) != 0)
            {
                ASSERT("mprotect failed to protect the region!\n");
                pthrCurrent->SetLastError(ERROR_INTERNAL_ERROR);
                munmap((LPVOID) StartBoundary, MemSize);
                bRetVal = FALSE;
                goto VirtualFreeExit;
            }
#endif // MMAP_ANON_IGNORES_PROTECTION

            SIZE_T index = 0;
            SIZE_T nNumOfPagesToChange = 0;

            /* We can now commit this memory by calling VirtualAlloc().*/
            index = (StartBoundary - pUnCommittedMem->startBoundary) / GetVirtualPageSize();
            nNumOfPagesToChange = MemSize / GetVirtualPageSize();
            VIRTUALSetAllocState( MEM_RESERVE, index,
                                  nNumOfPagesToChange, pUnCommittedMem );

            goto VirtualFreeExit;
        }
        else
        {
            ASSERT( "mmap() returned an abnormal value.\n" );
            bRetVal = FALSE;
            pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
            goto VirtualFreeExit;
        }
    }

    if ( dwFreeType & MEM_RELEASE )
    {
        PCMI pMemoryToBeReleased =
            VIRTUALFindRegionInformation( (UINT_PTR)lpAddress );

        if ( !pMemoryToBeReleased )
        {
            ERROR( "lpAddress must be the base address returned by VirtualAlloc.\n" );
            pthrCurrent->SetLastError( ERROR_INVALID_ADDRESS );
            bRetVal = FALSE;
            goto VirtualFreeExit;
        }
        if ( dwSize != 0 )
        {
            ERROR( "dwSize must be 0 if you are releasing the memory.\n" );
            pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER );
            bRetVal = FALSE;
            goto VirtualFreeExit;
        }

        TRACE( "Releasing the following memory %d to %d.\n",
               pMemoryToBeReleased->startBoundary, pMemoryToBeReleased->memSize );

        if ( munmap( (LPVOID)pMemoryToBeReleased->startBoundary,
                     pMemoryToBeReleased->memSize ) == 0 )
        {
            if ( VIRTUALReleaseMemory( pMemoryToBeReleased ) == FALSE )
            {
                ASSERT( "Unable to remove the PCMI entry from the list.\n" );
                pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
                bRetVal = FALSE;
                goto VirtualFreeExit;
            }
            pMemoryToBeReleased = NULL;
        }
        else
        {
            ASSERT( "Unable to unmap the memory, munmap() returned an abnormal value.\n" );
            pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR );
            bRetVal = FALSE;
            goto VirtualFreeExit;
        }
    }

VirtualFreeExit:

    LogVaOperation(
        (dwFreeType & MEM_DECOMMIT) ? VirtualMemoryLogging::VirtualOperation::Decommit
                                    : VirtualMemoryLogging::VirtualOperation::Release,
        lpAddress,
        dwSize,
        dwFreeType,
        0,
        NULL,
        bRetVal);

    InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
    LOGEXIT( "VirtualFree returning %s.\n", bRetVal == TRUE ? "TRUE" : "FALSE" );
    PERF_EXIT(VirtualFree);
    return bRetVal;
}
/*++
Function:
VirtualProtect
See MSDN doc.
--*/
BOOL
PALAPI
VirtualProtect(
           IN LPVOID lpAddress,
           IN SIZE_T dwSize,
           IN DWORD flNewProtect,
           OUT PDWORD lpflOldProtect)
{
    // Changes the access protection of a page-aligned range. All pages in a
    // tracked region must already be committed; *lpflOldProtect receives the
    // previous protection of the first page. Returns TRUE on success, FALSE
    // with the last error set on failure.
    BOOL bRetVal = FALSE;
    PCMI pEntry = NULL;
    SIZE_T MemSize = 0;
    UINT_PTR StartBoundary = 0;
    SIZE_T Index = 0;
    SIZE_T NumberOfPagesToChange = 0;
    SIZE_T OffSet = 0;
    CPalThread * pthrCurrent;

    PERF_ENTRY(VirtualProtect);
    ENTRY("VirtualProtect(lpAddress=%p, dwSize=%u, flNewProtect=%#x, "
          "flOldProtect=%p)\n",
          lpAddress, dwSize, flNewProtect, lpflOldProtect);

    pthrCurrent = InternalGetCurrentThread();
    InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);

    // Snap the range to whole pages: start rounded down, end rounded up.
    StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize());
    MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary;

    if ( VIRTUALContainsInvalidProtectionFlags( flNewProtect ) )
    {
        ASSERT( "flProtect can be one of PAGE_NOACCESS, PAGE_READONLY, "
               "PAGE_READWRITE, PAGE_EXECUTE, PAGE_EXECUTE_READ "
               ", or PAGE_EXECUTE_READWRITE. \n" );
        SetLastError( ERROR_INVALID_PARAMETER );
        goto ExitVirtualProtect;
    }

    if ( !lpflOldProtect)
    {
        ERROR( "lpflOldProtect was invalid.\n" );
        SetLastError( ERROR_NOACCESS );
        goto ExitVirtualProtect;
    }

    pEntry = VIRTUALFindRegionInformation( StartBoundary );
    if ( NULL != pEntry )
    {
        /* See if the pages are committed. */
        Index = OffSet = StartBoundary - pEntry->startBoundary == 0 ?
             0 : ( StartBoundary - pEntry->startBoundary ) / GetVirtualPageSize();
        NumberOfPagesToChange = MemSize / GetVirtualPageSize();

        TRACE( "Number of pages to check %d, starting page %d \n", NumberOfPagesToChange, Index );

        // Bug fix: the scan must cover NumberOfPagesToChange pages starting
        // at page OffSet. The previous bound (Index < NumberOfPagesToChange)
        // compared a page index against a page count, so whenever the range
        // did not start at the region's first page the loop checked the
        // wrong pages — or none at all when OffSet >= NumberOfPagesToChange.
        for ( ; Index < OffSet + NumberOfPagesToChange; Index++ )
        {
            if ( !VIRTUALIsPageCommitted( Index, pEntry ) )
            {
                ERROR( "You can only change the protection attributes"
                       " on committed memory.\n" );
                SetLastError( ERROR_INVALID_ADDRESS );
                goto ExitVirtualProtect;
            }
        }
    }

    if ( 0 == mprotect( (LPVOID)StartBoundary, MemSize,
                   W32toUnixAccessControl( flNewProtect ) ) )
    {
        /* Reset the access protection. */
        TRACE( "Number of pages to change %d, starting page %d \n",
               NumberOfPagesToChange, OffSet );
        /*
         * Set the old protection flags. We only use the first flag, so
         * if there were several regions with each with different flags only the
         * first region's protection flag will be returned.
         */
        if ( pEntry )
        {
            *lpflOldProtect =
                VIRTUALConvertVirtualFlags( pEntry->pProtectionState[ OffSet ] );

            memset( pEntry->pProtectionState + OffSet,
                    VIRTUALConvertWinFlags( flNewProtect ),
                    NumberOfPagesToChange );
        }
        else
        {
            // Range is not tracked by the PAL; there is no recorded previous
            // protection to report.
            *lpflOldProtect = PAGE_EXECUTE_READWRITE;
        }
        bRetVal = TRUE;
    }
    else
    {
        ERROR( "%s\n", strerror( errno ) );
        if ( errno == EINVAL )
        {
            SetLastError( ERROR_INVALID_ADDRESS );
        }
        else if ( errno == EACCES )
        {
            SetLastError( ERROR_INVALID_ACCESS );
        }
    }
ExitVirtualProtect:
    InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);

#if defined _DEBUG
    VIRTUALDisplayList();
#endif
    LOGEXIT( "VirtualProtect returning %s.\n", bRetVal == TRUE ? "TRUE" : "FALSE" );
    PERF_EXIT(VirtualProtect);
    return bRetVal;
}
#if HAVE_VM_ALLOCATE
//---------------------------------------------------------------------------------------
//
// Convert a vm_prot_t flag on the Mach kernel to the corresponding memory protection on Windows.
//
// Arguments:
// protection - Mach protection to be converted
//
// Return Value:
// Return the corresponding memory protection on Windows (e.g. PAGE_READ_WRITE, etc.)
//
static DWORD VirtualMapMachProtectToWinProtect(vm_prot_t protection)
{
    // Decompose the Mach protection mask into its three capability bits and
    // pick the matching Windows PAGE_* constant. Write access without read
    // access maps onto the WRITECOPY variants, exactly as the original
    // nested-if formulation did.
    const bool canRead    = (protection & VM_PROT_READ) != 0;
    const bool canWrite   = (protection & VM_PROT_WRITE) != 0;
    const bool canExecute = (protection & VM_PROT_EXECUTE) != 0;

    if (canRead)
    {
        if (canWrite)
        {
            return canExecute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
        }
        return canExecute ? PAGE_EXECUTE_READ : PAGE_READONLY;
    }

    if (canWrite)
    {
        return canExecute ? PAGE_EXECUTE_WRITECOPY : PAGE_WRITECOPY;
    }
    return canExecute ? PAGE_EXECUTE : PAGE_NOACCESS;
}
// Queries the Mach kernel (vm_region/vm_region_64) for the region containing
// lpAddress and fills in *lpBuffer. If the query fails the buffer is left
// untouched; if lpAddress lies in an unmapped gap only State is set (MEM_FREE).
static void VM_ALLOCATE_VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer)
{
    kern_return_t MachRet;
    vm_address_t vm_address;
    vm_size_t vm_size;
    vm_region_flavor_t vm_flavor;
    mach_msg_type_number_t infoCnt;
    mach_port_t object_name;
#ifdef BIT64
    vm_region_basic_info_data_64_t info;
    infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
    vm_flavor = VM_REGION_BASIC_INFO_64;
#else
    vm_region_basic_info_data_t info;
    infoCnt = VM_REGION_BASIC_INFO_COUNT;
    vm_flavor = VM_REGION_BASIC_INFO;
#endif

    vm_address = (vm_address_t)lpAddress;
#ifdef BIT64
    MachRet = vm_region_64(
#else
    MachRet = vm_region(
#endif
                        mach_task_self(),
                        &vm_address,
                        &vm_size,
                        vm_flavor,
                        (vm_region_info_t)&info,
                        &infoCnt,
                        &object_name);
    if (MachRet != KERN_SUCCESS) {
        return;
    }

    // vm_region rounds the address up to the start of the next mapped region
    // when the queried address is unmapped.
    if (vm_address > (vm_address_t)lpAddress) {
        /* lpAddress was pointing into a free region */
        lpBuffer->State = MEM_FREE;
        return;
    }

    lpBuffer->BaseAddress = (PVOID)vm_address;

    // We don't actually have any information on the Mach kernel which maps to AllocationProtect.
    lpBuffer->AllocationProtect = VM_PROT_NONE;

    lpBuffer->RegionSize = (SIZE_T)vm_size;

    if (info.reserved)
    {
        lpBuffer->State = MEM_RESERVE;
    }
    else
    {
        lpBuffer->State = MEM_COMMIT;
    }

    lpBuffer->Protect = VirtualMapMachProtectToWinProtect(info.protection);

    /* Note that if a mapped region and a private region are adjacent, this
        will return MEM_PRIVATE but the region size will span
        both the mapped and private regions. */
    if (!info.shared)
    {
        lpBuffer->Type = MEM_PRIVATE;
    }
    else
    {
        // What should this be? It's either MEM_MAPPED or MEM_IMAGE, but without an image list,
        // we can't determine which one it is.
        lpBuffer->Type = MEM_MAPPED;
    }
}
#endif // HAVE_VM_ALLOCATE
/*++
Function:
VirtualQuery
See MSDN doc.
--*/
SIZE_T
PALAPI
VirtualQuery(
         IN LPCVOID lpAddress,
         OUT PMEMORY_BASIC_INFORMATION lpBuffer,
         IN SIZE_T dwLength)
{
    // Reports the state of the region containing lpAddress: first from the
    // PAL's own region list, then from file mappings, and finally (where
    // available) from the Mach kernel. Returns sizeof(*lpBuffer) in all
    // cases, including parameter-validation failures (last error is set).
    PCMI pEntry = NULL;
    UINT_PTR StartBoundary = 0;
    CPalThread * pthrCurrent;

    PERF_ENTRY(VirtualQuery);
    ENTRY("VirtualQuery(lpAddress=%p, lpBuffer=%p, dwLength=%u)\n",
          lpAddress, lpBuffer, dwLength);

    pthrCurrent = InternalGetCurrentThread();
    InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);

    if ( !lpBuffer)
    {
        ERROR( "lpBuffer has to be a valid pointer.\n" );
        pthrCurrent->SetLastError( ERROR_NOACCESS );
        goto ExitVirtualQuery;
    }
    if ( dwLength < sizeof( *lpBuffer ) )
    {
        ERROR( "dwLength cannot be smaller then the size of *lpBuffer.\n" );
        pthrCurrent->SetLastError( ERROR_BAD_LENGTH );
        goto ExitVirtualQuery;
    }

    // Round the queried address down to a page boundary.
    StartBoundary = ALIGN_DOWN((SIZE_T)lpAddress, GetVirtualPageSize());

#if MMAP_IGNORES_HINT
    // Make sure we have memory to map before we try to query it.
    VIRTUALGetBackingFile(pthrCurrent);

    // If we're suballocating, claim that any memory that isn't in our
    // suballocated block is already allocated. This keeps callers from
    // using these results to try to allocate those blocks and failing.
    if (StartBoundary < (UINT_PTR) gBackingBaseAddress ||
        StartBoundary >= (UINT_PTR) gBackingBaseAddress + BACKING_FILE_SIZE)
    {
        if (StartBoundary < (UINT_PTR) gBackingBaseAddress)
        {
            lpBuffer->RegionSize = (UINT_PTR) gBackingBaseAddress - StartBoundary;
        }
        else
        {
            // NOTE(review): unary minus on an unsigned value wraps, so this
            // is effectively the distance from StartBoundary to the top of
            // the address space — confirm that is the intent.
            lpBuffer->RegionSize = -StartBoundary;
        }
        lpBuffer->BaseAddress = (void *) StartBoundary;
        lpBuffer->State = MEM_COMMIT;
        lpBuffer->Type = MEM_MAPPED;
        lpBuffer->AllocationProtect = 0;
        lpBuffer->Protect = 0;
        goto ExitVirtualQuery;
    }
#endif // MMAP_IGNORES_HINT

    /* Find the entry. */
    pEntry = VIRTUALFindRegionInformation( StartBoundary );

    if ( !pEntry )
    {
        /* Can't find a match, or no list present. */
        /* Next, looking for this region in file maps */
        if (!MAPGetRegionInfo((LPVOID)StartBoundary, lpBuffer))
        {
            // When all else fails, call vm_region() if it's available.
            // Initialize the State to be MEM_FREE, in which case AllocationBase, AllocationProtect,
            // Protect, and Type are all undefined.
            lpBuffer->BaseAddress = (LPVOID)StartBoundary;
            lpBuffer->RegionSize = 0;
            lpBuffer->State = MEM_FREE;
#if HAVE_VM_ALLOCATE
            VM_ALLOCATE_VirtualQuery(lpAddress, lpBuffer);
#endif
        }
    }
    else
    {
        /* Starting page. */
        SIZE_T Index = ( StartBoundary - pEntry->startBoundary ) / GetVirtualPageSize();

        /* Attributes to check for. */
        BYTE AccessProtection = pEntry->pProtectionState[ Index ];
        INT AllocationType = VIRTUALGetAllocationType( Index, pEntry );

        SIZE_T RegionSize = 0;

        TRACE( "Index = %d, Number of Pages = %d. \n",
               Index, pEntry->memSize / GetVirtualPageSize() );

        // Extend the reported region over every subsequent page that shares
        // the same allocation type and protection as the starting page.
        while ( Index < pEntry->memSize / GetVirtualPageSize() &&
                VIRTUALGetAllocationType( Index, pEntry ) == AllocationType &&
                pEntry->pProtectionState[ Index ] == AccessProtection )
        {
            RegionSize += GetVirtualPageSize();
            Index++;
        }

        TRACE( "RegionSize = %d.\n", RegionSize );

        /* Fill the structure.*/
        lpBuffer->AllocationProtect = pEntry->accessProtection;
        lpBuffer->BaseAddress = (LPVOID)StartBoundary;

        lpBuffer->Protect = AllocationType == MEM_COMMIT ?
            VIRTUALConvertVirtualFlags( AccessProtection ) : 0;

        lpBuffer->RegionSize = RegionSize;
        lpBuffer->State =
            ( AllocationType == MEM_COMMIT ? MEM_COMMIT : MEM_RESERVE );
        WARN( "Ignoring lpBuffer->Type. \n" );
    }

ExitVirtualQuery:

    InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);

    LOGEXIT( "VirtualQuery returning %d.\n", sizeof( *lpBuffer ) );
    PERF_EXIT(VirtualQuery);
    return sizeof( *lpBuffer );
}
size_t GetVirtualPageSize()
{
    // s_virtualPageSize is cached during PAL initialization and must be
    // non-zero before any virtual memory operation runs.
    size_t pageSize = s_virtualPageSize;
    _ASSERTE(pageSize != 0);
    return pageSize;
}
/*++
Function:
GetWriteWatch
See MSDN doc.
--*/
UINT
PALAPI
GetWriteWatch(
  IN DWORD dwFlags,
  IN PVOID lpBaseAddress,
  IN SIZE_T dwRegionSize,
  OUT PVOID *lpAddresses,
  IN OUT PULONG_PTR lpdwCount,
  OUT PULONG lpdwGranularity
  )
{
    // TODO: implement this method
    // Write-watch tracking is not supported by this PAL. Hand back an empty
    // result set and a non-zero status so callers treat the call as failed.
    *lpdwCount = 0;
    *lpAddresses = NULL;

    return 1;
}
/*++
Function:
ResetWriteWatch
See MSDN doc.
--*/
UINT
PALAPI
ResetWriteWatch(
  IN LPVOID lpBaseAddress,
  IN SIZE_T dwRegionSize
  )
{
    // TODO: implement this method
    // Write-watch tracking is not supported by this PAL; the non-zero return
    // value tells the caller the reset failed.
    UINT status = 1;
    return status;
}
/*++
Function :
    ReserveMemoryFromExecutableAllocator

    This function is used to reserve a region of virtual memory (not committed)
    that is located close to the coreclr library. The memory comes from the virtual
    address range that is managed by ExecutableMemoryAllocator.
--*/
void* ReserveMemoryFromExecutableAllocator(CPalThread* pThread, SIZE_T allocationSize)
{
#ifdef BIT64
    // Serialize with the rest of the virtual memory bookkeeping, then carve
    // the reservation out of the executable allocator's reserved range.
    void* reservation;
    InternalEnterCriticalSection(pThread, &virtual_critsec);
    reservation = g_executableMemoryAllocator.AllocateMemory(allocationSize);
    InternalLeaveCriticalSection(pThread, &virtual_critsec);
    return reservation;
#else // !BIT64
    return nullptr;
#endif // BIT64
}
/*++
Function:
ExecutableMemoryAllocator::Initialize()
This function initializes the allocator. It should be called early during process startup
(when process address space is pretty much empty) in order to have a chance to reserve
sufficient amount of memory that is close to the coreclr library.
--*/
void ExecutableMemoryAllocator::Initialize()
{
    // Reset all bookkeeping: no memory has been reserved yet.
    m_startAddress = NULL;
    m_nextFreeAddress = NULL;
    m_remainingReservedMemory = 0;
    m_totalSizeOfReservedMemory = 0;

    // Only arm the allocator on 64-bit platforms; 32-bit address spaces are
    // too small to set aside a large executable reservation.
#ifdef BIT64
    TryReserveInitialMemory();
#endif // BIT64
}
/*++
Function:
ExecutableMemoryAllocator::TryReserveInitialMemory()
This function is called during PAL initialization. It opportunistically tries to reserve
a large chunk of virtual memory that can be later used to store JIT'ed code.
--*/
void ExecutableMemoryAllocator::TryReserveInitialMemory()
{
    CPalThread* pthrCurrent = InternalGetCurrentThread();
    int32_t sizeOfAllocation = MaxExecutableMemorySizeNearCoreClr;
    int32_t preferredStartAddressIncrement;
    UINT_PTR preferredStartAddress;
    UINT_PTR coreclrLoadAddress;
    const int32_t MemoryProbingIncrement = 128 * 1024 * 1024;

    // Try to find and reserve an available region of virtual memory that is located
    // within 2GB range (defined by the MaxExecutableMemorySizeNearCoreClr constant) from the
    // location of the coreclr library.
    // Potentially, as a possible future improvement, we can get precise information
    // about available memory ranges by parsing data from '/proc/self/maps'.
    // But since this code is called early during process startup, the user address space
    // is pretty much empty so the simple algorithm that is implemented below is sufficient
    // for this purpose.

    // First of all, we need to determine the current address of libcoreclr. Please note that depending on
    // the OS implementation, the library is usually loaded either at the end or at the start of the user
    // address space. If the library is loaded at low addresses then try to reserve memory above libcoreclr
    // (thus avoiding reserving memory below 4GB; besides some operating systems do not allow that).
    // If libcoreclr is loaded at high addresses then try to reserve memory below its location.
    //
    // The address of VirtualAlloc (a symbol inside libcoreclr) is used as the
    // probe for where the library was loaded.
    coreclrLoadAddress = (UINT_PTR)PAL_GetSymbolModuleBase((void*)VirtualAlloc);
    if ((coreclrLoadAddress < 0xFFFFFFFF) || ((coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr) < 0xFFFFFFFF))
    {
        // Try to allocate above the location of libcoreclr
        preferredStartAddress = coreclrLoadAddress + CoreClrLibrarySize;
        preferredStartAddressIncrement = MemoryProbingIncrement;
    }
    else
    {
        // Try to allocate below the location of libcoreclr
        preferredStartAddress = coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr;
        preferredStartAddressIncrement = 0;
    }

    // Do actual memory reservation.
    // Each failed attempt shrinks the request by MemoryProbingIncrement and
    // (when probing upward) advances the start address by the same amount.
    do
    {
        m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation, 0 /* fAllocationType */);
        if (m_startAddress != nullptr)
        {
            break;
        }

        // Try to allocate a smaller region
        sizeOfAllocation -= MemoryProbingIncrement;
        preferredStartAddress += preferredStartAddressIncrement;

    } while (sizeOfAllocation >= MemoryProbingIncrement);

    if (m_startAddress == nullptr)
    {
        // We were not able to reserve any memory near libcoreclr. Try to reserve approximately 2 GB of address space somewhere
        // anyway:
        //   - This sets aside address space that can be used for executable code, such that jumps/calls between such code may
        //     continue to use short relative addresses instead of long absolute addresses that would currently require jump
        //     stubs.
        //   - The inability to allocate memory in a specific range for jump stubs is an unrecoverable problem. This reservation
        //     would mitigate such issues that can become prevalent depending on which security features are enabled and to what
        //     extent, such as in particular, PaX's RANDMMAP:
        //       - https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options
        //   - Jump stubs for executable code residing in this region can request memory from this allocator
        //   - Native images can be loaded into this address space, including any jump stubs that are required for its helper
        //     table. This satisfies the vast majority of practical cases where the total amount of loaded native image memory
        //     does not exceed approximately 2 GB.
        //   - The code heap allocator for the JIT can allocate from this address space. Beyond this reservation, one can use
        //     the COMPlus_CodeHeapReserveForJumpStubs environment variable to reserve space for jump stubs.
        sizeOfAllocation = MaxExecutableMemorySize;
        m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation, 0 /* fAllocationType */);
        if (m_startAddress == nullptr)
        {
            return;
        }
    }

    // Memory has been successfully reserved.
    m_totalSizeOfReservedMemory = sizeOfAllocation;

    // Randomize the location at which we start allocating from the reserved memory range. Alignment to a 64 KB granularity
    // should not be necessary, but see AllocateMemory() for the reason why it is done.
    int32_t randomOffset = GenerateRandomStartOffset();
    m_nextFreeAddress = ALIGN_UP((void*)(((UINT_PTR)m_startAddress) + randomOffset), VIRTUAL_64KB);
    _ASSERTE(sizeOfAllocation >= (int32_t)((UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress));
    m_remainingReservedMemory =
        ALIGN_DOWN(sizeOfAllocation - ((UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress), VIRTUAL_64KB);
}
/*++
Function:
  ExecutableMemoryAllocator::AllocateMemory

Hands out the requested amount of memory from the allocator's reserved
virtual address range using a simple bump-pointer scheme. Returns null
when the request cannot be satisfied from what remains of the reservation.

Note: This function MUST be called with the virtual_critsec lock held.

--*/
void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize)
{
#ifdef BIT64
    // Alignment to a 64 KB granularity should not be necessary (page-size alignment would suffice), but
    // VIRTUALReserveMemory() aligns the specified address down to a 64 KB granularity. As long as that is the case,
    // the request size must be 64 KB aligned so that every address this function returns is also 64 KB aligned;
    // otherwise reserving memory starting at an unaligned address returned here would fail in VIRTUALReserveMemory.
    _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB));

    // The caller owns the virtual_critsec lock, so the bookkeeping below does
    // not need its own synchronization.
    if ((allocationSize == 0) || (allocationSize > (SIZE_T)m_remainingReservedMemory))
    {
        return nullptr;
    }

    void* reservedBlock = m_nextFreeAddress;
    m_nextFreeAddress = (void*)(((UINT_PTR)reservedBlock) + allocationSize);
    m_remainingReservedMemory -= allocationSize;

    return reservedBlock;
#else // !BIT64
    return nullptr;
#endif // BIT64
}
/*++
Function:
  AllocateMemory

Hands out the requested amount of memory from the allocator's reserved
virtual address range, but only if the resulting block lies entirely within
[beginAddress, endAddress]. Returns null when the request cannot be
satisfied under those constraints.

Note: This function MUST be called with the virtual_critsec lock held.

--*/
void *ExecutableMemoryAllocator::AllocateMemoryWithinRange(const void *beginAddress, const void *endAddress, SIZE_T allocationSize)
{
#ifdef BIT64
    _ASSERTE(beginAddress <= endAddress);

    // Alignment to a 64 KB granularity should not be necessary (page-size alignment would suffice), but see
    // AllocateMemory() for the reason why it is required.
    _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB));

    // The caller owns the virtual_critsec lock, so the bookkeeping below does
    // not need its own synchronization.
    if (allocationSize == 0 || allocationSize > (SIZE_T)m_remainingReservedMemory)
    {
        return nullptr;
    }

    void *candidate = m_nextFreeAddress;
    void *candidateEnd = (void *)((UINT_PTR)candidate + allocationSize);

    // The candidate block must fall entirely inside the requested range.
    if (candidate < beginAddress || candidateEnd > endAddress)
    {
        return nullptr;
    }

    m_nextFreeAddress = candidateEnd;
    m_remainingReservedMemory -= allocationSize;

    return candidate;
#else // !BIT64
    return nullptr;
#endif // BIT64
}
/*++
Function:
  ExecutableMemoryAllocator::GenerateRandomStartOffset()

Returns a random byte offset (a whole number of virtual pages) at which the
allocator should start handing out memory from its reserved range.

--*/
int32_t ExecutableMemoryAllocator::GenerateRandomStartOffset()
{
    const int32_t MaxStartPageOffset = 64;

    // Mirror what the coreclr runtime does on Windows: skip a random number
    // of pages in the range 0...MaxStartPageOffset.
    srandom(time(NULL));
    int32_t pagesToSkip = (int32_t)(MaxStartPageOffset * (int64_t)random() / RAND_MAX);

    return pagesToSkip * GetVirtualPageSize();
}
|
/** Model stored as HDF5 file.
*/
#if !defined(geomodelgrids_serial_modelinfo_hh)
#define geomodelgrids_serial_modelinfo_hh
// Include directives -----------------------------------------------------------------------------
#include "serialfwd.hh" // forward declarations
#include <vector> // HASA std::vector
#include <string> // HASA std::string
class geomodelgrids::serial::ModelInfo {
    friend class TestModelInfo; // Unit testing

    // PUBLIC METHODS -----------------------------------------------------------------------------
public:

    /// Default constructor.
    ModelInfo(void);

    /// Destructor.
    ~ModelInfo(void);

    /** Get title.
     *
     * @returns Title of model.
     */
    const std::string& getTitle(void) const;

    /** Get identifier.
     *
     * @returns Model identifier.
     */
    const std::string& getId(void) const;

    /** Get description.
     *
     * @returns Model description.
     */
    const std::string& getDescription(void) const;

    /** Get keywords describing model.
     *
     * @returns Array of keywords.
     */
    const std::vector<std::string>& getKeywords(void) const;

    /** Get history of model.
     *
     * @returns Model history.
     */
    const std::string& getHistory(void) const;

    /** Get comment of model.
     *
     * @returns Comment.
     */
    const std::string& getComment(void) const;

    /** Get name of creator.
     *
     * @returns Name of creator.
     */
    const std::string& getCreatorName(void) const;

    /** Get institution of creator.
     *
     * @returns Institution of creator.
     */
    const std::string& getCreatorInstitution(void) const;

    /** Get email of creator.
     *
     * @returns Email of creator.
     */
    const std::string& getCreatorEmail(void) const;

    /** Get acknowledgment.
     *
     * @returns Acknowledgment for model.
     */
    const std::string& getAcknowledgement(void) const;

    /** Get authors of model.
     *
     * @returns Array of author names.
     */
    const std::vector<std::string>& getAuthors(void) const;

    /** Get references associated with model.
     *
     * @returns Array of references.
     */
    const std::vector<std::string>& getReferences(void) const;

    /** Get name of repository holding model.
     *
     * @returns Repository name.
     */
    const std::string& getRepositoryName(void) const;

    /** Get URL of repository holding model.
     *
     * @returns URL of repository.
     */
    const std::string& getRepositoryURL(void) const;

    /** Get DOI for model.
     *
     * @returns Digital Object Identifier.
     */
    const std::string& getRepositoryDOI(void) const;

    /** Get model version.
     *
     * @returns ModelInfo version.
     */
    const std::string& getVersion(void) const;

    /** Get license for model.
     *
     * @returns License for model.
     */
    const std::string& getLicense(void) const;

    /** Get auxiliary information.
     *
     * @returns Json as string.
     */
    const std::string& getAuxiliary(void) const;

    /** Load metadata from the model file.
     */
    void load(geomodelgrids::serial::HDF5* const h5);

    // PRIVATE MEMBERS ----------------------------------------------------------------------------
private:

    std::string _title; ///< Title of model.
    std::string _id; ///< ModelInfo identifier.
    std::string _description; ///< ModelInfo description.
    std::vector<std::string> _keywords; ///< Keywords describing model.
    std::string _history; ///< History of model.
    std::string _comment; ///< General comment on model.
    std::string _creatorName; ///< Name of person creating model.
    std::string _creatorInstitution; ///< Institution of creator.
    std::string _creatorEmail; ///< Email of creator.
    std::string _acknowledgement; ///< Acknowledgments for model.
    std::vector<std::string> _authors; ///< Name of authors.
    std::vector<std::string> _references; ///< References for model.
    std::string _repositoryName; ///< Name of repository containing model.
    std::string _repositoryURL; ///< URL of repository containing model.
    std::string _repositoryDOI; ///< Digital Object Identifier for model.
    std::string _version; ///< ModelInfo version.
    std::string _license; ///< License for model.
    std::string _auxiliary; ///< Auxiliary information (optional).

    // NOT IMPLEMENTED ----------------------------------------------------------------------------
private:

    ModelInfo(const ModelInfo&); ///< Not implemented
    const ModelInfo& operator=(const ModelInfo&); ///< Not implemented

}; // ModelInfo
#endif // geomodelgrids_serial_modelinfo_hh
// End of file
|
// Copyright (c) 2018-present The Alive2 Authors.
// Distributed under the MIT license that can be found in the LICENSE file.
#include "llvm_util/utils.h"
#include "ir/constant.h"
#include "ir/function.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"
#include <unordered_map>
#include <utility>
#include <vector>
using namespace IR;
using namespace std;
using llvm::cast, llvm::dyn_cast, llvm::isa;
namespace {
// Per-module translation state shared by the helpers in this file; it is
// (re)initialized via init_llvm_utils() and reset_state().

// cache Value*'s names
unordered_map<const llvm::Value*, string> value_names;
unsigned value_id_counter; // for %0, %1, etc..

// Lazily created singletons for integer types (indexed by bit width) and
// pointer types (indexed by address space); see get_int_type/llvm_type2alive.
vector<unique_ptr<IntType>> int_types;
vector<unique_ptr<PtrType>> ptr_types;
FloatType half_type("half", FloatType::Half);
FloatType float_type("float", FloatType::Float);
FloatType double_type("double", FloatType::Double);
FloatType quad_type("fp128", FloatType::Quad);
FloatType bfloat_type("bfloat", FloatType::BFloat);

// cache complex types
unordered_map<const llvm::Type*, unique_ptr<Type>> type_cache;
unsigned type_id_counter; // for unnamed types

Function *current_fn; // function currently being translated
unordered_map<const llvm::Value*, Value*> value_cache; // llvm::Value -> IR::Value
ostream *out; // diagnostic output stream
const llvm::DataLayout *DL;

// Returns true if ty is, or recursively contains, an opaque struct type.
bool hasOpaqueType(llvm::Type *ty) {
  if (auto aty = llvm::dyn_cast<llvm::StructType>(ty)) {
    if (aty->isOpaque())
      return true;

    for (auto elemty : aty->elements())
      if (hasOpaqueType(elemty))
        return true;
  } else if (auto aty = llvm::dyn_cast<llvm::ArrayType>(ty))
    return hasOpaqueType(aty->getElementType());
  else if (auto vty = llvm::dyn_cast<llvm::VectorType>(ty))
    return hasOpaqueType(vty->getElementType());

  return false;
}
}
namespace llvm_util {
// Collect the fast-math flags set on instruction i.
// Non-floating-point instructions yield an empty flag set.
FastMathFlags parse_fmath(llvm::Instruction &i) {
  FastMathFlags fmath;
  auto fpop = dyn_cast<llvm::FPMathOperator>(&i);
  if (!fpop)
    return fmath;

  if (fpop->hasNoNaNs())
    fmath.flags |= FastMathFlags::NNaN;
  if (fpop->hasNoInfs())
    fmath.flags |= FastMathFlags::NInf;
  if (fpop->hasNoSignedZeros())
    fmath.flags |= FastMathFlags::NSZ;
  if (fpop->hasAllowReciprocal())
    fmath.flags |= FastMathFlags::ARCP;
  if (fpop->hasAllowContract())
    fmath.flags |= FastMathFlags::Contract;
  if (fpop->hasAllowReassoc())
    fmath.flags |= FastMathFlags::Reassoc;
  if (fpop->hasApproxFunc())
    fmath.flags |= FastMathFlags::AFN;
  return fmath;
}
// Look up the Alive2 basic block corresponding to an LLVM basic block in the
// function currently being translated (blocks are keyed by their value name).
BasicBlock& getBB(const llvm::BasicBlock *bb) {
  return current_fn->getBB(value_name(*bb));
}
// Return a stable printable name for v, caching it in value_names.
// Named values keep their LLVM name ('%name'); unnamed non-void values get
// sequential numbers ('%0', '%1', ...); void values are shown as "<void>".
string value_name(const llvm::Value &v) {
  auto &cached = value_names[&v];
  if (cached.empty()) {
    if (!v.getName().empty())
      cached = '%' + v.getName().str();
    else if (v.getType()->isVoidTy())
      cached = "<void>";
    else
      cached = '%' + to_string(value_id_counter++);
  }
  return cached;
}
// Forget the cached printable name for v (e.g. when the value is erased),
// so a later lookup recomputes it.
void remove_value_name(const llvm::Value &v) {
  value_names.erase(&v);
}
// Return the singleton IntType of the given bit width, creating and caching
// it on first use. The cache vector grows on demand.
Type& get_int_type(unsigned bits) {
  if (int_types.size() <= bits)
    int_types.resize(bits + 1);

  auto &slot = int_types[bits];
  if (!slot)
    slot = make_unique<IntType>("i" + to_string(bits), bits);
  return *slot;
}
// Translate an llvm::Type into the equivalent Alive2 IR type.
// Returns nullptr for unsupported cases: pointers in a non-zero address
// space, vectors with more than 1024 elements, arrays with more than 100K
// elements, and any type id not handled below. Integer and pointer types are
// cached in int_types/ptr_types; aggregate types are memoized in type_cache.
Type* llvm_type2alive(const llvm::Type *ty) {
  switch (ty->getTypeID()) {
  case llvm::Type::VoidTyID:
    return &Type::voidTy;
  case llvm::Type::IntegerTyID:
    return &get_int_type(cast<llvm::IntegerType>(ty)->getBitWidth());
  case llvm::Type::HalfTyID:
    return &half_type;
  case llvm::Type::FloatTyID:
    return &float_type;
  case llvm::Type::DoubleTyID:
    return &double_type;
  case llvm::Type::FP128TyID:
    return &quad_type;
  case llvm::Type::BFloatTyID:
    return &bfloat_type;
  case llvm::Type::PointerTyID: {
    // TODO: support for non-64 bits pointers
    unsigned as = cast<llvm::PointerType>(ty)->getAddressSpace();
    // TODO: add support for non-0 AS
    if (as != 0)
      return nullptr;
    if (as >= ptr_types.size())
      ptr_types.resize(as + 1);
    if (!ptr_types[as])
      ptr_types[as] = make_unique<PtrType>(as);
    return ptr_types[as].get();
  }
  case llvm::Type::StructTyID: {
    auto &cache = type_cache[ty];
    if (!cache) {
      vector<Type*> elems;
      vector<bool> is_padding;
      auto strty = cast<llvm::StructType>(ty);
      auto layout = DL->getStructLayout(const_cast<llvm::StructType *>(strty));
      for (unsigned i = 0; i < strty->getNumElements(); ++i) {
        auto e = strty->getElementType(i);
        unsigned ofs = layout->getElementOffset(i);
        unsigned sz = DL->getTypeStoreSize(e);
        if (auto ty = llvm_type2alive(e)) {
          elems.push_back(ty);
          is_padding.push_back(false);
        } else
          return nullptr;
        // Any gap between this element's store size and the next element's
        // offset is modeled explicitly as an integer padding element.
        unsigned ofs_next = i + 1 == strty->getNumElements() ?
            DL->getTypeAllocSize(const_cast<llvm::StructType *>(strty)) :
            layout->getElementOffset(i + 1);
        assert(ofs + sz <= ofs_next);
        if (ofs_next != ofs + sz) {
          unsigned padsz = 8 * (ofs_next - ofs - sz);
          auto padding_ty = llvm::IntegerType::get(strty->getContext(), padsz);
          if (auto ty = llvm_type2alive(padding_ty)) {
            elems.push_back(ty);
            is_padding.push_back(true);
          } else
            return nullptr;
        }
      }
      cache = make_unique<StructType>("ty_" + to_string(type_id_counter++),
                                      move(elems), move(is_padding));
    }
    return cache.get();
  }
  // TODO: non-fixed sized vectors
  case llvm::Type::FixedVectorTyID: {
    auto &cache = type_cache[ty];
    if (!cache) {
      auto vty = cast<llvm::VectorType>(ty);
      auto elems = vty->getElementCount().getKnownMinValue();
      auto ety = llvm_type2alive(vty->getElementType());
      if (!ety || elems > 1024)
        return nullptr;
      cache = make_unique<VectorType>("ty_" + to_string(type_id_counter++),
                                      elems, *ety);
    }
    return cache.get();
  }
  case llvm::Type::ArrayTyID: {
    auto &cache = type_cache[ty];
    if (!cache) {
      auto aty = cast<llvm::ArrayType>(ty);
      auto elemty = aty->getElementType();
      auto elems = aty->getNumElements();
      auto ety = llvm_type2alive(elemty);
      if (!ety || elems > 100 * 1024)
        return nullptr;
      // Per-element tail padding (alloc size > store size) is represented by
      // an integer padding type; paddingTy stays null when the sizes match.
      auto sz_with_padding = DL->getTypeAllocSize(elemty);
      auto sz = DL->getTypeStoreSize(elemty);
      assert(DL->getTypeAllocSize(const_cast<llvm::ArrayType *>(aty)) ==
             elems * sz_with_padding);
      Type *paddingTy = sz == sz_with_padding ? 0 :
          llvm_type2alive(llvm::IntegerType::get(aty->getContext(),
                                                 8 * (sz_with_padding - sz)));
      cache = make_unique<ArrayType>("ty_" + to_string(type_id_counter++),
                                     elems, *ety, paddingTy);
    }
    return cache.get();
  }
  default:
    *out << "ERROR: Unsupported type: " << *ty << '\n';
    return nullptr;
  }
}
// Create an integer constant of the given bit width, register it with the
// current function, and return a non-owning pointer to it.
Value* make_intconst(uint64_t val, int bits) {
  auto cnst = make_unique<IntConst>(get_int_type(bits), val);
  Value *result = cnst.get();
  current_fn->addConstant(move(cnst));
  return result;
}
// Memoize the translated value for the llvm::Value* `v` (in scope at the
// expansion site) in value_cache and return it. Evaluates `val` only once;
// ENSURE guards against a duplicate cache entry.
#define RETURN_CACHE(val)    \
  do {                       \
    auto val_cpy = val;      \
    ENSURE(value_cache.emplace(v, val_cpy).second); \
    return val_cpy;          \
  } while (0)
// Translate an llvm::Value (integer/FP constants, poison/undef, null,
// globals, constant aggregates, constant expressions) into the corresponding
// IR::Value, memoizing results in value_cache.
//   constexpr_conv: callback that lowers llvm::ConstantExpr nodes.
//   copy_inserter: callback that materializes a copy of a non-constant
//                  aggregate at the current insertion point.
// Returns nullptr when the value or its type is unsupported.
Value* get_operand(llvm::Value *v,
                   function<Value*(llvm::ConstantExpr*)> constexpr_conv,
                   function<Value*(AggregateValue*)> copy_inserter) {
  if (auto I = value_cache.find(v);
      I != value_cache.end())
    return I->second;

  auto ty = llvm_type2alive(v->getType());
  if (!ty)
    return nullptr;

  // Integer constants; values wider than 64 bits go through a decimal string.
  if (auto cnst = dyn_cast<llvm::ConstantInt>(v)) {
    unique_ptr<IntConst> c;
    if (cnst->getBitWidth() <= 64)
      c = make_unique<IntConst>(*ty, cnst->getZExtValue());
    else
      c = make_unique<IntConst>(*ty, toString(cnst->getValue(), 10, false));
    auto ret = c.get();
    current_fn->addConstant(move(c));
    RETURN_CACHE(ret);
  }

  // FP constants; half/quad are encoded via their raw APInt bit pattern.
  if (auto cnst = dyn_cast<llvm::ConstantFP>(v)) {
    auto &apfloat = cnst->getValueAPF();
    unique_ptr<FloatConst> c;
    switch (ty->getAsFloatType()->getFpType()) {
    case FloatType::Float:
    case FloatType::BFloat:
      c = make_unique<FloatConst>(*ty, apfloat.convertToFloat());
      break;
    case FloatType::Double:
      c = make_unique<FloatConst>(*ty, apfloat.convertToDouble());
      break;
    case FloatType::Half:
    case FloatType::Quad:
      c = make_unique<FloatConst>(*ty,
                                  toString(apfloat.bitcastToAPInt(), 10, false),
                                  true);
      break;
    case FloatType::Unknown:
      UNREACHABLE();
    }
    auto ret = c.get();
    current_fn->addConstant(move(c));
    RETURN_CACHE(ret);
  }

  // NOTE: poison must be checked before undef (PoisonValue isa UndefValue).
  if (isa<llvm::PoisonValue>(v)) {
    auto val = make_unique<PoisonValue>(*ty);
    auto ret = val.get();
    current_fn->addConstant(move(val));
    RETURN_CACHE(ret);
  }

  if (isa<llvm::UndefValue>(v)) {
    auto val = make_unique<UndefValue>(*ty);
    auto ret = val.get();
    current_fn->addUndef(move(val));
    RETURN_CACHE(ret);
  }

  if (isa<llvm::ConstantPointerNull>(v)) {
    auto val = make_unique<NullPointerValue>(*ty);
    auto ret = val.get();
    current_fn->addConstant(move(val));
    RETURN_CACHE(ret);
  }

  if (auto gv = dyn_cast<llvm::GlobalVariable>(v)) {
    if (hasOpaqueType(gv->getValueType()))
      // TODO: Global variable of opaque type is not supported.
      return nullptr;
    unsigned size = DL->getTypeAllocSize(gv->getValueType());
    unsigned align = gv->getPointerAlignment(*DL).value();
    string name;
    if (!gv->hasName()) {
      // Unnamed globals are numbered by their position among the module's
      // unnamed globals, mirroring LLVM's printed names.
      unsigned id = 0;
      auto M = gv->getParent();
      auto i = M->global_begin(), e = M->global_end();
      for (; i != e; ++i) {
        if (i->hasName())
          continue;
        if (&(*i) == gv)
          break;
        ++id;
      }
      assert(i != e);
      name = '@' + to_string(id);
    } else {
      name = '@' + gv->getName().str();
    }
    auto val = make_unique<GlobalVariable>(*ty, move(name), size, align,
                                           gv->isConstant());
    auto gvar = val.get();
    current_fn->addConstant(move(val));
    RETURN_CACHE(gvar);
  }

  // Translate the (non-padding) elements of an aggregate; padding elements
  // introduced by llvm_type2alive are skipped.
  auto fillAggregateValues = [&](AggregateType *aty,
      function<llvm::Value *(unsigned)> get_elem, vector<Value*> &vals) -> bool
  {
    unsigned opi = 0;
    for (unsigned i = 0; i < aty->numElementsConst(); ++i) {
      if (!aty->isPadding(i)) {
        if (auto op = get_operand(get_elem(opi), constexpr_conv, copy_inserter))
          vals.emplace_back(op);
        else
          return false;
        ++opi;
      }
    }
    return true;
  };

  if (auto cnst = dyn_cast<llvm::ConstantAggregate>(v)) {
    vector<Value*> vals;
    if (!fillAggregateValues(dynamic_cast<AggregateType *>(ty),
        [&cnst](auto i) { return cnst->getOperand(i); }, vals))
      return nullptr;
    auto val = make_unique<AggregateValue>(*ty, move(vals));
    auto ret = val.get();
    // Aggregates made only of constant data can be cached; anything else
    // must be copied at the use site via copy_inserter.
    if (all_of(cnst->op_begin(), cnst->op_end(), [](auto &V) -> bool
               { return isa<llvm::ConstantData>(V); })) {
      current_fn->addConstant(move(val));
      RETURN_CACHE(ret);
    } else {
      current_fn->addAggregate(move(val));
      return copy_inserter(ret);
    }
  }

  if (auto cnst = dyn_cast<llvm::ConstantDataSequential>(v)) {
    vector<Value*> vals;
    if (!fillAggregateValues(dynamic_cast<AggregateType *>(ty),
        [&cnst](auto i) { return cnst->getElementAsConstant(i); }, vals))
      return nullptr;
    auto val = make_unique<AggregateValue>(*ty, move(vals));
    auto ret = val.get();
    current_fn->addConstant(move(val));
    RETURN_CACHE(ret);
  }

  if (auto cnst = dyn_cast<llvm::ConstantAggregateZero>(v)) {
    vector<Value*> vals;
    if (!fillAggregateValues(dynamic_cast<AggregateType *>(ty),
        [&cnst](auto i) { return cnst->getElementValue(i); }, vals))
      return nullptr;
    auto val = make_unique<AggregateValue>(*ty, move(vals));
    auto ret = val.get();
    current_fn->addConstant(move(val));
    RETURN_CACHE(ret);
  }

  if (auto cexpr = dyn_cast<llvm::ConstantExpr>(v)) {
    return constexpr_conv(cexpr);
  }

  return nullptr;
}
// Register an existing Alive2 value as the translation of an llvm::Value so
// that subsequent get_operand() lookups hit the cache.
void add_identifier(const llvm::Value &llvm, Value &v) {
  value_cache.emplace(&llvm, &v);
}
// Stream llvm::Type / llvm::Value objects into a std::ostream by
// round-tripping their textual form through an llvm::raw_string_ostream.
#define PRINT(T)                                \
ostream& operator<<(ostream &os, const T &x) {  \
  string str;                                   \
  llvm::raw_string_ostream ss(str);             \
  ss << x;                                      \
  return os << ss.str();                        \
}
PRINT(llvm::Type)
PRINT(llvm::Value)
#undef PRINT
// Initialize the module-level translation state: output stream, data layout,
// and the pre-seeded type caches (i1 and the address-space-0 pointer type).
void init_llvm_utils(ostream &os, const llvm::DataLayout &dataLayout) {
  out = &os;
  DL = &dataLayout;
  type_id_counter = 0;

  // Pre-size the integer-type cache and seed the entries used most often.
  int_types.resize(65);
  int_types[1] = make_unique<IntType>("i1", 1);
  ptr_types.emplace_back(make_unique<PtrType>(0));
}
// Return the diagnostic output stream configured via init_llvm_utils/set_outs.
ostream& get_outs() {
  return *out;
}

// Redirect diagnostic output to the given stream.
void set_outs(ostream &os) {
  out = &os;
}
// Prepare per-function translation state for f: flush the value caches and
// restart unnamed-value numbering at %0.
void reset_state(Function &f) {
  value_id_counter = 0;
  value_cache.clear();
  value_names.clear();
  current_fn = &f;
}
}
|
/****************************************************************************
**
** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of the documentation of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:BSD$
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of Digia Plc and its Subsidiary(-ies) nor the names
** of its contributors may be used to endorse or promote products derived
** from this software without specific prior written permission.
**
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include <QtGui>
#include <QtDebug>
#include <QDeclarativeComponent>
//! [1]
// Report QML component load failures: when the component reaches the Error
// state, log each QDeclarativeError with its source URL and line number.
// NOTE(review): `component` is an external pointer assumed to be in scope in
// the surrounding example code — confirm against the embedding snippet.
void statusChanged(QDeclarativeComponent::Status status) {
    if (status == QDeclarativeComponent::Error) {
        foreach (const QDeclarativeError &error, component->errors()) {
            const QByteArray file = error.url().toEncoded();
            QMessageLogger(file.constData(), error.line(), 0).debug() << error.description();
        }
    }
}
//! [1]
//! [2]
const QLoggingCategory &category();
//! [2]
|
// <SNIPPET1>
using namespace System;
using namespace System::Security::Cryptography;
using namespace System::Text;
using namespace System::IO;
// Encrypts the string Data with RC2 using the supplied key and IV, writing
// the ciphertext to FileName. Cryptographic and file-access errors are
// reported to the console rather than propagated.
// NOTE(review): the streams are only closed on the success path — an
// exception thrown while writing leaks the handles until finalization;
// consider try/finally or stack semantics if hardening this sample.
void EncryptTextToFile( String^ Data, String^ FileName, array<Byte>^Key, array<Byte>^IV )
{
   try
   {
      // Create or open the specified file.
      FileStream^ fStream = File::Open( FileName, FileMode::OpenOrCreate );

      // Create a new RC2 object.
      RC2^ RC2alg = RC2::Create();

      // Create a CryptoStream using the FileStream
      // and the passed key and initialization vector (IV).
      CryptoStream^ cStream = gcnew CryptoStream( fStream,RC2alg->CreateEncryptor( Key, IV ),CryptoStreamMode::Write );

      // Create a StreamWriter using the CryptoStream.
      StreamWriter^ sWriter = gcnew StreamWriter( cStream );

      // Write the data to the stream
      // to encrypt it.
      sWriter->WriteLine( Data );

      // Close the streams and
      // close the file.
      sWriter->Close();
      cStream->Close();
      fStream->Close();
   }
   catch ( CryptographicException^ e )
   {
      Console::WriteLine( "A Cryptographic error occurred: {0}", e->Message );
   }
   catch ( UnauthorizedAccessException^ e )
   {
      Console::WriteLine( "A file error occurred: {0}", e->Message );
   }
}
// Decrypts the first line of FileName with RC2 using the supplied key and IV
// and returns the plaintext, or nullptr when a cryptographic or file-access
// error occurs (the error is written to the console).
// NOTE(review): FileMode::OpenOrCreate will silently create an empty file if
// FileName does not exist — FileMode::Open may be the safer choice here.
String^ DecryptTextFromFile( String^ FileName, array<Byte>^Key, array<Byte>^IV )
{
   try
   {
      // Create or open the specified file.
      FileStream^ fStream = File::Open( FileName, FileMode::OpenOrCreate );

      // Create a new RC2 object.
      RC2^ RC2alg = RC2::Create();

      // Create a CryptoStream using the FileStream
      // and the passed key and initialization vector (IV).
      CryptoStream^ cStream = gcnew CryptoStream( fStream,RC2alg->CreateDecryptor( Key, IV ),CryptoStreamMode::Read );

      // Create a StreamReader using the CryptoStream.
      StreamReader^ sReader = gcnew StreamReader( cStream );

      // Read the data from the stream
      // to decrypt it.
      String^ val = sReader->ReadLine();

      // Close the streams and
      // close the file.
      sReader->Close();
      cStream->Close();
      fStream->Close();

      // Return the string.
      return val;
   }
   catch ( CryptographicException^ e )
   {
      Console::WriteLine( "A Cryptographic error occurred: {0}", e->Message );
      return nullptr;
   }
   catch ( UnauthorizedAccessException^ e )
   {
      Console::WriteLine( "A file error occurred: {0}", e->Message );
      return nullptr;
   }
}
// Demonstrates round-tripping a string through RC2: generate a fresh key/IV,
// encrypt a sample string to CText.txt, decrypt it back, and print the result.
int main()
{
   try
   {
      // Create a new RC2 object to generate a key
      // and initialization vector (IV).
      RC2^ RC2alg = RC2::Create();

      // Create a string to encrypt.
      String^ sData = "Here is some data to encrypt.";
      String^ FileName = "CText.txt";

      // Encrypt text to a file using the file name, key, and IV.
      EncryptTextToFile( sData, FileName, RC2alg->Key, RC2alg->IV );

      // Decrypt the text from a file using the file name, key, and IV.
      String^ Final = DecryptTextFromFile( FileName, RC2alg->Key, RC2alg->IV );

      // Display the decrypted string to the console.
      Console::WriteLine( Final );
   }
   catch ( Exception^ e )
   {
      Console::WriteLine( e->Message );
   }
}
// </SNIPPET1>
|
#include "Messages/internal/ROSTopicSubscribeMessage.h"
#include "DataHelpers.h"
// Serialize this message into a rosbridge "subscribe" operation.
// fragment_size is only emitted when a positive value has been configured;
// all other fields are always written.
void UROSTopicSubscribeMessage::ToData(ROSData& OutMessage) const
{
	DataHelpers::Append<FString>(OutMessage, "op", "subscribe");
	DataHelpers::Append<FString>(OutMessage, "id", ID);
	DataHelpers::Append<FString>(OutMessage, "topic", TopicName);
	DataHelpers::Append<FString>(OutMessage, "type", MessageType);
	DataHelpers::Append<int32>(OutMessage, "throttle_rate", ThrottleRate);
	DataHelpers::Append<int32>(OutMessage, "queue_length", QueueLength);
	DataHelpers::Append<FString>(OutMessage, "compression", Compression);
	if(FragmentSize > 0)
	{
		DataHelpers::Append<int32>(OutMessage, "fragment_size", FragmentSize);
	}
}
// Populate this message from incoming rosbridge data.
// Only "topic" is required (its presence determines the return value); the
// remaining fields are optional and keep their current values when absent.
bool UROSTopicSubscribeMessage::FromData(const ROSData& Message)
{
	//optional
	DataHelpers::Extract<FString>(Message, "id", ID);
	DataHelpers::Extract<FString>(Message, "type", MessageType);
	DataHelpers::Extract<int32>(Message, "throttle_rate", ThrottleRate);
	DataHelpers::Extract<int32>(Message, "queue_length", QueueLength);
	DataHelpers::Extract<FString>(Message, "compression", Compression);

	return DataHelpers::Extract<FString>(Message, "topic", TopicName);
}
|
/*
* Copyright 2004, 2005, 2006 PathScale, Inc. All Rights Reserved.
*/
/*
Copyright (C) 2000, 2001 Silicon Graphics, Inc. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it would be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Further, this software is distributed without any warranty that it is
free of the rightful claim of any third person regarding infringement
or the like. Any license provided herein, whether implied or
otherwise, applies only to this software file. Patent licenses, if
any, provided herein do not apply to combinations of this program with
other software, or any other product whatsoever.
You should have received a copy of the GNU General Public License along
with this program; if not, write the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston MA 02111-1307, USA.
Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pky,
Mountain View, CA 94043, or:
http://www.sgi.com
For further information regarding this notice, see:
http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
/* =======================================================================
* =======================================================================
*
* Module: cg_swp_target.cxx
* $Revision: 1.5 $
* $Date: 04/12/21 14:57:08-08:00 $
* $Author: bos@eng-25.internal.keyresearch.com $
* $Source: /home/bos/bk/kpro64-pending/be/cg/x8664/SCCS/s.cg_swp_target.cxx $
*
* =======================================================================
* ======================================================================= */
#include <stdint.h>
#define USE_STANDARD_TYPES
#include <map>
#include "defs.h"
#include "mempool.h"
#include "tn.h"
#include "tn_set.h"
#include "bb.h"
#include "op.h"
#include "op_list.h"
#include "op_map.h"
#include "cgexp.h"
#include "cgtarget.h"
#include "register.h"
#include "cg_loop.h"
#include "cg_swp_options.h"
#include "cg_swp.h"
#include "cg_swp_target.h"
#include "tracing.h"
#include "pf_cg.h"
#include "cg_loop.h"
#include "calls.h"
#include "tag.h"
/* ====================================================================
*
* Convert all invariant predicate into a computation
*
* ====================================================================
*/
// Intentionally a no-op on this target: invariant-predicate conversion is an
// IA-64-style predication transformation that does not apply here.
void Remove_Invariant_Predicates(CG_LOOP& cl, bool trace)
{
}
/* ====================================================================
*
* Convert all p0 conditional cmp into unconditional form
*
* ====================================================================
*/
// Intentionally a no-op on this target: unconditional-compare promotion is an
// IA-64-style predication transformation that does not apply here.
void Unc_Promotion(CG_LOOP& cl, bool trace)
{
}
/* ====================================================================
*
* Remove MOVL to avoid special template requirement and register
* requirement.
*
* ====================================================================
*/
// Intentionally a no-op on this target: MOVL hoisting addresses IA-64
// template/register constraints that do not exist here.
void Hoist_MOVL(CG_LOOP& cl, bool trace)
{
}
// Construct a data structure to locate all defs and uses of a TN,
// using the OP_VECTOR::index. The properties of the OP_VECTOR::index
// is that it's ordered.
//
struct TN_DU {
typedef OP_VECTOR::index_type index_type;
vector<index_type> defs;
vector<index_type> uses;
bool TN_is_invariant() const {
return defs.size() == 0;
}
// Returns true if the TN is not modified in the range [first,last)
bool TN_unchanged(index_type first, index_type last) {
for (int i = 0; i < defs.size(); i++) {
index_type t = defs[i];
if (first <= t && t < last)
return false;
}
return true;
}
// Returns true if the TN can be assigned a non-rotating register
bool TN_can_use_non_rotating_reg(TN *tn, OP_VECTOR& op_vec) {
// d is set to the earliest definition, set to MAX_INT if there is no definitions
index_type d = defs.size() > 0 ? defs[0] : INT32_MAX;
// an omega 0 use is always OK
// an omega 1 use is OK if the use is before the earliest definition
// an omega >1 use is never OK
for (int j = 0; j < uses.size(); j++) {
index_type u = uses[j];
OP *op = op_vec[u];
for (int i = 0; i < OP_opnds(op); i++) {
if (tn == OP_opnd(op, i)) {
int omega = OP_omega(op, i);
if (omega >= 1) {
if (omega > 1)
return false;
if (d < u) // omega == 1
return false;
}
}
}
}
return true;
}
};
// Construct a TN to TN_DU mapping.
//
struct TN_DU_MAP {
  typedef std::map<TN *, TN_DU>::iterator iterator;
  std::map<TN *, TN_DU> TN_DU_map;
  iterator begin() {
    return TN_DU_map.begin();
  }
  iterator end() {
    return TN_DU_map.end();
  }
  TN_DU& operator[](TN *tn) {
    return TN_DU_map[tn];
  }
  // Build a TN_DU data structure for each TN
  // referenced in the BB. And also assign an OP-number
  // to each OP *. The TN_DU represents all occurrences
  // of defs and uses of the TN using the OP-number.
  // Dedicated TNs and constant-register TNs are skipped.
  //
  TN_DU_MAP(OP_VECTOR& op_vec, bool trace) {
    for (INT op_num = 0; op_num < op_vec.size(); op_num++) {
      OP *op = op_vec[op_num];
      INT i;
      // Record this OP's index for every result TN it defines.
      for (i = 0; i < OP_results(op); i++) {
        TN *tn = OP_result(op,i);
        if (TN_is_register(tn) &&
            !TN_is_dedicated(tn) &&
            !TN_is_const_reg(tn)) {
          if (TN_DU_map.find(tn) == TN_DU_map.end())
            TN_DU_map[tn] = TN_DU();
          TN_DU_map[tn].defs.push_back(op_num);
        }
      }
      // Record this OP's index for every operand TN it uses.
      for (i = 0; i < OP_opnds(op); i++) {
        TN *tn = OP_opnd(op,i);
        if (TN_is_register(tn) &&
            !TN_is_dedicated(tn) &&
            !TN_is_const_reg(tn)) {
          if (TN_DU_map.find(tn) == TN_DU_map.end())
            TN_DU_map[tn] = TN_DU();
          TN_DU_map[tn].uses.push_back(op_num);
        }
      }
    }

    // Trace the TN_DU data structure
    if (trace) {
      for (iterator it = TN_DU_map.begin(); it != TN_DU_map.end(); it++) {
        TN *tn = (*it).first;
        TN_DU &lrs = (*it).second;
        fprintf(TFile, "Remove_Non_Definite_Dependence: TN_DU of TN%d: defs={", TN_number(tn));
        {
          for (int i = 0; i < lrs.defs.size(); i++) {
            fprintf(TFile, "%d", lrs.defs[i]);
            if (i != lrs.defs.size()-1) fputc(',', TFile);
          }
        }
        fprintf(TFile, "}, uses={");
        {
          for (int i = 0; i < lrs.uses.size(); i++) {
            fprintf(TFile, "%d", lrs.uses[i]);
            if (i != lrs.uses.size()-1) fputc(',', TFile);
          }
        }
        fprintf(TFile, "}\n");
      }
    }
  }
};
/* ====================================================================
 *
 * tn_is_needed_in_epilog
 *
 * Using the increment feature on memory ops will cause the new
 * value of the index to destroy the previous value. When we need
 * the previous value to complete the epilog sequence, we need to
 * use rotating registers to save the previous value, or decrement
 * the value in the epilog before it is used (not yet implemented).
 *
 * ====================================================================
 */
BOOL static tn_is_needed_in_epilog (TN *tn)
{
  // Conservative placeholder on this target: report that no TN is
  // needed in the epilog, so callers never take the save path above.
  return FALSE;
}
/* ====================================================================
 *
 * OP_owns_the_base_TN
 *
 * If there is no other dependence memop using the base TN,
 * then this OP owns it.
 *
 * ====================================================================
 */
bool OP_owns_the_base_TN(OP *op, TN *base, TN_DU& tn_du, OP_VECTOR& op_vec)
{
  // Placeholder for this target: the arguments are accepted for
  // interface compatibility but never inspected; ownership is always
  // claimed.
  return true;
}
// Return the base update form of the OP.
// Placeholder for this target: no OP is considered convertible.
BASE_UPDATE
OP_convertible_to_base_update(OP *op)
{
  return NO_BASE_UPDATE;
}
// Returns the operand number of the address base operand,
// or -1 when there is none (always the case on this target).
INT OP_base_opnd_num(OP *op)
{
  return -1;
}
// Returns the result number of the updated address base (the original
// comment was a copy-paste of the operand variant), or -1 when there is
// none (always the case on this target).
INT OP_base_res_num(OP *op)
{
  return -1;
}
// Returns the immediate operand number (the original comment was a
// copy-paste of the base-operand variant), or -1 when there is none
// (always the case on this target).
INT OP_imm_opnd_num(OP *op)
{
  return -1;
}
// Returns the operand number holding the increment value for the
// base-update form of {top}: by convention the last fixed operand.
INT32 OP_incr_opnd_num(TOP top)
{
  // the last operand
  return (TOP_fixed_opnds(top) - 1);
}
// Returns the operand number holding the increment value for the
// base-update form of {op}: by convention the last operand.
INT32 OP_incr_opnd_num(OP *op)
{
  // the last operand
  return (OP_opnds(op) - 1);
}
// Counts the definitions of {def_tn} inside {body}.
// Placeholder for this target: always reports zero — NOTE(review):
// confirm callers treat 0 as "none/unknown".
static INT32 Num_defs(BB *body, TN *def_tn)
{
  return 0;
}
// Returns TRUE if the memory stride of {op} is at least as large as its
// access size. Placeholder for this target: always FALSE.
BOOL Mem_stride_ge_access(OP *op, INT64 stride)
{
  return FALSE;
}
/* ====================================================================
 *
 * Marks OP that has no cross-iteration aliasing.
 *
 * need to run after unrolling and before postincr form
 *
 * ====================================================================
 */
void Init_OP_no_ci_alias(CG_LOOP& cl, BOOL trace)
{
  // Intentionally empty on this target: no OPs are marked.
}
// Identify and delete the increment operation.
// The increment operation will be combined into the op's
// base-update form.
//
// Placeholder for this target: never finds an increment; returning
// NULL makes callers skip the base-update conversion.
static TN *
Identify_and_delete_incr(BB *bb, OP *memop, INT base_opnd_num, BASE_UPDATE up)
{
  return NULL;
}
// Rewrites {op} into its base-update form using increment {incr}.
// Intentionally empty on this target.
void Convert_OP_to_base_update_form(BB *body, OP *op, TN *incr, BASE_UPDATE up,
                                    INT base_opnd_num, bool trace)
{
}
/* ====================================================================
 *
 * Convert_Post_Incr
 * try to convert load/store into their base update form
 * it must also update the OP_omega information
 *
 * ====================================================================
 */
void Gen_Post_Incr_Memop(CG_LOOP& cl, bool trace)
{
  // Intentionally empty: post-increment memop generation is not
  // implemented for this target.
}
/* ====================================================================
 *
 * Expand_Simulated_Ops
 *
 * ====================================================================
 */
static BOOL Expand_Simulated_Ops(CG_LOOP& cl, bool trace)
{
  // Placeholder for this target: reports that nothing was expanded.
  return FALSE;
}
// Returns the predicated variant of {top}, or TOP_UNDEFINED when none
// exists (always the case on this target).
TOP Get_Predicated_Form(TOP top)
{
  return TOP_UNDEFINED;
}
|
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/instruction-selector.h"
#include <limits>
#include "src/base/iterator.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
#include "src/deoptimizer/deoptimizer.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/simd-shuffle.h"
#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
namespace compiler {
// Collects all state needed during instruction selection. Most members
// are zone-allocated side tables sized/indexed by Node id; the two
// pointers {max_unoptimized_frame_height} and {max_pushed_argument_count}
// are caller-owned output parameters (checked to be zero-initialized).
InstructionSelector::InstructionSelector(
    Zone* zone, size_t node_count, Linkage* linkage,
    InstructionSequence* sequence, Schedule* schedule,
    SourcePositionTable* source_positions, Frame* frame,
    EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
    JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
    size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
    Features features, EnableScheduling enable_scheduling,
    EnableRootsRelativeAddressing enable_roots_relative_addressing,
    PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
    : zone_(zone),
      linkage_(linkage),
      sequence_(sequence),
      source_positions_(source_positions),
      source_position_mode_(source_position_mode),
      features_(features),
      schedule_(schedule),
      current_block_(nullptr),
      instructions_(zone),
      // Scratch vectors reused by EmitWithContinuation; note they are
      // allocated in the sequence's zone, not the selector's.
      continuation_inputs_(sequence->zone()),
      continuation_outputs_(sequence->zone()),
      continuation_temps_(sequence->zone()),
      // Per-node side tables, indexed by Node id.
      defined_(node_count, false, zone),
      used_(node_count, false, zone),
      effect_level_(node_count, 0, zone),
      virtual_registers_(node_count,
                         InstructionOperand::kInvalidVirtualRegister, zone),
      virtual_register_rename_(zone),
      scheduler_(nullptr),
      enable_scheduling_(enable_scheduling),
      enable_roots_relative_addressing_(enable_roots_relative_addressing),
      enable_switch_jump_table_(enable_switch_jump_table),
      state_values_cache_(zone),
      poisoning_level_(poisoning_level),
      frame_(frame),
      instruction_selection_failed_(false),
      instr_origins_(sequence->zone()),
      trace_turbo_(trace_turbo),
      tick_counter_(tick_counter),
      broker_(broker),
      max_unoptimized_frame_height_(max_unoptimized_frame_height),
      max_pushed_argument_count_(max_pushed_argument_count)
#if V8_TARGET_ARCH_64_BIT
      ,
      phi_states_(node_count, Upper32BitsState::kNotYetChecked, zone)
#endif
{
  DCHECK_EQ(*max_unoptimized_frame_height, 0);  // Caller-initialized.
  instructions_.reserve(node_count);
  continuation_inputs_.reserve(5);
  continuation_outputs_.reserve(2);
  if (trace_turbo_ == kEnableTraceTurboJson) {
    instr_origins_.assign(node_count, {-1, 0});
  }
}
// Runs selection: marks live phi inputs, visits blocks in reverse rpo
// order (buffering instructions), then flushes the buffered
// instructions per block into the InstructionSequence, optionally
// through the instruction scheduler. Returns false on failure.
bool InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (auto const block : *blocks) {
    if (!block->IsLoopHeader()) continue;
    DCHECK_LE(2u, block->PredecessorCount());
    for (Node* const phi : *block) {
      if (phi->opcode() != IrOpcode::kPhi) continue;
      // Mark all inputs as used.
      for (Node* const input : phi->inputs()) {
        MarkAsUsed(input);
      }
    }
  }
  // Visit each basic block in post order.
  for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
    if (instruction_selection_failed()) return false;
  }
  // Schedule the selected instructions.
  if (UseInstructionScheduling()) {
    scheduler_ = zone()->New<InstructionScheduler>(zone(), sequence());
  }
  for (auto const block : *blocks) {
    InstructionBlock* instruction_block =
        sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
    for (size_t i = 0; i < instruction_block->phis().size(); i++) {
      UpdateRenamesInPhi(instruction_block->PhiAt(i));
    }
    // Instructions were buffered back-to-front, so code_end() <=
    // code_start() (checked below); the range is walked backwards and
    // the instruction at {end} is the block terminator.
    size_t end = instruction_block->code_end();
    size_t start = instruction_block->code_start();
    DCHECK_LE(end, start);
    StartBlock(RpoNumber::FromInt(block->rpo_number()));
    if (end != start) {
      while (start-- > end + 1) {
        UpdateRenames(instructions_[start]);
        AddInstruction(instructions_[start]);
      }
      UpdateRenames(instructions_[end]);
      AddTerminator(instructions_[end]);
    }
    EndBlock(RpoNumber::FromInt(block->rpo_number()));
  }
#if DEBUG
  sequence()->ValidateSSA();
#endif
  return true;
}
// Routes block-start bookkeeping either through the scheduler (when
// instruction scheduling is enabled) or straight to the sequence.
void InstructionSelector::StartBlock(RpoNumber rpo) {
  if (!UseInstructionScheduling()) {
    sequence()->StartBlock(rpo);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->StartBlock(rpo);
}
// Routes block-end bookkeeping either through the scheduler (when
// instruction scheduling is enabled) or straight to the sequence.
void InstructionSelector::EndBlock(RpoNumber rpo) {
  if (!UseInstructionScheduling()) {
    sequence()->EndBlock(rpo);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->EndBlock(rpo);
}
// Appends a block terminator. Without scheduling a terminator is added
// like any other instruction; the scheduler tracks it specially.
void InstructionSelector::AddTerminator(Instruction* instr) {
  if (!UseInstructionScheduling()) {
    sequence()->AddInstruction(instr);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->AddTerminator(instr);
}
// Appends an instruction, going through the scheduler when instruction
// scheduling is enabled.
void InstructionSelector::AddInstruction(Instruction* instr) {
  if (!UseInstructionScheduling()) {
    sequence()->AddInstruction(instr);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->AddInstruction(instr);
}
// Zero-input convenience overload; an invalid {output} means "no
// output".
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       size_t temp_count,
                                       InstructionOperand* temps) {
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, 0, nullptr, temp_count, temps);
}
// One-input convenience overload; an invalid {output} means "no
// output".
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       InstructionOperand a, size_t temp_count,
                                       InstructionOperand* temps) {
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, 1, &a, temp_count, temps);
}
// Two-input convenience overload.
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       InstructionOperand a,
                                       InstructionOperand b, size_t temp_count,
                                       InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b};
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, arraysize(ins), ins, temp_count,
              temps);
}
// Three-input convenience overload.
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       InstructionOperand a,
                                       InstructionOperand b,
                                       InstructionOperand c, size_t temp_count,
                                       InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c};
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, arraysize(ins), ins, temp_count,
              temps);
}
// Four-input convenience overload.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    InstructionOperand b, InstructionOperand c, InstructionOperand d,
    size_t temp_count, InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c, d};
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, arraysize(ins), ins, temp_count,
              temps);
}
// Five-input convenience overload.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    InstructionOperand b, InstructionOperand c, InstructionOperand d,
    InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c, d, e};
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, arraysize(ins), ins, temp_count,
              temps);
}
// Six-input convenience overload.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    InstructionOperand b, InstructionOperand c, InstructionOperand d,
    InstructionOperand e, InstructionOperand f, size_t temp_count,
    InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c, d, e, f};
  const size_t n_outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, n_outputs, &output, arraysize(ins), ins, temp_count,
              temps);
}
// Allocates and buffers a new Instruction. Operand counts live in
// fixed-width bitfields of the instruction encoding, so selection fails
// (returning nullptr) when any count cannot be represented.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, size_t temp_count,
    InstructionOperand* temps) {
  const bool encodable = output_count < Instruction::kMaxOutputCount &&
                         input_count < Instruction::kMaxInputCount &&
                         temp_count < Instruction::kMaxTempCount;
  if (!encodable) {
    set_instruction_selection_failed();
    return nullptr;
  }
  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}
// Buffers {instr}; buffered instructions are flushed per block by
// SelectInstructions().
Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}
// Returns true if {node} can be covered by (folded into) the
// instruction emitted for {user} instead of being emitted on its own.
bool InstructionSelector::CanCover(Node* user, Node* node) const {
  // 1. Both {user} and {node} must be in the same basic block.
  if (schedule()->block(node) != schedule()->block(user)) {
    return false;
  }
  // 2. Pure {node}s must be owned by the {user}.
  if (node->op()->HasProperty(Operator::kPure)) {
    return node->OwnedBy(user);
  }
  // 3. Impure {node}s must match the effect level of {user}.
  if (GetEffectLevel(node) != GetEffectLevel(user)) {
    return false;
  }
  // 4. Only {node} must have value edges pointing to {user}.
  for (Edge const edge : node->use_edges()) {
    if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
      return false;
    }
  }
  return true;
}
// Checks that covering {node_input} through the chain
// {user} -> {node} -> {node_input} is valid; plain CanCover is not
// transitive when the middle node is pure (its effect level is never
// compared against {user}'s).
bool InstructionSelector::CanCoverTransitively(Node* user, Node* node,
                                               Node* node_input) const {
  if (CanCover(user, node) && CanCover(node, node_input)) {
    // If {node} is pure, transitivity might not hold.
    if (node->op()->HasProperty(Operator::kPure)) {
      // If {node_input} is pure, the effect levels do not matter.
      if (node_input->op()->HasProperty(Operator::kPure)) return true;
      // Otherwise, {user} and {node_input} must have the same effect level.
      return GetEffectLevel(user) == GetEffectLevel(node_input);
    }
    return true;
  }
  return false;
}
// True iff {node} and {user} share a basic block and no other node in
// that block consumes {node}.
bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
                                                      Node* node) const {
  BasicBlock* block = schedule()->block(user);
  if (schedule()->block(node) != block) return false;
  for (Edge const edge : node->use_edges()) {
    Node* consumer = edge.from();
    if (consumer == user) continue;
    if (schedule()->block(consumer) == block) return false;
  }
  return true;
}
// Rewrites every input operand of {instruction} through the
// virtual-register rename table.
void InstructionSelector::UpdateRenames(Instruction* instruction) {
  size_t const count = instruction->InputCount();
  for (size_t idx = 0; idx < count; ++idx) {
    TryRename(instruction->InputAt(idx));
  }
}
// Applies the rename table to a phi. Phi operands are stored as raw
// virtual-register numbers, so renaming happens by value here rather
// than through TryRename.
void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
  size_t const count = phi->operands().size();
  for (size_t idx = 0; idx < count; ++idx) {
    int const original = phi->operands()[idx];
    int const renamed = GetRename(original);
    if (renamed != original) phi->RenameInput(idx, renamed);
  }
}
// Follows the rename chain for {virtual_register} to its end; registers
// never renamed (out of table range or marked invalid) map to
// themselves.
int InstructionSelector::GetRename(int virtual_register) {
  int current = virtual_register;
  for (;;) {
    if (static_cast<size_t>(current) >= virtual_register_rename_.size()) break;
    int const next = virtual_register_rename_[current];
    if (next == InstructionOperand::kInvalidVirtualRegister) break;
    current = next;
  }
  return current;
}
// Rewrites {op}'s virtual register through the rename table; only
// unallocated operands carry a virtual register to rewrite.
void InstructionSelector::TryRename(InstructionOperand* op) {
  if (!op->IsUnallocated()) return;
  UnallocatedOperand* operand = UnallocatedOperand::cast(op);
  int const original = operand->virtual_register();
  int const renamed = GetRename(original);
  if (renamed == original) return;
  *operand = UnallocatedOperand(*operand, renamed);
}
// Records that {node}'s virtual register should be renamed to
// {rename}'s. The table grows on demand; untouched slots stay
// "invalid", which GetRename treats as end-of-chain.
void InstructionSelector::SetRename(const Node* node, const Node* rename) {
  int const vreg = GetVirtualRegister(node);
  if (virtual_register_rename_.size() <= static_cast<size_t>(vreg)) {
    virtual_register_rename_.resize(
        vreg + 1, InstructionOperand::kInvalidVirtualRegister);
  }
  virtual_register_rename_[vreg] = GetVirtualRegister(rename);
}
// Returns the virtual register for {node}, lazily allocating one from
// the sequence on first request.
int InstructionSelector::GetVirtualRegister(const Node* node) {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, virtual_registers_.size());
  int virtual_register = virtual_registers_[id];
  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
    virtual_register = sequence()->NextVirtualRegister();
    virtual_registers_[id] = virtual_register;
  }
  return virtual_register;
}
const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
const {
std::map<NodeId, int> virtual_registers;
for (size_t n = 0; n < virtual_registers_.size(); ++n) {
if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
NodeId const id = static_cast<NodeId>(n);
virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
}
}
return virtual_registers;
}
// Returns true once MarkAsDefined has been called for {node}.
bool InstructionSelector::IsDefined(Node* node) const {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, defined_.size());
  return defined_[id];
}
// Records {node} as defined in the per-node side table.
void InstructionSelector::MarkAsDefined(Node* node) {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, defined_.size());
  defined_[id] = true;
}
// Returns true when {node}'s value is observed: non-eliminatable nodes
// (and Retain) are always "used"; eliminatable ones consult the used_
// side table.
bool InstructionSelector::IsUsed(Node* node) const {
  DCHECK_NOT_NULL(node);
  // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
  // that the Retain is actually emitted, otherwise the GC will mess up.
  if (node->opcode() == IrOpcode::kRetain) return true;
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  size_t const id = node->id();
  DCHECK_LT(id, used_.size());
  return used_[id];
}
// Records {node} as used in the per-node side table.
void InstructionSelector::MarkAsUsed(Node* node) {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, used_.size());
  used_[id] = true;
}
// Returns the effect level recorded for {node} via SetEffectLevel.
int InstructionSelector::GetEffectLevel(Node* node) const {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, effect_level_.size());
  return effect_level_[id];
}
// Returns the effect level for {node}, except that branch continuations
// take the effect level of the branch's control input.
int InstructionSelector::GetEffectLevel(Node* node,
                                        FlagsContinuation* cont) const {
  if (cont->IsBranch()) {
    Node* control = cont->true_block()->PredecessorAt(0)->control_input();
    return GetEffectLevel(control);
  }
  return GetEffectLevel(node);
}
// Records the effect level for {node} in the per-node side table.
void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
  DCHECK_NOT_NULL(node);
  size_t const id = node->id();
  DCHECK_LT(id, effect_level_.size());
  effect_level_[id] = effect_level;
}
// Returns true if {reference} can be addressed as a fixed offset from
// kRootRegister in the generated code.
bool InstructionSelector::CanAddressRelativeToRootsRegister(
    const ExternalReference& reference) const {
  // There are three things to consider here:
  // 1. CanUseRootsRegister: Is kRootRegister initialized?
  const bool root_register_is_available_and_initialized = CanUseRootsRegister();
  if (!root_register_is_available_and_initialized) return false;
  // 2. enable_roots_relative_addressing_: Can we address everything on the heap
  //    through the root register, i.e. are root-relative addresses to arbitrary
  //    addresses guaranteed not to change between code generation and
  //    execution?
  const bool all_root_relative_offsets_are_constant =
      (enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing);
  if (all_root_relative_offsets_are_constant) return true;
  // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
  //    have a fixed root-relative offset? If so, we can ignore 2.
  const bool this_root_relative_offset_is_constant =
      TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
                                                           reference);
  return this_root_relative_offset_is_constant;
}
// kRootRegister may only be relied upon when the incoming call
// descriptor says it is available.
bool InstructionSelector::CanUseRootsRegister() const {
  auto const* descriptor = linkage()->GetIncomingDescriptor();
  return descriptor->flags() & CallDescriptor::kCanUseRoots;
}
// Records the machine representation of {op}'s value, keyed by its
// virtual register.
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                               const InstructionOperand& op) {
  int const vreg = UnallocatedOperand::cast(op).virtual_register();
  sequence()->MarkAsRepresentation(rep, vreg);
}
// Records the machine representation of {node}'s value under its
// virtual register.
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                               Node* node) {
  sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
namespace {
// Chooses the instruction operand with which {input} is handed to the
// deoptimizer: constants become immediates; optimized-out (or
// type-inconsistent) heap constants become an invalid operand; anything
// else is passed via a stack slot or "any" operand depending on {kind}.
InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
                                   Node* input, FrameStateInputKind kind,
                                   MachineRepresentation rep) {
  if (rep == MachineRepresentation::kNone) {
    return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
  }
  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat32Constant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kDelayedStringConstant:
      return g->UseImmediate(input);
    case IrOpcode::kCompressedHeapConstant:
    case IrOpcode::kHeapConstant: {
      if (!CanBeTaggedOrCompressedPointer(rep)) {
        // If we have inconsistent static and dynamic types, e.g. if we
        // smi-check a string, we can get here with a heap object that
        // says it is a smi. In that case, we return an invalid instruction
        // operand, which will be interpreted as an optimized-out value.
        // TODO(jarin) Ideally, we should turn the current instruction
        // into an abort (we should never execute it).
        return InstructionOperand();
      }
      Handle<HeapObject> constant = HeapConstantOf(input->op());
      RootIndex root_index;
      if (isolate->roots_table().IsRootHandle(constant, &root_index) &&
          root_index == RootIndex::kOptimizedOut) {
        // For an optimized-out object we return an invalid instruction
        // operand, so that we take the fast path for optimized-out values.
        return InstructionOperand();
      }
      return g->UseImmediate(input);
    }
    case IrOpcode::kArgumentsElementsState:
    case IrOpcode::kArgumentsLengthState:
    case IrOpcode::kObjectState:
    case IrOpcode::kTypedObjectState:
      // Object states are expanded by AddOperandToStateValueDescriptor
      // and must never reach this function.
      UNREACHABLE();
    default:
      switch (kind) {
        case FrameStateInputKind::kStackSlot:
          return g->UseUniqueSlot(input);
        case FrameStateInputKind::kAny:
          // Currently deopts "wrap" other operations, so the deopt's inputs
          // are potentially needed until the end of the deoptimising code.
          return g->UseAnyAtEnd(input);
      }
  }
  UNREACHABLE();
}
}  // namespace
// Assigns stable ids to object states appearing in deopt frame states
// so that multiply-referenced objects are materialized only once by the
// deoptimizer.
class StateObjectDeduplicator {
 public:
  explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
  static const size_t kNotDuplicated = SIZE_MAX;
  // Returns the id of a previously inserted object matching {node}, or
  // kNotDuplicated when {node} has not been seen before.
  size_t GetObjectId(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    for (size_t i = 0; i < objects_.size(); ++i) {
      if (objects_[i] == node) return i;
      // ObjectId nodes are the Turbofan way to express objects with the same
      // identity in the deopt info. So they should always be mapped to
      // previously appearing TypedObjectState nodes.
      if (HasObjectId(objects_[i]) && HasObjectId(node) &&
          ObjectIdOf(objects_[i]->op()) == ObjectIdOf(node->op())) {
        return i;
      }
    }
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    return kNotDuplicated;
  }
  // Registers {node} and returns its freshly assigned id.
  size_t InsertObject(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    size_t id = objects_.size();
    objects_.push_back(node);
    return id;
  }
  size_t size() const { return objects_.size(); }

 private:
  // True for node kinds that carry an explicit object identity.
  static bool HasObjectId(Node* node) {
    return node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId;
  }
  ZoneVector<Node*> objects_;
};
// Returns the number of instruction operands added to inputs.
//
// Recursively translates one frame-state input {input} of machine type
// {type} into the StateValueList {values}, appending live operands to
// {inputs}; object states are deduplicated through {deduplicator}.
size_t InstructionSelector::AddOperandToStateValueDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
    MachineType type, FrameStateInputKind kind, Zone* zone) {
  DCHECK_NOT_NULL(input);
  switch (input->opcode()) {
    case IrOpcode::kArgumentsElementsState: {
      values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
      // The elements backing store of an arguments object participates in the
      // duplicate object counting, but can itself never appear duplicated.
      DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
                deduplicator->GetObjectId(input));
      deduplicator->InsertObject(input);
      return 0;
    }
    case IrOpcode::kArgumentsLengthState: {
      values->PushArgumentsLength();
      return 0;
    }
    case IrOpcode::kObjectState:
      UNREACHABLE();
    case IrOpcode::kTypedObjectState:
    case IrOpcode::kObjectId: {
      size_t id = deduplicator->GetObjectId(input);
      if (id == StateObjectDeduplicator::kNotDuplicated) {
        // First occurrence of this object: recurse into its fields.
        DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
        size_t entries = 0;
        id = deduplicator->InsertObject(input);
        StateValueList* nested = values->PushRecursiveField(zone, id);
        int const input_count = input->op()->ValueInputCount();
        ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
        for (int i = 0; i < input_count; ++i) {
          entries += AddOperandToStateValueDescriptor(
              nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
              kind, zone);
        }
        return entries;
      } else {
        // Deoptimizer counts duplicate objects for the running id, so we have
        // to push the input again.
        deduplicator->InsertObject(input);
        values->PushDuplicate(id);
        return 0;
      }
    }
    default: {
      InstructionOperand op =
          OperandForDeopt(isolate(), g, input, kind, type.representation());
      if (op.kind() == InstructionOperand::INVALID) {
        // Invalid operand means the value is impossible or optimized-out.
        values->PushOptimizedOut();
        return 0;
      } else {
        inputs->push_back(op);
        values->PushPlain(type);
        return 1;
      }
    }
  }
}
// Snapshot of the operands and state values produced for one
// StateValues node, so repeated frame states can be re-emitted cheaply.
struct InstructionSelector::CachedStateValues : public ZoneObject {
 public:
  CachedStateValues(Zone* zone, StateValueList* values, size_t values_start,
                    InstructionOperandVector* inputs, size_t inputs_start)
      : inputs_(inputs->begin() + inputs_start, inputs->end(), zone),
        values_(values->MakeSlice(values_start)) {}
  // Replays the cached slice into {inputs}/{values}; returns the number
  // of operands appended.
  size_t Emit(InstructionOperandVector* inputs, StateValueList* values) {
    inputs->insert(inputs->end(), inputs_.begin(), inputs_.end());
    values->PushCachedSlice(values_);
    return inputs_.size();
  }

 private:
  InstructionOperandVector inputs_;
  StateValueList::Slice values_;
};
// Captures the sizes of the output containers before a StateValues node
// is processed, so the freshly appended portion can later be turned
// into a CachedStateValues.
class InstructionSelector::CachedStateValuesBuilder {
 public:
  explicit CachedStateValuesBuilder(StateValueList* values,
                                    InstructionOperandVector* inputs,
                                    StateObjectDeduplicator* deduplicator)
      : values_(values),
        inputs_(inputs),
        deduplicator_(deduplicator),
        values_start_(values->size()),
        nested_start_(values->nested_count()),
        inputs_start_(inputs->size()),
        deduplicator_start_(deduplicator->size()) {}
  // We can only build a CachedStateValues for a StateValue if it didn't update
  // any of the ids in the deduplicator.
  bool CanCache() const { return deduplicator_->size() == deduplicator_start_; }
  InstructionSelector::CachedStateValues* Build(Zone* zone) {
    DCHECK(CanCache());
    DCHECK(values_->nested_count() == nested_start_);
    return zone->New<InstructionSelector::CachedStateValues>(
        zone, values_, values_start_, inputs_, inputs_start_);
  }

 private:
  StateValueList* values_;
  InstructionOperandVector* inputs_;
  StateObjectDeduplicator* deduplicator_;
  size_t values_start_;
  size_t nested_start_;
  size_t inputs_start_;
  size_t deduplicator_start_;
};
// Translates the StateValues node {node} into {values}/{inputs},
// consulting and filling the per-selector cache keyed on (node, kind).
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* node,
    FrameStateInputKind kind, Zone* zone) {
  // StateValues are often shared across different nodes, and processing them is
  // expensive, so cache the result of processing a StateValue so that we can
  // quickly copy the result if we see it again.
  FrameStateInput key(node, kind);
  auto cache_entry = state_values_cache_.find(key);
  if (cache_entry != state_values_cache_.end()) {
    // Entry found in cache, emit cached version.
    return cache_entry->second->Emit(inputs, values);
  } else {
    // Not found in cache, generate and then store in cache if possible.
    size_t entries = 0;
    CachedStateValuesBuilder cache_builder(values, inputs, deduplicator);
    StateValuesAccess::iterator it = StateValuesAccess(node).begin();
    // Take advantage of sparse nature of StateValuesAccess to skip over
    // multiple empty nodes at once pushing repeated OptimizedOuts all in one
    // go.
    while (!it.done()) {
      values->PushOptimizedOut(it.AdvanceTillNotEmpty());
      if (it.done()) break;
      StateValuesAccess::TypedNode input_node = *it;
      entries += AddOperandToStateValueDescriptor(values, inputs, g,
                                                 deduplicator, input_node.node,
                                                 input_node.type, kind, zone);
      ++it;
    }
    if (cache_builder.CanCache()) {
      // Use this->zone() to build the cache entry in the instruction selector's
      // zone rather than the more long-lived instruction zone.
      state_values_cache_.emplace(key, cache_builder.Build(this->zone()));
    }
    return entries;
  }
}
// Returns the number of instruction operands added to inputs.
//
// Flattens a (possibly nested) frame state into {inputs}: outer frames
// first, then function, parameters, context, locals and stack values,
// in the layout described by {descriptor}.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    FrameStateDescriptor* descriptor, FrameState state, OperandGenerator* g,
    StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
    FrameStateInputKind kind, Zone* zone) {
  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
  size_t entries = 0;
  size_t initial_size = inputs->size();
  USE(initial_size);  // initial_size is only used for debug.
  if (descriptor->outer_state()) {
    // Recurse so that the outermost frame's inputs come first.
    entries += AddInputsToFrameStateDescriptor(
        descriptor->outer_state(), state.outer_frame_state(), g, deduplicator,
        inputs, kind, zone);
  }
  Node* parameters = state.parameters();
  Node* locals = state.locals();
  Node* stack = state.stack();
  Node* context = state.context();
  Node* function = state.function();
  DCHECK_EQ(descriptor->parameters_count(),
            StateValuesAccess(parameters).size());
  DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
  DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
  DCHECK_EQ(values_descriptor->size(), 0u);
  values_descriptor->ReserveSize(descriptor->GetSize());
  DCHECK_NOT_NULL(function);
  entries += AddOperandToStateValueDescriptor(
      values_descriptor, inputs, g, deduplicator, function,
      MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  entries += AddInputsToFrameStateDescriptor(
      values_descriptor, inputs, g, deduplicator, parameters, kind, zone);
  if (descriptor->HasContext()) {
    DCHECK_NOT_NULL(context);
    entries += AddOperandToStateValueDescriptor(
        values_descriptor, inputs, g, deduplicator, context,
        MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  }
  entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
                                             deduplicator, locals, kind, zone);
  entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
                                             deduplicator, stack, kind, zone);
  DCHECK_EQ(initial_size + entries, inputs->size());
  return entries;
}
// Zero-operand convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
}
// One-input convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont);
}
// Two-input convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, InstructionOperand b,
    FlagsContinuation* cont) {
  InstructionOperand ins[] = {a, b};
  return EmitWithContinuation(opcode, 0, nullptr, arraysize(ins), ins, cont);
}
// Three-input convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, InstructionOperand b,
    InstructionOperand c, FlagsContinuation* cont) {
  InstructionOperand ins[] = {a, b, c};
  return EmitWithContinuation(opcode, 0, nullptr, arraysize(ins), ins, cont);
}
// Forwards to the full overload with no temps.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, output_count, outputs, input_count,
                              inputs, 0, nullptr, cont);
}
// Emits {opcode} together with the extra operands demanded by the flags
// continuation {cont}: branch labels, deopt frame state, a result
// register for Set/Select, or a trap id. The continuation kind itself
// is folded into the opcode via cont->Encode().
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, size_t temp_count,
    InstructionOperand* temps, FlagsContinuation* cont) {
  OperandGenerator g(this);
  opcode = cont->Encode(opcode);
  // Copy the caller-supplied operands into reusable scratch vectors so
  // the continuation-specific operands can be appended below.
  continuation_inputs_.resize(0);
  for (size_t i = 0; i < input_count; i++) {
    continuation_inputs_.push_back(inputs[i]);
  }
  continuation_outputs_.resize(0);
  for (size_t i = 0; i < output_count; i++) {
    continuation_outputs_.push_back(outputs[i]);
  }
  continuation_temps_.resize(0);
  for (size_t i = 0; i < temp_count; i++) {
    continuation_temps_.push_back(temps[i]);
  }
  if (cont->IsBranch()) {
    continuation_inputs_.push_back(g.Label(cont->true_block()));
    continuation_inputs_.push_back(g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    int immediate_args_count = 0;
    if (cont->has_extra_args()) {
      for (int i = 0; i < cont->extra_args_count(); i++) {
        InstructionOperand op = cont->extra_args()[i];
        continuation_inputs_.push_back(op);
        input_count++;
        if (op.IsImmediate()) {
          immediate_args_count++;
        } else {
          // All immediate args should be added last.
          DCHECK_EQ(immediate_args_count, 0);
        }
      }
    }
    // Record where the frame state starts (after all regular and extra
    // inputs) and how many trailing immediates precede it.
    opcode |= DeoptImmedArgsCountField::encode(immediate_args_count) |
              DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
    AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
                              cont->reason(), cont->feedback(),
                              FrameState{cont->frame_state()});
  } else if (cont->IsSet()) {
    continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
  } else if (cont->IsSelect()) {
    // The {Select} should put one of two values into the output register,
    // depending on the result of the condition. The two result values are in
    // the last two input slots, the {false_value} in {input_count - 2}, and the
    // true_value in {input_count - 1}. The other inputs are used for the
    // condition.
    AddOutputToSelectContinuation(&g, static_cast<int>(input_count) - 2,
                                  cont->result());
  } else if (cont->IsTrap()) {
    int trap_id = static_cast<int>(cont->trap_id());
    continuation_inputs_.push_back(g.UseImmediate(trap_id));
  } else {
    DCHECK(cont->IsNone());
  }
  // Hand the assembled operand lists to the regular Emit path.
  size_t const emit_inputs_size = continuation_inputs_.size();
  auto* emit_inputs =
      emit_inputs_size ? &continuation_inputs_.front() : nullptr;
  size_t const emit_outputs_size = continuation_outputs_.size();
  auto* emit_outputs =
      emit_outputs_size ? &continuation_outputs_.front() : nullptr;
  size_t const emit_temps_size = continuation_temps_.size();
  auto* emit_temps = emit_temps_size ? &continuation_temps_.front() : nullptr;
  return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
              emit_inputs, emit_temps_size, emit_temps);
}
// Appends the operands describing an eager deoptimization to {args}: first a
// temp immediate holding the deoptimization entry id registered with the
// instruction sequence, then the flattened frame-state value inputs.
void InstructionSelector::AppendDeoptimizeArguments(
    InstructionOperandVector* args, DeoptimizeKind kind,
    DeoptimizeReason reason, FeedbackSource const& feedback,
    FrameState frame_state) {
  OperandGenerator g(this);
  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
  // Lazy deopts are attached to calls elsewhere; only eager kinds come here.
  DCHECK_NE(DeoptimizeKind::kLazy, kind);
  int const state_id =
      sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
  args->push_back(g.TempImmediate(state_id));
  // Deduplicates repeated object states while the frame state is flattened
  // into individual operands.
  StateObjectDeduplicator deduplicator(instruction_zone());
  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
                                  args, FrameStateInputKind::kAny,
                                  instruction_zone());
}
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
  CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
             FrameStateDescriptor* frame_state)
      : descriptor(call_descriptor),
        frame_state_descriptor(frame_state),
        output_nodes(zone),
        outputs(zone),
        instruction_args(zone),
        pushed_nodes(zone) {
    // Pre-size the vectors from the descriptor so that filling them in
    // InitializeCallBuffer does not reallocate.
    output_nodes.reserve(call_descriptor->ReturnCount());
    outputs.reserve(call_descriptor->ReturnCount());
    pushed_nodes.reserve(input_count());
    instruction_args.reserve(input_count() + frame_state_value_count());
  }

  // Call descriptor of the call being lowered (not owned).
  const CallDescriptor* descriptor;
  // Frame state descriptor for lazy deopt, or nullptr if the call has none.
  FrameStateDescriptor* frame_state_descriptor;
  // One entry per return value: the projection node (or the call itself for
  // single-return calls) plus its linkage location.
  ZoneVector<PushParameter> output_nodes;
  // Instruction output operands for the live returns.
  InstructionOperandVector outputs;
  // All non-pushed instruction inputs: callee, deopt id, frame-state values,
  // and register/slot arguments.
  InstructionOperandVector instruction_args;
  // Arguments placed on the stack by explicit pushes before the call,
  // indexed by stack slot.
  ZoneVector<PushParameter> pushed_nodes;

  // Number of value inputs to the call (excluding frame states).
  size_t input_count() const { return descriptor->InputCount(); }

  size_t frame_state_count() const { return descriptor->FrameStateCount(); }

  // Total operand count contributed by the frame state, including the deopt
  // id immediate; zero when there is no frame state.
  size_t frame_state_value_count() const {
    return (frame_state_descriptor == nullptr)
               ? 0
               : (frame_state_descriptor->GetTotalSize() +
                  1);  // Include deopt id.
  }
};
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
//
// Fills {buffer} for the given {call} node: collects the live return
// locations into {outputs}, the callee / deopt-id / frame-state / register
// arguments into {instruction_args}, and the stack-passed arguments into
// {pushed_nodes}. For tail calls, argument locations are converted to the
// tail caller's frame and outer arguments-adaptor frame states are dropped.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               CallBufferFlags flags,
                                               bool is_tail_call,
                                               int stack_param_delta) {
  OperandGenerator g(this);
  size_t ret_count = buffer->descriptor->ReturnCount();
  DCHECK_LE(call->op()->ValueOutputCount(), ret_count);
  DCHECK_EQ(
      call->op()->ValueInputCount(),
      static_cast<int>(buffer->input_count() + buffer->frame_state_count()));

  if (ret_count > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (ret_count == 1) {
      // Single return: the call node itself carries the value.
      PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
      buffer->output_nodes.push_back(result);
    } else {
      // Multiple returns: find the kProjection use for each return index.
      buffer->output_nodes.resize(ret_count);
      for (size_t i = 0; i < ret_count; ++i) {
        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
        buffer->output_nodes[i] = PushParameter(nullptr, location);
      }
      for (Edge const edge : call->use_edges()) {
        if (!NodeProperties::IsValueEdge(edge)) continue;
        Node* node = edge.from();
        DCHECK_EQ(IrOpcode::kProjection, node->opcode());
        size_t const index = ProjectionIndexOf(node->op());
        DCHECK_LT(index, buffer->output_nodes.size());
        DCHECK(!buffer->output_nodes[index].node);
        buffer->output_nodes[index].node = node;
      }
      frame_->EnsureReturnSlots(
          static_cast<int>(buffer->descriptor->ReturnSlotCount()));
    }

    // Filter out the outputs that aren't live because no projection uses them.
    size_t outputs_needed_by_framestate =
        buffer->frame_state_descriptor == nullptr
            ? 0
            : buffer->frame_state_descriptor->state_combine()
                  .ConsumedOutputCount();
    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
      bool output_is_live = buffer->output_nodes[i].node != nullptr ||
                            i < outputs_needed_by_framestate;
      if (output_is_live) {
        LinkageLocation location = buffer->output_nodes[i].location;
        MachineRepresentation rep = location.GetType().representation();

        Node* output = buffer->output_nodes[i].node;
        // Dead-but-needed outputs get a temp location instead of a def.
        InstructionOperand op = output == nullptr
                                    ? g.TempLocation(location)
                                    : g.DefineAsLocation(output, location);
        MarkAsRepresentation(rep, op);

        // Stack-slot returns are not instruction outputs; they are read back
        // from the return slots instead.
        if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
          buffer->outputs.push_back(op);
          buffer->output_nodes[i].node = nullptr;
        }
      }
    }
  }

  // The first argument is always the callee code.
  Node* callee = call->InputAt(0);
  bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
  bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
  bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->instruction_args.push_back(
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : call_use_fixed_target_reg
                    ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
                    : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallAddress:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           callee->opcode() == IrOpcode::kExternalConstant)
              ? g.UseImmediate(callee)
              : call_use_fixed_target_reg
                    ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
                    : g.UseRegister(callee));
      break;
#if V8_ENABLE_WEBASSEMBLY
    case CallDescriptor::kCallWasmCapiFunction:
    case CallDescriptor::kCallWasmFunction:
    case CallDescriptor::kCallWasmImportWrapper:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
            callee->opcode() == IrOpcode::kRelocatableInt32Constant))
              ? g.UseImmediate(callee)
              : call_use_fixed_target_reg
                    ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
                    : g.UseRegister(callee));
      break;
#endif  // V8_ENABLE_WEBASSEMBLY
    case CallDescriptor::kCallBuiltinPointer:
      // The common case for builtin pointers is to have the target in a
      // register. If we have a constant, we use a register anyway to simplify
      // related code.
      buffer->instruction_args.push_back(
          call_use_fixed_target_reg
              ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->instruction_args.push_back(
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
      break;
  }
  DCHECK_EQ(1u, buffer->instruction_args.size());

  // Argument 1 is used for poison-alias index (encoded in a word-sized
  // immediate. This an index of the operand that aliases with poison register
  // or -1 if there is no aliasing.
  buffer->instruction_args.push_back(g.TempImmediate(-1));
  const size_t poison_alias_index = 1;
  DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);

  // If the call needs a frame state, we insert the state information as
  // follows (n is the number of value inputs to the frame state):
  // arg 2               : deoptimization id.
  // arg 3 - arg (n + 2) : value inputs to the frame state.
  size_t frame_state_entries = 0;
  USE(frame_state_entries);  // frame_state_entries is only used for debug.
  if (buffer->frame_state_descriptor != nullptr) {
    FrameState frame_state{
        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()))};

    // If it was a syntactic tail call we need to drop the current frame and
    // all the frames on top of it that are either an arguments adaptor frame
    // or a tail caller frame.
    if (is_tail_call) {
      frame_state = FrameState{NodeProperties::GetFrameStateInput(frame_state)};
      buffer->frame_state_descriptor =
          buffer->frame_state_descriptor->outer_state();
      while (buffer->frame_state_descriptor != nullptr &&
             buffer->frame_state_descriptor->type() ==
                 FrameStateType::kArgumentsAdaptor) {
        frame_state =
            FrameState{NodeProperties::GetFrameStateInput(frame_state)};
        buffer->frame_state_descriptor =
            buffer->frame_state_descriptor->outer_state();
      }
    }

    int const state_id = sequence()->AddDeoptimizationEntry(
        buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
        DeoptimizeReason::kUnknown, FeedbackSource());
    buffer->instruction_args.push_back(g.TempImmediate(state_id));

    StateObjectDeduplicator deduplicator(instruction_zone());

    frame_state_entries =
        1 + AddInputsToFrameStateDescriptor(
                buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
                &buffer->instruction_args, FrameStateInputKind::kStackSlot,
                instruction_zone());

    DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
  }

  size_t input_count = static_cast<size_t>(buffer->input_count());

  // Split the arguments into pushed_nodes and instruction_args. Pushed
  // arguments require an explicit push instruction before the call and do
  // not appear as arguments to the call. Everything else ends up
  // as an InstructionOperand argument to the call.
  auto iter(call->inputs().begin());
  size_t pushed_count = 0;
  bool call_tail = (flags & kCallTail) != 0;
  for (size_t index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode());
    if (index == 0) continue;  // The first argument (callee) is already done.
    LinkageLocation location = buffer->descriptor->GetInputLocation(index);
    if (call_tail) {
      // Rebase stack locations onto the tail caller's frame.
      location = LinkageLocation::ConvertToTailCallerLocation(
          location, stack_param_delta);
    }
    InstructionOperand op = g.UseLocation(*iter, location);
    UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
    if (unallocated.HasFixedSlotPolicy() && !call_tail) {
      int stack_index = buffer->descriptor->GetStackIndexFromSlot(
          unallocated.fixed_slot_index());
      // This can insert empty slots before stack_index and will insert enough
      // slots after stack_index to store the parameter.
      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
        int num_slots = std::max(
            1, (ElementSizeInBytes(location.GetType().representation()) /
                kSystemPointerSize));
        buffer->pushed_nodes.resize(stack_index + num_slots);
      }
      PushParameter param = {*iter, location};
      buffer->pushed_nodes[stack_index] = param;
      pushed_count++;
    } else {
      // If we do load poisoning and the linkage uses the poisoning register,
      // then we request the input in memory location, and during code
      // generation, we move the input to the register.
      if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
          unallocated.HasFixedRegisterPolicy()) {
        int reg = unallocated.fixed_register_index();
        if (Register::from_code(reg) == kSpeculationPoisonRegister) {
          buffer->instruction_args[poison_alias_index] = g.TempImmediate(
              static_cast<int32_t>(buffer->instruction_args.size()));
          op = g.UseRegisterOrSlotOrConstant(*iter);
        }
      }
      buffer->instruction_args.push_back(op);
    }
  }
  DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
                             frame_state_entries - 1);
  if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
      stack_param_delta != 0) {
    // For tail calls that change the size of their parameter list and keep
    // their return address on the stack, move the return address to just above
    // the parameters.
    LinkageLocation saved_return_location =
        LinkageLocation::ForSavedCallerReturnAddress();
    InstructionOperand return_address =
        g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
                                 saved_return_location, stack_param_delta),
                             saved_return_location);
    buffer->instruction_args.push_back(return_address);
  }
}
// Returns true if a known source position should be attached to the
// instructions emitted for {node}: either all positions are requested, or the
// node is one of the opcodes that can trap/call and therefore needs one.
bool InstructionSelector::IsSourcePositionUsed(Node* node) {
  if (source_position_mode_ == kAllSourcePositions) return true;
  switch (node->opcode()) {
    case IrOpcode::kCall:
    case IrOpcode::kTrapIf:
    case IrOpcode::kTrapUnless:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kProtectedStore:
      return true;
    default:
      return false;
  }
}
// Selects instructions for one basic block. Effect levels are assigned in a
// forward pass; code is then generated in reverse control-flow order
// (control input first, then nodes back-to-front), with each node's emitted
// instructions reversed so that the buffer ends up in a consistent order.
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK(!current_block_);
  current_block_ = block;
  auto current_num_instructions = [&] {
    DCHECK_GE(kMaxInt, instructions_.size());
    return static_cast<int>(instructions_.size());
  };
  int current_block_end = current_num_instructions();

  // Forward pass: assign each node an effect level; the level increments
  // after every node that can have observable memory effects.
  int effect_level = 0;
  for (Node* const node : *block) {
    SetEffectLevel(node, effect_level);
    if (node->opcode() == IrOpcode::kStore ||
        node->opcode() == IrOpcode::kUnalignedStore ||
        node->opcode() == IrOpcode::kCall ||
        node->opcode() == IrOpcode::kProtectedLoad ||
        node->opcode() == IrOpcode::kProtectedStore ||
        node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
  node->opcode() == IrOpcode::k##Opcode ||
        MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
#undef ADD_EFFECT_FOR_ATOMIC_OP
        node->opcode() == IrOpcode::kMemoryBarrier) {
      ++effect_level;
    }
  }

  // We visit the control first, then the nodes in the block, so the block's
  // control input should be on the same effect level as the last node.
  if (block->control_input() != nullptr) {
    SetEffectLevel(block->control_input(), effect_level);
  }

  // Reverses the instructions just emitted for one node and records the
  // node's source position on the last instruction when needed. Returns
  // false if selection failed and the caller should bail out.
  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
    if (instruction_selection_failed()) return false;
    if (current_num_instructions() == instruction_start) return true;
    std::reverse(instructions_.begin() + instruction_start,
                 instructions_.end());
    if (!node) return true;
    if (!source_positions_) return true;
    SourcePosition source_position = source_positions_->GetSourcePosition(node);
    if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
      sequence()->SetSourcePosition(instructions_.back(), source_position);
    }
    return true;
  };

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  if (!FinishEmittedInstructions(block->control_input(), current_block_end)) {
    return;
  }

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (auto node : base::Reversed(*block)) {
    int current_node_end = current_num_instructions();
    // Skip nodes that are unused or already defined.
    if (IsUsed(node) && !IsDefined(node)) {
      // Generate code for this node "top down", but schedule the code "bottom
      // up".
      VisitNode(node);
      if (!FinishEmittedInstructions(node, current_node_end)) return;
    }
    if (trace_turbo_ == kEnableTraceTurboJson) {
      instr_origins_[node->id()] = {current_num_instructions(),
                                    current_node_end};
    }
  }

  // We're done with the block.
  InstructionBlock* instruction_block =
      sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
  if (current_num_instructions() == current_block_end) {
    // Avoid empty block: insert a {kArchNop} instruction.
    Emit(Instruction::New(sequence()->zone(), kArchNop));
  }
  // NOTE: start/end look swapped because the instruction buffer is filled in
  // reverse order here; presumably it is re-ordered later — confirm against
  // the sequence assembly code.
  instruction_block->set_code_start(current_num_instructions());
  instruction_block->set_code_end(current_block_end);
  current_block_ = nullptr;
}
// Dispatches on the block's control kind (goto, call, tail call, branch,
// switch, return, deoptimize, throw) and visits the corresponding control
// node. Also records instruction origins for --trace-turbo JSON output.
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
  // SSA deconstruction requires targets of branches not to have phis.
  // Edge split form guarantees this property, but is more strict.
  if (block->SuccessorCount() > 1) {
    for (BasicBlock* const successor : block->successors()) {
      for (Node* const node : *successor) {
        if (IrOpcode::IsPhiOpcode(node->opcode())) {
          std::ostringstream str;
          str << "You might have specified merged variables for a label with "
              << "only one predecessor." << std::endl
              << "# Current Block: " << *successor << std::endl
              << "# Node: " << *node;
          FATAL("%s", str.str().c_str());
        }
      }
    }
  }
#endif

  Node* input = block->control_input();
  int instruction_end = static_cast<int>(instructions_.size());
  switch (block->control()) {
    case BasicBlock::kGoto:
      VisitGoto(block->SuccessorAt(0));
      break;
    case BasicBlock::kCall: {
      DCHECK_EQ(IrOpcode::kCall, input->opcode());
      // A call with control output has two successors: normal continuation
      // and exception handler.
      BasicBlock* success = block->SuccessorAt(0);
      BasicBlock* exception = block->SuccessorAt(1);
      VisitCall(input, exception);
      VisitGoto(success);
      break;
    }
    case BasicBlock::kTailCall: {
      DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
      VisitTailCall(input);
      break;
    }
    case BasicBlock::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // Degenerate branch where both targets coincide becomes a plain goto.
      if (tbranch == fbranch) {
        VisitGoto(tbranch);
      } else {
        VisitBranch(input, tbranch, fbranch);
      }
      break;
    }
    case BasicBlock::kSwitch: {
      DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
      // Last successor must be {IfDefault}.
      BasicBlock* default_branch = block->successors().back();
      DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
      // All other successors must be {IfValue}s.
      // Collect the cases and track the value range so the backend can pick
      // between a jump table and a tree of comparisons.
      int32_t min_value = std::numeric_limits<int32_t>::max();
      int32_t max_value = std::numeric_limits<int32_t>::min();
      size_t case_count = block->SuccessorCount() - 1;
      ZoneVector<CaseInfo> cases(case_count, zone());
      for (size_t i = 0; i < case_count; ++i) {
        BasicBlock* branch = block->SuccessorAt(i);
        const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
        cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
        if (min_value > p.value()) min_value = p.value();
        if (max_value < p.value()) max_value = p.value();
      }
      SwitchInfo sw(cases, min_value, max_value, default_branch);
      VisitSwitch(input, sw);
      break;
    }
    case BasicBlock::kReturn: {
      DCHECK_EQ(IrOpcode::kReturn, input->opcode());
      VisitReturn(input);
      break;
    }
    case BasicBlock::kDeoptimize: {
      DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
      FrameState value{input->InputAt(0)};
      VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
      break;
    }
    case BasicBlock::kThrow:
      DCHECK_EQ(IrOpcode::kThrow, input->opcode());
      VisitThrow(input);
      break;
    case BasicBlock::kNone: {
      // Exit block doesn't have control.
      DCHECK_NULL(input);
      break;
    }
    default:
      UNREACHABLE();
  }
  // Record [start, end) of the instructions emitted for this control node for
  // --trace-turbo JSON output.
  if (trace_turbo_ == kEnableTraceTurboJson && input) {
    int instruction_start = static_cast<int>(instructions_.size());
    instr_origins_[input->id()] = {instruction_start, instruction_end};
  }
}
// Marks both halves of a word32-pair result (projections 0 and 1 of {node})
// as word32, when the corresponding projection exists.
void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
  for (int index = 0; index <= 1; ++index) {
    Node* projection = NodeProperties::FindProjection(node, index);
    if (projection != nullptr) {
      MarkAsWord32(projection);
    }
  }
}
void InstructionSelector::VisitNode(Node* node) {
tick_counter_->TickAndMaybeEnterSafepoint();
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
case IrOpcode::kLoop:
case IrOpcode::kEnd:
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kIfSuccess:
case IrOpcode::kSwitch:
case IrOpcode::kIfValue:
case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kTerminate:
case IrOpcode::kBeginRegion:
// No code needed for these graph artifacts.
return;
case IrOpcode::kIfException:
return MarkAsTagged(node), VisitIfException(node);
case IrOpcode::kFinishRegion:
return MarkAsTagged(node), VisitFinishRegion(node);
case IrOpcode::kParameter: {
// Parameters should always be scheduled to the first block.
DCHECK_EQ(schedule()->block(node)->rpo_number(), 0);
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
MarkAsRepresentation(type.representation(), node);
return VisitParameter(node);
}
case IrOpcode::kOsrValue:
return MarkAsTagged(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
MachineRepresentation rep = PhiRepresentationOf(node->op());
if (rep == MachineRepresentation::kNone) return;
MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
case IrOpcode::kProjection:
return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kTaggedIndexConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
return MarkAsFloat32(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return MarkAsTagged(node), VisitConstant(node);
case IrOpcode::kCompressedHeapConstant:
return MarkAsCompressed(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
double value = OpParameter<double>(node->op());
if (!IsSmiDouble(value)) MarkAsTagged(node);
return VisitConstant(node);
}
case IrOpcode::kDelayedStringConstant:
return MarkAsTagged(node), VisitConstant(node);
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kDeoptimizeIf:
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
case IrOpcode::kDynamicCheckMapsWithDeoptUnless:
return VisitDynamicCheckMapsWithDeoptUnless(node);
case IrOpcode::kTrapIf:
return VisitTrapIf(node, TrapIdOf(node->op()));
case IrOpcode::kTrapUnless:
return VisitTrapUnless(node, TrapIdOf(node->op()));
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
case IrOpcode::kAbortCSAAssert:
VisitAbortCSAAssert(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
return;
case IrOpcode::kUnreachable:
VisitUnreachable(node);
return;
case IrOpcode::kStaticAssert:
VisitStaticAssert(node);
return;
case IrOpcode::kDeadValue:
VisitDeadValue(node);
return;
case IrOpcode::kComment:
VisitComment(node);
return;
case IrOpcode::kRetain:
VisitRetain(node);
return;
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
case IrOpcode::kLoadTransform: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadTransform(node);
}
case IrOpcode::kLoadLane: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
case IrOpcode::kPoisonedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitPoisonedLoad(node);
}
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
return VisitProtectedStore(node);
case IrOpcode::kStoreLane: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitStoreLane(node);
}
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
return MarkAsWord32(node), VisitWord32Or(node);
case IrOpcode::kWord32Xor:
return MarkAsWord32(node), VisitWord32Xor(node);
case IrOpcode::kWord32Shl:
return MarkAsWord32(node), VisitWord32Shl(node);
case IrOpcode::kWord32Shr:
return MarkAsWord32(node), VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
return MarkAsWord32(node), VisitWord32Sar(node);
case IrOpcode::kWord32Rol:
return MarkAsWord32(node), VisitWord32Rol(node);
case IrOpcode::kWord32Ror:
return MarkAsWord32(node), VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
case IrOpcode::kWord32Clz:
return MarkAsWord32(node), VisitWord32Clz(node);
case IrOpcode::kWord32Ctz:
return MarkAsWord32(node), VisitWord32Ctz(node);
case IrOpcode::kWord32ReverseBits:
return MarkAsWord32(node), VisitWord32ReverseBits(node);
case IrOpcode::kWord32ReverseBytes:
return MarkAsWord32(node), VisitWord32ReverseBytes(node);
case IrOpcode::kInt32AbsWithOverflow:
return MarkAsWord32(node), VisitInt32AbsWithOverflow(node);
case IrOpcode::kWord32Popcnt:
return MarkAsWord32(node), VisitWord32Popcnt(node);
case IrOpcode::kWord64Popcnt:
return MarkAsWord32(node), VisitWord64Popcnt(node);
case IrOpcode::kWord32Select:
return MarkAsWord32(node), VisitSelect(node);
case IrOpcode::kWord64And:
return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
return MarkAsWord64(node), VisitWord64Or(node);
case IrOpcode::kWord64Xor:
return MarkAsWord64(node), VisitWord64Xor(node);
case IrOpcode::kWord64Shl:
return MarkAsWord64(node), VisitWord64Shl(node);
case IrOpcode::kWord64Shr:
return MarkAsWord64(node), VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Rol:
return MarkAsWord64(node), VisitWord64Rol(node);
case IrOpcode::kWord64Ror:
return MarkAsWord64(node), VisitWord64Ror(node);
case IrOpcode::kWord64Clz:
return MarkAsWord64(node), VisitWord64Clz(node);
case IrOpcode::kWord64Ctz:
return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64ReverseBits:
return MarkAsWord64(node), VisitWord64ReverseBits(node);
case IrOpcode::kWord64ReverseBytes:
return MarkAsWord64(node), VisitWord64ReverseBytes(node);
case IrOpcode::kSimd128ReverseBytes:
return MarkAsSimd128(node), VisitSimd128ReverseBytes(node);
case IrOpcode::kInt64AbsWithOverflow:
return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kWord64Select:
return MarkAsWord64(node), VisitSelect(node);
case IrOpcode::kInt32Add:
return MarkAsWord32(node), VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
return MarkAsWord32(node), VisitInt32Sub(node);
case IrOpcode::kInt32SubWithOverflow:
return VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
return MarkAsWord32(node), VisitInt32Mul(node);
case IrOpcode::kInt32MulWithOverflow:
return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
case IrOpcode::kInt32MulHigh:
return VisitInt32MulHigh(node);
case IrOpcode::kInt32Div:
return MarkAsWord32(node), VisitInt32Div(node);
case IrOpcode::kInt32Mod:
return MarkAsWord32(node), VisitInt32Mod(node);
case IrOpcode::kInt32LessThan:
return VisitInt32LessThan(node);
case IrOpcode::kInt32LessThanOrEqual:
return VisitInt32LessThanOrEqual(node);
case IrOpcode::kUint32Div:
return MarkAsWord32(node), VisitUint32Div(node);
case IrOpcode::kUint32LessThan:
return VisitUint32LessThan(node);
case IrOpcode::kUint32LessThanOrEqual:
return VisitUint32LessThanOrEqual(node);
case IrOpcode::kUint32Mod:
return MarkAsWord32(node), VisitUint32Mod(node);
case IrOpcode::kUint32MulHigh:
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return MarkAsWord64(node), VisitInt64Add(node);
case IrOpcode::kInt64AddWithOverflow:
return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
return MarkAsWord64(node), VisitInt64Sub(node);
case IrOpcode::kInt64SubWithOverflow:
return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
return MarkAsWord64(node), VisitInt64Div(node);
case IrOpcode::kInt64Mod:
return MarkAsWord64(node), VisitInt64Mod(node);
case IrOpcode::kInt64LessThan:
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
case IrOpcode::kUint64Div:
return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
case IrOpcode::kUint64LessThanOrEqual:
return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
return MarkAsTagged(node), VisitBitcastWordToTagged(node);
case IrOpcode::kBitcastWordToTaggedSigned:
return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
EmitIdentity(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeInt64ToFloat64:
return MarkAsFloat64(node), VisitChangeInt64ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToInt64:
return MarkAsWord64(node), VisitChangeFloat64ToInt64(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
case IrOpcode::kChangeFloat64ToUint64:
return MarkAsWord64(node), VisitChangeFloat64ToUint64(node);
case IrOpcode::kFloat64SilenceNaN:
MarkAsFloat64(node);
if (CanProduceSignalingNaN(node->InputAt(0))) {
return VisitFloat64SilenceNaN(node);
} else {
return EmitIdentity(node);
}
case IrOpcode::kTruncateFloat64ToInt64:
return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node);
case IrOpcode::kTruncateFloat64ToUint32:
return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt32:
return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
case IrOpcode::kTruncateFloat32ToUint32:
return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
case IrOpcode::kTryTruncateFloat32ToInt64:
return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
case IrOpcode::kTryTruncateFloat64ToInt64:
return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
case IrOpcode::kTryTruncateFloat32ToUint64:
return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
case IrOpcode::kTryTruncateFloat64ToUint64:
return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kBitcastWord32ToWord64:
return MarkAsWord64(node), VisitBitcastWord32ToWord64(node);
case IrOpcode::kChangeInt32ToInt64:
return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToWord32:
return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
case IrOpcode::kRoundFloat64ToInt32:
return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
case IrOpcode::kRoundInt64ToFloat32:
return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
case IrOpcode::kRoundInt32ToFloat32:
return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
case IrOpcode::kRoundInt64ToFloat64:
return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
case IrOpcode::kRoundUint32ToFloat32:
return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
case IrOpcode::kRoundUint64ToFloat32:
return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
case IrOpcode::kRoundUint64ToFloat64:
return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
case IrOpcode::kBitcastFloat64ToInt64:
return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
case IrOpcode::kBitcastInt32ToFloat32:
return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
case IrOpcode::kBitcastInt64ToFloat64:
return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
case IrOpcode::kFloat32Add:
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
return MarkAsFloat32(node), VisitFloat32Sub(node);
case IrOpcode::kFloat32Neg:
return MarkAsFloat32(node), VisitFloat32Neg(node);
case IrOpcode::kFloat32Mul:
return MarkAsFloat32(node), VisitFloat32Mul(node);
case IrOpcode::kFloat32Div:
return MarkAsFloat32(node), VisitFloat32Div(node);
case IrOpcode::kFloat32Abs:
return MarkAsFloat32(node), VisitFloat32Abs(node);
case IrOpcode::kFloat32Sqrt:
return MarkAsFloat32(node), VisitFloat32Sqrt(node);
case IrOpcode::kFloat32Equal:
return VisitFloat32Equal(node);
case IrOpcode::kFloat32LessThan:
return VisitFloat32LessThan(node);
case IrOpcode::kFloat32LessThanOrEqual:
return VisitFloat32LessThanOrEqual(node);
case IrOpcode::kFloat32Max:
return MarkAsFloat32(node), VisitFloat32Max(node);
case IrOpcode::kFloat32Min:
return MarkAsFloat32(node), VisitFloat32Min(node);
case IrOpcode::kFloat32Select:
return MarkAsFloat32(node), VisitSelect(node);
case IrOpcode::kFloat64Add:
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64Neg:
return MarkAsFloat64(node), VisitFloat64Neg(node);
case IrOpcode::kFloat64Mul:
return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
return MarkAsFloat64(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsFloat64(node), VisitFloat64Mod(node);
case IrOpcode::kFloat64Min:
return MarkAsFloat64(node), VisitFloat64Min(node);
case IrOpcode::kFloat64Max:
return MarkAsFloat64(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Abs:
return MarkAsFloat64(node), VisitFloat64Abs(node);
case IrOpcode::kFloat64Acos:
return MarkAsFloat64(node), VisitFloat64Acos(node);
case IrOpcode::kFloat64Acosh:
return MarkAsFloat64(node), VisitFloat64Acosh(node);
case IrOpcode::kFloat64Asin:
return MarkAsFloat64(node), VisitFloat64Asin(node);
case IrOpcode::kFloat64Asinh:
return MarkAsFloat64(node), VisitFloat64Asinh(node);
case IrOpcode::kFloat64Atan:
return MarkAsFloat64(node), VisitFloat64Atan(node);
case IrOpcode::kFloat64Atanh:
return MarkAsFloat64(node), VisitFloat64Atanh(node);
case IrOpcode::kFloat64Atan2:
return MarkAsFloat64(node), VisitFloat64Atan2(node);
case IrOpcode::kFloat64Cbrt:
return MarkAsFloat64(node), VisitFloat64Cbrt(node);
case IrOpcode::kFloat64Cos:
return MarkAsFloat64(node), VisitFloat64Cos(node);
case IrOpcode::kFloat64Cosh:
return MarkAsFloat64(node), VisitFloat64Cosh(node);
case IrOpcode::kFloat64Exp:
return MarkAsFloat64(node), VisitFloat64Exp(node);
case IrOpcode::kFloat64Expm1:
return MarkAsFloat64(node), VisitFloat64Expm1(node);
case IrOpcode::kFloat64Log:
return MarkAsFloat64(node), VisitFloat64Log(node);
case IrOpcode::kFloat64Log1p:
return MarkAsFloat64(node), VisitFloat64Log1p(node);
case IrOpcode::kFloat64Log10:
return MarkAsFloat64(node), VisitFloat64Log10(node);
case IrOpcode::kFloat64Log2:
return MarkAsFloat64(node), VisitFloat64Log2(node);
case IrOpcode::kFloat64Pow:
return MarkAsFloat64(node), VisitFloat64Pow(node);
case IrOpcode::kFloat64Sin:
return MarkAsFloat64(node), VisitFloat64Sin(node);
case IrOpcode::kFloat64Sinh:
return MarkAsFloat64(node), VisitFloat64Sinh(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsFloat64(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Tan:
return MarkAsFloat64(node), VisitFloat64Tan(node);
case IrOpcode::kFloat64Tanh:
return MarkAsFloat64(node), VisitFloat64Tanh(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
case IrOpcode::kFloat64Select:
return MarkAsFloat64(node), VisitSelect(node);
case IrOpcode::kFloat32RoundDown:
return MarkAsFloat32(node), VisitFloat32RoundDown(node);
case IrOpcode::kFloat64RoundDown:
return MarkAsFloat64(node), VisitFloat64RoundDown(node);
case IrOpcode::kFloat32RoundUp:
return MarkAsFloat32(node), VisitFloat32RoundUp(node);
case IrOpcode::kFloat64RoundUp:
return MarkAsFloat64(node), VisitFloat64RoundUp(node);
case IrOpcode::kFloat32RoundTruncate:
return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kFloat32RoundTiesEven:
return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
case IrOpcode::kFloat64RoundTiesEven:
return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
case IrOpcode::kFloat64ExtractLowWord32:
return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
case IrOpcode::kFloat64ExtractHighWord32:
return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
case IrOpcode::kFloat64InsertLowWord32:
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kTaggedPoisonOnSpeculation:
return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
case IrOpcode::kWord32PoisonOnSpeculation:
return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
case IrOpcode::kWord64PoisonOnSpeculation:
return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
return VisitStackPointerGreaterThan(node);
case IrOpcode::kLoadStackCheckOffset:
return VisitLoadStackCheckOffset(node);
case IrOpcode::kLoadFramePointer:
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
case IrOpcode::kUnalignedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitUnalignedLoad(node);
}
case IrOpcode::kUnalignedStore:
return VisitUnalignedStore(node);
case IrOpcode::kInt32PairAdd:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitInt32PairAdd(node);
case IrOpcode::kInt32PairSub:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitInt32PairSub(node);
case IrOpcode::kInt32PairMul:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitInt32PairMul(node);
case IrOpcode::kWord32PairShl:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairShl(node);
case IrOpcode::kWord32PairShr:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairShr(node);
case IrOpcode::kWord32PairSar:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
case IrOpcode::kWord32AtomicStore:
return VisitWord32AtomicStore(node);
case IrOpcode::kWord64AtomicStore:
return VisitWord64AtomicStore(node);
case IrOpcode::kWord32AtomicPairStore:
return VisitWord32AtomicPairStore(node);
case IrOpcode::kWord32AtomicPairLoad: {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32AtomicPairLoad(node);
}
#define ATOMIC_CASE(name, rep) \
case IrOpcode::k##rep##Atomic##name: { \
MachineType type = AtomicOpType(node->op()); \
MarkAsRepresentation(type.representation(), node); \
return Visit##rep##Atomic##name(node); \
}
ATOMIC_CASE(Add, Word32)
ATOMIC_CASE(Add, Word64)
ATOMIC_CASE(Sub, Word32)
ATOMIC_CASE(Sub, Word64)
ATOMIC_CASE(And, Word32)
ATOMIC_CASE(And, Word64)
ATOMIC_CASE(Or, Word32)
ATOMIC_CASE(Or, Word64)
ATOMIC_CASE(Xor, Word32)
ATOMIC_CASE(Xor, Word64)
ATOMIC_CASE(Exchange, Word32)
ATOMIC_CASE(Exchange, Word64)
ATOMIC_CASE(CompareExchange, Word32)
ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
#define ATOMIC_CASE(name) \
case IrOpcode::kWord32AtomicPair##name: { \
MarkAsWord32(node); \
MarkPairProjectionsAsWord32(node); \
return VisitWord32AtomicPair##name(node); \
}
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
#undef ATOMIC_CASE
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
}
case IrOpcode::kSignExtendWord8ToInt32:
return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
case IrOpcode::kSignExtendWord16ToInt32:
return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
case IrOpcode::kSignExtendWord8ToInt64:
return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
case IrOpcode::kSignExtendWord16ToInt64:
return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
case IrOpcode::kSignExtendWord32ToInt64:
return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
case IrOpcode::kF64x2Splat:
return MarkAsSimd128(node), VisitF64x2Splat(node);
case IrOpcode::kF64x2ExtractLane:
return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
case IrOpcode::kF64x2ReplaceLane:
return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
case IrOpcode::kF64x2Abs:
return MarkAsSimd128(node), VisitF64x2Abs(node);
case IrOpcode::kF64x2Neg:
return MarkAsSimd128(node), VisitF64x2Neg(node);
case IrOpcode::kF64x2Sqrt:
return MarkAsSimd128(node), VisitF64x2Sqrt(node);
case IrOpcode::kF64x2Add:
return MarkAsSimd128(node), VisitF64x2Add(node);
case IrOpcode::kF64x2Sub:
return MarkAsSimd128(node), VisitF64x2Sub(node);
case IrOpcode::kF64x2Mul:
return MarkAsSimd128(node), VisitF64x2Mul(node);
case IrOpcode::kF64x2Div:
return MarkAsSimd128(node), VisitF64x2Div(node);
case IrOpcode::kF64x2Min:
return MarkAsSimd128(node), VisitF64x2Min(node);
case IrOpcode::kF64x2Max:
return MarkAsSimd128(node), VisitF64x2Max(node);
case IrOpcode::kF64x2Eq:
return MarkAsSimd128(node), VisitF64x2Eq(node);
case IrOpcode::kF64x2Ne:
return MarkAsSimd128(node), VisitF64x2Ne(node);
case IrOpcode::kF64x2Lt:
return MarkAsSimd128(node), VisitF64x2Lt(node);
case IrOpcode::kF64x2Le:
return MarkAsSimd128(node), VisitF64x2Le(node);
case IrOpcode::kF64x2Qfma:
return MarkAsSimd128(node), VisitF64x2Qfma(node);
case IrOpcode::kF64x2Qfms:
return MarkAsSimd128(node), VisitF64x2Qfms(node);
case IrOpcode::kF64x2Pmin:
return MarkAsSimd128(node), VisitF64x2Pmin(node);
case IrOpcode::kF64x2Pmax:
return MarkAsSimd128(node), VisitF64x2Pmax(node);
case IrOpcode::kF64x2Ceil:
return MarkAsSimd128(node), VisitF64x2Ceil(node);
case IrOpcode::kF64x2Floor:
return MarkAsSimd128(node), VisitF64x2Floor(node);
case IrOpcode::kF64x2Trunc:
return MarkAsSimd128(node), VisitF64x2Trunc(node);
case IrOpcode::kF64x2NearestInt:
return MarkAsSimd128(node), VisitF64x2NearestInt(node);
case IrOpcode::kF64x2ConvertLowI32x4S:
return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4S(node);
case IrOpcode::kF64x2ConvertLowI32x4U:
return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4U(node);
case IrOpcode::kF64x2PromoteLowF32x4:
return MarkAsSimd128(node), VisitF64x2PromoteLowF32x4(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
return MarkAsFloat32(node), VisitF32x4ExtractLane(node);
case IrOpcode::kF32x4ReplaceLane:
return MarkAsSimd128(node), VisitF32x4ReplaceLane(node);
case IrOpcode::kF32x4SConvertI32x4:
return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node);
case IrOpcode::kF32x4UConvertI32x4:
return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node);
case IrOpcode::kF32x4Abs:
return MarkAsSimd128(node), VisitF32x4Abs(node);
case IrOpcode::kF32x4Neg:
return MarkAsSimd128(node), VisitF32x4Neg(node);
case IrOpcode::kF32x4Sqrt:
return MarkAsSimd128(node), VisitF32x4Sqrt(node);
case IrOpcode::kF32x4RecipApprox:
return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
case IrOpcode::kF32x4RecipSqrtApprox:
return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
case IrOpcode::kF32x4Add:
return MarkAsSimd128(node), VisitF32x4Add(node);
case IrOpcode::kF32x4Sub:
return MarkAsSimd128(node), VisitF32x4Sub(node);
case IrOpcode::kF32x4Mul:
return MarkAsSimd128(node), VisitF32x4Mul(node);
case IrOpcode::kF32x4Div:
return MarkAsSimd128(node), VisitF32x4Div(node);
case IrOpcode::kF32x4Min:
return MarkAsSimd128(node), VisitF32x4Min(node);
case IrOpcode::kF32x4Max:
return MarkAsSimd128(node), VisitF32x4Max(node);
case IrOpcode::kF32x4Eq:
return MarkAsSimd128(node), VisitF32x4Eq(node);
case IrOpcode::kF32x4Ne:
return MarkAsSimd128(node), VisitF32x4Ne(node);
case IrOpcode::kF32x4Lt:
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
case IrOpcode::kF32x4Qfma:
return MarkAsSimd128(node), VisitF32x4Qfma(node);
case IrOpcode::kF32x4Qfms:
return MarkAsSimd128(node), VisitF32x4Qfms(node);
case IrOpcode::kF32x4Pmin:
return MarkAsSimd128(node), VisitF32x4Pmin(node);
case IrOpcode::kF32x4Pmax:
return MarkAsSimd128(node), VisitF32x4Pmax(node);
case IrOpcode::kF32x4Ceil:
return MarkAsSimd128(node), VisitF32x4Ceil(node);
case IrOpcode::kF32x4Floor:
return MarkAsSimd128(node), VisitF32x4Floor(node);
case IrOpcode::kF32x4Trunc:
return MarkAsSimd128(node), VisitF32x4Trunc(node);
case IrOpcode::kF32x4NearestInt:
return MarkAsSimd128(node), VisitF32x4NearestInt(node);
case IrOpcode::kF32x4DemoteF64x2Zero:
return MarkAsSimd128(node), VisitF32x4DemoteF64x2Zero(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
return MarkAsSimd128(node), VisitI64x2SplatI32Pair(node);
case IrOpcode::kI64x2ExtractLane:
return MarkAsWord64(node), VisitI64x2ExtractLane(node);
case IrOpcode::kI64x2ReplaceLane:
return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
case IrOpcode::kI64x2ReplaceLaneI32Pair:
return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node);
case IrOpcode::kI64x2Abs:
return MarkAsSimd128(node), VisitI64x2Abs(node);
case IrOpcode::kI64x2Neg:
return MarkAsSimd128(node), VisitI64x2Neg(node);
case IrOpcode::kI64x2SConvertI32x4Low:
return MarkAsSimd128(node), VisitI64x2SConvertI32x4Low(node);
case IrOpcode::kI64x2SConvertI32x4High:
return MarkAsSimd128(node), VisitI64x2SConvertI32x4High(node);
case IrOpcode::kI64x2UConvertI32x4Low:
return MarkAsSimd128(node), VisitI64x2UConvertI32x4Low(node);
case IrOpcode::kI64x2UConvertI32x4High:
return MarkAsSimd128(node), VisitI64x2UConvertI32x4High(node);
case IrOpcode::kI64x2BitMask:
return MarkAsWord32(node), VisitI64x2BitMask(node);
case IrOpcode::kI64x2Shl:
return MarkAsSimd128(node), VisitI64x2Shl(node);
case IrOpcode::kI64x2ShrS:
return MarkAsSimd128(node), VisitI64x2ShrS(node);
case IrOpcode::kI64x2Add:
return MarkAsSimd128(node), VisitI64x2Add(node);
case IrOpcode::kI64x2Sub:
return MarkAsSimd128(node), VisitI64x2Sub(node);
case IrOpcode::kI64x2Mul:
return MarkAsSimd128(node), VisitI64x2Mul(node);
case IrOpcode::kI64x2Eq:
return MarkAsSimd128(node), VisitI64x2Eq(node);
case IrOpcode::kI64x2Ne:
return MarkAsSimd128(node), VisitI64x2Ne(node);
case IrOpcode::kI64x2GtS:
return MarkAsSimd128(node), VisitI64x2GtS(node);
case IrOpcode::kI64x2GeS:
return MarkAsSimd128(node), VisitI64x2GeS(node);
case IrOpcode::kI64x2ShrU:
return MarkAsSimd128(node), VisitI64x2ShrU(node);
case IrOpcode::kI64x2ExtMulLowI32x4S:
return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4S(node);
case IrOpcode::kI64x2ExtMulHighI32x4S:
return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4S(node);
case IrOpcode::kI64x2ExtMulLowI32x4U:
return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4U(node);
case IrOpcode::kI64x2ExtMulHighI32x4U:
return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4U(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
return MarkAsWord32(node), VisitI32x4ExtractLane(node);
case IrOpcode::kI32x4ReplaceLane:
return MarkAsSimd128(node), VisitI32x4ReplaceLane(node);
case IrOpcode::kI32x4SConvertF32x4:
return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node);
case IrOpcode::kI32x4SConvertI16x8Low:
return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node);
case IrOpcode::kI32x4SConvertI16x8High:
return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node);
case IrOpcode::kI32x4Neg:
return MarkAsSimd128(node), VisitI32x4Neg(node);
case IrOpcode::kI32x4Shl:
return MarkAsSimd128(node), VisitI32x4Shl(node);
case IrOpcode::kI32x4ShrS:
return MarkAsSimd128(node), VisitI32x4ShrS(node);
case IrOpcode::kI32x4Add:
return MarkAsSimd128(node), VisitI32x4Add(node);
case IrOpcode::kI32x4Sub:
return MarkAsSimd128(node), VisitI32x4Sub(node);
case IrOpcode::kI32x4Mul:
return MarkAsSimd128(node), VisitI32x4Mul(node);
case IrOpcode::kI32x4MinS:
return MarkAsSimd128(node), VisitI32x4MinS(node);
case IrOpcode::kI32x4MaxS:
return MarkAsSimd128(node), VisitI32x4MaxS(node);
case IrOpcode::kI32x4Eq:
return MarkAsSimd128(node), VisitI32x4Eq(node);
case IrOpcode::kI32x4Ne:
return MarkAsSimd128(node), VisitI32x4Ne(node);
case IrOpcode::kI32x4GtS:
return MarkAsSimd128(node), VisitI32x4GtS(node);
case IrOpcode::kI32x4GeS:
return MarkAsSimd128(node), VisitI32x4GeS(node);
case IrOpcode::kI32x4UConvertF32x4:
return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
case IrOpcode::kI32x4UConvertI16x8Low:
return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node);
case IrOpcode::kI32x4UConvertI16x8High:
return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node);
case IrOpcode::kI32x4ShrU:
return MarkAsSimd128(node), VisitI32x4ShrU(node);
case IrOpcode::kI32x4MinU:
return MarkAsSimd128(node), VisitI32x4MinU(node);
case IrOpcode::kI32x4MaxU:
return MarkAsSimd128(node), VisitI32x4MaxU(node);
case IrOpcode::kI32x4GtU:
return MarkAsSimd128(node), VisitI32x4GtU(node);
case IrOpcode::kI32x4GeU:
return MarkAsSimd128(node), VisitI32x4GeU(node);
case IrOpcode::kI32x4Abs:
return MarkAsSimd128(node), VisitI32x4Abs(node);
case IrOpcode::kI32x4BitMask:
return MarkAsWord32(node), VisitI32x4BitMask(node);
case IrOpcode::kI32x4DotI16x8S:
return MarkAsSimd128(node), VisitI32x4DotI16x8S(node);
case IrOpcode::kI32x4ExtMulLowI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8S(node);
case IrOpcode::kI32x4ExtMulHighI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8S(node);
case IrOpcode::kI32x4ExtMulLowI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8U(node);
case IrOpcode::kI32x4ExtMulHighI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8U(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node);
case IrOpcode::kI32x4TruncSatF64x2SZero:
return MarkAsSimd128(node), VisitI32x4TruncSatF64x2SZero(node);
case IrOpcode::kI32x4TruncSatF64x2UZero:
return MarkAsSimd128(node), VisitI32x4TruncSatF64x2UZero(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
return MarkAsWord32(node), VisitI16x8ExtractLaneU(node);
case IrOpcode::kI16x8ExtractLaneS:
return MarkAsWord32(node), VisitI16x8ExtractLaneS(node);
case IrOpcode::kI16x8ReplaceLane:
return MarkAsSimd128(node), VisitI16x8ReplaceLane(node);
case IrOpcode::kI16x8SConvertI8x16Low:
return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node);
case IrOpcode::kI16x8SConvertI8x16High:
return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node);
case IrOpcode::kI16x8Neg:
return MarkAsSimd128(node), VisitI16x8Neg(node);
case IrOpcode::kI16x8Shl:
return MarkAsSimd128(node), VisitI16x8Shl(node);
case IrOpcode::kI16x8ShrS:
return MarkAsSimd128(node), VisitI16x8ShrS(node);
case IrOpcode::kI16x8SConvertI32x4:
return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
case IrOpcode::kI16x8Add:
return MarkAsSimd128(node), VisitI16x8Add(node);
case IrOpcode::kI16x8AddSatS:
return MarkAsSimd128(node), VisitI16x8AddSatS(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
case IrOpcode::kI16x8SubSatS:
return MarkAsSimd128(node), VisitI16x8SubSatS(node);
case IrOpcode::kI16x8Mul:
return MarkAsSimd128(node), VisitI16x8Mul(node);
case IrOpcode::kI16x8MinS:
return MarkAsSimd128(node), VisitI16x8MinS(node);
case IrOpcode::kI16x8MaxS:
return MarkAsSimd128(node), VisitI16x8MaxS(node);
case IrOpcode::kI16x8Eq:
return MarkAsSimd128(node), VisitI16x8Eq(node);
case IrOpcode::kI16x8Ne:
return MarkAsSimd128(node), VisitI16x8Ne(node);
case IrOpcode::kI16x8GtS:
return MarkAsSimd128(node), VisitI16x8GtS(node);
case IrOpcode::kI16x8GeS:
return MarkAsSimd128(node), VisitI16x8GeS(node);
case IrOpcode::kI16x8UConvertI8x16Low:
return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
case IrOpcode::kI16x8UConvertI8x16High:
return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node);
case IrOpcode::kI16x8ShrU:
return MarkAsSimd128(node), VisitI16x8ShrU(node);
case IrOpcode::kI16x8UConvertI32x4:
return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
case IrOpcode::kI16x8AddSatU:
return MarkAsSimd128(node), VisitI16x8AddSatU(node);
case IrOpcode::kI16x8SubSatU:
return MarkAsSimd128(node), VisitI16x8SubSatU(node);
case IrOpcode::kI16x8MinU:
return MarkAsSimd128(node), VisitI16x8MinU(node);
case IrOpcode::kI16x8MaxU:
return MarkAsSimd128(node), VisitI16x8MaxU(node);
case IrOpcode::kI16x8GtU:
return MarkAsSimd128(node), VisitI16x8GtU(node);
case IrOpcode::kI16x8GeU:
return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI16x8RoundingAverageU:
return MarkAsSimd128(node), VisitI16x8RoundingAverageU(node);
case IrOpcode::kI16x8Q15MulRSatS:
return MarkAsSimd128(node), VisitI16x8Q15MulRSatS(node);
case IrOpcode::kI16x8Abs:
return MarkAsSimd128(node), VisitI16x8Abs(node);
case IrOpcode::kI16x8BitMask:
return MarkAsWord32(node), VisitI16x8BitMask(node);
case IrOpcode::kI16x8ExtMulLowI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16S(node);
case IrOpcode::kI16x8ExtMulHighI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16S(node);
case IrOpcode::kI16x8ExtMulLowI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16U(node);
case IrOpcode::kI16x8ExtMulHighI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16U(node);
case IrOpcode::kI16x8ExtAddPairwiseI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16S(node);
case IrOpcode::kI16x8ExtAddPairwiseI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16U(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLaneU:
return MarkAsWord32(node), VisitI8x16ExtractLaneU(node);
case IrOpcode::kI8x16ExtractLaneS:
return MarkAsWord32(node), VisitI8x16ExtractLaneS(node);
case IrOpcode::kI8x16ReplaceLane:
return MarkAsSimd128(node), VisitI8x16ReplaceLane(node);
case IrOpcode::kI8x16Neg:
return MarkAsSimd128(node), VisitI8x16Neg(node);
case IrOpcode::kI8x16Shl:
return MarkAsSimd128(node), VisitI8x16Shl(node);
case IrOpcode::kI8x16ShrS:
return MarkAsSimd128(node), VisitI8x16ShrS(node);
case IrOpcode::kI8x16SConvertI16x8:
return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
case IrOpcode::kI8x16Add:
return MarkAsSimd128(node), VisitI8x16Add(node);
case IrOpcode::kI8x16AddSatS:
return MarkAsSimd128(node), VisitI8x16AddSatS(node);
case IrOpcode::kI8x16Sub:
return MarkAsSimd128(node), VisitI8x16Sub(node);
case IrOpcode::kI8x16SubSatS:
return MarkAsSimd128(node), VisitI8x16SubSatS(node);
case IrOpcode::kI8x16MinS:
return MarkAsSimd128(node), VisitI8x16MinS(node);
case IrOpcode::kI8x16MaxS:
return MarkAsSimd128(node), VisitI8x16MaxS(node);
case IrOpcode::kI8x16Eq:
return MarkAsSimd128(node), VisitI8x16Eq(node);
case IrOpcode::kI8x16Ne:
return MarkAsSimd128(node), VisitI8x16Ne(node);
case IrOpcode::kI8x16GtS:
return MarkAsSimd128(node), VisitI8x16GtS(node);
case IrOpcode::kI8x16GeS:
return MarkAsSimd128(node), VisitI8x16GeS(node);
case IrOpcode::kI8x16ShrU:
return MarkAsSimd128(node), VisitI8x16ShrU(node);
case IrOpcode::kI8x16UConvertI16x8:
return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
case IrOpcode::kI8x16AddSatU:
return MarkAsSimd128(node), VisitI8x16AddSatU(node);
case IrOpcode::kI8x16SubSatU:
return MarkAsSimd128(node), VisitI8x16SubSatU(node);
case IrOpcode::kI8x16MinU:
return MarkAsSimd128(node), VisitI8x16MinU(node);
case IrOpcode::kI8x16MaxU:
return MarkAsSimd128(node), VisitI8x16MaxU(node);
case IrOpcode::kI8x16GtU:
return MarkAsSimd128(node), VisitI8x16GtU(node);
case IrOpcode::kI8x16GeU:
return MarkAsSimd128(node), VisitI8x16GeU(node);
case IrOpcode::kI8x16RoundingAverageU:
return MarkAsSimd128(node), VisitI8x16RoundingAverageU(node);
case IrOpcode::kI8x16Popcnt:
return MarkAsSimd128(node), VisitI8x16Popcnt(node);
case IrOpcode::kI8x16Abs:
return MarkAsSimd128(node), VisitI8x16Abs(node);
case IrOpcode::kI8x16BitMask:
return MarkAsWord32(node), VisitI8x16BitMask(node);
case IrOpcode::kS128Const:
return MarkAsSimd128(node), VisitS128Const(node);
case IrOpcode::kS128Zero:
return MarkAsSimd128(node), VisitS128Zero(node);
case IrOpcode::kS128And:
return MarkAsSimd128(node), VisitS128And(node);
case IrOpcode::kS128Or:
return MarkAsSimd128(node), VisitS128Or(node);
case IrOpcode::kS128Xor:
return MarkAsSimd128(node), VisitS128Xor(node);
case IrOpcode::kS128Not:
return MarkAsSimd128(node), VisitS128Not(node);
case IrOpcode::kS128Select:
return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS128AndNot:
return MarkAsSimd128(node), VisitS128AndNot(node);
case IrOpcode::kI8x16Swizzle:
return MarkAsSimd128(node), VisitI8x16Swizzle(node);
case IrOpcode::kI8x16Shuffle:
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
case IrOpcode::kV128AnyTrue:
return MarkAsWord32(node), VisitV128AnyTrue(node);
case IrOpcode::kI64x2AllTrue:
return MarkAsWord32(node), VisitI64x2AllTrue(node);
case IrOpcode::kI32x4AllTrue:
return MarkAsWord32(node), VisitI32x4AllTrue(node);
case IrOpcode::kI16x8AllTrue:
return MarkAsWord32(node), VisitI16x8AllTrue(node);
case IrOpcode::kI8x16AllTrue:
return MarkAsWord32(node), VisitI8x16AllTrue(node);
default:
FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
node->op()->mnemonic(), node->id());
break;
}
}
// Emits the speculation-poisoning mask for |node|'s word-sized value.
// When poisoning is disabled the node degenerates to an identity move.
void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
  if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) {
    // No mitigation requested: simply forward the input value.
    EmitIdentity(node);
    return;
  }
  OperandGenerator gen(this);
  Node* const value = NodeProperties::GetValueInput(node, 0);
  // Keep the original operand-creation order: use before define.
  InstructionOperand const input = gen.UseRegister(value);
  InstructionOperand const output = gen.DefineSameAsFirst(node);
  Emit(kArchWordPoisonOnSpeculation, output, input);
}
// The three PoisonOnSpeculation visitors all delegate to the same helper;
// the representation difference is handled by the caller (VisitNode marks
// the node as Word32/Word64/Tagged before dispatching here).
void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
  EmitWordPoisonOnSpeculation(node);
}
void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
  EmitWordPoisonOnSpeculation(node);
}
void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
  EmitWordPoisonOnSpeculation(node);
}
// Lowers the stack-overflow check by delegating to the
// architecture-specific overload with a "set" continuation, which
// materializes the comparison outcome as |node|'s value.
void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
  FlagsContinuation set_continuation =
      FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
  VisitStackPointerGreaterThan(node, &set_continuation);
}
// Emits the architecture pseudo-instruction that materializes the stack
// check offset into a fresh register.
void InstructionSelector::VisitLoadStackCheckOffset(Node* node) {
  OperandGenerator g(this);
  Emit(kArchStackCheckOffset, g.DefineAsRegister(node));
}
// Loads the current frame pointer into a fresh register.
void InstructionSelector::VisitLoadFramePointer(Node* node) {
  OperandGenerator g(this);
  Emit(kArchFramePointer, g.DefineAsRegister(node));
}
// Loads the parent frame pointer into a fresh register.
void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
  OperandGenerator g(this);
  Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
// Float64 transcendental operations: each visitor forwards to the shared
// IEEE-754 unop/binop helper with the matching opcode; the helper emits a
// call to the corresponding runtime/ieee754 implementation.
void InstructionSelector::VisitFloat64Acos(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
}
void InstructionSelector::VisitFloat64Acosh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
}
void InstructionSelector::VisitFloat64Asin(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
}
void InstructionSelector::VisitFloat64Asinh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
}
void InstructionSelector::VisitFloat64Atan(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
}
void InstructionSelector::VisitFloat64Atanh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
}
// atan2 is a two-operand operation, hence the Binop helper.
void InstructionSelector::VisitFloat64Atan2(Node* node) {
  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
}
void InstructionSelector::VisitFloat64Cbrt(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
}
void InstructionSelector::VisitFloat64Cos(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
}
void InstructionSelector::VisitFloat64Cosh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
}
void InstructionSelector::VisitFloat64Exp(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
}
void InstructionSelector::VisitFloat64Expm1(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
}
void InstructionSelector::VisitFloat64Log(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
}
void InstructionSelector::VisitFloat64Log1p(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
}
void InstructionSelector::VisitFloat64Log2(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
}
void InstructionSelector::VisitFloat64Log10(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
}
// pow is a two-operand operation, hence the Binop helper.
void InstructionSelector::VisitFloat64Pow(Node* node) {
  VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
}
void InstructionSelector::VisitFloat64Sin(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
}
void InstructionSelector::VisitFloat64Sinh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
}
void InstructionSelector::VisitFloat64Tan(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
}
void InstructionSelector::VisitFloat64Tanh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
}
void InstructionSelector::EmitTableSwitch(
const SwitchInfo& sw, InstructionOperand const& index_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.value_range();
DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = index_operand;
InstructionOperand default_operand = g.Label(sw.default_branch());
std::fill(&inputs[1], &inputs[input_count], default_operand);
for (const CaseInfo& c : sw.CasesUnsorted()) {
size_t value = c.value - sw.min_value();
DCHECK_LE(0u, value);
DCHECK_LT(value + 2, input_count);
inputs[value + 2] = g.Label(c.branch);
}
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
void InstructionSelector::EmitBinarySearchSwitch(
const SwitchInfo& sw, InstructionOperand const& value_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.case_count() * 2;
DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = value_operand;
inputs[1] = g.Label(sw.default_branch());
std::vector<CaseInfo> cases = sw.CasesSortedByValue();
for (size_t index = 0; index < cases.size(); ++index) {
const CaseInfo& c = cases[index];
inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
inputs[index * 2 + 2 + 1] = g.Label(c.branch);
}
Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
// A tagged-to-word bitcast generates no code; the value is forwarded
// unchanged via an identity move.
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
  EmitIdentity(node);
}
// A word-to-tagged bitcast emits no real code (kArchNop), but the output
// operand is constrained to alias the input operand.
void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
  OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kArchNop, gen.DefineSameAsFirst(node), gen.Use(input));
}
// 32 bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT
// Per the note above, these 64-bit machine operators have no lowering on
// 32-bit targets, so reaching any of these stubs aborts.
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Rol(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
UNIMPLEMENTED();
}
#endif // V8_TARGET_ARCH_32_BIT
// 64 bit targets do not implement the following instructions.
// Int32/Word32 pair operations only exist to express 64-bit arithmetic on
// 32-bit targets, so they are unreachable here.
#if V8_TARGET_ARCH_64_BIT
void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
// Word32 atomic pair operations are only implemented by the IA32, ARM and
// MIPS backends; all other architectures stub them out.
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
// Word64 atomic operations are only implemented by the 64-bit backends listed
// below; all other architectures stub them out.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
    !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64
       // && !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 &&
       // !V8_TARGET_ARCH_RISCV64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
void InstructionSelector::VisitI64x2SplatI32Pair(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
  UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// SIMD operations below are only implemented by a subset of backends; note
// the nested guards give MIPS64 and ARM64 partial implementations.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
// Region markers carry no machine code; forward the region's value input.
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
// Defines a parameter at the location assigned by the linkage; a parameter
// may additionally have a secondary location, in which case it is defined at
// both.
void InstructionSelector::VisitParameter(Node* node) {
  OperandGenerator g(this);
  int index = ParameterIndexOf(node->op());
  InstructionOperand op =
      linkage()->ParameterHasSecondaryLocation(index)
          ? g.DefineAsDualLocation(
                node, linkage()->GetParameterLocation(index),
                linkage()->GetParameterSecondaryLocation(index))
          : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
  Emit(kArchNop, op);
}
namespace {
// The exception object delivered to a handler lives in the first return
// register.
LinkageLocation ExceptionLocation() {
  return LinkageLocation::ForRegister(kReturnRegister0.code(),
                                      MachineType::IntPtr());
}
// Packs the low bits of the call descriptor flags into the instruction's
// MiscField so the code generator can recover them.
constexpr InstructionCode EncodeCallDescriptorFlags(
    InstructionCode opcode, CallDescriptor::Flags flags) {
  // Note: Not all bits of `flags` are preserved.
  STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
                MiscField::kSize);
  CONSTEXPR_DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
  return opcode | MiscField::encode(flags & MiscField::kMax);
}
} // namespace
// Defines the exception value at its fixed linkage location; the preceding
// node must be the call that can throw.
void InstructionSelector::VisitIfException(Node* node) {
  OperandGenerator g(this);
  DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
  Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
}
// Defines an OSR (on-stack replacement) input value at the location the
// linkage assigns to its index.
void InstructionSelector::VisitOsrValue(Node* node) {
  OperandGenerator g(this);
  int index = OsrValueIndexOf(node->op());
  Emit(kArchNop,
       g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
}
// Translates an IR phi into a PhiInstruction attached to the current
// instruction block, wiring one virtual register per predecessor input.
void InstructionSelector::VisitPhi(Node* node) {
  const int value_count = node->op()->ValueInputCount();
  DCHECK_EQ(value_count, current_block_->PredecessorCount());
  PhiInstruction* phi = instruction_zone()->New<PhiInstruction>(
      instruction_zone(), GetVirtualRegister(node),
      static_cast<size_t>(value_count));
  sequence()
      ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
      ->AddPhi(phi);
  for (int index = 0; index < value_count; ++index) {
    Node* const operand = node->InputAt(index);
    // Mark each operand as used so its defining instruction is kept alive.
    MarkAsUsed(operand);
    phi->SetInput(static_cast<size_t>(index), GetVirtualRegister(operand));
  }
}
// Lowers a Projection node over a multi-output operation.  Projection 0
// aliases the operation's primary output (via a same-as-first kArchNop);
// projection 1 only marks the operation as used so the instruction that
// produces the secondary output is not eliminated.  Projections over other
// node types need no code here.
void InstructionSelector::VisitProjection(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32MulWithOverflow:
    case IrOpcode::kInt64AddWithOverflow:
    case IrOpcode::kInt64SubWithOverflow:
    case IrOpcode::kTryTruncateFloat32ToInt64:
    case IrOpcode::kTryTruncateFloat64ToInt64:
    case IrOpcode::kTryTruncateFloat32ToUint64:
    case IrOpcode::kTryTruncateFloat64ToUint64:
    case IrOpcode::kInt32PairAdd:
    case IrOpcode::kInt32PairSub:
    case IrOpcode::kInt32PairMul:
    case IrOpcode::kWord32PairShl:
    case IrOpcode::kWord32PairShr:
    case IrOpcode::kWord32PairSar:
    case IrOpcode::kInt32AbsWithOverflow:
    case IrOpcode::kInt64AbsWithOverflow:
      if (ProjectionIndexOf(node->op()) == 0u) {
        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      } else {
        // Projection index must be 1 for the secondary output.
        DCHECK_EQ(1u, ProjectionIndexOf(node->op()));
        MarkAsUsed(value);
      }
      break;
    default:
      break;
  }
}
void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}
// Tracks the high-water mark of arguments pushed for any call in the
// function; used to size the outgoing argument area.
void InstructionSelector::UpdateMaxPushedArgumentCount(size_t count) {
  *max_pushed_argument_count_ = std::max(count, *max_pushed_argument_count_);
}
// Lowers a call node.  Steps: optionally save caller-saved registers, build a
// frame state descriptor if lazy deopt is possible, fill a CallBuffer with
// operands, append the exception-handler label, select the kArchCall* opcode
// from the descriptor kind, emit the call, then restore caller-saved
// registers.  `handler` is the exception handler block, or nullptr.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  OperandGenerator g(this);
  auto call_descriptor = CallDescriptorOf(node->op());
  if (call_descriptor->NeedsCallerSavedRegisters()) {
    SaveFPRegsMode mode = call_descriptor->NeedsCallerSavedFPRegisters()
                              ? kSaveFPRegs
                              : kDontSaveFPRegs;
    Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(mode)),
         g.NoOutput());
  }
  FrameStateDescriptor* frame_state_descriptor = nullptr;
  if (call_descriptor->NeedsFrameState()) {
    // The frame state is the input that follows all value inputs.
    frame_state_descriptor = GetFrameStateDescriptor(FrameState{
        node->InputAt(static_cast<int>(call_descriptor->InputCount()))});
  }
  CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
  CallDescriptor::Flags flags = call_descriptor->flags();
  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on some architectures it's probably better to use
  // the code object in a register if there are multiple uses of it.
  // Improve constant pool and the heuristics in the register allocator
  // for where to emit constants.
  CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
  InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
  EmitPrepareArguments(&buffer.pushed_nodes, call_descriptor, node);
  UpdateMaxPushedArgumentCount(buffer.pushed_nodes.size());
  // Pass label of exception handler block.
  if (handler) {
    DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));
  }
  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (call_descriptor->kind()) {
    case CallDescriptor::kCallAddress: {
      int misc_field = static_cast<int>(call_descriptor->ParameterCount());
#if ABI_USES_FUNCTION_DESCRIPTORS
      // Highest misc_field bit is used on AIX to indicate if a CFunction call
      // has function descriptor or not.
      STATIC_ASSERT(MiscField::kSize == kHasFunctionDescriptorBitShift + 1);
      if (!call_descriptor->NoFunctionDescriptor()) {
        misc_field |= 1 << kHasFunctionDescriptorBitShift;
      }
#endif
      opcode = kArchCallCFunction | MiscField::encode(misc_field);
      break;
    }
    case CallDescriptor::kCallCodeObject:
      opcode = EncodeCallDescriptorFlags(kArchCallCodeObject, flags);
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = EncodeCallDescriptorFlags(kArchCallJSFunction, flags);
      break;
#if V8_ENABLE_WEBASSEMBLY
    case CallDescriptor::kCallWasmCapiFunction:
    case CallDescriptor::kCallWasmFunction:
    case CallDescriptor::kCallWasmImportWrapper:
      opcode = EncodeCallDescriptorFlags(kArchCallWasmFunction, flags);
      break;
#endif // V8_ENABLE_WEBASSEMBLY
    case CallDescriptor::kCallBuiltinPointer:
      opcode = EncodeCallDescriptorFlags(kArchCallBuiltinPointer, flags);
      break;
  }
  // Emit the call instruction.
  size_t const output_count = buffer.outputs.size();
  auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
  Instruction* call_instr =
      Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
           &buffer.instruction_args.front());
  if (instruction_selection_failed()) return;
  call_instr->MarkAsCall();
  EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
  if (call_descriptor->NeedsCallerSavedRegisters()) {
    SaveFPRegsMode mode = call_descriptor->NeedsCallerSavedFPRegisters()
                              ? kSaveFPRegs
                              : kDontSaveFPRegs;
    Emit(
        kArchRestoreCallerRegisters | MiscField::encode(static_cast<int>(mode)),
        g.NoOutput());
  }
}
// Lowers a tail call.  Mirrors VisitCall but emits a kArchTailCall* opcode,
// never needs a frame state, and appends two immediates describing the stack
// layout after the caller's frame has been dismantled.
void InstructionSelector::VisitTailCall(Node* node) {
  OperandGenerator g(this);
  // The callee descriptor is the call's own descriptor; fetch it once instead
  // of calling CallDescriptorOf(node->op()) a second time.
  auto call_descriptor = CallDescriptorOf(node->op());
  CallDescriptor* caller = linkage()->GetIncomingDescriptor();
  const CallDescriptor* callee = call_descriptor;
  DCHECK(caller->CanTailCall(callee));
  const int stack_param_delta = callee->GetStackParameterDelta(caller);
  CallBuffer buffer(zone(), call_descriptor, nullptr);
  // Compute InstructionOperands for inputs and outputs.
  CallBufferFlags flags(kCallCodeImmediate | kCallTail);
  if (IsTailCallAddressImmediate()) {
    flags |= kCallAddressImmediate;
  }
  if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
    flags |= kCallFixedTargetRegister;
  }
  InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
  UpdateMaxPushedArgumentCount(stack_param_delta);
  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  InstructionOperandVector temps(zone());
  switch (call_descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      opcode = kArchTailCallCodeObject;
      break;
    case CallDescriptor::kCallAddress:
      DCHECK(!caller->IsJSFunctionCall());
      opcode = kArchTailCallAddress;
      break;
#if V8_ENABLE_WEBASSEMBLY
    case CallDescriptor::kCallWasmFunction:
      DCHECK(!caller->IsJSFunctionCall());
      opcode = kArchTailCallWasm;
      break;
#endif // V8_ENABLE_WEBASSEMBLY
    default:
      // Tail calls are only supported for the descriptor kinds above.
      UNREACHABLE();
  }
  opcode = EncodeCallDescriptorFlags(opcode, call_descriptor->flags());
  Emit(kArchPrepareTailCall, g.NoOutput());
  // Add an immediate operand that represents the offset to the first slot that
  // is unused with respect to the stack pointer that has been updated for the
  // tail call instruction. Backends that pad arguments can write the padding
  // value at this offset from the stack.
  const int optional_padding_offset =
      callee->GetOffsetToFirstUnusedStackSlot() - 1;
  buffer.instruction_args.push_back(g.TempImmediate(optional_padding_offset));
  const int first_unused_slot_offset =
      kReturnAddressStackSlotCount + stack_param_delta;
  buffer.instruction_args.push_back(g.TempImmediate(first_unused_slot_offset));
  // Emit the tailcall instruction.
  Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
       &buffer.instruction_args.front(), temps.size(),
       temps.empty() ? nullptr : &temps.front());
}
void InstructionSelector::VisitGoto(BasicBlock* target) {
  // jump to the next block.
  OperandGenerator g(this);
  Emit(kArchJmp, g.NoOutput(), g.Label(target));
}
// Lowers a Return.  Operand [0] is the pop count (immediate when constant);
// operands [1..] are the return values placed at their linkage locations.
void InstructionSelector::VisitReturn(Node* ret) {
  OperandGenerator g(this);
  const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
                              ? 1
                              : ret->op()->ValueInputCount();
  DCHECK_GE(input_count, 1);
  auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
  Node* pop_count = ret->InputAt(0);
  value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
                        pop_count->opcode() == IrOpcode::kInt64Constant)
                           ? g.UseImmediate(pop_count)
                           : g.UseRegister(pop_count);
  for (int i = 1; i < input_count; ++i) {
    value_locations[i] =
        g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
  }
  Emit(kArchRet, 0, nullptr, input_count, value_locations);
}
// Lowers a branch to a compare-against-zero with a branch continuation; the
// poisoning variant additionally masks speculative values on safety checks.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
    FlagsContinuation cont =
        FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
    VisitWordCompareZero(branch, branch->InputAt(0), &cont);
  } else {
    FlagsContinuation cont =
        FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
    VisitWordCompareZero(branch, branch->InputAt(0), &cont);
  }
}
// Deoptimizes when the condition is non-zero (kNotEqual continuation).
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  if (NeedsPoisoning(p.is_safety_check())) {
    FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
        kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
    VisitWordCompareZero(node, node->InputAt(0), &cont);
  } else {
    FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
        kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
    VisitWordCompareZero(node, node->InputAt(0), &cont);
  }
}
// Deoptimizes when the condition is zero (kEqual continuation).
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  if (NeedsPoisoning(p.is_safety_check())) {
    FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
        kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
    VisitWordCompareZero(node, node->InputAt(0), &cont);
  } else {
    FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
        kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
    VisitWordCompareZero(node, node->InputAt(0), &cont);
  }
}
// Lowers Select by comparing the condition (input 0) against zero; the
// continuation chooses between inputs 1 and 2.
void InstructionSelector::VisitSelect(Node* node) {
  Node* const condition = node->InputAt(0);
  FlagsContinuation cont = FlagsContinuation::ForSelect(
      kNotEqual, node, node->InputAt(1), node->InputAt(2));
  VisitWordCompareZero(node, condition, &cont);
}
// Lowers a dynamic map check: deoptimizes (through the DynamicCheckMaps
// builtin's calling convention) when the condition is zero, passing the map,
// feedback slot and handler as extra deopt arguments.
void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
  OperandGenerator g(this);
  DynamicCheckMapsWithDeoptUnlessNode n(node);
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  DynamicCheckMapsDescriptor descriptor;
  // Note: We use Operator::kNoDeopt here because this builtin does not lazy
  // deoptimize (which is the meaning of Operator::kNoDeopt), even though it can
  // eagerly deoptimize.
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoThrow);
  InstructionOperand dynamic_check_args[] = {
      g.UseLocation(n.map(), call_descriptor->GetInputLocation(1)),
      g.UseImmediate(n.slot()), g.UseImmediate(n.handler())};
  if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
    FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
        kEqual, p.kind(), p.reason(), p.feedback(), n.frame_state(),
        dynamic_check_args, 3);
    VisitWordCompareZero(node, n.condition(), &cont);
  } else {
    FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
        kEqual, p.kind(), p.reason(), p.feedback(), n.frame_state(),
        dynamic_check_args, 3);
    VisitWordCompareZero(node, n.condition(), &cont);
  }
}
// Traps when the condition is non-zero.
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));
  VisitWordCompareZero(node, node->InputAt(0), &cont);
}
// Traps when the condition is zero.
void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kEqual, trap_id, node->InputAt(1));
  VisitWordCompareZero(node, node->InputAt(0), &cont);
}
// Makes `node` an alias of its input: no instruction is emitted, the node is
// renamed to the input's virtual register.
void InstructionSelector::EmitIdentity(Node* node) {
  MarkAsUsed(node->InputAt(0));
  SetRename(node, node->InputAt(0));
}
// Emits an unconditional deoptimization with the given kind/reason/feedback
// and the frame state needed to rebuild the unoptimized frame.
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
                                          DeoptimizeReason reason,
                                          FeedbackSource const& feedback,
                                          FrameState frame_state) {
  InstructionOperandVector args(instruction_zone());
  AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
  Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
}
// Throw is represented as a block terminator.
void InstructionSelector::VisitThrow(Node* node) {
  OperandGenerator g(this);
  Emit(kArchThrowTerminator, g.NoOutput());
}
void InstructionSelector::VisitDebugBreak(Node* node) {
  OperandGenerator g(this);
  Emit(kArchDebugBreak, g.NoOutput());
}
// Unreachable code is materialized as a debug break so reaching it traps.
void InstructionSelector::VisitUnreachable(Node* node) {
  OperandGenerator g(this);
  Emit(kArchDebugBreak, g.NoOutput());
}
// A static assert whose input did not fold to true aborts compilation,
// printing the offending input node and the assert's source text.
void InstructionSelector::VisitStaticAssert(Node* node) {
  Node* asserted = node->InputAt(0);
  UnparkedScopeIfNeeded scope(broker_);
  AllowHandleDereference allow_handle_dereference;
  asserted->Print(4);
  FATAL(
      "Expected Turbofan static assert to hold, but got non-true input:\n %s",
      StaticAssertSourceOf(node->op()));
}
// DeadValue still needs a defining instruction for the register allocator;
// executing it is a bug, hence kArchDebugBreak.
void InstructionSelector::VisitDeadValue(Node* node) {
  OperandGenerator g(this);
  MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
  Emit(kArchDebugBreak, g.DefineAsConstant(node));
}
void InstructionSelector::VisitComment(Node* node) {
  OperandGenerator g(this);
  InstructionOperand operand(g.UseImmediate(node));
  Emit(kArchComment, 0, nullptr, 1, &operand);
}
// Pointer-sized addition: 64-bit add on 64-bit targets, 32-bit otherwise.
void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
#if V8_TARGET_ARCH_64_BIT
  VisitInt64Add(node);
#else // V8_TARGET_ARCH_64_BIT
  VisitInt32Add(node);
#endif // V8_TARGET_ARCH_64_BIT
}
// Retain emits a use of its input (without defining anything) to keep the
// input's live range alive.
void InstructionSelector::VisitRetain(Node* node) {
  OperandGenerator g(this);
  Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
}
// Conservatively reports whether `node` may produce a signaling NaN; float64
// add/sub/mul are known not to.
bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
  // TODO(jarin) Improve the heuristic here.
  switch (node->opcode()) {
    case IrOpcode::kFloat64Add:
    case IrOpcode::kFloat64Sub:
    case IrOpcode::kFloat64Mul:
      return false;
    default:
      return true;
  }
}
#if V8_TARGET_ARCH_64_BIT
// Returns true when `node` is known to leave the upper 32 bits of its 64-bit
// result zero.  Phi results are memoized in phi_states_ and computed
// optimistically over cycles; everything else defers to the backend hook.
bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node,
                                                    int recursion_depth) {
  // To compute whether a Node sets its upper 32 bits to zero, there are three
  // cases.
  // 1. Phi node, with a computed result already available in phi_states_:
  //    Read the value from phi_states_.
  // 2. Phi node, with no result available in phi_states_ yet:
  //    Recursively check its inputs, and store the result in phi_states_.
  // 3. Anything else:
  //    Call the architecture-specific ZeroExtendsWord32ToWord64NoPhis.
  // Limit recursion depth to avoid the possibility of stack overflow on very
  // large functions.
  const int kMaxRecursionDepth = 100;
  if (node->opcode() == IrOpcode::kPhi) {
    Upper32BitsState current = phi_states_[node->id()];
    if (current != Upper32BitsState::kNotYetChecked) {
      return current == Upper32BitsState::kUpperBitsGuaranteedZero;
    }
    // If further recursion is prevented, we can't make any assumptions about
    // the output of this phi node.
    if (recursion_depth >= kMaxRecursionDepth) {
      return false;
    }
    // Mark the current node so that we skip it if we recursively visit it
    // again. Or, said differently, we compute a largest fixed-point so we can
    // be optimistic when we hit cycles.
    phi_states_[node->id()] = Upper32BitsState::kUpperBitsGuaranteedZero;
    int input_count = node->op()->ValueInputCount();
    for (int i = 0; i < input_count; ++i) {
      Node* input = NodeProperties::GetValueInput(node, i);
      if (!ZeroExtendsWord32ToWord64(input, recursion_depth + 1)) {
        phi_states_[node->id()] = Upper32BitsState::kNoGuarantee;
        return false;
      }
    }
    return true;
  }
  return ZeroExtendsWord32ToWord64NoPhis(node);
}
#endif // V8_TARGET_ARCH_64_BIT
namespace {
// Recursively builds a FrameStateDescriptor chain for `state`, following
// outer frame states.  Unoptimized-function frames reserve one extra stack
// slot (the accumulator); JS-to-Wasm continuations get a specialized
// descriptor carrying the wasm signature.
FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone,
                                                      FrameState state) {
  DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
  DCHECK_EQ(FrameState::kFrameStateInputCount, state->InputCount());
  const FrameStateInfo& state_info = FrameStateInfoOf(state->op());
  int parameters = state_info.parameter_count();
  int locals = state_info.local_count();
  int stack = state_info.type() == FrameStateType::kUnoptimizedFunction ? 1 : 0;
  FrameStateDescriptor* outer_state = nullptr;
  if (state.has_outer_frame_state()) {
    outer_state =
        GetFrameStateDescriptorInternal(zone, state.outer_frame_state());
  }
#if V8_ENABLE_WEBASSEMBLY
  if (state_info.type() == FrameStateType::kJSToWasmBuiltinContinuation) {
    auto function_info = static_cast<const JSToWasmFrameStateFunctionInfo*>(
        state_info.function_info());
    return zone->New<JSToWasmFrameStateDescriptor>(
        zone, state_info.type(), state_info.bailout_id(),
        state_info.state_combine(), parameters, locals, stack,
        state_info.shared_info(), outer_state, function_info->signature());
  }
#endif // V8_ENABLE_WEBASSEMBLY
  return zone->New<FrameStateDescriptor>(
      zone, state_info.type(), state_info.bailout_id(),
      state_info.state_combine(), parameters, locals, stack,
      state_info.shared_info(), outer_state);
}
} // namespace
// Builds the descriptor for `state` and updates the function-wide maximum
// unoptimized frame height, which the code generator uses for stack checks.
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
    FrameState state) {
  auto* desc = GetFrameStateDescriptorInternal(instruction_zone(), state);
  *max_unoptimized_frame_height_ =
      std::max(*max_unoptimized_frame_height_,
               desc->total_conservative_frame_size_in_bytes());
  return desc;
}
#if V8_ENABLE_WEBASSEMBLY
// Canonicalizes a SIMD shuffle: extracts the 16 lane indices, lets the wasm
// helper normalize them (possibly requiring the inputs to be swapped), and
// turns single-input shuffles into swizzles.
void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
                                              bool* is_swizzle) {
  // Get raw shuffle indices.
  base::Memcpy(shuffle, S128ImmediateParameterOf(node->op()).data(),
               kSimd128Size);
  bool needs_swap;
  bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
                      GetVirtualRegister(node->InputAt(1));
  wasm::SimdShuffle::CanonicalizeShuffle(inputs_equal, shuffle, &needs_swap,
                                         is_swizzle);
  if (needs_swap) {
    SwapShuffleInputs(node);
  }
  // Duplicate the first input; for some shuffles on some architectures, it's
  // easiest to implement a swizzle as a shuffle so it might be used.
  if (*is_swizzle) {
    node->ReplaceInput(1, node->InputAt(0));
  }
}
// static
// Swaps the two shuffle inputs in place.
void InstructionSelector::SwapShuffleInputs(Node* node) {
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  node->ReplaceInput(0, input1);
  node->ReplaceInput(1, input0);
}
#endif // V8_ENABLE_WEBASSEMBLY
// Returns whether a safety check of the given kind requires load poisoning
// under the currently configured poisoning mitigation level.  (Note: this is
// a const member function, not a static one.)
bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return safety_check != IsSafetyCheck::kNoSafetyCheck;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
  }
  UNREACHABLE();
}
} // namespace compiler
} // namespace internal
} // namespace v8
|
// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "consensus/merkle.h"
#include "merkleblock.h"
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
#include "arith_uint256.h"
#include "version.h"
#include "random.h"
#include "test/test_lovebit.h"
#include <vector>
#include <boost/assign/list_of.hpp>
#include <boost/test/unit_test.hpp>
using namespace std;
// Test-only subclass exposing a way to corrupt the serialized tree so the
// tests can verify that tampering is detected.
class CPartialMerkleTreeTester : public CPartialMerkleTree
{
public:
    // flip one bit in one of the hashes - this should break the authentication
    void Damage() {
        unsigned int n = insecure_rand() % vHash.size();
        int bit = insecure_rand() % 256;
        *(vHash[n].begin() + (bit>>3)) ^= 1<<(bit&7);
    }
};
BOOST_FIXTURE_TEST_SUITE(pmt_tests, BasicTestingSetup)
// Round-trip test for CPartialMerkleTree: for various block sizes and random
// match subsets, build a partial tree, serialize/deserialize it, and check
// the extracted root and matched txids; then verify bit flips break it.
BOOST_AUTO_TEST_CASE(pmt_test1)
{
    seed_insecure_rand(false);
    static const unsigned int nTxCounts[] = {1, 4, 7, 17, 56, 100, 127, 256, 312, 513, 1000, 4095};
    // Derive the loop bound from the array so adding a count can't silently
    // leave it untested (was a hard-coded 12).
    for (size_t i = 0; i < sizeof(nTxCounts) / sizeof(nTxCounts[0]); i++) {
        unsigned int nTx = nTxCounts[i];
        // build a block with some dummy transactions
        CBlock block;
        for (unsigned int j=0; j<nTx; j++) {
            CMutableTransaction tx;
            tx.nLockTime = j; // actual transaction data doesn't matter; just make the nLockTime's unique
            block.vtx.push_back(CTransaction(tx));
        }
        // calculate actual merkle root and height
        uint256 merkleRoot1 = BlockMerkleRoot(block);
        std::vector<uint256> vTxid(nTx, uint256());
        for (unsigned int j=0; j<nTx; j++)
            vTxid[j] = block.vtx[j].GetHash();
        int nHeight = 1, nTx_ = nTx;
        while (nTx_ > 1) {
            nTx_ = (nTx_+1)/2;
            nHeight++;
        }
        // check with random subsets with inclusion chances 1, 1/2, 1/4, ..., 1/128
        for (int att = 1; att < 15; att++) {
            // build random subset of txid's
            std::vector<bool> vMatch(nTx, false);
            std::vector<uint256> vMatchTxid1;
            for (unsigned int j=0; j<nTx; j++) {
                bool fInclude = (insecure_rand() & ((1 << (att/2)) - 1)) == 0;
                vMatch[j] = fInclude;
                if (fInclude)
                    vMatchTxid1.push_back(vTxid[j]);
            }
            // build the partial merkle tree
            CPartialMerkleTree pmt1(vTxid, vMatch);
            // serialize
            CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
            ss << pmt1;
            // verify CPartialMerkleTree's size guarantees
            // (renamed from `n`, which shadowed nothing useful and collided
            // with other loop counters in earlier revisions)
            unsigned int nSizeBound = std::min<unsigned int>(nTx, 1 + vMatchTxid1.size()*nHeight);
            BOOST_CHECK(ss.size() <= 10 + (258*nSizeBound+7)/8);
            // deserialize into a tester copy
            CPartialMerkleTreeTester pmt2;
            ss >> pmt2;
            // extract merkle root and matched txids from copy
            std::vector<uint256> vMatchTxid2;
            uint256 merkleRoot2 = pmt2.ExtractMatches(vMatchTxid2);
            // check that it has the same merkle root as the original, and a valid one
            BOOST_CHECK(merkleRoot1 == merkleRoot2);
            BOOST_CHECK(!merkleRoot2.IsNull());
            // check that it contains the matched transactions (in the same order!)
            BOOST_CHECK(vMatchTxid1 == vMatchTxid2);
            // check that random bit flips break the authentication
            for (int j=0; j<4; j++) {
                CPartialMerkleTreeTester pmt3(pmt2);
                pmt3.Damage();
                std::vector<uint256> vMatchTxid3;
                uint256 merkleRoot3 = pmt3.ExtractMatches(vMatchTxid3);
                BOOST_CHECK(merkleRoot3 != merkleRoot1);
            }
        }
    }
}
// A tree whose last two txids are duplicated (merkle-tree duplication
// malleation) must be rejected: ExtractMatches should return the null hash.
BOOST_AUTO_TEST_CASE(pmt_malleability)
{
    std::vector<uint256> vTxid = boost::assign::list_of
        (ArithToUint256(1))(ArithToUint256(2))
        (ArithToUint256(3))(ArithToUint256(4))
        (ArithToUint256(5))(ArithToUint256(6))
        (ArithToUint256(7))(ArithToUint256(8))
        (ArithToUint256(9))(ArithToUint256(10))
        (ArithToUint256(9))(ArithToUint256(10));
    std::vector<bool> vMatch = boost::assign::list_of(false)(false)(false)(false)(false)(false)(false)(false)(false)(true)(true)(false);
    CPartialMerkleTree tree(vTxid, vMatch);
    // Extract into a separate output vector: previously vTxid itself was
    // passed (leaving vTxid2 unused and clobbering the input list).
    std::vector<uint256> vTxid2;
    BOOST_CHECK(tree.ExtractMatches(vTxid2).IsNull());
}
BOOST_AUTO_TEST_SUITE_END()
|
//===- DbiStreamBuilder.cpp - PDB Dbi Stream Creation -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/DebugInfo/PDB/Raw/DbiStreamBuilder.h"
#include "llvm/DebugInfo/CodeView/StreamWriter.h"
#include "llvm/DebugInfo/PDB/Raw/DbiStream.h"
#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Raw/RawError.h"
using namespace llvm;
using namespace llvm::codeview;
using namespace llvm::pdb;
// Initializes the builder with default header values; MachineType defaults
// to x86 and every setter below can override a field before build().
DbiStreamBuilder::DbiStreamBuilder(PDBFile &File)
    : File(File), Age(1), BuildNumber(0), PdbDllVersion(0), PdbDllRbld(0),
      Flags(0), MachineType(PDB_Machine::x86) {}
// Trivial setters for the DBI stream header fields; the stored values are
// copied into the header when build() runs.
void DbiStreamBuilder::setVersionHeader(PdbRaw_DbiVer V) { VerHeader = V; }
void DbiStreamBuilder::setAge(uint32_t A) { Age = A; }
void DbiStreamBuilder::setBuildNumber(uint16_t B) { BuildNumber = B; }
void DbiStreamBuilder::setPdbDllVersion(uint16_t V) { PdbDllVersion = V; }
void DbiStreamBuilder::setPdbDllRbld(uint16_t R) { PdbDllRbld = R; }
void DbiStreamBuilder::setFlags(uint16_t F) { Flags = F; }
void DbiStreamBuilder::setMachineType(PDB_Machine M) { MachineType = M; }
// Assemble a DbiStream for the target file from the values collected by the
// setters. Returns an error if the version header was never set, or if the
// underlying DBI stream cannot be mapped out of the PDB file.
Expected<std::unique_ptr<DbiStream>> DbiStreamBuilder::build() {
  // The version header is the only field with no usable default.
  if (!VerHeader.hasValue())
    return make_error<RawError>(raw_error_code::unspecified,
                                "Missing DBI Stream Version");
  // Map the DBI stream at its fixed stream index.
  auto DbiS = MappedBlockStream::createIndexedStream(StreamDBI, File);
  if (!DbiS)
    return DbiS.takeError();
  auto DS = std::move(*DbiS);
  // Allocate the header from the stream's own allocator so its lifetime is
  // tied to the stream object it ends up attached to.
  DbiStream::HeaderInfo *H =
      static_cast<DbiStream::HeaderInfo *>(DS->getAllocator().Allocate(
          sizeof(DbiStream::HeaderInfo),
          llvm::AlignOf<DbiStream::HeaderInfo>::Alignment));
  // Fields supplied by the setters.
  H->VersionHeader = *VerHeader;
  H->VersionSignature = -1;
  H->Age = Age;
  H->BuildNumber = BuildNumber;
  H->Flags = Flags;
  H->PdbDllRbld = PdbDllRbld;
  H->PdbDllVersion = PdbDllVersion;
  H->MachineType = static_cast<uint16_t>(MachineType);
  // This builder emits no substreams yet; all sizes are zero and all stream
  // indices are marked invalid.
  H->ECSubstreamSize = 0;
  H->FileInfoSize = 0;
  H->ModiSubstreamSize = 0;
  H->OptionalDbgHdrSize = 0;
  H->SecContrSubstreamSize = 0;
  H->SectionMapSize = 0;
  H->TypeServerSize = 0;
  H->SymRecordStreamIndex = DbiStream::InvalidStreamIndex;
  H->PublicSymbolStreamIndex = DbiStream::InvalidStreamIndex;
  H->MFCTypeServerIndex = DbiStream::InvalidStreamIndex;
  H->GlobalSymbolStreamIndex = DbiStream::InvalidStreamIndex;
  auto Dbi = llvm::make_unique<DbiStream>(File, std::move(DS));
  Dbi->Header = H;
  return std::move(Dbi);
}
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/rds.h"
#include <atomic>
#include <cassert>
#include <cstdio>
#include <map>
#include <mutex>
#include <vector>
#include <folly/Bits.h>
#include <folly/Hash.h>
#include <folly/portability/SysMman.h>
#include <folly/sorted_vector_types.h>
#include <folly/String.h>
#include <tbb/concurrent_hash_map.h>
#include "hphp/util/logger.h"
#include "hphp/util/maphuge.h"
#include "hphp/util/numa.h"
#include "hphp/util/rds-local.h"
#include "hphp/util/smalllocks.h"
#include "hphp/util/type-scan.h"
#include "hphp/runtime/base/rds-header.h"
#include "hphp/runtime/base/rds-symbol.h"
#include "hphp/runtime/vm/debug/debug.h"
#include "hphp/runtime/vm/jit/mcgen.h"
#include "hphp/runtime/vm/jit/mcgen-translate.h"
#include "hphp/runtime/vm/jit/vm-protect.h"
#include "hphp/runtime/vm/treadmill.h"
#include "hphp/runtime/vm/vm-regs.h"
namespace HPHP { namespace rds {
//////////////////////////////////////////////////////////////////////
namespace {
//////////////////////////////////////////////////////////////////////
using Guard = std::lock_guard<std::mutex>;
/*
* This mutex protects actually allocating from RDS (the above
* statics). It is ordered *after* the locks in s_linkTable.
*/
std::mutex s_allocMutex;
//////////////////////////////////////////////////////////////////////
// Adapter supplying tbb::concurrent_hash_map with the equality and hash
// operations for Symbol keys, delegating to the symbol_* helpers.
struct HashCompare {
  bool equal(const Symbol& k1, const Symbol& k2) const {
    return symbol_eq(k1, k2);
  }
  size_t hash(const Symbol& k) const {
    return symbol_hash(k);
  }
};
struct LinkEntry {
Handle handle;
uint32_t size;
};
using LinkTable = tbb::concurrent_hash_map<
Symbol,
LinkEntry,
HashCompare
>;
LinkTable s_linkTable;
struct RevLinkEntry {
uint32_t size;
Symbol sym;
};
using RevLinkTable = std::map<Handle,RevLinkEntry>;
RevLinkTable s_handleTable;
__thread std::atomic<bool> s_hasFullInit{false};
struct StoreRevLink : boost::static_visitor<bool> {
bool operator()(Profile) const { return false; }
template<typename T>
bool operator()(T) const { return true; }
};
//////////////////////////////////////////////////////////////////////
/*
* Space wasted by alignment is tracked in these maps. We don't bother with
* free lists for local RDS because we aren't sensitive to its layout or
* compactness.
*/
using FreeLists = folly::sorted_vector_map<unsigned,
std::deque<rds::Handle>>;
FreeLists s_normal_free_lists;
FreeLists s_persistent_free_lists;
#if RDS_FIXED_PERSISTENT_BASE
// Allocate 2M from low memory each time.
constexpr size_t kPersistentChunkSize = 1u << 20;
#endif
}
//////////////////////////////////////////////////////////////////////
namespace detail {
// Current allocation frontier for the non-persistent region.
size_t s_normal_frontier = sizeof(Header);
// Frontier for the "local" part of the persistent region (data not
// shared between threads, but not zero'd)---downward-growing.
size_t s_local_frontier = 0;
size_t s_local_base = 0;
#if !RDS_FIXED_PERSISTENT_BASE
uintptr_t s_persistent_base = 0;
size_t s_persistent_size = 0;
#else
// It is a constexpr equal to 0 defined in rds-inl.h
#endif
// Persistent region grows down from frontier towards limit, when it runs out of
// space, we can allocate another chunk and redefine the frontier and the limit,
// as guarded by s_allocMutex.
uintptr_t s_persistent_frontier = 0;
uintptr_t s_persistent_limit = 0;
size_t s_persistent_usage = 0;
AllocDescriptorList s_normal_alloc_descs;
AllocDescriptorList s_local_alloc_descs;
/*
* Pushing a value into tbb::concurrent_vector is a racy operation; there is
* period of time where the vector's size is increased (so iterators may see
* the new value) but we haven't written data to it.
*
* To avoid reading these uninitialized values, we keep separate size bounds
* on these vectors to use as secondary limits during iteration.
*/
std::atomic<size_t> s_normal_alloc_descs_size;
std::atomic<size_t> s_local_alloc_descs_size;
/*
* Round base up to align, which must be a power of two.
*/
size_t roundUp(size_t base, size_t align) {
  // align must be a power of two; round base up to the next multiple of it.
  assertx(folly::isPowTwo(align));
  auto const mask = align - 1;
  return (base + mask) & ~mask;
}
/*
* Add the given offset to the free list for its size.
*/
void addFreeBlock(FreeLists& lists, size_t where, size_t size) {
  // Zero-length blocks carry no usable space, so don't track them.
  if (size != 0) {
    lists[size].push_back(where);
  }
}
/*
* Try to find a tracked free block of a suitable size. If an oversized block is
* found instead, the remaining space before and/or after the return space is
* re-added to the appropriate free lists.
*/
// Search the free lists for a block that can satisfy `size` bytes at
// `align`. Scans buckets of size >= `size` in ascending order; when an
// oversized block is taken, the unused space before (alignment padding) and
// after the returned region is re-added to the appropriate free lists.
folly::Optional<Handle> findFreeBlock(FreeLists& lists, size_t size,
                                      size_t align) {
  for (auto it = lists.lower_bound(size); it != lists.end(); ++it) {
    auto const blockSize = it->first;
    for (auto list_it = it->second.begin();
         list_it != it->second.end();
         ++list_it) {
      auto const raw = static_cast<size_t>(*list_it);
      static_assert(sizeof(raw) > 4, "avoid 32-bit overflow");
      auto const end = raw + blockSize;
      // First aligned position inside the block; reject if the aligned
      // allocation would overrun the block's end.
      auto const handle = roundUp(raw, align);
      if (handle + size > end) continue;
      // Claim the block, then give back the leading alignment gap and the
      // trailing remainder.
      it->second.erase(list_it);
      auto const headerSize = handle - raw;
      addFreeBlock(lists, raw, headerSize);
      auto const footerSize = blockSize - size - headerSize;
      addFreeBlock(lists, handle + size, footerSize);
      return handle;
    }
  }
  return folly::none;
}
// Create a new chunk for use in persistent RDS, but don't add to
// 's_persistent_free_lists' yet.
// Carve a new zero-filled chunk out of low memory for the persistent region
// and adopt it as the new frontier/limit window. Without a fixed persistent
// base this runs exactly once; with one, any unused space in the previous
// window is first returned to the persistent free lists.
// Note: does NOT add the fresh chunk itself to 's_persistent_free_lists';
// it becomes available via the frontier in alloc().
NEVER_INLINE void addNewPersistentChunk(size_t size) {
  assertx(size > 0 && size < kMaxHandle && size % 4096 == 0);
  auto const raw = static_cast<char*>(lower_malloc(size));
  auto const addr = reinterpret_cast<uintptr_t>(raw);
  memset(raw, 0, size);
#if !RDS_FIXED_PERSISTENT_BASE
  // This is only called once in processInit() if we don't have a persistent
  // base.
  always_assert(s_persistent_base == 0);
  s_persistent_limit = addr;
  s_persistent_frontier = addr + size;
  // Base is derived so persistent handles fit in the 4G addressing scheme.
  s_persistent_base = s_persistent_frontier - size4g;
#else
  // NOTE(review): message text reads "failed to suitable address" — likely
  // meant "failed to get a suitable address"; left unchanged here.
  always_assert_flog(addr >= kMinPersistentHandle && addr < size4g,
                     "failed to suitable address for persistent RDS");
  assertx(s_persistent_frontier >= s_persistent_limit);
  // Return the untouched tail of the previous chunk to the free lists
  // before switching to the new one.
  if (s_persistent_frontier != s_persistent_limit) {
    addFreeBlock(s_persistent_free_lists,
                 ptrToHandle<Mode::Persistent>(s_persistent_limit),
                 s_persistent_frontier - s_persistent_limit);
  }
  s_persistent_limit = addr;
  s_persistent_frontier = addr + size;
#endif
}
// Allocate `numBytes` of RDS space in the requested mode. Caller must hold
// s_allocMutex (see allocUnlocked for the locking wrapper).
//  - Normal:     upward-growing, generation-counted region; a GenNumber is
//                placed immediately before the returned handle.
//  - Persistent: downward-growing shared region; may grow by chunks when a
//                fixed persistent base is configured.
//  - Local:      downward-growing per-thread region, not zero'd per request.
// Allocations with a type scanner are registered in the corresponding
// AllocDescriptorList so the GC can find them.
Handle alloc(Mode mode, size_t numBytes,
             size_t align, type_scan::Index tyIndex) {
  assertx(align <= 16);
  switch (mode) {
    case Mode::Normal: {
      // Reserve `prefix` bytes in front of the allocation for the
      // generation number, keeping the user data aligned.
      align = folly::nextPowTwo(std::max(align, alignof(GenNumber)));
      auto const prefix = roundUp(sizeof(GenNumber), align);
      auto const adjBytes = numBytes + prefix;
      always_assert(align <= adjBytes);
      // Try to recycle a previously freed block first.
      if (auto free = findFreeBlock(s_normal_free_lists, adjBytes, align)) {
        auto const begin = *free;
        // The gap between the block start and the GenNumber slot is unused;
        // return it to the free lists.
        addFreeBlock(s_normal_free_lists, begin, prefix - sizeof(GenNumber));
        auto const handle = begin + prefix;
        if (type_scan::hasScanner(tyIndex)) {
          // Size counter is published after the push so readers never see
          // an element they can't safely read (see comment on the decls).
          assertx(s_normal_alloc_descs_size.load(std::memory_order_acquire) ==
                  s_normal_alloc_descs.size());
          s_normal_alloc_descs.push_back(
            AllocDescriptor{Handle(handle), uint32_t(numBytes), tyIndex}
          );
          s_normal_alloc_descs_size.fetch_add(1, std::memory_order_acq_rel);
        }
        return handle;
      }
      // No free block: bump the frontier. Alignment slack at the old
      // frontier is recorded as a free block rather than wasted.
      auto const oldFrontier = s_normal_frontier;
      s_normal_frontier = roundUp(s_normal_frontier, align);
      addFreeBlock(s_normal_free_lists, oldFrontier,
                   s_normal_frontier - oldFrontier);
      s_normal_frontier += adjBytes;
      // In debug builds poison the new space to catch missed gen checks.
      if (debug && !jit::VMProtect::is_protected) {
        memset(
          (char*)(tl_base) + oldFrontier,
          kRDSTrashFill,
          s_normal_frontier - oldFrontier
        );
      }
      // Normal grows up, Local grows down; they must not collide.
      always_assert_flog(
        s_normal_frontier < s_local_frontier,
        "Ran out of RDS space (mode=Normal)"
      );
      auto const begin = s_normal_frontier - adjBytes;
      addFreeBlock(s_normal_free_lists, begin, prefix - sizeof(GenNumber));
      auto const handle = begin + prefix;
      if (type_scan::hasScanner(tyIndex)) {
        assertx(s_normal_alloc_descs_size.load(std::memory_order_acquire) ==
                s_normal_alloc_descs.size());
        s_normal_alloc_descs.push_back(
          AllocDescriptor{Handle(handle), uint32_t(numBytes), tyIndex}
        );
        s_normal_alloc_descs_size.fetch_add(1, std::memory_order_acq_rel);
      }
      return handle;
    }
    case Mode::Persistent: {
      align = folly::nextPowTwo(align);
      always_assert(align <= numBytes);
      s_persistent_usage += numBytes;
      if (auto free = findFreeBlock(s_persistent_free_lists, numBytes, align)) {
        return *free;
      }
      // Bump downward from the frontier, rounding down to `align`.
      auto const newFrontier =
        (s_persistent_frontier - numBytes) & ~(align - 1);
      if (newFrontier >= s_persistent_limit) {
        s_persistent_frontier = newFrontier;
        return ptrToHandle<Mode::Persistent>(newFrontier);
      }
#if RDS_FIXED_PERSISTENT_BASE
      // Allocate on demand, add kPersistentChunkSize each time.
      assertx(numBytes <= kPersistentChunkSize);
      addNewPersistentChunk(kPersistentChunkSize);
      return alloc(mode, numBytes, align, tyIndex); // retry after a new chunk
#else
      // We reserved plenty of space in s_persistent_free_lists in the beginning
      // of the process, but maybe it is time to increase the size in the
      // config.
      always_assert_flog(
        false,
        "Ran out of RDS space (mode=Persistent)"
      );
#endif
    }
    case Mode::Local: {
      align = folly::nextPowTwo(align);
      always_assert(align <= numBytes);
      // Local grows downward from s_local_base toward the normal frontier.
      auto& frontier = s_local_frontier;
      frontier -= numBytes;
      frontier &= ~(align - 1);
      always_assert_flog(
        frontier >= s_normal_frontier,
        "Ran out of RDS space (mode=Local)"
      );
      if (type_scan::hasScanner(tyIndex)) {
        assertx(s_local_alloc_descs_size.load(std::memory_order_acquire) ==
                s_local_alloc_descs.size());
        s_local_alloc_descs.push_back(
          AllocDescriptor{Handle(frontier), uint32_t(numBytes), tyIndex}
        );
        s_local_alloc_descs_size.fetch_add(1, std::memory_order_acq_rel);
      }
      return frontier;
    }
    default:
      not_reached();
  }
}
// Locking wrapper around alloc(): acquires the RDS allocation mutex for the
// duration of the allocation, then delegates.
Handle allocUnlocked(Mode mode, size_t numBytes,
                     size_t align, type_scan::Index tyIndex) {
  Guard lock(s_allocMutex);
  auto const result = alloc(mode, numBytes, align, tyIndex);
  return result;
}
// Bind `key` to an RDS handle, allocating one if it isn't bound yet.
// Uses a double-checked lookup: a lock-free find first, then re-check under
// s_allocMutex before allocating, so concurrent binders agree on one handle.
Handle bindImpl(Symbol key, Mode mode, size_t sizeBytes,
                size_t align, type_scan::Index tyIndex) {
  LinkTable::const_accessor acc;
  if (s_linkTable.find(acc, key)) return acc->second.handle;
  Guard g(s_allocMutex);
  // Another thread may have bound the symbol while we waited for the lock.
  if (s_linkTable.find(acc, key)) return acc->second.handle;
  auto const handle = alloc(mode, sizeBytes, align, tyIndex);
  recordRds(handle, sizeBytes, key);
  LinkTable::const_accessor insert_acc;
  // insert_acc is held until after s_handleTable is updated
  if (!s_linkTable.insert(
      insert_acc,
      LinkTable::value_type(key, {handle, safe_cast<uint32_t>(sizeBytes)}))) {
    // Can't fail: we hold s_allocMutex and just confirmed the key is absent.
    always_assert(0);
  }
  // Only symbol kinds accepted by StoreRevLink get a reverse
  // (handle -> symbol) mapping; Profile symbols are excluded.
  if (boost::apply_visitor(StoreRevLink(), key)) {
    s_handleTable.emplace(handle, RevLinkEntry {
      safe_cast<uint32_t>(sizeBytes), key
    });
  }
  return handle;
}
// Return the handle already bound to `key`, or kUninitHandle when the
// symbol has no binding yet. Never allocates.
Handle attachImpl(Symbol key) {
  LinkTable::const_accessor it;
  if (!s_linkTable.find(it, key)) {
    return kUninitHandle;
  }
  return it->second.handle;
}
// Lazily bind a Link's atomic handle, coordinating racing threads without a
// lock on the fast path. Protocol: the first thread to CAS the handle from
// kUninitHandle to kBeingBound performs the allocation; losers either spin
// on the futex (after upgrading the state to kBeingBoundWithWaiters) or
// observe the final handle. `init_val`, when given, seeds persistent
// allocations only.
NEVER_INLINE
void bindOnLinkImpl(std::atomic<Handle>& handle,
                    Symbol sym, Mode mode, size_t size, size_t align,
                    type_scan::Index tsi, const void* init_val) {
  Handle c = kUninitHandle;
  if (handle.compare_exchange_strong(c, kBeingBound,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed)) {
    // we flipped it from kUninitHandle, so we get to fill in the value.
    auto const h = allocUnlocked(mode, size, align, tsi);
    recordRds(h, size, sym);
    if (init_val != nullptr && isPersistentHandle(h)) {
      memcpy(handleToPtr<void, Mode::Persistent>(h), init_val, size);
    }
    // Publish the real handle; if any thread registered as a waiter in the
    // meantime, wake them all.
    if (handle.exchange(h, std::memory_order_relaxed) ==
        kBeingBoundWithWaiters) {
      futex_wake(&handle, INT_MAX);
    }
    return;
  }
  // Someone else beat us to it, so wait until they've filled it in.
  if (c == kBeingBound) {
    handle.compare_exchange_strong(c, kBeingBoundWithWaiters,
                                   std::memory_order_relaxed,
                                   std::memory_order_relaxed);
  }
  while (handle.load(std::memory_order_relaxed) == kBeingBoundWithWaiters) {
    futex_wait(&handle, kBeingBoundWithWaiters);
  }
  assertx(isHandleBound(handle.load(std::memory_order_relaxed)));
}
}
// Remove both directions of the symbol <-> handle mapping under the
// allocation lock. The RDS space itself is not reclaimed here.
void unbind(Symbol key, Handle handle) {
  Guard lock(s_allocMutex);
  s_linkTable.erase(key);
  s_handleTable.erase(handle);
}
using namespace detail;
// Invoke `fun` on every bound symbol with its handle and size, under the
// allocation lock so the table can't be mutated mid-iteration.
void visitSymbols(std::function<void(const Symbol&,Handle,uint32_t)> fun) {
  Guard g(s_allocMutex);
  // make sure that find/count don't interfere with iteration.
  s_linkTable.rehash();
  // Iterate by const reference: each element is a pair holding a Symbol
  // (a variant), and the old by-value loop copied it on every iteration.
  for (const auto& it : s_linkTable) {
    fun(it.first, it.second.handle, it.second.size);
  }
}
//////////////////////////////////////////////////////////////////////
__thread void* tl_base = nullptr;
rds::Link<bool, Mode::Persistent> s_persistentTrue;
// All threads tl_bases are kept in a set, to allow iterating Local
// and Normal RDS sections across threads.
std::mutex s_tlBaseListLock;
std::vector<void*> s_tlBaseList;
//////////////////////////////////////////////////////////////////////
static size_t s_next_bit;
static size_t s_bits_to_go;
//////////////////////////////////////////////////////////////////////
// One-time process-wide RDS setup: size and position the local region,
// create the first persistent chunk, and bind the always-true persistent
// flag. Called lazily from threadInit() of the first thread.
void processInit() {
  assertx(!s_local_base);
  if (RuntimeOption::EvalRDSSize > 1u << 30) {
    // The encoding of RDS handles require that the normal and local regions
    // together be smaller than 1G.
    RuntimeOption::EvalRDSSize = 1u << 30;
  }
  // Local region occupies the top quarter boundary: it grows downward from
  // s_local_base while the normal region grows upward toward it.
  s_local_base = RuntimeOption::EvalRDSSize * 3 / 4;
  s_local_frontier = s_local_base;
#if RDS_FIXED_PERSISTENT_BASE
  auto constexpr allocSize = kPersistentChunkSize;
#else
  auto const allocSize = RuntimeOption::EvalRDSSize / 4;
#endif
  // Fixed: was terminated with a comma operator (`...),`), which silently
  // fused this call with the next statement.
  addNewPersistentChunk(allocSize);
  s_persistentTrue.bind(Mode::Persistent, LinkID{"RDSTrue"});
  *s_persistentTrue = true;
  local::RDSInit();
}
// Per-request reset of this thread's RDS. Zeroes the header and advances the
// generation counter; normal-section data is invalidated by the gen bump
// rather than by clearing it, except when the counter wraps (or in debug).
void requestInit() {
  assertx(tl_base);
  auto gen = header()->currentGen;
  memset(tl_base, 0, sizeof(Header));
  if (debug) {
    // Trash the normal section in debug mode, so that we can catch errors with
    // not checking the gen number quickly.
    memset(
      static_cast<char*>(tl_base) + sizeof(Header),
      kRDSTrashFill,
      s_normal_frontier - sizeof(Header)
    );
    gen = 1;
  } else if (++gen == kInvalidGenNumber) {
    // If the current gen number has wrapped around back to the "invalid"
    // number, memset the entire normal section. Once the current gen number
    // wraps, it becomes ambiguous whether any given gen number is up to date.
    memset(
      static_cast<char*>(tl_base) + sizeof(Header),
      kInvalidGenNumber,
      s_normal_frontier - sizeof(Header)
    );
    // Skip past the invalid number to the first valid generation.
    ++gen;
  }
  header()->currentGen = gen;
}
// Per-request teardown: intentionally a no-op. The next request's
// requestInit() resets/invalidates the region instead.
void requestExit() {
  // Don't bother running the dtor ...
}
// Release the physical pages behind this thread's RDS back to the kernel
// (contents will read back as zero if touched again). The local region is
// only released when no retranslate-all is active, and the tail that RDS
// locals currently occupy is kept, rounded out to page granularity.
void flush() {
  if (madvise(tl_base, s_normal_frontier, MADV_DONTNEED) == -1) {
    Logger::Warning("RDS madvise failure: %s\n",
                    folly::errnoStr(errno).c_str());
  }
  if (jit::mcgen::retranslateAllEnabled() &&
      !jit::mcgen::retranslateAllPending()) {
    // Page-align the start of the released range downward...
    size_t offset = s_local_frontier & ~0xfff;
    // ...and round the protected (in-use local) size up to a page.
    size_t protectedSpace = local::detail::s_usedbytes +
                            (-local::detail::s_usedbytes & 0xfff);
    if (madvise(static_cast<char*>(tl_base) + offset,
                s_local_base - protectedSpace - offset,
                MADV_DONTNEED)) {
      Logger::Warning("RDS local madvise failure: %s\n",
                      folly::errnoStr(errno).c_str());
    }
  }
}
/* RDS Layout:
* +-------------+ <-- tl_base
* | Header |
* +-------------+
* | |
* | Normal | growing higher
* | region | vvv
* | |
* +-------------+ <-- tl_base + s_normal_frontier
* | \ \ \ \ \ \ |
* +-------------+ <-- tl_base + s_local_frontier
* | |
* | Local | ^^^
* | region | growing lower
* | |
* +-------------+ <-- tl_base + s_local_base
* | \ \ \ \ \ \ |
* +-------------+ higher addresses
*
* +-------------+ <--- s_persistent_base
* | |
* | Persistent | not necessarily contiguous when RDS_FIXED_PERSISTENT_BASE
* | region |
* | |
* +-------------+
*/
size_t usedBytes() {
return s_normal_frontier;
}
size_t usedLocalBytes() {
return s_local_base - s_local_frontier;
}
size_t usedPersistentBytes() {
return s_persistent_usage;
}
folly::Range<const char*> normalSection() {
return {(const char*)tl_base, usedBytes()};
}
folly::Range<const char*> localSection() {
return {(const char*)tl_base + s_local_frontier, usedLocalBytes()};
}
GenNumber currentGenNumber() {
return header()->currentGen;
}
Handle currentGenNumberHandle() {
return offsetof(Header, currentGen);
}
constexpr size_t kAllocBitNumBytes = 8;
// Hand out a single bit in the normal RDS section, for use with
// testAndSetBit(). Bits are carved out of kAllocBitNumBytes-byte blocks;
// a new block is allocated whenever the current one is exhausted.
size_t allocBit() {
  Guard g(s_allocMutex);
  if (s_bits_to_go == 0) {
    // Current block exhausted: allocate a fresh, scanner-registered block.
    auto const handle = detail::alloc(
      Mode::Normal,
      kAllocBitNumBytes,
      kAllocBitNumBytes,
      type_scan::getIndexForScan<unsigned char[kAllocBitNumBytes]>()
    );
    // Bit indices are byte-handle * CHAR_BIT, so the block boundary can be
    // recovered from a bit index (see testAndSetBit).
    s_next_bit = handle * CHAR_BIT;
    s_bits_to_go = kAllocBitNumBytes * CHAR_BIT;
    recordRds(handle, kAllocBitNumBytes, "Unknown", "bits");
  }
  s_bits_to_go--;
  return s_next_bit++;
}
// Set the bit previously returned by allocBit() and report whether it was
// already set this generation. The block containing the bit is lazily
// zero-initialized on first touch in a request.
bool testAndSetBit(size_t bit) {
  // Recover the byte handle and the bit's position within that byte.
  size_t block = bit / CHAR_BIT;
  unsigned char mask = 1 << (bit % CHAR_BIT);
  // Round down to the start of the kAllocBitNumBytes-aligned block; the
  // block shares one generation/init state.
  Handle handle = block & ~(kAllocBitNumBytes - 1);
  if (!isHandleInit(handle, NormalTag{})) {
    auto ptr = handleToPtr<unsigned char, Mode::Normal>(handle);
    memset(ptr, 0, kAllocBitNumBytes);
    initHandle(handle);
  }
  auto& ref = handleToRef<unsigned char, Mode::Normal>(block);
  bool ret = ref & mask;
  ref |= mask;
  return ret;
}
// A handle is valid when it falls in the persistent range, in the allocated
// part of the normal section, or in the allocated part of the local section.
bool isValidHandle(Handle handle) {
  if (handle >= kMinPersistentHandle) return true;
  auto const inNormal =
    handle >= sizeof(Header) && handle < s_normal_frontier;
  auto const inLocal =
    handle >= s_local_frontier && handle < s_local_base;
  return inNormal || inLocal;
}
// Per-thread RDS setup: maps this thread's normal+local region, registers
// it (optionally) in the global base list so other threads can iterate it,
// and seeds the generation counter. Triggers processInit() for the first
// thread. `shouldRegister` is false for threads that must stay invisible to
// cross-thread iteration (they also skip RDS-local init).
void threadInit(bool shouldRegister) {
  if (!s_local_base) {
    processInit();
  }
  assertx(tl_base == nullptr);
  // Anonymous zero-filled mapping covering Header + normal + local regions.
  tl_base = mmap(nullptr, s_local_base, PROT_READ | PROT_WRITE,
                 MAP_ANON | MAP_PRIVATE, -1, 0);
  always_assert_flog(
    tl_base != MAP_FAILED,
    "Failed to mmap RDS region. errno = {}",
    folly::errnoStr(errno).c_str()
  );
  numa_bind_to(tl_base, s_local_base, s_numaNode);
#ifdef NDEBUG
  // A huge-page RDS is incompatible with VMProtect in vm-regs.cpp
  if (RuntimeOption::EvalMapTgtCacheHuge) {
    hintHuge(tl_base, s_local_base);
  }
#endif
  if (shouldRegister) {
    Guard g(s_tlBaseListLock);
    assertx(std::find(begin(s_tlBaseList), end(s_tlBaseList), tl_base) ==
            end(s_tlBaseList));
    s_tlBaseList.push_back(tl_base);
  }
  if (RuntimeOption::EvalPerfDataMap) {
    Debug::DebugInfo::recordDataMap(
      tl_base,
      (char*)tl_base + s_local_base,
      "rds");
  }
  header()->currentGen = 1;
  if (shouldRegister) {
    local::init();
    // Full init is only published after RDS-locals are ready.
    s_hasFullInit.store(true, std::memory_order_release);
  }
}
// Per-thread RDS teardown, mirroring threadInit(). For registered threads
// the actual munmap is deferred through the treadmill because other
// requests may still be reading this thread's section via s_tlBaseList.
void threadExit(bool shouldUnregister) {
  if (shouldUnregister) {
    s_hasFullInit.store(false, std::memory_order_release);
    local::fini(true);
    Guard g(s_tlBaseListLock);
    auto it = std::find(begin(s_tlBaseList), end(s_tlBaseList), tl_base);
    if (it != end(s_tlBaseList)) {
      s_tlBaseList.erase(it);
    }
  }
  if (RuntimeOption::EvalPerfDataMap) {
    // "-rds" marks removal of the mapping recorded in threadInit().
    Debug::DebugInfo::recordDataMap(
      tl_base,
      (char*)tl_base + s_local_base,
      "-rds");
  }
  // Capture the base by value: tl_base is thread-local and the deferred
  // unmap may run on another thread.
  auto const base = tl_base;
  auto do_unmap = [base] {
    munmap(base, s_local_base);
  };
  // Other requests may be reading from this rds section via the s_tlBaseList.
  // We just removed ourself from the list now, but defer the unmap until after
  // any outstanding requests have completed.
  if (shouldUnregister) {
    Treadmill::enqueue(std::move(do_unmap));
  } else {
    do_unmap();
  }
}
bool isFullyInitialized() {
return s_hasFullInit.load(std::memory_order_acquire);
}
void recordRds(Handle h, size_t size,
folly::StringPiece type, folly::StringPiece msg) {
if (RuntimeOption::EvalPerfDataMap) {
if (isNormalHandle(h)) {
h = genNumberHandleFrom(h);
size += sizeof(GenNumber);
}
Debug::DebugInfo::recordDataMap(
(char*)(intptr_t)h,
(char*)(intptr_t)h + size,
folly::sformat("rds+{}-{}", type, msg));
}
}
void recordRds(Handle h, size_t size, const Symbol& sym) {
if (RuntimeOption::EvalPerfDataMap) {
recordRds(h, size, symbol_kind(sym), symbol_rep(sym));
}
}
std::vector<void*> allTLBases() {
Guard g(s_tlBaseListLock);
return s_tlBaseList;
}
// Map a handle back to the symbol it was bound to, using the reverse table
// populated by bindImpl(). Returns none when no entry qualifies.
// NOTE(review): lower_bound(handle) yields the first entry with key >=
// handle, so an entry *containing* handle (key < handle) is skipped, and
// for key >= handle the `key + size < handle` rejection can never fire
// (absent overflow). This looks like it assumes callers only pass
// block-start handles — confirm intended semantics.
folly::Optional<Symbol> reverseLink(Handle handle) {
  Guard g(s_allocMutex);
  auto const it = s_handleTable.lower_bound(handle);
  if (it == s_handleTable.end()) return folly::none;
  if (it->first + it->second.size < handle) return folly::none;
  return it->second.sym;
}
namespace {
local::RegisterConfig s_rdsLocalConfigRegistration({
.rdsInitFunc =
[] (size_t size) -> uint32_t {
return rds::detail::allocUnlocked(rds::Mode::Local,
std::max(size, 16UL), 16U,
type_scan::kIndexUnknown);
},
.initFunc =
[](size_t size, uint32_t handle) -> void* {
if (rds::tl_base) {
return rds::handleToPtr<void, rds::Mode::Local>(handle);
}
return local_malloc(size);
},
.finiFunc =
[](void* ptr) -> void{
local_free(ptr);
},
.inRdsFunc =
[](void* ptr, size_t size) -> bool {
return tl_base &&
std::less_equal<void>()(localSection().cbegin(), ptr)
&& std::less_equal<void>()(
(const char*)ptr
+ size, localSection().cend());
},
.initRequestEventHandler =
[](RequestEventHandler* h) -> void {
h->setInited(true);
// This registration makes sure obj->requestShutdown() will be called.
// Do it before calling requestInit() so that obj is reachable to the
// GC no matter what the callback does.
auto index = g_context->registerRequestEventHandler(h);
SCOPE_FAIL {
h->setInited(false);
g_context->unregisterRequestEventHandler(h, index);
};
h->requestInit();
}
});
}
//////////////////////////////////////////////////////////////////////
}}
|
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2013-2014 The Dogecoin developers
// Copyright (c) 2014 The Inutoshi developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "checkpoints.h"
#include "main.h"
#include "uint256.h"
#include <stdint.h>
#include <boost/assign/list_of.hpp> // for 'map_list_of()'
#include <boost/foreach.hpp>
namespace Checkpoints
{
typedef std::map<int, uint256> MapCheckpoints;
// How many times we expect transactions after the last checkpoint to
// be slower. This number is a compromise, as it can't be accurate for
// every system. When reindexing from a fast disk with a slow CPU, it
// can be up to 20, while when downloading from a slow network with a
// fast multicore CPU, it won't be much higher than 1.
static const double SIGCHECK_VERIFICATION_FACTOR = 5.0;
struct CCheckpointData {
const MapCheckpoints *mapCheckpoints;
int64_t nTimeLastCheckpoint;
int64_t nTransactionsLastCheckpoint;
double fTransactionsPerDay;
};
bool fEnabled = true;
// What makes a good checkpoint block?
// + Is surrounded by blocks with reasonable timestamps
// (no blocks before with a timestamp after, none after with
// timestamp before)
// + Contains no strange transactions
static MapCheckpoints mapCheckpoints =
boost::assign::map_list_of
( 0, uint256("0x1a91e3dace36e2be3bf030a65679fe821aa1d6ef92e7c9902eb318182c355691"))
( 42279, uint256("0x8444c3ef39a46222e87584ef956ad2c9ef401578bd8b51e8e4b9a86ec3134d3a"))
( 42400, uint256("0x557bb7c17ed9e6d4a6f9361cfddf7c1fc0bdc394af7019167442b41f507252b4"))
( 104679, uint256("0x35eb87ae90d44b98898fec8c39577b76cb1eb08e1261cfc10706c8ce9a1d01cf"))
( 128370, uint256("0x3f9265c94cab7dc3bd6a2ad2fb26c8845cb41cff437e0a75ae006997b4974be6"))
( 145000, uint256("0xcc47cae70d7c5c92828d3214a266331dde59087d4a39071fa76ddfff9b7bde72"))
( 165393, uint256("0x7154efb4009e18c1c6a6a79fc6015f48502bcd0a1edd9c20e44cd7cbbe2eeef1"))
( 186774, uint256("0x3c712c49b34a5f34d4b963750d6ba02b73e8a938d2ee415dcda141d89f5cb23a"))
( 199992, uint256("0x3408ff829b7104eebaf61fd2ba2203ef2a43af38b95b353e992ef48f00ebb190"))
( 225000, uint256("be148d9c5eab4a33392a6367198796784479720d06bfdd07bd547fe934eea15a"))
( 250000, uint256("0e4bcfe8d970979f7e30e2809ab51908d435677998cf759169407824d4f36460"))
( 270639, uint256("c587a36dd4f60725b9dd01d99694799bef111fc584d659f6756ab06d2a90d911"))
( 299742, uint256("1cc89c0c8a58046bf0222fe131c099852bd9af25a80e07922918ef5fb39d6742"))
( 323141, uint256("60c9f919f9b271add6ef5671e9538bad296d79f7fdc6487ba702bf2ba131d31d"))
( 339202, uint256("8c29048df5ae9df38a67ea9470fdd404d281a3a5c6f33080cd5bf14aa496ab03"))
( 350000, uint256("2bdcba23a47049e69c4fec4c425462e30f3d21d25223bde0ed36be4ea59a7075"))
( 370005, uint256("7be5af2c5bdcb79047dcd691ef613b82d4f1c20835677daed936de37a4782e15"))
( 371337, uint256("60323982f9c5ff1b5a954eac9dc1269352835f47c2c5222691d80f0d50dcf053"))
( 400002, uint256("a5021d69a83f39aef10f3f24f932068d6ff322c654d20562def3fac5703ce3aa"))
;
static const CCheckpointData data = {
&mapCheckpoints,
1412259032, // * UNIX timestamp of last checkpoint block
12371456, // * total number of transactions between genesis and last checkpoint
// (the tx=... number in the SetBestChain debug.log lines)
29000.0 // * estimated number of transactions per day after checkpoint
};
static MapCheckpoints mapCheckpointsTestnet =
boost::assign::map_list_of
( 0, uint256("0x"))
;
static const CCheckpointData dataTestnet = {
&mapCheckpointsTestnet,
1369685559,
37581,
300
};
static MapCheckpoints mapCheckpointsRegtest =
boost::assign::map_list_of
( 0, uint256("3d2160a3b5dc4a9d62e7e66a295f70313ac808440ef7400d6c0772171ce973a5"))
;
static const CCheckpointData dataRegtest = {
&mapCheckpointsRegtest,
0,
0,
0
};
// Select the checkpoint data set matching the active network.
const CCheckpointData &Checkpoints() {
    switch (Params().NetworkID()) {
    case CChainParams::TESTNET:
        return dataTestnet;
    case CChainParams::MAIN:
        return data;
    default:
        return dataRegtest;
    }
}
bool CheckBlock(int nHeight, const uint256& hash)
{
if (!fEnabled)
return true;
const MapCheckpoints& checkpoints = *Checkpoints().mapCheckpoints;
MapCheckpoints::const_iterator i = checkpoints.find(nHeight);
if (i == checkpoints.end()) return true;
return hash == i->second;
}
// Guess how far we are in the verification process at the given block index
// Estimate verification progress in [0,1] at `pindex`, weighting
// transactions after the last checkpoint by SIGCHECK_VERIFICATION_FACTOR
// when signature checks are enabled. Transactions expected between the last
// checkpoint and "now" are extrapolated from fTransactionsPerDay.
double GuessVerificationProgress(CBlockIndex *pindex, bool fSigchecks) {
    if (pindex==NULL)
        return 0.0;
    int64_t nNow = time(NULL);
    double fSigcheckVerificationFactor = fSigchecks ? SIGCHECK_VERIFICATION_FACTOR : 1.0;
    double fWorkBefore = 0.0; // Amount of work done before pindex
    double fWorkAfter = 0.0;  // Amount of work left after pindex (estimated)
    // Work is defined as: 1.0 per transaction before the last checkpoint, and
    // fSigcheckVerificationFactor per transaction after.
    const CCheckpointData &data = Checkpoints();
    if (pindex->nChainTx <= data.nTransactionsLastCheckpoint) {
        // Still before the checkpoint: remaining work is the cheap part up to
        // the checkpoint plus the extrapolated expensive part after it.
        double nCheapBefore = pindex->nChainTx;
        double nCheapAfter = data.nTransactionsLastCheckpoint - pindex->nChainTx;
        double nExpensiveAfter = (nNow - data.nTimeLastCheckpoint)/86400.0*data.fTransactionsPerDay;
        fWorkBefore = nCheapBefore;
        fWorkAfter = nCheapAfter + nExpensiveAfter*fSigcheckVerificationFactor;
    } else {
        // Past the checkpoint: everything beyond it counts at the expensive
        // rate, both done and remaining.
        double nCheapBefore = data.nTransactionsLastCheckpoint;
        double nExpensiveBefore = pindex->nChainTx - data.nTransactionsLastCheckpoint;
        double nExpensiveAfter = (nNow - pindex->nTime)/86400.0*data.fTransactionsPerDay;
        fWorkBefore = nCheapBefore + nExpensiveBefore*fSigcheckVerificationFactor;
        fWorkAfter = nExpensiveAfter*fSigcheckVerificationFactor;
    }
    return fWorkBefore / (fWorkBefore + fWorkAfter);
}
int GetTotalBlocksEstimate()
{
if (!fEnabled)
return 0;
const MapCheckpoints& checkpoints = *Checkpoints().mapCheckpoints;
return checkpoints.rbegin()->first;
}
// Walk the checkpoints from highest height downward and return the block
// index of the first checkpoint hash present in mapBlockIndex, i.e. the most
// recent checkpoint this node actually has. NULL when disabled or none found.
CBlockIndex* GetLastCheckpoint(const std::map<uint256, CBlockIndex*>& mapBlockIndex)
{
    if (!fEnabled)
        return NULL;
    const MapCheckpoints& checkpoints = *Checkpoints().mapCheckpoints;
    BOOST_REVERSE_FOREACH(const MapCheckpoints::value_type& i, checkpoints)
    {
        const uint256& hash = i.second;
        std::map<uint256, CBlockIndex*>::const_iterator t = mapBlockIndex.find(hash);
        if (t != mapBlockIndex.end())
            return t->second;
    }
    return NULL;
}
}
|
/**
@author Shin'ichiro Nakaoka
*/
#include "VRMLToSGConverter.h"
#include "SceneDrawables.h"
#include "SceneLights.h"
#include "SceneEffects.h"
#include "Triangulator.h"
#include "PolygonMeshTriangulator.h"
#include "MeshFilter.h"
#include "MeshGenerator.h"
#include "ImageIO.h"
#include "SceneLoader.h"
#include "Exception.h"
#include "NullOut.h"
#include "EigenUtil.h"
#include "UTF8.h"
#include <cnoid/stdx/filesystem>
#include <fmt/format.h>
#include <tuple>
using namespace std;
using namespace cnoid;
namespace filesystem = cnoid::stdx::filesystem;
using fmt::format;
namespace cnoid {
class VRMLToSGConverterImpl
{
public:
VRMLToSGConverter* self;
ostream* os_;
ostream& os() { return *os_; }
bool isTriangulationEnabled;
bool isNormalGenerationEnabled;
vector<int> removedFaceIndices;
vector<int> removedFaceVertexIndices;
PolygonMeshTriangulator polygonMeshTriangulator;
Triangulator<SgVertexArray> triangulator;
vector<int> polygon;
vector<int> newColorPosToOrgColorPosMap;
MeshFilter meshFilter;
MeshGenerator meshGenerator;
ImageIO imageIO;
VRMLMaterialPtr defaultMaterial;
typedef map<VRMLNodePtr, SgNodePtr> VRMLNodeToSgNodeMap;
VRMLNodeToSgNodeMap vrmlNodeToSgNodeMap;
typedef map<VRMLGeometryPtr, SgMeshPtr> VRMLGeometryToSgMeshMap;
VRMLGeometryToSgMeshMap vrmlGeometryToSgMeshMap;
typedef map<VRMLGeometryPtr, SgPlotPtr> VRMLGeometryToSgPlotMap;
VRMLGeometryToSgPlotMap vrmlGeometryToSgPlotMap;
typedef map<VRMLMaterialPtr, SgMaterialPtr> VRMLMaterialToSgMaterialMap;
VRMLMaterialToSgMaterialMap vrmlMaterialToSgMaterialMap;
typedef map<VRMLTexturePtr, SgTexturePtr> VRMLTextureToSgTextureMap;
VRMLTextureToSgTextureMap vrmlTextureToSgTextureMap;
typedef map<VRMLTextureTransformPtr, SgTextureTransformPtr> VRMLTextureTransformToSgTextureTransformMap;
VRMLTextureTransformToSgTextureTransformMap vrmlTextureTransformToSgTextureTransformMap;
typedef map<string, SgImagePtr> ImagePathToSgImageMap;
ImagePathToSgImageMap imagePathToSgImageMap;
enum BoxFaceID { NO_FACE, LEFT_FACE, TOP_FACE, FRONT_FACE, BOTTOM_FACE, RIGHT_FACE, BACK_FACE };
unique_ptr<SceneLoader> sceneLoader;
string baseDirectory;
VRMLToSGConverterImpl(VRMLToSGConverter* self);
void putMessage(const std::string& message);
SgNode* convertNode(VRMLNode* vnode);
SgNode* convertGroupNode(AbstractVRMLGroup* vgroup);
pair<SgNode*, SgGroup*> createTransformNodeSet(VRMLTransform* vt);
SgNode* convertShapeNode(VRMLShape* vshape);
SgMeshPtr createMeshFromIndexedFaceSet(VRMLIndexedFaceSet* vface);
bool setIndicesForPerTriangleData(SgIndexArray& indices, int dataSize);
bool convertIndicesForTriangles(SgIndexArray& indices, const MFInt32& orgIndices, const bool perVertex, const bool ccw);
SgPolygonMeshPtr createPolygonMeshFromIndexedFaceSet(VRMLIndexedFaceSet* vface);
void setIndicesForPerPolygonData(SgIndexArray& indices, int dataSize, const MFInt32& orgCoordIndices);
void convertIndicesForPolygons(
SgIndexArray& indices, const MFInt32& orgIndices, const MFInt32& orgCoordIndices, const bool perVertex, const bool ccw);
SgMeshPtr createMeshFromElevationGrid(VRMLElevationGrid* grid);
void setDefaultTextureCoordinateForElevationGrid(const SgMeshPtr& mesh, const VRMLElevationGrid* grid);
SgMeshPtr createMeshFromExtrusion(VRMLExtrusion* extrusion);
void setDefaultTextureCoordinateForExtrusion(const SgMeshPtr& mesh, const VRMLExtrusion* extrusion);
SgMaterial* createMaterial(VRMLMaterial* vm);
SgTexture* createTexture(VRMLTexture* vt);
SgNode* convertLineSet(VRMLIndexedLineSet* vLineSet);
SgNode* convertPointSet(VRMLPointSet* vPointSet);
SgTextureTransform* createTextureTransform(VRMLTextureTransform* tt);
SgNode* convertLightNode(VRMLLight* vnode);
void setLightCommonProperties(SgLight* light, VRMLLight* vlight);
SgNode* createPointLight(VRMLPointLight* vlight);
SgSpotLight* createSpotLight(VRMLSpotLight* vlight);
SgDirectionalLight* createDirectionalLight(VRMLDirectionalLight* vlight);
SgNode* convertFogNode(VRMLFog* vfog);
SgNode* readNonVrmlInline(VRMLNonVrmlInline* nonVrmlInline);
};
}
// Public facade; all conversion state and logic live in the pimpl object.
VRMLToSGConverter::VRMLToSGConverter()
{
    impl = new VRMLToSGConverterImpl(this);
}
VRMLToSGConverterImpl::VRMLToSGConverterImpl(VRMLToSGConverter* self)
    : self(self)
{
    // Messages are discarded until a sink is installed via setMessageSink().
    os_ = &nullout();
    isTriangulationEnabled = true;
    isNormalGenerationEnabled = true;
    // Flip images vertically on load — presumably to match the texture
    // coordinate origin used by the renderer; confirm against ImageIO docs.
    imageIO.setUpsideDown(true);
    defaultMaterial = new VRMLMaterial();
}
VRMLToSGConverter::~VRMLToSGConverter()
{
    delete impl;
}
// Records the absolute parent directory of the source VRML file. It is later
// used to resolve relative file references (e.g. Inline node URLs).
void VRMLToSGConverter::setSourceVrmlFilename(const std::string& filename)
{
    impl->baseDirectory = toUTF8(filesystem::absolute(fromUTF8(filename)).parent_path().string());
}
// Redirects conversion messages to the given stream. If a scene loader has
// already been created (for non-VRML inline files), it is switched to the
// same sink so that all messages end up in one place.
void VRMLToSGConverter::setMessageSink(std::ostream& os)
{
    impl->os_ = &os;
    auto& loader = impl->sceneLoader;
    if(loader){
        loader->setMessageSink(os);
    }
}
// When enabled (the default), IndexedFaceSet polygons are triangulated;
// otherwise non-triangle faces are dropped with a warning.
void VRMLToSGConverter::setTriangulationEnabled(bool on)
{
    impl->isTriangulationEnabled = on;
}
// Sets the tessellation resolution used when generating primitive meshes
// (sphere, cylinder, cone, etc.).
void VRMLToSGConverter::setDivisionNumber(int divisionNumber)
{
    impl->meshGenerator.setDivisionNumber(divisionNumber);
}
// Returns the current tessellation resolution of the mesh generator.
int VRMLToSGConverter::divisionNumber() const
{
    return impl->meshGenerator.divisionNumber();
}
// Enables/disables automatic normal generation for converted meshes.
// When doOverwrite is true, normals already present in the source data
// are regenerated as well.
void VRMLToSGConverter::setNormalGenerationEnabled(bool on, bool doOverwrite)
{
    impl->isNormalGenerationEnabled = on;
    impl->meshFilter.setNormalOverwritingEnabled(doOverwrite);
}
// Lower clamp for the crease angle used during normal generation.
void VRMLToSGConverter::setMinCreaseAngle(double angle)
{
    impl->meshFilter.setMinCreaseAngle(angle);
}
// Upper clamp for the crease angle used during normal generation.
void VRMLToSGConverter::setMaxCreaseAngle(double angle)
{
    impl->meshFilter.setMaxCreaseAngle(angle);
}
// Drops every cache of already-converted objects so that subsequent
// conversions create fresh scene graph objects instead of reusing the
// ones shared through VRML DEF/USE. The clearing order is irrelevant;
// the maps are independent.
void VRMLToSGConverter::clearConvertedNodeMap()
{
    impl->imagePathToSgImageMap.clear();
    impl->vrmlTextureTransformToSgTextureTransformMap.clear();
    impl->vrmlTextureToSgTextureMap.clear();
    impl->vrmlMaterialToSgMaterialMap.clear();
    impl->vrmlGeometryToSgPlotMap.clear();
    impl->vrmlGeometryToSgMeshMap.clear();
    impl->vrmlNodeToSgNodeMap.clear();
}
// Converts a VRML node tree into a scene graph node tree.
// Returns null when the input is null or nothing could be converted.
SgNodePtr VRMLToSGConverter::convert(VRMLNodePtr vrmlNode)
{
    return vrmlNode ? impl->convertNode(vrmlNode.get()) : nullptr;
}
// Emits a warning / error message to the current message sink.
void VRMLToSGConverterImpl::putMessage(const std::string& message)
{
    os() << message << endl;
    // Legacy signal-based notification, kept for reference:
    /*
    if(!self->sigMessage.empty()){
        self->sigMessage(message + "\n" );
    }
    */
}
// Converts a single VRML node into a scene graph node. Converted nodes are
// cached so that VRML DEF/USE sharing is preserved in the output graph.
// Returns null for unsupported node types or failed conversions.
SgNode* VRMLToSGConverterImpl::convertNode(VRMLNode* vnode)
{
    SgNode* node = 0;
    VRMLNodeToSgNodeMap::iterator p = vrmlNodeToSgNodeMap.find(vnode);
    if(p != vrmlNodeToSgNodeMap.end()){
        node = p->second.get();
    } else {
        // A proto instance is converted through its expanded actual node.
        // NOTE(review): the cache lookup above used the proto-instance
        // pointer, but the store below uses the actual node, so repeated
        // instances of the same proto may be converted more than once —
        // confirm whether this is intended.
        if(VRMLProtoInstance* protoInstance = dynamic_cast<VRMLProtoInstance*>(vnode)){
            vnode = protoInstance->actualNode.get();
        }
        if(vnode){
            // Dispatch on the concrete VRML node type
            if(AbstractVRMLGroup* group = dynamic_cast<AbstractVRMLGroup*>(vnode)){
                node = convertGroupNode(group);
            } else if(VRMLShape* shape = dynamic_cast<VRMLShape*>(vnode)){
                node = convertShapeNode(shape);
            } else if(VRMLLight* light = dynamic_cast<VRMLLight*>(vnode)){
                node = convertLightNode(light);
            } else if(VRMLFog* fog = dynamic_cast<VRMLFog*>(vnode)){
                node = convertFogNode(fog);
            } else if(VRMLNonVrmlInline* nonVrmlInline = dynamic_cast<VRMLNonVrmlInline*>(vnode)){
                node = readNonVrmlInline(nonVrmlInline);
            }
            if(node){
                node->setName(vnode->defName);
                vrmlNodeToSgNodeMap[vnode] = node;
            }
        }
    }
    return node;
}
// Converts a VRML grouping node (Group / Transform / Inline / Switch, ...).
// Returns the top node of the created sub-graph, or null when no child
// could be converted (empty groups are discarded).
SgNode* VRMLToSGConverterImpl::convertGroupNode(AbstractVRMLGroup* vgroup)
{
    SgNode* top;
    SgGroup* group;
    if(VRMLTransform* transform = dynamic_cast<VRMLTransform*>(vgroup)){
        // A Transform may expand into several nested nodes; "top" is the
        // outermost one, "group" is the node that receives the children.
        std::tie(top, group) = createTransformNodeSet(transform);
    } else {
        group = new SgGroup;
        top = group;
        if(VRMLInline* vrmlInline = dynamic_cast<VRMLInline*>(vgroup)){
            auto& urls = vrmlInline->urls;
            if(!urls.empty()){
                // Record the source of the inlined content (first URL only)
                group->setUriByFilePathAndBaseDirectory(urls.front(), baseDirectory);
            }
        }
    }
    int num = vgroup->countChildren();
    for(int i=0; i < num; i++){
        SgNode* child = convertNode(vgroup->getChild(i));
        if(child){
            group->addChild(child);
        }
    }
    if(group->numChildren() == 0){
        // Nothing was converted: discard the whole node set. "top" has not
        // been handed to any owner yet, so it is destroyed directly here.
        delete top;
        top = 0;
    }
    return top;
}
// Creates the scene graph node(s) realizing a VRML Transform node.
// VRML97 defines the transform as P' = T * C * R * SR * S * -SR * -C * P,
// where T: translation, C: center, R: rotation, SR: scaleOrientation and
// S: scale. Returns the pair (top node, node children should be added to).
//
// Cleanup: the previous version built an unused local Affine3d "S"
// (the scale is carried by the SgScaleTransform node instead); the dead
// code has been removed. No behavioral change.
pair<SgNode*, SgGroup*> VRMLToSGConverterImpl::createTransformNodeSet(VRMLTransform* vt)
{
    const Translation3d C(vt->center);
    const AngleAxisd& R = vt->rotation;
    const Translation3d T(vt->translation);
    SgPosTransform* transform = new SgPosTransform;
    if(vt->scale.isOnes()){
        // No scaling: SR * S * -SR collapses to identity, so a single
        // rigid transform T * C * R * -C suffices.
        transform->setTransform(T * C * R * C.inverse());
        return make_pair(transform, transform);
    } else {
        SgScaleTransform* scale = new SgScaleTransform;
        scale->setScale(vt->scale);
        transform->addChild(scale);
        if(vt->center.isZero() && !vt->scaleOrientation.angle()){
            // C and SR are identity: T * R followed by the scale node
            transform->setTransform(T * R);
            return make_pair(transform, scale);
        } else {
            // General case: split the decomposition into
            //   (T * C * R * SR) * S * (-SR * -C)
            // with the scale node sandwiched between two rigid transforms.
            SgPosTransform* transform2 = new SgPosTransform;
            const AngleAxisd& SR = vt->scaleOrientation;
            transform->setTransform(T * C * R * SR);
            transform2->setTransform(SR.inverse() * C.inverse());
            scale->addChild(transform2);
            return make_pair(transform, transform2);
        }
    }
}
// Converts a VRML Shape node (geometry + appearance) into an SgShape,
// SgLineSet or SgPointSet. Converted meshes, materials, textures and
// texture transforms are all cached so that shared VRML nodes map to
// shared scene graph objects. Returns null when the geometry is missing
// or unsupported.
SgNode* VRMLToSGConverterImpl::convertShapeNode(VRMLShape* vshape)
{
    SgNode* converted = nullptr;
    VRMLGeometry* vrmlGeometry = dynamic_node_cast<VRMLGeometry>(vshape->geometry).get();
    if(vrmlGeometry){
        SgMeshPtr mesh;
        // Reuse a mesh already created for this geometry node, if any
        VRMLGeometryToSgMeshMap::iterator p = vrmlGeometryToSgMeshMap.find(vrmlGeometry);
        if(p != vrmlGeometryToSgMeshMap.end()){
            mesh = p->second;
        } else {
            // Primitive generators only emit texture coordinates when the
            // appearance actually uses a texture
            int meshOptions = MeshGenerator::NoOption;
            if(vshape->appearance && vshape->appearance->texture){
                meshOptions = MeshGenerator::TextureCoordinate;
            }
            if(VRMLIndexedFaceSet* faceSet = dynamic_cast<VRMLIndexedFaceSet*>(vrmlGeometry)){
                if(!isTriangulationEnabled){
                    // Direct conversion: non-triangle faces are dropped
                    mesh = createMeshFromIndexedFaceSet(faceSet);
                } else {
                    // Build an intermediate polygon mesh and triangulate it
                    SgPolygonMeshPtr polygonMesh = createPolygonMeshFromIndexedFaceSet(faceSet);
                    if(polygonMesh){
                        mesh = polygonMeshTriangulator.triangulate(polygonMesh);
                        const string& errorMessage = polygonMeshTriangulator.errorMessage();
                        if(!errorMessage.empty()){
                            string message;
                            if(faceSet->defName.empty()){
                                message = "Error of an IndexedFaceSet node: \n";
                            } else {
                                message = format("Error of IndexedFaceSet node \"{}\": \n", faceSet->defName);
                            }
                            putMessage(message + errorMessage);
                        }
                    }
                }
                if(mesh && isNormalGenerationEnabled){
                    meshFilter.generateNormals(mesh, faceSet->creaseAngle);
                }
            } else if(VRMLBox* box = dynamic_cast<VRMLBox*>(vrmlGeometry)){
                mesh = meshGenerator.generateBox(
                    Vector3(box->size[0], box->size[1], box->size[2]), meshOptions);
            } else if(VRMLSphere* sphere = dynamic_cast<VRMLSphere*>(vrmlGeometry)){
                mesh = meshGenerator.generateSphere(sphere->radius, meshOptions);
            } else if(VRMLCylinder* cylinder = dynamic_cast<VRMLCylinder*>(vrmlGeometry)){
                SgMesh::Cylinder param(cylinder->radius, cylinder->height);
                param.top = cylinder->top;
                param.bottom = cylinder->bottom;
                param.side = cylinder->side;
                mesh = new SgMesh(param);
                meshGenerator.updateMeshWithPrimitiveInformation(mesh, meshOptions);
            } else if(VRMLCone* cone = dynamic_cast<VRMLCone*>(vrmlGeometry)){
                SgMesh::Cone param(cone->bottomRadius, cone->height);
                param.bottom = cone->bottom;
                param.side = cone->side;
                mesh = new SgMesh(param);
                meshGenerator.updateMeshWithPrimitiveInformation(mesh, meshOptions);
            } else if(VRMLElevationGrid* elevationGrid = dynamic_cast<VRMLElevationGrid*>(vrmlGeometry)){
                mesh = createMeshFromElevationGrid(elevationGrid);
            } else if(VRMLExtrusion* extrusion = dynamic_cast<VRMLExtrusion*>(vrmlGeometry)){
                mesh = createMeshFromExtrusion(extrusion);
            } else if(VRMLIndexedLineSet* lineSet = dynamic_cast<VRMLIndexedLineSet*>(vrmlGeometry)){
                // Line / point geometries produce plot nodes, not meshes
                converted = convertLineSet(lineSet);
            } else if(VRMLPointSet* pointSet = dynamic_cast<VRMLPointSet*>(vrmlGeometry)){
                converted = convertPointSet(pointSet);
            } else {
                putMessage(format("VRML {} node is not supported as a geometry.", vrmlGeometry->typeName()));
            }
            if(mesh){
                mesh->setName(vrmlGeometry->defName);
                vrmlGeometryToSgMeshMap[vrmlGeometry] = mesh;
            }
        }
        if(mesh){
            SgShape* shape = new SgShape;
            converted = shape;
            shape->setMesh(mesh);
            // Material: converted once per VRML Material node, then shared
            if(vshape->appearance && vshape->appearance->material){
                auto vm = vshape->appearance->material;
                VRMLMaterialToSgMaterialMap::iterator p = vrmlMaterialToSgMaterialMap.find(vm);
                if(p != vrmlMaterialToSgMaterialMap.end()){
                    shape->setMaterial(p->second);
                } else {
                    shape->setMaterial(createMaterial(vm));
                    vrmlMaterialToSgMaterialMap[vm] = shape->material();
                }
            }
            if(vshape->appearance && vshape->appearance->texture){
                // Texture transform: converted once per node, then shared
                SgTextureTransformPtr textureTransform;
                if(vshape->appearance->textureTransform){
                    VRMLTextureTransform* vtt = vshape->appearance->textureTransform.get();
                    auto pp = vrmlTextureTransformToSgTextureTransformMap.find(vtt);
                    if(pp != vrmlTextureTransformToSgTextureTransformMap.end()){
                        textureTransform = pp->second;
                    } else {
                        textureTransform = createTextureTransform(vtt);
                        vrmlTextureTransformToSgTextureTransformMap[vtt] = textureTransform;
                    }
                }
                // A cached texture is only reused when it carries the same
                // texture transform; otherwise a new SgTexture is created
                SgTexturePtr texture;
                VRMLTexture* vt = vshape->appearance->texture.get();
                VRMLTextureToSgTextureMap::iterator p = vrmlTextureToSgTextureMap.find(vt);
                if(p != vrmlTextureToSgTextureMap.end()){
                    if(p->second->textureTransform() == textureTransform){
                        texture = p->second;
                    }
                }
                if(!texture){
                    texture = createTexture(vt);
                    vrmlTextureToSgTextureMap[vt] = texture;
                }
                if(texture){
                    texture->setTextureTransform(textureTransform);
                    shape->setTexture(texture);
                    // Geometries without explicit texture coordinates get
                    // the default mapping defined for their node type
                    if(!mesh->texCoords()){
                        if(dynamic_cast<VRMLIndexedFaceSet*>(vrmlGeometry)){
                            meshGenerator.generateTextureCoordinateForIndexedFaceSet(mesh);
                        } else if(VRMLElevationGrid* elevationGrid = dynamic_cast<VRMLElevationGrid*>(vrmlGeometry)){
                            setDefaultTextureCoordinateForElevationGrid(mesh, elevationGrid);
                        } else if(VRMLExtrusion* extrusion = dynamic_cast<VRMLExtrusion*>(vrmlGeometry)){
                            setDefaultTextureCoordinateForExtrusion(mesh, extrusion);
                        }
                    }
                }
            }
        }
    }
    return converted;
}
// Converts an IndexedFaceSet directly into a triangle mesh WITHOUT
// triangulation: only faces that are already triangles are kept; other
// faces are dropped and recorded in removedFaceIndices /
// removedFaceVertexIndices so that normal / color / texCoord indices can
// be filtered consistently. Returns null when coordinates are missing.
SgMeshPtr VRMLToSGConverterImpl::createMeshFromIndexedFaceSet(VRMLIndexedFaceSet* vface)
{
    if(!vface->coord || vface->coord->point.empty() || vface->coordIndex.empty()){
        return nullptr;
    }
    SgMeshPtr mesh = new SgMesh;
    mesh->setCreaseAngle(vface->creaseAngle);
    mesh->setSolid(vface->solid);
    mesh->setVertices(new SgVertexArray(vface->coord->point));
    // These member buffers are consumed later by setIndicesForPerTriangleData
    // and convertIndicesForTriangles
    removedFaceIndices.clear();
    removedFaceVertexIndices.clear();
    const MFInt32& orgCoordIndices = vface->coordIndex;
    const bool ccw = vface->ccw;
    const int orgCoordIndicesSize = orgCoordIndices.size();
    SgIndexArray& triangleVertices = mesh->triangleVertices();
    // A pure triangle list uses 4 entries per face (3 indices + "-1"),
    // hence the 3/4 estimate
    triangleVertices.reserve((orgCoordIndicesSize + 1) * 3 / 4);
    int faceIndex = 0;
    int faceVertexIndex = 0;
    int firstVertexIndex = 0;
    int numFaceVertices = 0;
    // Faces are delimited by a negative index in coordIndex
    for(int i=0; i < orgCoordIndicesSize; ++i){
        int index = orgCoordIndices[i];
        if(index >= 0){
            ++numFaceVertices;
        } else {
            if(numFaceVertices == 3) { // Triangle ?
                if(ccw){
                    triangleVertices.push_back(orgCoordIndices[firstVertexIndex]);
                    triangleVertices.push_back(orgCoordIndices[firstVertexIndex + 1]);
                    triangleVertices.push_back(orgCoordIndices[firstVertexIndex + 2]);
                } else { // flip
                    triangleVertices.push_back(orgCoordIndices[firstVertexIndex + 2]);
                    triangleVertices.push_back(orgCoordIndices[firstVertexIndex + 1]);
                    triangleVertices.push_back(orgCoordIndices[firstVertexIndex]);
                }
                faceVertexIndex += 3;
            } else {
                // Non-triangle face: remember its face index and the
                // positions of all its vertex entries for later filtering
                removedFaceIndices.push_back(faceIndex);
                for(int j=0; j < numFaceVertices; ++j){
                    removedFaceVertexIndices.push_back(faceVertexIndex++);
                }
            }
            firstVertexIndex = i + 1; // next position
            numFaceVertices = 0;
            ++faceVertexIndex; // accounts for the "-1" separator itself
            ++faceIndex;
        }
    }
    if(!removedFaceIndices.empty()){
        if(vface->defName.empty()){
            putMessage(format("An IndexedFaceSet node contains {} non-triangle polygon(s).",
                              removedFaceIndices.size()));
        } else {
            putMessage(format("IndexedFaceSet node \"{0}\" contains {1} non-triangle polygon(s).",
                    vface->defName, removedFaceIndices.size()));
        }
    }
    // Normals: either one per face (replicated per triangle vertex) or
    // indexed per vertex
    if(vface->normal && !vface->normal->vector.empty()){
        bool converted;
        if(vface->normalIndex.empty() && !vface->normalPerVertex){
            converted = setIndicesForPerTriangleData(mesh->normalIndices(), vface->normal->vector.size());
        } else {
            converted = convertIndicesForTriangles(mesh->normalIndices(), vface->normalIndex, vface->normalPerVertex, ccw);
        }
        if(converted){
            mesh->setNormals(new SgNormalArray(vface->normal->vector));
        } else {
            putMessage("The normalIndex field of an IndexedFaceSet node contains illegal data.");
        }
    }
    // Colors: same two cases as normals
    if(vface->color && !vface->color->color.empty()){
        bool converted;
        if(vface->colorIndex.empty() && !vface->colorPerVertex){
            converted = setIndicesForPerTriangleData(mesh->colorIndices(), vface->color->color.size());
        } else {
            converted = convertIndicesForTriangles(mesh->colorIndices(), vface->colorIndex, vface->colorPerVertex, ccw);
        }
        if(converted){
            mesh->setColors(new SgColorArray(vface->color->color));
        } else {
            putMessage("The colorIndex field of an IndexedFaceSet node contains illegal data.");
        }
    }
    // Texture coordinates are always per-vertex in VRML
    if(vface->texCoord && !vface->texCoord->point.empty()){
        if(convertIndicesForTriangles(mesh->texCoordIndices(), vface->texCoordIndex, true, ccw)){
            mesh->setTexCoords(new SgTexCoordArray(vface->texCoord->point));
        } else {
            putMessage("The texCoordIndex field of an IndexedFaceSet node contains illegal data.");
        }
    }
    mesh->updateBoundingBox();
    return mesh;
}
// For per-face data (one normal/color per face with no explicit index
// field), emits each data index three times — once per triangle vertex —
// while skipping the faces recorded in removedFaceIndices, which is built
// in ascending order by createMeshFromIndexedFaceSet. Always succeeds.
bool VRMLToSGConverterImpl::setIndicesForPerTriangleData(SgIndexArray& indices, int dataSize)
{
    const int numIndices = (dataSize - removedFaceIndices.size()) * 3;
    if(numIndices > 0){
        indices.reserve(numIndices);
    }
    size_t skipPos = 0;
    for(int dataIndex = 0; dataIndex < dataSize; ++dataIndex){
        // Skip data belonging to a removed (non-triangle) face
        if(skipPos < removedFaceIndices.size() && removedFaceIndices[skipPos] == dataIndex){
            ++skipPos;
            continue;
        }
        for(int k = 0; k < 3; ++k){
            indices.push_back(dataIndex);
        }
    }
    return true;
}
// Converts a VRML index field (normalIndex / colorIndex / texCoordIndex)
// into triangle-mesh index data, skipping entries that belong to faces
// removed by createMeshFromIndexedFaceSet.
// - perVertex: indices are grouped per face and "-1"-delimited like
//   coordIndex; each group must be exactly a triangle (flipped when !ccw).
// - !perVertex: one index per face, replicated three times.
// Returns false (and clears the output) on malformed input.
bool VRMLToSGConverterImpl::convertIndicesForTriangles
(SgIndexArray& indices, const MFInt32& orgIndices, const bool perVertex, const bool ccw)
{
    bool converted = true;
    if(!orgIndices.empty()){
        // Choose the matching skip list: positions within the index stream
        // for per-vertex data, face numbers for per-face data
        vector<int>* indicesToSkip;
        if(perVertex){
            indicesToSkip = &removedFaceVertexIndices;
            indices.reserve((orgIndices.size() + 1) * 3 / 4);
        } else {
            indicesToSkip = &removedFaceIndices;
            indices.reserve(orgIndices.size() * 3);
        }
        int indexToSkip = indicesToSkip->empty() ? std::numeric_limits<int>::min() : indicesToSkip->front();
        size_t nextIndexToSkipIndex = 1;
        int numFaceVertices = 0;
        int firstVertexIndex = 0;
        const int numOrgIndices = orgIndices.size();
        for(int i=0; i < numOrgIndices; ++i){
            if(i == indexToSkip){
                // Advance to the next position/face to skip (ascending)
                if(nextIndexToSkipIndex < indicesToSkip->size()){
                    indexToSkip = (*indicesToSkip)[nextIndexToSkipIndex++];
                }
            } else {
                const int index = orgIndices[i];
                if(perVertex){
                    if(index >= 0){
                        ++numFaceVertices;
                    } else {
                        // "-1" terminates a face; it must be a triangle here
                        if(numFaceVertices != 3){
                            converted = false;
                            break;
                        }
                        if(ccw){
                            indices.push_back(orgIndices[firstVertexIndex]);
                            indices.push_back(orgIndices[firstVertexIndex + 1]);
                            indices.push_back(orgIndices[firstVertexIndex + 2]);
                        } else {
                            // Flip winding to counter-clockwise
                            indices.push_back(orgIndices[firstVertexIndex + 2]);
                            indices.push_back(orgIndices[firstVertexIndex + 1]);
                            indices.push_back(orgIndices[firstVertexIndex]);
                        }
                        firstVertexIndex = i + 1;
                        numFaceVertices = 0;
                    }
                } else { // not perVertex
                    if(index < 0){
                        converted = false;
                        break;
                    }
                    // Replicate the per-face index for all three vertices
                    indices.push_back(index);
                    indices.push_back(index);
                    indices.push_back(index);
                }
            }
        }
        if(!converted){
            indices.clear();
        }
    }
    return converted;
}
// Converts an IndexedFaceSet into an intermediate polygon mesh that is
// later triangulated by PolygonMeshTriangulator. Face winding is
// normalized to counter-clockwise by reversing each face when !ccw.
// Returns null (with a message) when coordinate data is missing.
SgPolygonMeshPtr VRMLToSGConverterImpl::createPolygonMeshFromIndexedFaceSet(VRMLIndexedFaceSet* vface)
{
    if(!vface->coord){
        putMessage("VRMLIndexedFaceSet: The coord field is not defined." );
        return nullptr;
    }
    if(vface->coord->point.empty()){
        putMessage("VRMLIndexedFaceSet: The point field is empty." );
        return nullptr;
    }
    if(vface->coordIndex.empty()){
        putMessage("VRMLIndexedFaceSet: The coordIndex field is empty." );
        return nullptr;
    }
    SgPolygonMeshPtr mesh = new SgPolygonMesh;
    mesh->setCreaseAngle(vface->creaseAngle);
    mesh->setSolid(vface->solid);
    mesh->setVertices(new SgVertexArray(vface->coord->point));
    const MFInt32& orgCoordIndices = vface->coordIndex;
    const bool ccw = vface->ccw;
    {
        SgIndexArray& vertexIndices = mesh->faceVertexIndices();
        if(ccw){
            // Winding already counter-clockwise: copy indices verbatim
            vertexIndices = orgCoordIndices;
        } else {
            // Reverse the vertex order within each "-1"-delimited face
            vertexIndices.reserve(orgCoordIndices.size());
            int firstVertexIndex = 0;
            int numFaceVertices = 0;
            for(size_t i=0; i < orgCoordIndices.size(); ++i){
                int index = orgCoordIndices[i];
                if(index >= 0){
                    ++numFaceVertices;
                } else {
                    while(--numFaceVertices >= 0){
                        vertexIndices.push_back(orgCoordIndices[firstVertexIndex + numFaceVertices]);
                    }
                    vertexIndices.push_back(-1);
                    firstVertexIndex = i + 1;
                    numFaceVertices = 0;
                }
            }
        }
    }
    // Normals: per-face data gets synthesized indices; explicit indices
    // are converted to match the (possibly reversed) face ordering
    if(vface->normal && !vface->normal->vector.empty()){
        mesh->setNormals(new SgNormalArray(vface->normal->vector));
        if(vface->normalIndex.empty()){
            if(!vface->normalPerVertex){
                setIndicesForPerPolygonData(mesh->normalIndices(), mesh->normals()->size(), orgCoordIndices);
            }
        } else {
            convertIndicesForPolygons(mesh->normalIndices(), vface->normalIndex, orgCoordIndices, vface->normalPerVertex, ccw);
        }
    }
    // Colors: same handling as normals
    if(vface->color && !vface->color->color.empty()){
        mesh->setColors(new SgColorArray(vface->color->color));
        if(vface->colorIndex.empty()){
            if(!vface->colorPerVertex){
                setIndicesForPerPolygonData(mesh->colorIndices(), mesh->colors()->size(), orgCoordIndices);
            }
        } else {
            convertIndicesForPolygons(mesh->colorIndices(), vface->colorIndex, orgCoordIndices, vface->colorPerVertex, ccw);
        }
    }
    // Texture coordinates are always per-vertex in VRML
    if(vface->texCoord && !vface->texCoord->point.empty()){
        mesh->setTexCoords(new SgTexCoordArray(vface->texCoord->point));
        if(!vface->texCoordIndex.empty()){
            convertIndicesForPolygons(mesh->texCoordIndices(), vface->texCoordIndex, orgCoordIndices, true, ccw);
        }
    }
    // The bounding box doesn't have to be updated because this polygon mesh is immediately converted into the triagnle mesh
    return mesh;
}
// For per-face data with no explicit index field: replicates face number
// "face" once per vertex of that face (face extents are read off the
// "-1"-delimited coordIndex stream) and terminates each face with -1.
void VRMLToSGConverterImpl::setIndicesForPerPolygonData(SgIndexArray& indices, int dataSize, const MFInt32& orgCoordIndices)
{
    indices.reserve(orgCoordIndices.size());
    const size_t numCoordIndices = orgCoordIndices.size();
    size_t pos = 0;
    for(int face = 0; face < dataSize; ++face){
        // Copy the face number for every vertex of the current face
        while(pos < numCoordIndices){
            const int coordIndex = orgCoordIndices[pos++];
            if(coordIndex < 0){
                break;
            }
            indices.push_back(face);
        }
        indices.push_back(-1);
    }
}
// Converts a VRML index field into polygon-mesh index data:
// - perVertex && ccw: verbatim copy.
// - perVertex && !ccw: vertex order reversed within each "-1"-delimited
//   face to match the reversed face winding.
// - !perVertex: one index per face, replicated for each vertex of the
//   corresponding face in coordIndex and terminated with -1.
void VRMLToSGConverterImpl::convertIndicesForPolygons
(SgIndexArray& indices, const MFInt32& orgIndices, const MFInt32& orgCoordIndices, const bool perVertex, const bool ccw)
{
    if(perVertex){
        if(ccw){
            indices = orgIndices;
            return;
        }
        indices.reserve(orgIndices.size());
        int firstVertexIndex = 0;
        int numFaceVertices = 0;
        for(size_t i=0; i < orgIndices.size(); ++i){
            if(orgIndices[i] >= 0){
                ++numFaceVertices;
            } else {
                // Emit the face's indices in reverse order
                while(--numFaceVertices >= 0){
                    indices.push_back(orgIndices[firstVertexIndex + numFaceVertices]);
                }
                indices.push_back(-1);
                firstVertexIndex = i + 1;
                numFaceVertices = 0;
            }
        }
    } else {
        indices.reserve(orgCoordIndices.size());
        size_t indexInOrgCoordIndices = 0;
        for(size_t i=0; i < orgIndices.size(); ++i){
            const int index = orgIndices[i];
            if(index >= 0){
                // Replicate the per-face index for each vertex of the face
                while(indexInOrgCoordIndices < orgCoordIndices.size()){
                    const int orgCoordIndex = orgCoordIndices[indexInOrgCoordIndices++];
                    if(orgCoordIndex < 0){
                        break;
                    }
                    indices.push_back(index);
                }
                indices.push_back(-1);
            }
        }
    }
}
// Converts a VRML ElevationGrid into a triangle mesh. Vertices are laid
// out row by row (z-major); each grid cell becomes two triangles, with
// winding chosen by grid->ccw. Returns null when the height array does
// not match xDimension * zDimension.
SgMeshPtr VRMLToSGConverterImpl::createMeshFromElevationGrid(VRMLElevationGrid* grid)
{
    if(grid->xDimension * grid->zDimension != static_cast<SFInt32>(grid->height.size())){
        putMessage("A VRML ElevationGrid node has illegal parameters.");
        return SgMeshPtr();
    }
    SgMeshPtr mesh = new SgMesh;
    mesh->setVertices(new SgVertexArray());
    SgVertexArray& vertices = *mesh->vertices();
    vertices.reserve(grid->zDimension * grid->xDimension);
    for(int z=0; z < grid->zDimension; z++){
        for(int x=0; x < grid->xDimension; x++){
            vertices.push_back(Vector3f(x * grid->xSpacing, grid->height[z * grid->xDimension + x], z * grid->zSpacing));
        }
    }
    mesh->reserveNumTriangles((grid->zDimension - 1) * (grid->xDimension - 1) * 2);
    for(int z=0; z < grid->zDimension - 1; ++z){
        const int current = z * grid->xDimension;
        const int next = (z + 1) * grid->xDimension;
        for(int x=0; x < grid->xDimension - 1; ++x){
            if(grid->ccw){
                mesh->addTriangle( x + current, x + next, (x + 1) + next);
                mesh->addTriangle( x + current, (x + 1) + next, (x + 1) + current);
            }else{
                mesh->addTriangle( x + current, (x + 1) + next, x + next);
                mesh->addTriangle( x + current, (x + 1) + current, (x + 1) + next);
            }
        }
    }
    mesh->setCreaseAngle(grid->creaseAngle);
    mesh->setSolid(grid->solid);
    if(isNormalGenerationEnabled){
        meshFilter.generateNormals(mesh, grid->creaseAngle);
    }
    if(grid->color){
        const MFColor& orgColors = grid->color->color;
        if(!orgColors.empty()){
            if(!grid->colorPerVertex){
                // Per-cell colors: replicate each color for the six
                // triangle vertices of its cell (2 triangles x 3 vertices).
                // NOTE(review): reserve(n * 2) underestimates the final
                // size of 6 * n — harmless but worth confirming.
                mesh->setColors(new SgColorArray());
                SgColorArray& colors = *mesh->colors();
                const int n = orgColors.size();
                colors.reserve(n * 2);
                for(int i=0; i < n; ++i){
                    const SFColor& c = orgColors[i];
                    for(int j=0; j < 6; ++j){
                        colors.push_back(c);
                    }
                }
            }
        }
    }
    if(grid->texCoord){
        // Explicit texture coordinates: assumed one per grid vertex, so the
        // texCoord indices can simply mirror the triangle vertex indices
        const MFVec2s& point = grid->texCoord->point;
        const int n = point.size();
        mesh->setTexCoords(new SgTexCoordArray());
        SgTexCoordArray& texCoords = *mesh->texCoords();
        texCoords.resize(n);
        for(int i=0; i < n; ++i){
            texCoords[i] = point[i];
        }
        mesh->texCoordIndices() = mesh->triangleVertices();
    }
    mesh->updateBoundingBox();
    return mesh;
}
void VRMLToSGConverterImpl::setDefaultTextureCoordinateForElevationGrid(const SgMeshPtr& mesh, const VRMLElevationGrid* grid)
{
float xmax = grid->xSpacing * (grid->xDimension - 1);
float zmax = grid->zSpacing * (grid->zDimension - 1);
mesh->setTexCoords(new SgTexCoordArray());
SgTexCoordArray& texCoords = *mesh->texCoords();
const SgVertexArray& vertices = *mesh->vertices();
for(size_t i=0; i < vertices.size(); ++i){
const Vector3f& v = vertices[i];
texCoords.push_back(Vector2f(v.x() / xmax, v.z() / zmax));
}
mesh->texCoordIndices() = mesh->triangleVertices();
}
// Converts a VRML Extrusion into a triangle mesh: the 2D cross section is
// swept along the spine, oriented by a per-spine-point coordinate frame
// (the VRML97 "spine-aligned cross-section plane"), scaled and rotated by
// the per-point scale / orientation fields. Side faces are built as quads
// split into two triangles; begin/end caps are triangulated with the
// generic polygon triangulator when the spine is not closed.
// NOTE(review): spines with fewer than 2 points are not guarded against —
// confirm the parser rejects them upstream.
SgMeshPtr VRMLToSGConverterImpl::createMeshFromExtrusion(VRMLExtrusion* extrusion)
{
    // The spine is closed when its first and last points coincide
    bool isClosed = false;
    const int numSpine = extrusion->spine.size();
    const int numcross = extrusion->crossSection.size();
    if(extrusion->spine[0][0] == extrusion->spine[numSpine - 1][0] &&
       extrusion->spine[0][1] == extrusion->spine[numSpine - 1][1] &&
       extrusion->spine[0][2] == extrusion->spine[numSpine - 1][2] ){
        isClosed = true;
    }
    // Likewise for the 2D cross section (used to drop the duplicated
    // point when triangulating the caps)
    bool crossSectionisClosed = false;
    if(extrusion->crossSection[0][0] == extrusion->crossSection[numcross - 1][0] &&
       extrusion->crossSection[0][1] == extrusion->crossSection[numcross - 1][1] ){
        crossSectionisClosed = true;
    }
    SgMeshPtr mesh = new SgMesh;
    mesh->setVertices(new SgVertexArray());
    SgVertexArray& vertices = *mesh->vertices();
    vertices.reserve(numSpine*numcross);
    // Build the per-spine-point frame axes. Y follows the spine tangent;
    // Z is the normal of the plane through three consecutive spine points.
    // A degenerate (zero) Z falls back to the previous well-defined one.
    SFVec3f preZaxis(SFVec3f::Zero());
    int definedZaxis = -1;  // index of the first spine point with a valid Z
    std::vector<SFVec3f> Yaxisarray;
    std::vector<SFVec3f> Zaxisarray;
    if(numSpine > 2){
        for(int i=0; i < numSpine; ++i){
            SFVec3f Yaxis, Zaxis;
            if(i == 0){
                if(isClosed){
                    // Wrap around: neighbors of the first point are the
                    // second point and the second-to-last point
                    const SFVec3f& spine1 = extrusion->spine[numSpine - 2];
                    const SFVec3f& spine2 = extrusion->spine[0];
                    const SFVec3f& spine3 = extrusion->spine[1];
                    Yaxis = spine3 - spine1;
                    Zaxis = (spine3 - spine2).cross(spine1 - spine2);
                } else {
                    const SFVec3f& spine1 = extrusion->spine[0];
                    const SFVec3f& spine2 = extrusion->spine[1];
                    const SFVec3f& spine3 = extrusion->spine[2];
                    Yaxis = spine2 - spine1;
                    Zaxis = (spine3 - spine2).cross(spine1 - spine2);
                }
            } else if(i == numSpine - 1){
                if(isClosed){
                    const SFVec3f& spine1 = extrusion->spine[numSpine - 2];
                    const SFVec3f& spine2 = extrusion->spine[0];
                    const SFVec3f& spine3 = extrusion->spine[1];
                    Yaxis = spine3 - spine1;
                    Zaxis = (spine3 - spine2).cross(spine1 - spine2);
                } else {
                    const SFVec3f& spine1 = extrusion->spine[numSpine - 3];
                    const SFVec3f& spine2 = extrusion->spine[numSpine - 2];
                    const SFVec3f& spine3 = extrusion->spine[numSpine - 1];
                    Yaxis = spine3 - spine2;
                    Zaxis = (spine3 - spine2).cross(spine1 - spine2);
                }
            } else {
                // Interior point: central difference for Y
                const SFVec3f& spine1 = extrusion->spine[i - 1];
                const SFVec3f& spine2 = extrusion->spine[i];
                const SFVec3f& spine3 = extrusion->spine[i + 1];
                Yaxis = spine3 - spine1;
                Zaxis = (spine3-spine2).cross(spine1-spine2);
            }
            if(!Zaxis.norm()){
                // Collinear points: reuse the previous valid Z axis
                if(definedZaxis != -1)
                    Zaxis = preZaxis;
            } else {
                if(definedZaxis == -1){
                    definedZaxis = i;
                }
                preZaxis = Zaxis;
            }
            Yaxisarray.push_back(Yaxis);
            Zaxisarray.push_back(Zaxis);
        }
    } else {
        // Two-point spine: a single tangent, no computable Z axis
        const SFVec3f Yaxis(extrusion->spine[1] - extrusion->spine[0]);
        Yaxisarray.push_back(Yaxis);
        Yaxisarray.push_back(Yaxis);
    }
    // Place the transformed cross section at every spine point
    for(int i=0; i < numSpine; ++i){
        Eigen::Matrix3d Scp;  // rotation of the cross-section plane
        SFVec3f y = Yaxisarray[i].normalized();
        if(definedZaxis == -1){
            // No valid Z anywhere (straight spine): rotate +Y onto the
            // tangent about an axis perpendicular to both
            SFRotation R(acos(y[1]), SFRotation::Vector3(y[2], 0.0, -y[0]));
            Scp = R.toRotationMatrix();
        } else {
            // Back-fill points before the first valid Z, and flip Z when it
            // reverses between neighbors to keep the frame continuous
            if(i < definedZaxis){
                Zaxisarray[i] = Zaxisarray[definedZaxis];
            }
            if(i && (Zaxisarray[i].dot(Zaxisarray[i - 1]) < 0.0)){
                Zaxisarray[i] *= -1.0;
            }
            SFVec3f z = Zaxisarray[i].normalized();
            SFVec3f x = y.cross(z);
            Scp << x, y, z;
        }
        const SFVec3f& spine = extrusion->spine[i];
        // scale / orientation fields may hold one value for all points or
        // one value per spine point
        SFVec3f scale;
        if(extrusion->scale.size() == 1){
            scale << extrusion->scale[0][0], 0.0, extrusion->scale[0][1];
        } else {
            scale << extrusion->scale[i][0], 0.0, extrusion->scale[i][1];
        }
        SFRotation o;
        if(extrusion->orientation.size() == 1){
            o = extrusion->orientation[0];
        } else {
            o = extrusion->orientation[i];
        }
        // The 2D cross section lies in the local x-z plane
        for(int j=0; j < numcross; ++j){
            const SFVec3f crossSection(extrusion->crossSection[j][0], 0.0, extrusion->crossSection[j][1]);
            const SFVec3f v1(crossSection[0] * scale[0], 0.0, crossSection[2] * scale[2]);
            vertices.push_back((Scp * o.toRotationMatrix() * v1 + spine).cast<float>());
        }
    }
    // Side faces: each quad between consecutive rings becomes two triangles
    for(int i=0; i < numSpine - 1 ; ++i){
        const int upper = i * numcross;
        const int lower = (i + 1) * numcross;
        for(int j=0; j < numcross - 1; ++j) {
            if(extrusion->ccw){
                mesh->addTriangle(j + upper, j + lower, (j + 1) + lower);
                mesh->addTriangle(j + upper, (j + 1) + lower, j + 1 + upper);
            } else {
                // upward convex triangle
                mesh->addTriangle(j + upper, (j + 1) + lower, j + lower);
                // downward convex triangle
                mesh->addTriangle(j + upper, (j + 1) + upper, j + 1 + lower);
            }
        }
    }
    // For a closed cross section, drop the duplicated last point from the
    // cap polygons
    int j = 0;
    if(crossSectionisClosed){
        j = 1;
    }
    if(extrusion->beginCap && !isClosed){
        // Triangulate the first cross-section ring
        triangulator.setVertices(vertices);
        polygon.clear();
        for(int i=0; i < numcross - j; ++i){
            polygon.push_back(i);
        }
        triangulator.apply(polygon);
        const vector<int>& triangles = triangulator.triangles();
        for(size_t i=0; i < triangles.size(); i += 3){
            if(extrusion->ccw){
                mesh->addTriangle(polygon[triangles[i]], polygon[triangles[i+1]], polygon[triangles[i+2]]);
            } else {
                mesh->addTriangle(polygon[triangles[i]], polygon[triangles[i+2]], polygon[triangles[i+1]]);
            }
        }
    }
    if(extrusion->endCap && !isClosed){
        // Triangulate the last cross-section ring (opposite winding to the
        // begin cap so both face outwards)
        triangulator.setVertices(vertices);
        polygon.clear();
        for(int i=0; i < numcross - j; ++i){
            polygon.push_back(numcross * (numSpine - 1) + i);
        }
        triangulator.apply(polygon);
        const vector<int>& triangles = triangulator.triangles();
        for(size_t i=0; i < triangles.size(); i +=3){
            if(extrusion->ccw){
                mesh->addTriangle(polygon[triangles[i]], polygon[triangles[i+2]], polygon[triangles[i+1]]);
            } else {
                mesh->addTriangle(polygon[triangles[i]], polygon[triangles[i+1]], polygon[triangles[i+2]]);
            }
        }
    }
    mesh->setCreaseAngle(extrusion->creaseAngle);
    mesh->setSolid(extrusion->solid);
    if(isNormalGenerationEnabled){
        meshFilter.generateNormals(mesh, extrusion->creaseAngle);
    }
    mesh->updateBoundingBox();
    return mesh;
}
void VRMLToSGConverterImpl::setDefaultTextureCoordinateForExtrusion(const SgMeshPtr& mesh, const VRMLExtrusion* extrusion)
{
const int numSpine = extrusion->spine.size();
const int numcross = extrusion->crossSection.size();
mesh->setTexCoords(new SgTexCoordArray());
SgTexCoordArray& texCoords = *mesh->texCoords();
vector<double> s;
vector<double> t;
double slen = 0.0;
s.push_back(0.0);
for(size_t i=1; i < extrusion->crossSection.size(); ++i){
double x = extrusion->crossSection[i][0] - extrusion->crossSection[i-1][0];
double z = extrusion->crossSection[i][1] - extrusion->crossSection[i-1][1];
slen += sqrt(x*x + z*z);
s.push_back(slen);
}
double tlen = 0.0;
t.push_back(0.0);
for(size_t i=1; i < extrusion->spine.size(); ++i){
double x = extrusion->spine[i][0] - extrusion->spine[i-1][0];
double y = extrusion->spine[i][1] - extrusion->spine[i-1][1];
double z = extrusion->spine[i][2] - extrusion->spine[i-1][2];
tlen += sqrt(x*x + y*y + z*z);
t.push_back(tlen);
}
for(size_t i=0; i < extrusion->spine.size(); ++i){
Vector2f point;
point[1] = t[i] / tlen;
for(size_t j=0; j < extrusion->crossSection.size(); ++j){
point[0] = s[j] / slen;
texCoords.push_back(point);
}
}
SgIndexArray& texCoordIndices = mesh->texCoordIndices();
texCoordIndices.clear();
const int endOfSpineVertices = (numSpine - 1) * (numcross - 1) * 2 * 3;
texCoordIndices.resize(endOfSpineVertices);
const SgIndexArray& triangleVertices = mesh->triangleVertices();
copy(triangleVertices.begin(), triangleVertices.begin() + endOfSpineVertices, texCoordIndices.begin());
int endOfBeginCapVertices = endOfSpineVertices;
const int endOfSpineTexCoords = texCoords.size();
if(extrusion->beginCap){
if(extrusion->endCap){
endOfBeginCapVertices += (triangleVertices.size() - endOfSpineVertices) / 2;
} else {
endOfBeginCapVertices = triangleVertices.size();
}
double xmin, xmax;
double zmin, zmax;
xmin = xmax = extrusion->crossSection[0][0];
zmin = zmax = extrusion->crossSection[0][1];
for(size_t i=1; i < extrusion->crossSection.size(); ++i){
xmax = std::max(xmax, extrusion->crossSection[i][0]);
xmin = std::min(xmin, extrusion->crossSection[i][0]);
zmax = std::max(zmax, extrusion->crossSection[i][1]);
zmin = std::min(xmin, extrusion->crossSection[i][1]);
}
float xsize = xmax - xmin;
float zsize = zmax - zmin;
for(int i=0; i < numcross; ++i){
Vector2f point;
point[0] = (extrusion->crossSection[i][0] - xmin) / xsize;
point[1] = (extrusion->crossSection[i][1] - zmin) / zsize;
texCoords.push_back(point);
}
for(int i = endOfSpineVertices; i < endOfBeginCapVertices; ++i){
texCoordIndices.push_back(triangleVertices[i] + endOfSpineTexCoords);
}
}
if(extrusion->endCap){
double xmax, xmin;
double zmax, zmin;
xmin = xmax = extrusion->crossSection[0][0];
zmin = zmax = extrusion->crossSection[0][1];
for(size_t i=1; i < extrusion->crossSection.size(); ++i){
xmax = std::max(xmax, extrusion->crossSection[i][0]);
xmin = std::min(xmin, extrusion->crossSection[i][0]);
zmax = std::max(zmax, extrusion->crossSection[i][1]);
zmin = std::min(xmin, extrusion->crossSection[i][1]);
}
double xsize = xmax - xmin;
double zsize = zmax - zmin;
for(size_t i=0; i < extrusion->crossSection.size(); ++i){
Vector2f point;
point[0] = (extrusion->crossSection[i][0] - xmin) / xsize;
point[1] = (extrusion->crossSection[i][1] - zmin) / zsize;
texCoords.push_back(point);
}
const int offset = texCoords.size() - endOfSpineTexCoords;
for(size_t i = endOfBeginCapVertices; i < triangleVertices.size(); ++i){
texCoordIndices.push_back(triangleVertices[i] + offset);
}
}
}
// Translates a VRML97 Material node into an SgMaterial. The normalized
// VRML shininess value in [0, 1] is mapped onto the specular exponent
// range [1, 128].
SgMaterial* VRMLToSGConverterImpl::createMaterial(VRMLMaterial* vm)
{
    auto material = new SgMaterial;
    material->setName(vm->defName);
    material->setAmbientIntensity(vm->ambientIntensity);
    material->setDiffuseColor(vm->diffuseColor);
    material->setEmissiveColor(vm->emissiveColor);
    material->setSpecularColor(vm->specularColor);
    material->setTransparency(vm->transparency);
    // Clamp shininess to [0, 1] before scaling
    float shininess = static_cast<float>(vm->shininess);
    if(shininess < 0.0f){
        shininess = 0.0f;
    } else if(shininess > 1.0f){
        shininess = 1.0f;
    }
    material->setSpecularExponent(127.0f * shininess + 1.0f);
    return material;
}
// Copies the fields of a VRML TextureTransform node one-to-one into a
// new SgTextureTransform.
SgTextureTransform* VRMLToSGConverterImpl::createTextureTransform(VRMLTextureTransform* tt)
{
    auto out = new SgTextureTransform;
    out->setName(tt->defName);
    out->setTranslation(tt->translation);
    out->setRotation(tt->rotation);
    out->setScale(tt->scale);
    out->setCenter(tt->center);
    return out;
}
// Converts a VRML texture node into an SgTexture.
// - ImageTexture: the first resolvable file of the url list is loaded;
//   loaded images are cached by file path so repeated references share
//   one SgImage.
// - PixelTexture: inline pixel data is copied row-reversed (bottom row
//   first) to match the upside-down convention used by imageIO.
// - MovieTexture: unsupported; a message is emitted and null is returned.
SgTexture* VRMLToSGConverterImpl::createTexture(VRMLTexture* vt)
{
    SgTexture* texture = nullptr;
    VRMLImageTexturePtr imageTextureNode = dynamic_node_cast<VRMLImageTexture>(vt);
    if(imageTextureNode){
        SgImagePtr image;
        SgImagePtr imageForLoading;
        const MFString& filepaths = imageTextureNode->filepath;
        for(size_t i=0; i < filepaths.size(); ++i){
            auto& filepath = filepaths[i];
            if(!filepath.empty()){
                // Reuse an image already loaded from this path
                auto p = imagePathToSgImageMap.find(filepath);
                if(p != imagePathToSgImageMap.end()){
                    image = p->second;
                    break;
                } else {
                    // The loading buffer is created lazily and reused
                    // across failed attempts
                    if(!imageForLoading){
                        imageForLoading = new SgImage;
                    }
                    if(imageIO.load(imageForLoading->image(), filepath, os())){
                        image = imageForLoading;
                        image->setUri(imageTextureNode->url[i], filepath);
                        imagePathToSgImageMap[filepath] = image;
                        break;
                    }
                }
            }
        }
        if(image){
            texture = new SgTexture;
            texture->setImage(image);
            texture->setRepeat(imageTextureNode->repeatS, imageTextureNode->repeatT);
        }
    } else if(VRMLPixelTexturePtr pixelTextureNode = dynamic_node_cast<VRMLPixelTexture>(vt)){
        const int width = pixelTextureNode->image.width;
        const int height = pixelTextureNode->image.height;
        const int nc = pixelTextureNode->image.numComponents;
        if(width > 0 && height > 0 && nc > 0){
            texture = new SgTexture;
            SgImage* image = new SgImage;
            image->setSize(width, height, nc);
            // copy the pixels in the upside-down way
            std::vector<unsigned char>& src = pixelTextureNode->image.pixels;
            unsigned char* dest = image->pixels();
            for(int i=0; i<height; ++i){
                int ii = height - i - 1;  // source row, mirrored vertically
                for(int j=0; j < width; ++j){
                    for(int l=0; l < nc; ++l)
                        dest[(i * width + j) * nc + l] = src[(ii * width +j) * nc + l];
                }
            }
            texture->setImage(image);
            texture->setRepeat(pixelTextureNode->repeatS, pixelTextureNode->repeatT);
        }
    } else {
        putMessage("MovieTextureNode is not supported");
    }
    if(texture){
        texture->setName(vt->defName);
    }
    return texture;
}
// Converts a VRML IndexedLineSet into an SgLineSet.
// Polylines (runs of indices terminated by -1) are decomposed into individual
// line segments; colors (per vertex or per polyline) are remapped to one color
// index per segment endpoint. The result is cached per VRML node.
SgNode* VRMLToSGConverterImpl::convertLineSet(VRMLIndexedLineSet* vLineSet)
{
    // Return the cached conversion if this node was already processed.
    VRMLGeometryToSgPlotMap::iterator p = vrmlGeometryToSgPlotMap.find(vLineSet);
    if(p != vrmlGeometryToSgPlotMap.end()){
        return p->second.get();
    }
    if(!vLineSet->coord || vLineSet->coord->point.empty() || vLineSet->coordIndex.empty()){
        return 0;
    }
    SgLineSet* lineSet = new SgLineSet;
    lineSet->setVertices(new SgVertexArray(vLineSet->coord->point));
    const bool hasColors = (vLineSet->color && !vLineSet->color->color.empty());
    if(hasColors){
        // Records, for each emitted segment endpoint, the position in the
        // original colorIndex array it should take its color from.
        newColorPosToOrgColorPosMap.clear();
    }
    const bool colorPerVertex = vLineSet->colorPerVertex;
    const MFInt32& coordIndex = vLineSet->coordIndex;
    int topPosition = 0;
    int polylineIndex = 0;
    for(size_t i=0; i < coordIndex.size(); ++i){
        const int index = coordIndex[i];
        // A negative index terminates the current polyline.
        if(index < 0){
            int n = i - topPosition;
            // A polyline needs at least two vertices to produce segments.
            if(n >= 2){
                --n;
                for(int j=0; j < n; ++j){
                    const int v1 = topPosition + j;
                    const int v2 = topPosition + j + 1;
                    lineSet->addLine(coordIndex[v1], coordIndex[v2]);
                    if(hasColors){
                        if(colorPerVertex){
                            newColorPosToOrgColorPosMap.push_back(v1);
                            newColorPosToOrgColorPosMap.push_back(v2);
                        } else {
                            // Per-polyline coloring: both endpoints share the
                            // polyline's color slot.
                            newColorPosToOrgColorPosMap.push_back(polylineIndex);
                            newColorPosToOrgColorPosMap.push_back(polylineIndex);
                        }
                    }
                }
            }
            topPosition = i + 1;
            ++polylineIndex;
        }
    }
    if(hasColors){
        lineSet->setColors(new SgColorArray(vLineSet->color->color));
        const int numColors = lineSet->colors()->size();
        const MFInt32& orgColorIndices = vLineSet->colorIndex;
        const int numOrgColorIndices = orgColorIndices.size();
        bool doWarning = false;
        SgIndexArray& colorIndices = lineSet->colorIndices();
        const SgIndexArray& vertexIndices = lineSet->lineVertexIndices();
        // Translate each segment endpoint into a valid color index,
        // clamping out-of-range values and remembering to warn once.
        for(size_t i=0; i < vertexIndices.size(); ++i){
            int orgPos = newColorPosToOrgColorPosMap[i];
            if(orgPos >= numOrgColorIndices){
                orgPos = numOrgColorIndices - 1;
                doWarning = true;
            }
            int index = orgColorIndices[orgPos];
            if(index < 0){
                index = 0;
                doWarning = true;
            } else if(index >= numColors){
                index = numColors - 1;
                doWarning = true;
            }
            colorIndices.push_back(index);
        }
        if(doWarning){
            putMessage("Warning: The colorIndex elements do not correspond to the colors or the coordIndex elements in an IndexedLineSet node.");
        }
    }
    lineSet->updateBoundingBox();
    vrmlGeometryToSgPlotMap[vLineSet] = lineSet;
    return lineSet;
}
SgNode* VRMLToSGConverterImpl::convertPointSet(VRMLPointSet* vPointSet)
{
    // Reuse a previously converted point set for this VRML node if available.
    auto cached = vrmlGeometryToSgPlotMap.find(vPointSet);
    if(cached != vrmlGeometryToSgPlotMap.end()){
        return cached->second;
    }
    // A point set without coordinates yields no scene-graph node.
    if(!vPointSet->coord || vPointSet->coord->point.empty()){
        return nullptr;
    }
    auto* pointSet = new SgPointSet;
    pointSet->setVertices(new SgVertexArray(vPointSet->coord->point));
    pointSet->updateBoundingBox();
    // Per-vertex colors are optional in VRML.
    if(vPointSet->color && !vPointSet->color->color.empty()){
        pointSet->setColors(new SgColorArray(vPointSet->color->color));
    }
    vrmlGeometryToSgPlotMap[vPointSet] = pointSet;
    return pointSet;
}
// Dispatches a VRML light node to the appropriate converter.
// Returns nullptr for unknown light types.
SgNode* VRMLToSGConverterImpl::convertLightNode(VRMLLight* vlight)
{
    // BUG FIX: the original cast to VRMLSpotLight* here, which silently
    // dropped plain PointLight nodes (only spot lights passed the cast).
    // Casting to VRMLPointLight* covers both point and spot lights —
    // VRMLSpotLight derives from VRMLPointLight, and createPointLight()
    // performs the finer spot-light dispatch itself.
    if(VRMLPointLight* vPointLight = dynamic_cast<VRMLPointLight*>(vlight)){
        return createPointLight(vPointLight);
    } else if(VRMLDirectionalLight* vDirectionalLight = dynamic_cast<VRMLDirectionalLight*>(vlight)){
        return createDirectionalLight(vDirectionalLight);
    }
    return 0;
}
void VRMLToSGConverterImpl::setLightCommonProperties(SgLight* light, VRMLLight* vlight)
{
    // Copy the fields shared by every VRML light type into the scene-graph
    // light; the setters are independent so their order is irrelevant.
    light->setColor(vlight->color);
    light->setAmbientIntensity(vlight->ambientIntensity);
    light->setIntensity(vlight->intensity);
    light->on(vlight->on);
}
SgNode* VRMLToSGConverterImpl::createPointLight(VRMLPointLight* vlight)
{
    // A VRML SpotLight is-a PointLight: build the more specific scene-graph
    // node when possible, then fill in the point-light properties.
    SgPointLight* light = nullptr;
    if(auto vSpotLight = dynamic_cast<VRMLSpotLight*>(vlight)){
        light = createSpotLight(vSpotLight);
    } else {
        light = new SgPointLight;
    }
    light->setConstantAttenuation(vlight->attenuation[0]);
    light->setLinearAttenuation(vlight->attenuation[1]);
    light->setQuadraticAttenuation(vlight->attenuation[2]);
    setLightCommonProperties(light, vlight);
    // A light at the origin needs no positioning transform.
    if(vlight->location == SFVec3f::Zero()){
        return light;
    }
    // Otherwise wrap the light in a transform that places it at its location.
    auto* transform = new SgPosTransform;
    transform->setTranslation(vlight->location);
    transform->addChild(light);
    return transform;
}
SgSpotLight* VRMLToSGConverterImpl::createSpotLight(VRMLSpotLight* vlight)
{
    // Only the spot-specific cone fields are set here; the caller
    // (createPointLight) fills in attenuation and the common properties.
    auto* light = new SgSpotLight;
    light->setDirection(vlight->direction);
    light->setBeamWidth(vlight->beamWidth);
    light->setCutOffAngle(vlight->cutOffAngle);
    return light;
}
SgDirectionalLight* VRMLToSGConverterImpl::createDirectionalLight(VRMLDirectionalLight* vlight)
{
    // Directional lights carry only a direction plus the common light fields.
    auto* light = new SgDirectionalLight;
    setLightCommonProperties(light, vlight);
    light->setDirection(vlight->direction);
    return light;
}
SgNode* VRMLToSGConverterImpl::convertFogNode(VRMLFog* vfog)
{
    // Map a VRML Fog node onto the scene-graph fog representation.
    auto* fog = new SgFog;
    fog->setVisibilityRange(vfog->visibilityRange);
    fog->setColor(vfog->color);
    return fog;
}
SgNode* VRMLToSGConverterImpl::readNonVrmlInline(VRMLNonVrmlInline* nonVrmlInline)
{
    // Inline nodes referencing a non-VRML resource are delegated to the
    // generic scene loader, which is created lazily on first use.
    if(nonVrmlInline->url.empty()){
        return nullptr;
    }
    if(!sceneLoader){
        sceneLoader.reset(new SceneLoader);
        sceneLoader->setMessageSink(os());
    }
    return sceneLoader->load(nonVrmlInline->url);
}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Copyright (c) 2017-2017 The Iox developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/iox-config.h"
#endif
#include "net.h"
#include "init.h"
#include "addrman.h"
#include "chainparams.h"
#include "clientversion.h"
#include "miner.h"
#include "primitives/transaction.h"
#include "scheduler.h"
#include "ui_interface.h"
#ifdef ENABLE_WALLET
#include "wallet.h"
#endif
#ifdef WIN32
#include <string.h>
#else
#include <fcntl.h>
#endif
#ifdef USE_UPNP
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/miniwget.h>
#include <miniupnpc/upnpcommands.h>
#include <miniupnpc/upnperrors.h>
#endif
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
// Dump addresses to peers.dat every 15 minutes (900s)
#define DUMP_ADDRESSES_INTERVAL 900
#if !defined(HAVE_MSG_NOSIGNAL) && !defined(MSG_NOSIGNAL)
#define MSG_NOSIGNAL 0
#endif
// Fix for ancient MinGW versions, that don't have defined these in ws2tcpip.h.
// Todo: Can be removed when our pull-tester is upgraded to a modern MinGW version.
#ifdef WIN32
#ifndef PROTECTION_LEVEL_UNRESTRICTED
#define PROTECTION_LEVEL_UNRESTRICTED 10
#endif
#ifndef IPV6_PROTECTION_LEVEL
#define IPV6_PROTECTION_LEVEL 23
#endif
#endif
using namespace boost;
using namespace std;
namespace
{
// Hard cap on the number of outbound connections this node initiates;
// also reserves inbound slots in ThreadSocketHandler's "full" check.
const int MAX_OUTBOUND_CONNECTIONS = 16;
// A bound listening socket together with the whitelist flag that is
// inherited by every peer accepted through it.
struct ListenSocket {
    SOCKET socket;
    bool whitelisted;
    ListenSocket(SOCKET socket, bool whitelisted) : socket(socket), whitelisted(whitelisted) {}
};
}
//
// Global state variables
//
// Whether to try discovering/advertising our own external addresses.
bool fDiscover = true;
bool fListen = true;
uint64_t nLocalServices = NODE_NETWORK;
// Known local addresses with their score/port; guarded by cs_mapLocalHost
// together with vfReachable/vfLimited below.
CCriticalSection cs_mapLocalHost;
map<CNetAddr, LocalServiceInfo> mapLocalHost;
// Per-network reachability / off-limits flags, indexed by enum Network.
static bool vfReachable[NET_MAX] = {};
static bool vfLimited[NET_MAX] = {};
static CNode* pnodeLocalHost = NULL;
// Random nonce sent in our "version" message (regenerated in PushVersion).
uint64_t nLocalHostNonce = 0;
static std::vector<ListenSocket> vhListenSocket;
CAddrMan addrman;
int nMaxConnections = 125;
bool fAddressesInitialized = false;
// Currently connected peers; guarded by cs_vNodes.
vector<CNode*> vNodes;
CCriticalSection cs_vNodes;
// Relay memory: inventory item -> serialized message, with an expiration queue.
map<CInv, CDataStream> mapRelay;
deque<pair<int64_t, CInv> > vRelayExpiration;
CCriticalSection cs_mapRelay;
limitedmap<CInv, int64_t> mapAlreadyAskedFor(MAX_INV_SZ);
// Destinations queued for one-time connection attempts (see AddOneShot).
static deque<string> vOneShots;
CCriticalSection cs_vOneShots;
set<CNetAddr> setservAddNodeAddresses;
CCriticalSection cs_setservAddNodeAddresses;
// Manually added nodes — presumably from -addnode/addnode RPC; confirm at call sites.
vector<std::string> vAddedNodes;
CCriticalSection cs_vAddedNodes;
NodeId nLastNodeId = 0;
CCriticalSection cs_nLastNodeId;
static CSemaphore* semOutbound = NULL;
// Signalled whenever a complete network message arrives (see ReceiveMsgBytes).
boost::condition_variable messageHandlerCondition;
// Signals for message handling
static CNodeSignals g_signals;
CNodeSignals& GetNodeSignals() { return g_signals; }
// Queue a destination for a single one-time connection attempt.
void AddOneShot(string strDest)
{
    LOCK(cs_vOneShots);
    vOneShots.push_back(strDest);
}

// The TCP port we listen on: the -port argument, falling back to the
// chain's default port.
unsigned short GetListenPort()
{
    return static_cast<unsigned short>(GetArg("-port", Params().GetDefaultPort()));
}
// find 'best' local address for a particular peer
bool GetLocal(CService& addr, const CNetAddr* paddrPeer)
{
    if (!fListen)
        return false;

    // Pick the registered local address with the best reachability from the
    // peer, breaking ties with the higher score.
    int nBestScore = -1;
    int nBestReachability = -1;
    {
        LOCK(cs_mapLocalHost);
        for (const auto& entry : mapLocalHost) {
            const int nScore = entry.second.nScore;
            const int nReachability = entry.first.GetReachabilityFrom(paddrPeer);
            const bool better = (nReachability > nBestReachability) ||
                                (nReachability == nBestReachability && nScore > nBestScore);
            if (better) {
                addr = CService(entry.first, entry.second.nPort);
                nBestReachability = nReachability;
                nBestScore = nScore;
            }
        }
    }
    // nBestScore stays -1 when no local address is known.
    return nBestScore >= 0;
}
// get best local address for a particular peer as a CAddress
// Otherwise, return the unroutable 0.0.0.0 but filled in with
// the normal parameters, since the IP may be changed to a useful
// one by discovery.
CAddress GetLocalAddress(const CNetAddr* paddrPeer)
{
    // Start from the unroutable placeholder so the port is always populated.
    CAddress ret(CService("0.0.0.0", GetListenPort()), 0);
    CService addr;
    if (GetLocal(addr, paddrPeer)) {
        ret = CAddress(addr);
    }
    // Service bits and timestamp are set regardless of which address won.
    ret.nServices = nLocalServices;
    ret.nTime = GetAdjustedTime();
    return ret;
}
// Reads one line (terminated by '\r'; '\n' is skipped) from a socket,
// byte by byte. Returns true when a line (possibly truncated at 9000
// characters) was read, false on socket close or unrecoverable error.
// Transient errors (EWOULDBLOCK/EINTR/EINPROGRESS) are retried with a
// short sleep.
bool RecvLine(SOCKET hSocket, string& strLine)
{
    strLine = "";
    while (true) {
        char c;
        int nBytes = recv(hSocket, &c, 1, 0);
        if (nBytes > 0) {
            if (c == '\n')
                continue;
            if (c == '\r')
                return true;
            strLine += c;
            // Cap line length to avoid unbounded memory growth.
            if (strLine.size() >= 9000)
                return true;
        } else if (nBytes <= 0) {
            boost::this_thread::interruption_point();
            if (nBytes < 0) {
                int nErr = WSAGetLastError();
                if (nErr == WSAEMSGSIZE)
                    continue;
                // Transient conditions: back off briefly and retry.
                if (nErr == WSAEWOULDBLOCK || nErr == WSAEINTR || nErr == WSAEINPROGRESS) {
                    MilliSleep(10);
                    continue;
                }
            }
            // Return whatever was collected before the close/error.
            if (!strLine.empty())
                return true;
            if (nBytes == 0) {
                // socket closed
                LogPrint("net", "socket closed\n");
                return false;
            } else {
                // socket error
                int nErr = WSAGetLastError();
                LogPrint("net", "recv failed: %s\n", NetworkErrorString(nErr));
                return false;
            }
        }
    }
}
int GetnScore(const CService& addr)
{
LOCK(cs_mapLocalHost);
if (mapLocalHost.count(addr) == LOCAL_NONE)
return 0;
return mapLocalHost[addr].nScore;
}
// Is our peer's addrLocal potentially useful as an external IP source?
bool IsPeerAddrLocalGood(CNode* pnode)
{
    // Only trust the address a peer reports for us when discovery is on,
    // both its own address and the reported one are routable, and the
    // reported address is not on a limited (disabled) network.
    if (!fDiscover)
        return false;
    if (!pnode->addr.IsRoutable() || !pnode->addrLocal.IsRoutable())
        return false;
    return !IsLimited(pnode->addrLocal.GetNetwork());
}
// pushes our own address to a peer
void AdvertizeLocal(CNode* pnode)
{
    // Only advertise once the version handshake has completed.
    if (fListen && pnode->fSuccessfullyConnected) {
        CAddress addrLocal = GetLocalAddress(&pnode->addr);
        // If discovery is enabled, sometimes give our peer the address it
        // tells us that it sees us as in case it has a better idea of our
        // address than we do.
        // (1-in-8 chance for manually set addresses, 1-in-2 otherwise.)
        if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() ||
                                              GetRand((GetnScore(addrLocal) > LOCAL_MANUAL) ? 8 : 2) == 0)) {
            addrLocal.SetIP(pnode->addrLocal);
        }
        if (addrLocal.IsRoutable()) {
            LogPrintf("AdvertizeLocal: advertizing address %s\n", addrLocal.ToString());
            pnode->PushAddress(addrLocal);
        }
    }
}
void SetReachable(enum Network net, bool fFlag)
{
    // Record whether we believe 'net' is reachable. Reaching IPv6 implies
    // IPv4 reachability as well (dual-stack assumption).
    LOCK(cs_mapLocalHost);
    vfReachable[net] = fFlag;
    if (fFlag && net == NET_IPV6)
        vfReachable[NET_IPV4] = true;
}
// learn a new local address
// Returns false for unroutable/limited addresses, or non-manual ones while
// discovery is disabled. Re-adding an address bumps its score by one.
bool AddLocal(const CService& addr, int nScore)
{
    if (!addr.IsRoutable())
        return false;
    if (!fDiscover && nScore < LOCAL_MANUAL)
        return false;
    if (IsLimited(addr))
        return false;
    LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);
    {
        LOCK(cs_mapLocalHost);
        bool fAlready = mapLocalHost.count(addr) > 0;
        LocalServiceInfo& info = mapLocalHost[addr];
        // Only upgrade the stored score; never downgrade an existing entry.
        if (!fAlready || nScore >= info.nScore) {
            info.nScore = nScore + (fAlready ? 1 : 0);
            info.nPort = addr.GetPort();
        }
        SetReachable(addr.GetNetwork());
    }
    return true;
}
// Overload for a bare network address: pairs it with our listen port.
bool AddLocal(const CNetAddr& addr, int nScore)
{
    return AddLocal(CService(addr, GetListenPort()), nScore);
}
// Forget a previously registered local address; always reports success,
// whether or not the address was known.
bool RemoveLocal(const CService& addr)
{
    LOCK(cs_mapLocalHost);
    LogPrintf("RemoveLocal(%s)\n", addr.ToString());
    mapLocalHost.erase(addr);
    return true;
}
/** Make a particular network entirely off-limits (no automatic connects to it) */
void SetLimited(enum Network net, bool fLimited)
{
    // The "unroutable" pseudo-network can never be toggled.
    if (net == NET_UNROUTABLE)
        return;
    LOCK(cs_mapLocalHost);
    vfLimited[net] = fLimited;
}
// True when automatic connections to this network have been disabled.
bool IsLimited(enum Network net)
{
    // vfLimited shares cs_mapLocalHost with the rest of the local-address state.
    LOCK(cs_mapLocalHost);
    return vfLimited[net];
}

// Convenience overload: limit status of the network an address belongs to.
bool IsLimited(const CNetAddr& addr)
{
    return IsLimited(addr.GetNetwork());
}
/** vote for a local address */
bool SeenLocal(const CService& addr)
{
    LOCK(cs_mapLocalHost);
    // Only addresses previously registered via AddLocal can accumulate votes.
    if (mapLocalHost.count(addr) == 0)
        return false;
    mapLocalHost[addr].nScore++;
    return true;
}

/** check whether a given address is potentially local */
bool IsLocal(const CService& addr)
{
    LOCK(cs_mapLocalHost);
    return mapLocalHost.find(addr) != mapLocalHost.end();
}
/** check whether a given network is one we can probably connect to */
bool IsReachable(enum Network net)
{
    LOCK(cs_mapLocalHost);
    // Reachable means we have evidence of connectivity and the network has
    // not been explicitly limited.
    return vfReachable[net] && !vfLimited[net];
}

/** check whether a given address is in a network we can probably connect to */
bool IsReachable(const CNetAddr& addr)
{
    return IsReachable(addr.GetNetwork());
}
// Notify the address manager that we are currently connected to this address.
void AddressCurrentlyConnected(const CService& addr)
{
    addrman.Connected(addr);
}
// Running totals of bytes transferred across all peers, each guarded by its
// own lock (updated via RecordBytesRecv/RecordBytesSent).
uint64_t CNode::nTotalBytesRecv = 0;
uint64_t CNode::nTotalBytesSent = 0;
CCriticalSection CNode::cs_totalBytesRecv;
CCriticalSection CNode::cs_totalBytesSent;
// Find a connected peer by IP address (ignores the port).
CNode* FindNode(const CNetAddr& ip)
{
    LOCK(cs_vNodes);
    for (CNode* pnode : vNodes) {
        if ((CNetAddr)pnode->addr == ip)
            return pnode;
    }
    return NULL;
}

// Find the first connected peer whose address falls inside the given subnet.
CNode* FindNode(const CSubNet& subNet)
{
    LOCK(cs_vNodes);
    for (CNode* pnode : vNodes) {
        if (subNet.Match((CNetAddr)pnode->addr))
            return pnode;
    }
    return NULL;
}

// Find a connected peer by the name its connection was opened with.
CNode* FindNode(const std::string& addrName)
{
    LOCK(cs_vNodes);
    for (CNode* pnode : vNodes) {
        if (pnode->addrName == addrName)
            return pnode;
    }
    return NULL;
}

// Find a connected peer by full service address (IP + port).
CNode* FindNode(const CService& addr)
{
    LOCK(cs_vNodes);
    for (CNode* pnode : vNodes) {
        // On regtest many nodes share one IP with different ports, so only
        // the IP part is compared there; elsewhere the full address must match.
        if (Params().NetworkID() == CBaseChainParams::REGTEST) {
            if ((CNetAddr)pnode->addr == (CNetAddr)addr)
                return pnode;
        } else {
            if (pnode->addr == addr)
                return pnode;
        }
    }
    return NULL;
}
// Establishes (or reuses) an outbound connection.
// When pszDest is NULL, addrConnect is used directly and an existing
// connection to the same service is reused (with an added reference);
// otherwise the name is resolved/connected through ConnectSocketByName.
// Returns the (referenced) node on success, NULL on failure.
CNode* ConnectNode(CAddress addrConnect, const char* pszDest)
{
    if (pszDest == NULL) {
        // we clean masternode connections in CMasternodeMan::ProcessMasternodeConnections()
        // so should be safe to skip this and connect to local Hot MN on CActiveMasternode::ManageStatus()
        if (IsLocal(addrConnect))
            return NULL;
        // Look for an existing connection
        CNode* pnode = FindNode((CService)addrConnect);
        if (pnode) {
            pnode->AddRef();
            return pnode;
        }
    }
    /// debug print
    LogPrint("net", "trying connection %s lastseen=%.1fhrs\n",
        pszDest ? pszDest : addrConnect.ToString(),
        pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0);
    // Connect
    SOCKET hSocket;
    bool proxyConnectionFailed = false;
    if (pszDest ? ConnectSocketByName(addrConnect, hSocket, pszDest, Params().GetDefaultPort(), nConnectTimeout, &proxyConnectionFailed) :
                  ConnectSocket(addrConnect, hSocket, nConnectTimeout, &proxyConnectionFailed)) {
        // Sockets beyond FD_SETSIZE cannot be handled by select()-based I/O.
        if (!IsSelectableSocket(hSocket)) {
            LogPrintf("Cannot create connection: non-selectable socket created (fd >= FD_SETSIZE ?)\n");
            CloseSocket(hSocket);
            return NULL;
        }
        addrman.Attempt(addrConnect);
        // Add node
        CNode* pnode = new CNode(hSocket, addrConnect, pszDest ? pszDest : "", false);
        pnode->AddRef();
        {
            LOCK(cs_vNodes);
            vNodes.push_back(pnode);
        }
        pnode->nTimeConnected = GetTime();
        return pnode;
    } else if (!proxyConnectionFailed) {
        // If connecting to the node failed, and failure is not caused by a problem connecting to
        // the proxy, mark this as an attempt.
        addrman.Attempt(addrConnect);
    }
    return NULL;
}
// Marks this peer for disconnection, closes its socket, and (best-effort)
// discards any partially received messages.
void CNode::CloseSocketDisconnect()
{
    fDisconnect = true;
    if (hSocket != INVALID_SOCKET) {
        LogPrint("net", "disconnecting peer=%d\n", id);
        CloseSocket(hSocket);
    }
    // in case this fails, we'll empty the recv buffer when the CNode is deleted
    TRY_LOCK(cs_vRecvMsg, lockRecv);
    if (lockRecv)
        vRecvMsg.clear();
}
// Flags the peer for disconnection when its protocol version is below
// nVersionRequired, sending it a "reject" message first.
// Returns the resulting fDisconnect state.
bool CNode::DisconnectOldProtocol(int nVersionRequired, string strLastCommand)
{
    fDisconnect = false;
    if (nVersion < nVersionRequired) {
        LogPrintf("%s : peer=%d using obsolete version %i; disconnecting\n", __func__, id, nVersion);
        // NOTE(review): the reject text reports ActiveProtocol() rather than
        // nVersionRequired — presumably the two agree at the call sites; confirm.
        PushMessage("reject", strLastCommand, REJECT_OBSOLETE, strprintf("Version must be %d or greater", ActiveProtocol()));
        fDisconnect = true;
    }
    return fDisconnect;
}
// Sends our "version" handshake message to this peer, including a freshly
// generated nonce (used elsewhere to detect self-connections).
void CNode::PushVersion()
{
    int nBestHeight = g_signals.GetHeight().get_value_or(0);
    /// when NTP implemented, change to just nTime = GetAdjustedTime()
    int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime());
    // Advertise the peer's own address back to it when routable and not proxied.
    CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0", 0)));
    CAddress addrMe = GetLocalAddress(&addr);
    GetRandBytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
    if (fLogIPs)
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), addrYou.ToString(), id);
    else
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), id);
    PushMessage("version", PROTOCOL_VERSION, nLocalServices, nTime, addrYou, addrMe,
        nLocalHostNonce, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<string>()), nBestHeight, true);
}
// Static ban-list state shared by all CNode instances; guarded by cs_setBanned.
banmap_t CNode::setBanned;
CCriticalSection CNode::cs_setBanned;
bool CNode::setBannedIsDirty;
// Remove every ban entry and mark the list dirty so the change is persisted.
void CNode::ClearBanned()
{
    LOCK(cs_setBanned);
    setBanned.clear();
    setBannedIsDirty = true;
}
// True when any banned subnet contains 'ip' and that ban has not expired.
bool CNode::IsBanned(CNetAddr ip)
{
    LOCK(cs_setBanned);
    for (const auto& entry : setBanned) {
        const CSubNet& subNet = entry.first;
        const CBanEntry& banEntry = entry.second;
        if (subNet.Match(ip) && GetTime() < banEntry.nBanUntil)
            return true;
    }
    return false;
}

// True when this exact subnet is banned and the ban is still active.
bool CNode::IsBanned(CSubNet subnet)
{
    LOCK(cs_setBanned);
    const auto it = setBanned.find(subnet);
    return it != setBanned.end() && GetTime() < it->second.nBanUntil;
}
// Ban a single address (delegates to the subnet overload).
void CNode::Ban(const CNetAddr& addr, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch)
{
    CSubNet subNet(addr);
    Ban(subNet, banReason, bantimeoffset, sinceUnixEpoch);
}
// Ban a subnet until the given offset — relative to now, or an absolute Unix
// time when sinceUnixEpoch is true. A non-positive offset falls back to the
// -bantime setting (default 24h) relative to now.
void CNode::Ban(const CSubNet& subNet, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch)
{
    CBanEntry banEntry(GetTime());
    banEntry.banReason = banReason;
    if (bantimeoffset <= 0)
    {
        bantimeoffset = GetArg("-bantime", 60*60*24); // Default 24-hour ban
        sinceUnixEpoch = false;
    }
    banEntry.nBanUntil = (sinceUnixEpoch ? 0 : GetTime() )+bantimeoffset;
    LOCK(cs_setBanned);
    // Only extend an existing ban; never shorten it.
    if (setBanned[subNet].nBanUntil < banEntry.nBanUntil)
        setBanned[subNet] = banEntry;
    setBannedIsDirty = true;
}
// Lift the ban on a single address via the subnet derived from it.
bool CNode::Unban(const CNetAddr &addr)
{
    return Unban(CSubNet(addr));
}

// Remove a subnet from the ban list; returns true if an entry was removed
// (and marks the list dirty for persistence).
bool CNode::Unban(const CSubNet &subNet)
{
    LOCK(cs_setBanned);
    if (setBanned.erase(subNet) == 0)
        return false;
    setBannedIsDirty = true;
    return true;
}
// Hand out a snapshot of the ban list so callers can work on it without
// holding cs_setBanned.
void CNode::GetBanned(banmap_t &banMap)
{
    LOCK(cs_setBanned);
    banMap = setBanned; //create a thread safe copy
}

// Replace the in-memory ban list wholesale and mark it dirty.
void CNode::SetBanned(const banmap_t &banMap)
{
    LOCK(cs_setBanned);
    setBanned = banMap;
    setBannedIsDirty = true;
}
void CNode::SweepBanned()
{
int64_t now = GetTime();
LOCK(cs_setBanned);
banmap_t::iterator it = setBanned.begin();
while(it != setBanned.end())
{
CBanEntry banEntry = (*it).second;
if(now > banEntry.nBanUntil)
{
setBanned.erase(it++);
setBannedIsDirty = true;
}
else
++it;
}
}
// Whether the ban list has unsaved changes.
bool CNode::BannedSetIsDirty()
{
    LOCK(cs_setBanned);
    return setBannedIsDirty;
}

// Set/clear the unsaved-changes flag (shares cs_setBanned with the list itself).
void CNode::SetBannedSetDirty(bool dirty)
{
    LOCK(cs_setBanned); //reuse setBanned lock for the isDirty flag
    setBannedIsDirty = dirty;
}
// Subnets whose peers are treated as whitelisted (the accept path skips the
// ban check for them); guarded by cs_vWhitelistedRange.
std::vector<CSubNet> CNode::vWhitelistedRange;
CCriticalSection CNode::cs_vWhitelistedRange;
// True when 'addr' falls inside any whitelisted subnet.
bool CNode::IsWhitelistedRange(const CNetAddr& addr)
{
    LOCK(cs_vWhitelistedRange);
    BOOST_FOREACH (const CSubNet& subnet, vWhitelistedRange) {
        if (subnet.Match(addr))
            return true;
    }
    return false;
}
// Register an additional whitelisted subnet.
void CNode::AddWhitelistedRange(const CSubNet& subnet)
{
    LOCK(cs_vWhitelistedRange);
    vWhitelistedRange.push_back(subnet);
}
// Copies this node's statistics into 'stats' for reporting (e.g. RPC/UI).
// The X macro copies a member into the same-named field of CNodeStats.
#undef X
#define X(name) stats.name = name
void CNode::copyStats(CNodeStats& stats)
{
    stats.nodeid = this->GetId();
    X(nServices);
    X(nLastSend);
    X(nLastRecv);
    X(nTimeConnected);
    X(addrName);
    X(nVersion);
    X(cleanSubVer);
    X(fInbound);
    X(nStartingHeight);
    X(nSendBytes);
    X(nRecvBytes);
    X(fWhitelisted);
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    int64_t nPingUsecWait = 0;
    if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) {
        nPingUsecWait = GetTimeMicros() - nPingUsecStart;
    }
    // Raw ping time is in microseconds, but show it to user as whole seconds (Iox users should be well used to small numbers with many decimal places by now :)
    stats.dPingTime = (((double)nPingUsecTime) / 1e6);
    stats.dPingWait = (((double)nPingUsecWait) / 1e6);
    // Leave string empty if addrLocal invalid (not filled in yet)
    stats.addrLocal = addrLocal.IsValid() ? addrLocal.ToString() : "";
}
#undef X
// requires LOCK(cs_vRecvMsg)
// Feeds raw network bytes into the message parser, splitting them into
// CNetMessage objects. Returns false on a malformed header or an oversized
// message (the caller then disconnects the peer). Wakes the message-handler
// thread whenever a message completes.
bool CNode::ReceiveMsgBytes(const char* pch, unsigned int nBytes)
{
    while (nBytes > 0) {
        // get current incomplete message, or create a new one
        if (vRecvMsg.empty() ||
            vRecvMsg.back().complete())
            vRecvMsg.push_back(CNetMessage(SER_NETWORK, nRecvVersion));
        CNetMessage& msg = vRecvMsg.back();
        // absorb network data
        int handled;
        if (!msg.in_data)
            handled = msg.readHeader(pch, nBytes);
        else
            handled = msg.readData(pch, nBytes);
        if (handled < 0)
            return false;
        if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) {
            LogPrint("net", "Oversized message from peer=%i, disconnecting", GetId());
            return false;
        }
        // Advance past the consumed portion of the input buffer.
        pch += handled;
        nBytes -= handled;
        if (msg.complete()) {
            msg.nTime = GetTimeMicros();
            messageHandlerCondition.notify_one();
        }
    }
    return true;
}
// Accumulates bytes of the fixed 24-byte message header.
// Returns the number of bytes consumed, or -1 when the header fails to
// deserialize or announces a payload larger than MAX_SIZE.
int CNetMessage::readHeader(const char* pch, unsigned int nBytes)
{
    // copy data to temporary parsing buffer
    unsigned int nRemaining = 24 - nHdrPos;
    unsigned int nCopy = std::min(nRemaining, nBytes);
    memcpy(&hdrbuf[nHdrPos], pch, nCopy);
    nHdrPos += nCopy;
    // if header incomplete, exit
    if (nHdrPos < 24)
        return nCopy;
    // deserialize to CMessageHeader
    try {
        hdrbuf >> hdr;
    } catch (const std::exception&) {
        return -1;
    }
    // reject messages larger than MAX_SIZE
    if (hdr.nMessageSize > MAX_SIZE)
        return -1;
    // switch state to reading message data
    in_data = true;
    return nCopy;
}
// Accumulates message payload bytes after the header has been parsed.
// Returns the number of bytes consumed from 'pch'.
int CNetMessage::readData(const char* pch, unsigned int nBytes)
{
    unsigned int nRemaining = hdr.nMessageSize - nDataPos;
    unsigned int nCopy = std::min(nRemaining, nBytes);
    if (vRecv.size() < nDataPos + nCopy) {
        // Allocate up to 256 KiB ahead, but never more than the total message size.
        vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
    }
    memcpy(&vRecv[nDataPos], pch, nCopy);
    nDataPos += nCopy;
    return nCopy;
}
// requires LOCK(cs_vSend)
// Flushes as much of the peer's outgoing message queue as the socket will
// accept without blocking. Fully sent messages are dropped from the queue;
// a partial send stops the loop and leaves nSendOffset mid-message.
void SocketSendData(CNode* pnode)
{
    std::deque<CSerializeData>::iterator it = pnode->vSendMsg.begin();
    while (it != pnode->vSendMsg.end()) {
        const CSerializeData& data = *it;
        assert(data.size() > pnode->nSendOffset);
        int nBytes = send(pnode->hSocket, &data[pnode->nSendOffset], data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
        if (nBytes > 0) {
            pnode->nLastSend = GetTime();
            pnode->nSendBytes += nBytes;
            pnode->nSendOffset += nBytes;
            pnode->RecordBytesSent(nBytes);
            if (pnode->nSendOffset == data.size()) {
                pnode->nSendOffset = 0;
                pnode->nSendSize -= data.size();
                it++;
            } else {
                // could not send full message; stop sending more
                break;
            }
        } else {
            if (nBytes < 0) {
                // error
                int nErr = WSAGetLastError();
                if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
                    LogPrintf("socket send error %s\n", NetworkErrorString(nErr));
                    pnode->CloseSocketDisconnect();
                }
            }
            // couldn't send anything at all
            break;
        }
    }
    if (it == pnode->vSendMsg.end()) {
        // The whole queue drained: offsets must be back to a clean state.
        assert(pnode->nSendOffset == 0);
        assert(pnode->nSendSize == 0);
    }
    pnode->vSendMsg.erase(pnode->vSendMsg.begin(), it);
}
// Peers removed from vNodes but not yet deleted; freed by
// ThreadSocketHandler once all references and locks are released.
static list<CNode*> vNodesDisconnected;
// Main network I/O loop: each iteration disconnects/reaps dead peers,
// select()s over all sockets with a 50 ms timeout, accepts new inbound
// connections, receives/sends pending data per peer, and applies the
// inactivity timeouts. Runs until the thread is interrupted.
void ThreadSocketHandler()
{
    unsigned int nPrevNodeCount = 0;
    while (true) {
        //
        // Disconnect nodes
        //
        {
            LOCK(cs_vNodes);
            // Disconnect unused nodes
            vector<CNode*> vNodesCopy = vNodes;
            BOOST_FOREACH (CNode* pnode, vNodesCopy) {
                if (pnode->fDisconnect ||
                    (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0 && pnode->ssSend.empty())) {
                    // remove from vNodes
                    vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end());
                    // release outbound grant (if any)
                    pnode->grantOutbound.Release();
                    // close socket and cleanup
                    pnode->CloseSocketDisconnect();
                    // hold in disconnected pool until all refs are released
                    if (pnode->fNetworkNode || pnode->fInbound)
                        pnode->Release();
                    vNodesDisconnected.push_back(pnode);
                }
            }
        }
        {
            // Delete disconnected nodes
            list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
            BOOST_FOREACH (CNode* pnode, vNodesDisconnectedCopy) {
                // wait until threads are done using it
                if (pnode->GetRefCount() <= 0) {
                    bool fDelete = false;
                    {
                        // Only delete once the send, receive and inventory
                        // locks can all be taken, i.e. no thread is inside.
                        TRY_LOCK(pnode->cs_vSend, lockSend);
                        if (lockSend) {
                            TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                            if (lockRecv) {
                                TRY_LOCK(pnode->cs_inventory, lockInv);
                                if (lockInv)
                                    fDelete = true;
                            }
                        }
                    }
                    if (fDelete) {
                        vNodesDisconnected.remove(pnode);
                        delete pnode;
                    }
                }
            }
        }
        // Notify the UI when the connection count changes.
        size_t vNodesSize;
        {
            LOCK(cs_vNodes);
            vNodesSize = vNodes.size();
        }
        if(vNodesSize != nPrevNodeCount) {
            nPrevNodeCount = vNodesSize;
            uiInterface.NotifyNumConnectionsChanged(nPrevNodeCount);
        }
        //
        // Find which sockets have data to receive
        //
        struct timeval timeout;
        timeout.tv_sec = 0;
        timeout.tv_usec = 50000; // frequency to poll pnode->vSend
        fd_set fdsetRecv;
        fd_set fdsetSend;
        fd_set fdsetError;
        FD_ZERO(&fdsetRecv);
        FD_ZERO(&fdsetSend);
        FD_ZERO(&fdsetError);
        SOCKET hSocketMax = 0;
        bool have_fds = false;
        // Listening sockets are always polled for readability (new peers).
        BOOST_FOREACH (const ListenSocket& hListenSocket, vhListenSocket) {
            FD_SET(hListenSocket.socket, &fdsetRecv);
            hSocketMax = max(hSocketMax, hListenSocket.socket);
            have_fds = true;
        }
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodes) {
                if (pnode->hSocket == INVALID_SOCKET)
                    continue;
                FD_SET(pnode->hSocket, &fdsetError);
                hSocketMax = max(hSocketMax, pnode->hSocket);
                have_fds = true;
                // Implement the following logic:
                // * If there is data to send, select() for sending data. As this only
                //   happens when optimistic write failed, we choose to first drain the
                //   write buffer in this case before receiving more. This avoids
                //   needlessly queueing received data, if the remote peer is not themselves
                //   receiving data. This means properly utilizing TCP flow control signalling.
                // * Otherwise, if there is no (complete) message in the receive buffer,
                //   or there is space left in the buffer, select() for receiving data.
                // * (if neither of the above applies, there is certainly one message
                //   in the receiver buffer ready to be processed).
                // Together, that means that at least one of the following is always possible,
                // so we don't deadlock:
                // * We send some data.
                // * We wait for data to be received (and disconnect after timeout).
                // * We process a message in the buffer (message handler thread).
                {
                    TRY_LOCK(pnode->cs_vSend, lockSend);
                    if (lockSend && !pnode->vSendMsg.empty()) {
                        FD_SET(pnode->hSocket, &fdsetSend);
                        continue;
                    }
                }
                {
                    TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                    if (lockRecv && (pnode->vRecvMsg.empty() || !pnode->vRecvMsg.front().complete() ||
                                        pnode->GetTotalRecvSize() <= ReceiveFloodSize()))
                        FD_SET(pnode->hSocket, &fdsetRecv);
                }
            }
        }
        int nSelect = select(have_fds ? hSocketMax + 1 : 0,
            &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
        boost::this_thread::interruption_point();
        if (nSelect == SOCKET_ERROR) {
            if (have_fds) {
                int nErr = WSAGetLastError();
                LogPrintf("socket select error %s\n", NetworkErrorString(nErr));
                // Fall back to treating every descriptor as readable.
                for (unsigned int i = 0; i <= hSocketMax; i++)
                    FD_SET(i, &fdsetRecv);
            }
            FD_ZERO(&fdsetSend);
            FD_ZERO(&fdsetError);
            MilliSleep(timeout.tv_usec / 1000);
        }
        //
        // Accept new connections
        //
        BOOST_FOREACH (const ListenSocket& hListenSocket, vhListenSocket) {
            if (hListenSocket.socket != INVALID_SOCKET && FD_ISSET(hListenSocket.socket, &fdsetRecv)) {
                struct sockaddr_storage sockaddr;
                socklen_t len = sizeof(sockaddr);
                SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr*)&sockaddr, &len);
                CAddress addr;
                int nInbound = 0;
                if (hSocket != INVALID_SOCKET)
                    if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr))
                        LogPrintf("Warning: Unknown socket family\n");
                bool whitelisted = hListenSocket.whitelisted || CNode::IsWhitelistedRange(addr);
                {
                    LOCK(cs_vNodes);
                    BOOST_FOREACH (CNode* pnode, vNodes)
                        if (pnode->fInbound)
                            nInbound++;
                }
                // Reject the connection when accept failed, the fd is not
                // selectable, inbound slots are exhausted, or the peer is banned.
                if (hSocket == INVALID_SOCKET) {
                    int nErr = WSAGetLastError();
                    if (nErr != WSAEWOULDBLOCK)
                        LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr));
                } else if (!IsSelectableSocket(hSocket)) {
                    LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString());
                    CloseSocket(hSocket);
                } else if (nInbound >= nMaxConnections - MAX_OUTBOUND_CONNECTIONS) {
                    LogPrint("net", "connection from %s dropped (full)\n", addr.ToString());
                    CloseSocket(hSocket);
                } else if (CNode::IsBanned(addr) && !whitelisted) {
                    LogPrintf("connection from %s dropped (banned)\n", addr.ToString());
                    CloseSocket(hSocket);
                } else {
                    CNode* pnode = new CNode(hSocket, addr, "", true);
                    pnode->AddRef();
                    pnode->fWhitelisted = whitelisted;
                    {
                        LOCK(cs_vNodes);
                        vNodes.push_back(pnode);
                    }
                }
            }
        }
        //
        // Service each socket
        //
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            // Reference each node so it survives while we service it unlocked.
            BOOST_FOREACH (CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }
        BOOST_FOREACH (CNode* pnode, vNodesCopy) {
            boost::this_thread::interruption_point();
            //
            // Receive
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetRecv) || FD_ISSET(pnode->hSocket, &fdsetError)) {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv) {
                    {
                        // typical socket buffer is 8K-64K
                        char pchBuf[0x10000];
                        int nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
                        if (nBytes > 0) {
                            if (!pnode->ReceiveMsgBytes(pchBuf, nBytes))
                                pnode->CloseSocketDisconnect();
                            pnode->nLastRecv = GetTime();
                            pnode->nRecvBytes += nBytes;
                            pnode->RecordBytesRecv(nBytes);
                        } else if (nBytes == 0) {
                            // socket closed gracefully
                            if (!pnode->fDisconnect)
                                LogPrint("net", "socket closed\n");
                            pnode->CloseSocketDisconnect();
                        } else if (nBytes < 0) {
                            // error
                            int nErr = WSAGetLastError();
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
                                if (!pnode->fDisconnect)
                                    LogPrintf("socket recv error %s\n", NetworkErrorString(nErr));
                                pnode->CloseSocketDisconnect();
                            }
                        }
                    }
                }
            }
            //
            // Send
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetSend)) {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    SocketSendData(pnode);
            }
            //
            // Inactivity checking
            //
            int64_t nTime = GetTime();
            if (nTime - pnode->nTimeConnected > 60) {
                if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) {
                    LogPrint("net", "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
                    pnode->fDisconnect = true;
                } else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) {
                    LogPrintf("socket sending timeout: %is\n", nTime - pnode->nLastSend);
                    pnode->fDisconnect = true;
                } else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90 * 60)) {
                    LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv);
                    pnode->fDisconnect = true;
                } else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) {
                    LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart));
                    pnode->fDisconnect = true;
                }
            }
        }
        {
            LOCK(cs_vNodes);
            // Drop the references taken before servicing.
            BOOST_FOREACH (CNode* pnode, vNodesCopy)
                pnode->Release();
        }
    }
}
#ifdef USE_UPNP
// Establishes a UPnP port mapping for the P2P listen port and refreshes it
// every 20 minutes; on thread interruption the mapping is removed and the
// miniupnpc resources are freed. Three miniupnpc API generations are
// supported via compile-time version checks.
void ThreadMapPort()
{
    std::string port = strprintf("%u", GetListenPort());
    const char* multicastif = 0;
    const char* minissdpdpath = 0;
    struct UPNPDev* devlist = 0;
    char lanaddr[64];
#ifndef UPNPDISCOVER_SUCCESS
    /* miniupnpc 1.5 */
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0);
#elif MINIUPNPC_API_VERSION < 14
    /* miniupnpc 1.6 */
    int error = 0;
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
#else
    /* miniupnpc 1.9.20150730 */
    int error = 0;
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error);
#endif
    struct UPNPUrls urls;
    struct IGDdatas data;
    int r;
    // r == 1 indicates a valid, connected Internet Gateway Device was found.
    r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr));
    if (r == 1) {
        if (fDiscover) {
            // Ask the gateway for our external address and advertise it.
            char externalIPAddress[40];
            r = UPNP_GetExternalIPAddress(urls.controlURL, data.first.servicetype, externalIPAddress);
            if (r != UPNPCOMMAND_SUCCESS)
                LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r);
            else {
                if (externalIPAddress[0]) {
                    LogPrintf("UPnP: ExternalIPAddress = %s\n", externalIPAddress);
                    AddLocal(CNetAddr(externalIPAddress), LOCAL_UPNP);
                } else
                    LogPrintf("UPnP: GetExternalIPAddress failed.\n");
            }
        }
        string strDesc = "Iox " + FormatFullVersion();
        try {
            // Re-add the mapping periodically; some routers expire mappings.
            while (true) {
#ifndef UPNPDISCOVER_SUCCESS
                /* miniupnpc 1.5 */
                r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                    port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0);
#else
                /* miniupnpc 1.6 */
                r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                    port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0");
#endif
                if (r != UPNPCOMMAND_SUCCESS)
                    LogPrintf("AddPortMapping(%s, %s, %s) failed with code %d (%s)\n",
                        port, port, lanaddr, r, strupnperror(r));
                else
                    LogPrintf("UPnP Port Mapping successful.\n");
                ;
                MilliSleep(20 * 60 * 1000); // Refresh every 20 minutes
            }
        } catch (boost::thread_interrupted) {
            // Clean shutdown: remove the mapping and release miniupnpc state,
            // then re-throw so the thread wrapper sees the interruption.
            r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0);
            LogPrintf("UPNP_DeletePortMapping() returned : %d\n", r);
            freeUPNPDevlist(devlist);
            devlist = 0;
            FreeUPNPUrls(&urls);
            throw;
        }
    } else {
        LogPrintf("No valid UPnP IGDs found\n");
        freeUPNPDevlist(devlist);
        devlist = 0;
        // NOTE(review): urls is only initialized by UPNP_GetValidIGD when it
        // returns non-zero, hence the r != 0 guard before freeing.
        if (r != 0)
            FreeUPNPUrls(&urls);
    }
}
// Starts or stops the UPnP mapping thread. Enabling while a thread is
// already running restarts it; disabling stops it.
void MapPort(bool fUseUPnP)
{
    static boost::thread* upnp_thread = NULL;
    // Tear down any running mapping thread first, in every case.
    if (upnp_thread != NULL) {
        upnp_thread->interrupt();
        upnp_thread->join();
        delete upnp_thread;
        upnp_thread = NULL;
    }
    if (fUseUPnP)
        upnp_thread = new boost::thread(boost::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort));
}
#else
// UPnP support not compiled in (USE_UPNP undefined): port mapping is a no-op.
void MapPort(bool)
{
    // Intentionally left blank.
}
#endif
// Queries the chain-parameter DNS seeds to populate the address manager,
// but only when the local address database looks insufficient (or
// -forcednsseed is set). With a name proxy, seeds are queued as one-shot
// connections instead of being resolved locally.
void ThreadDNSAddressSeed()
{
    // goal: only query DNS seeds if address need is acute
    if ((addrman.size() > 0) &&
        (!GetBoolArg("-forcednsseed", false))) {
        // Give the P2P layer a moment to establish connections first.
        MilliSleep(11 * 1000);
        LOCK(cs_vNodes);
        if (vNodes.size() >= 2) {
            LogPrintf("P2P peers available. Skipped DNS seeding.\n");
            return;
        }
    }
    const vector<CDNSSeedData>& vSeeds = Params().DNSSeeds();
    int found = 0;
    LogPrintf("Loading addresses from DNS seeds (could take a while)\n");
    BOOST_FOREACH (const CDNSSeedData& seed, vSeeds) {
        if (HaveNameProxy()) {
            // Cannot resolve locally through a name proxy; connect by name.
            AddOneShot(seed.host);
        } else {
            vector<CNetAddr> vIPs;
            vector<CAddress> vAdd;
            if (LookupHost(seed.host.c_str(), vIPs)) {
                BOOST_FOREACH (CNetAddr& ip, vIPs) {
                    int nOneDay = 24 * 3600;
                    CAddress addr = CAddress(CService(ip, Params().GetDefaultPort()));
                    addr.nTime = GetTime() - 3 * nOneDay - GetRand(4 * nOneDay); // use a random age between 3 and 7 days old
                    vAdd.push_back(addr);
                    found++;
                }
            }
            // Attribute the addresses to the seed's name for addrman bookkeeping.
            addrman.Add(vAdd, CNetAddr(seed.name, true));
        }
    }
    LogPrintf("%d addresses found from DNS seeds\n", found);
}
void DumpAddresses()
{
int64_t nStart = GetTimeMillis();
CAddrDB adb;
adb.Write(addrman);
LogPrint("net", "Flushed %d addresses to peers.dat %dms\n",
addrman.size(), GetTimeMillis() - nStart);
}
// Flush peer addresses to disk, plus the ban list when it has
// unpersisted changes.
void DumpData()
{
    DumpAddresses();
    // Only rewrite banlist.dat when the in-memory ban set was modified.
    if (!CNode::BannedSetIsDirty())
        return;
    DumpBanlist();
    CNode::SetBannedSetDirty(false);
}
// Pops one pending one-shot destination (e.g. a DNS seed host queued via
// AddOneShot) and attempts a connection to it; the destination is
// re-queued if the connection attempt fails.
void static ProcessOneShot()
{
    string strDest;
    {
        LOCK(cs_vOneShots);
        if (vOneShots.empty())
            return;
        strDest = vOneShots.front();
        vOneShots.pop_front();
    }
    CAddress addr;
    // Second argument presumably requests a non-blocking (try) acquire so
    // this path never stalls when all outbound slots are busy — TODO confirm
    // against CSemaphoreGrant's constructor.
    CSemaphoreGrant grant(*semOutbound, true);
    if (grant) {
        if (!OpenNetworkConnection(addr, &grant, strDest.c_str(), true))
            AddOneShot(strDest);
    }
}
// Maintains outbound connections. With -connect, connects only to the
// listed peers in a loop. Otherwise it repeatedly picks candidate
// addresses from addrman (one peer per /16 network group), falling back
// to the hard-coded fixed seeds if addrman stays empty for 60 seconds.
void ThreadOpenConnections()
{
    // Connect to specific addresses
    if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0) {
        for (int64_t nLoop = 0;; nLoop++) {
            ProcessOneShot();
            BOOST_FOREACH (string strAddr, mapMultiArgs["-connect"]) {
                // addr stays invalid; strAddr is passed as pszDest so the
                // connection is made by the literal host string.
                CAddress addr;
                OpenNetworkConnection(addr, NULL, strAddr.c_str());
                // Back off progressively on each pass through the list
                // (up to 5 seconds between entries).
                for (int i = 0; i < 10 && i < nLoop; i++) {
                    MilliSleep(500);
                }
            }
            MilliSleep(500);
        }
    }
    // Initiate network connections
    int64_t nStart = GetTime();
    while (true) {
        ProcessOneShot();
        MilliSleep(500);
        // Blocks until an outbound connection slot is available.
        CSemaphoreGrant grant(*semOutbound);
        boost::this_thread::interruption_point();
        // Add seed nodes if DNS seeds are all down (an infrastructure attack?).
        if (addrman.size() == 0 && (GetTime() - nStart > 60)) {
            static bool done = false;
            if (!done) {
                LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n");
                addrman.Add(Params().FixedSeeds(), CNetAddr("127.0.0.1"));
                done = true;
            }
        }
        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;
        // Only connect out to one peer per network group (/16 for IPv4).
        // Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
        int nOutbound = 0;
        set<vector<unsigned char> > setConnected;
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodes) {
                if (!pnode->fInbound) {
                    setConnected.insert(pnode->addr.GetGroup());
                    // NOTE(review): nOutbound is tallied but never consulted
                    // below in this function.
                    nOutbound++;
                }
            }
        }
        int64_t nANow = GetAdjustedTime();
        int nTries = 0;
        while (true) {
            CAddress addr = addrman.Select();
            // if we selected an invalid address, restart
            if (!addr.IsValid() || setConnected.count(addr.GetGroup()) || IsLocal(addr))
                break;
            // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
            // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
            // already-connected network ranges, ...) before trying new addrman addresses.
            nTries++;
            if (nTries > 100)
                break;
            if (IsLimited(addr))
                continue;
            // only consider very recently tried nodes after 30 failed attempts
            if (nANow - addr.nLastTry < 600 && nTries < 30)
                continue;
            // do not allow non-default ports, unless after 50 invalid addresses selected already
            if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50)
                continue;
            addrConnect = addr;
            break;
        }
        if (addrConnect.IsValid())
            OpenNetworkConnection(addrConnect, &grant);
    }
}
// Maintains connections to peers given via -addnode. Entries are resolved
// (possibly to several IPs each), entries we are already connected to are
// dropped, and one IP per remaining entry is tried every 2 minutes. With a
// name proxy, names are handed to the proxy unresolved.
//
// Fix: the previous de-duplication loop did `it = erase(it); it--;`, which
// is undefined behaviour when the erased element is the first in the list
// (decrementing begin()). Rewritten with an explicit advance/erase pattern;
// behaviour is otherwise unchanged.
void ThreadOpenAddedConnections()
{
    // Seed the added-node list from the -addnode command line arguments.
    {
        LOCK(cs_vAddedNodes);
        vAddedNodes = mapMultiArgs["-addnode"];
    }
    if (HaveNameProxy()) {
        // Names cannot be resolved locally; hand each one to the proxy and
        // retry the whole list every 2 minutes.
        while (true) {
            list<string> lAddresses(0);
            {
                LOCK(cs_vAddedNodes);
                BOOST_FOREACH (string& strAddNode, vAddedNodes)
                    lAddresses.push_back(strAddNode);
            }
            BOOST_FOREACH (string& strAddNode, lAddresses) {
                CAddress addr;
                CSemaphoreGrant grant(*semOutbound);
                OpenNetworkConnection(addr, &grant, strAddNode.c_str());
                MilliSleep(500);
            }
            MilliSleep(120000); // Retry every 2 minutes
        }
    }
    for (unsigned int i = 0; true; i++) {
        // Snapshot the current -addnode list under lock.
        list<string> lAddresses(0);
        {
            LOCK(cs_vAddedNodes);
            BOOST_FOREACH (string& strAddNode, vAddedNodes)
                lAddresses.push_back(strAddNode);
        }
        // Resolve every addnode entry to its list of candidate services.
        list<vector<CService> > lservAddressesToAdd(0);
        BOOST_FOREACH (string& strAddNode, lAddresses) {
            vector<CService> vservNode(0);
            if (Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0)) {
                lservAddressesToAdd.push_back(vservNode);
                {
                    LOCK(cs_setservAddNodeAddresses);
                    BOOST_FOREACH (CService& serv, vservNode)
                        setservAddNodeAddresses.insert(serv);
                }
            }
        }
        // Attempt to connect to each IP for each addnode entry until at least one is successful per addnode entry
        // (keeping in mind that addnode entries can have many IPs if fNameLookup)
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodes) {
                // Drop every entry that resolves to an address we are
                // already connected to.
                list<vector<CService> >::iterator it = lservAddressesToAdd.begin();
                while (it != lservAddressesToAdd.end()) {
                    bool fAlreadyConnected = false;
                    BOOST_FOREACH (CService& addrNode, *(it))
                        if (pnode->addr == addrNode) {
                            fAlreadyConnected = true;
                            break;
                        }
                    if (fAlreadyConnected)
                        it = lservAddressesToAdd.erase(it);
                    else
                        ++it;
                }
            }
        }
        BOOST_FOREACH (vector<CService>& vserv, lservAddressesToAdd) {
            CSemaphoreGrant grant(*semOutbound);
            // Rotate through each entry's candidate IPs across passes.
            OpenNetworkConnection(CAddress(vserv[i % vserv.size()]), &grant);
            MilliSleep(500);
        }
        MilliSleep(120000); // Retry every 2 minutes
    }
}
// Initiates one outbound connection, either to addrConnect or — when
// pszDest is given — to that host string. Returns false without connecting
// when the target is local, banned, or already connected. On success the
// semaphore grant (if any) is moved into the new node so the outbound slot
// stays held for the connection's lifetime.
// if successful, this moves the passed grant to the constructed node
bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant* grantOutbound, const char* pszDest, bool fOneShot)
{
    //
    // Initiate outbound network connection
    //
    boost::this_thread::interruption_point();
    if (!pszDest) {
        // Reject duplicates both by network address and by ip:port string.
        if (IsLocal(addrConnect) ||
            FindNode((CNetAddr)addrConnect) || CNode::IsBanned(addrConnect) ||
            FindNode(addrConnect.ToStringIPPort()))
            return false;
    } else if (FindNode(pszDest))
        return false;
    CNode* pnode = ConnectNode(addrConnect, pszDest);
    boost::this_thread::interruption_point();
    if (!pnode)
        return false;
    if (grantOutbound)
        grantOutbound->MoveTo(pnode->grantOutbound);
    pnode->fNetworkNode = true;
    if (fOneShot)
        pnode->fOneShot = true;
    return true;
}
// Main message-processing loop: for every connected node, processes queued
// received messages and generates outgoing messages via g_signals. Sleeps
// briefly (condition wait, max 100ms) when no node has work pending.
void ThreadMessageHandler()
{
    boost::mutex condition_mutex;
    boost::unique_lock<boost::mutex> lock(condition_mutex);
    SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
    while (true) {
        // Snapshot vNodes and take a reference on each node so entries
        // cannot be destroyed while we work outside the lock.
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH (CNode* pnode, vNodesCopy) {
                pnode->AddRef();
            }
        }
        // Poll the connected nodes for messages
        // One randomly chosen "trickle" node gets its messages sent eagerly
        // this round (address relay trickling).
        CNode* pnodeTrickle = NULL;
        if (!vNodesCopy.empty())
            pnodeTrickle = vNodesCopy[GetRand(vNodesCopy.size())];
        bool fSleep = true;
        BOOST_FOREACH (CNode* pnode, vNodesCopy) {
            if (pnode->fDisconnect)
                continue;
            // Receive messages
            {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv) {
                    if (!g_signals.ProcessMessages(pnode))
                        pnode->CloseSocketDisconnect();
                    // Skip the end-of-loop sleep while this node still has
                    // complete messages or pending getdata to service.
                    if (pnode->nSendSize < SendBufferSize()) {
                        if (!pnode->vRecvGetData.empty() || (!pnode->vRecvMsg.empty() && pnode->vRecvMsg[0].complete())) {
                            fSleep = false;
                        }
                    }
                }
            }
            boost::this_thread::interruption_point();
            // Send messages
            {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    g_signals.SendMessages(pnode, pnode == pnodeTrickle || pnode->fWhitelisted);
            }
            boost::this_thread::interruption_point();
        }
        // Release the references taken above.
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodesCopy)
                pnode->Release();
        }
        if (fSleep)
            messageHandlerCondition.timed_wait(lock, boost::posix_time::microsec_clock::universal_time() + boost::posix_time::milliseconds(100));
    }
}
// ppcoin: stake minter thread
void static ThreadStakeMinter()
{
boost::this_thread::interruption_point();
LogPrintf("ThreadStakeMinter started\n");
CWallet* pwallet = pwalletMain;
try {
BitcoinMiner(pwallet, true);
boost::this_thread::interruption_point();
} catch (std::exception& e) {
LogPrintf("ThreadStakeMinter() exception \n");
} catch (...) {
LogPrintf("ThreadStakeMinter() error \n");
}
LogPrintf("ThreadStakeMinter exiting,\n");
}
// Creates, configures, binds, and listens on a socket for addrBind.
// On failure returns false and reports the reason via strError. On success
// the socket is appended to vhListenSocket (tagged whitelisted when
// fWhitelisted) and, when routable and discovery is on, the address is
// advertised as a local address.
bool BindListenPort(const CService& addrBind, string& strError, bool fWhitelisted)
{
    strError = "";
    int nOne = 1;
    // Create socket for listening for incoming connections
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
        strError = strprintf("Error: Bind address family for %s not supported", addrBind.ToString());
        LogPrintf("%s\n", strError);
        return false;
    }
    SOCKET hListenSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
    if (hListenSocket == INVALID_SOCKET) {
        strError = strprintf("Error: Couldn't open socket for incoming connections (socket returned error %s)", NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError);
        return false;
    }
    // select() can only handle descriptors below its fd limit.
    if (!IsSelectableSocket(hListenSocket)) {
        strError = "Error: Couldn't create a listenable socket for incoming connections";
        LogPrintf("%s\n", strError);
        return false;
    }
#ifndef WIN32
#ifdef SO_NOSIGPIPE
    // Different way of disabling SIGPIPE on BSD
    setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&nOne, sizeof(int));
#endif
    // Allow binding if the port is still in TIME_WAIT state after
    // the program was closed and restarted. Not an issue on windows!
    setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void*)&nOne, sizeof(int));
#endif
    // Set to non-blocking, incoming connections will also inherit this
    if (!SetSocketNonBlocking(hListenSocket, true)) {
        strError = strprintf("BindListenPort: Setting listening socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError);
        return false;
    }
    // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option
    // and enable it by default or not. Try to enable it, if possible.
    if (addrBind.IsIPv6()) {
#ifdef IPV6_V6ONLY
#ifdef WIN32
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&nOne, sizeof(int));
#else
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&nOne, sizeof(int));
#endif
#endif
#ifdef WIN32
        // Permit unrestricted (e.g. Teredo) IPv6 traffic on Windows.
        int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED;
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int));
#endif
    }
    if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) {
        int nErr = WSAGetLastError();
        if (nErr == WSAEADDRINUSE)
            strError = strprintf(_("Unable to bind to %s on this computer. Iox Core is probably already running."), addrBind.ToString());
        else
            strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr));
        LogPrintf("%s\n", strError);
        CloseSocket(hListenSocket);
        return false;
    }
    LogPrintf("Bound to %s\n", addrBind.ToString());
    // Listen for incoming connections
    if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) {
        strError = strprintf(_("Error: Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError);
        CloseSocket(hListenSocket);
        return false;
    }
    vhListenSocket.push_back(ListenSocket(hListenSocket, fWhitelisted));
    if (addrBind.IsRoutable() && fDiscover && !fWhitelisted)
        AddLocal(addrBind, LOCAL_BIND);
    return true;
}
// Discovers this host's local addresses (hostname lookup on Windows,
// interface enumeration elsewhere) and registers routable ones via
// AddLocal so they can be advertised to peers. Does nothing when
// -discover is off.
// NOTE(review): the threadGroup parameter is unused in this function.
void static Discover(boost::thread_group& threadGroup)
{
    if (!fDiscover)
        return;
#ifdef WIN32
    // Get local host IP
    char pszHostName[256] = "";
    if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) {
        vector<CNetAddr> vaddr;
        if (LookupHost(pszHostName, vaddr)) {
            BOOST_FOREACH (const CNetAddr& addr, vaddr) {
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString());
            }
        }
    }
#else
    // Get local host ip
    struct ifaddrs* myaddrs;
    if (getifaddrs(&myaddrs) == 0) {
        for (struct ifaddrs* ifa = myaddrs; ifa != NULL; ifa = ifa->ifa_next) {
            // Skip down interfaces and loopback devices.
            if (ifa->ifa_addr == NULL) continue;
            if ((ifa->ifa_flags & IFF_UP) == 0) continue;
            if (strcmp(ifa->ifa_name, "lo") == 0) continue;
            if (strcmp(ifa->ifa_name, "lo0") == 0) continue;
            if (ifa->ifa_addr->sa_family == AF_INET) {
                struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
                CNetAddr addr(s4->sin_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString());
            } else if (ifa->ifa_addr->sa_family == AF_INET6) {
                struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
                CNetAddr addr(s6->sin6_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString());
            }
        }
        freeifaddrs(myaddrs);
    }
#endif
}
// Boots the networking layer: loads peers.dat and banlist.dat, creates the
// outbound-connection semaphore and the localhost pseudo-node, runs local
// address discovery, then launches the worker threads (DNS seeding, socket
// handling, outbound/addnode connections, message handling, optional
// staking) and schedules periodic address/banlist dumps.
void StartNode(boost::thread_group& threadGroup, CScheduler& scheduler)
{
    uiInterface.InitMessage(_("Loading addresses..."));
    // Load addresses for peers.dat
    int64_t nStart = GetTimeMillis();
    {
        CAddrDB adb;
        if (!adb.Read(addrman))
            LogPrintf("Invalid or missing peers.dat; recreating\n");
    }
    //try to read stored banlist
    CBanDB bandb;
    banmap_t banmap;
    if (!bandb.Read(banmap))
        LogPrintf("Invalid or missing banlist.dat; recreating\n");
    CNode::SetBanned(banmap);        //thread save setter
    CNode::SetBannedSetDirty(false); //no need to write down just read or nonexistent data
    CNode::SweepBanned();            //sweap out unused entries
    LogPrintf("Loaded %i addresses from peers.dat %dms\n",
        addrman.size(), GetTimeMillis() - nStart);
    // Enables DumpData() during shutdown; see StopNode().
    fAddressesInitialized = true;
    if (semOutbound == NULL) {
        // initialize semaphore
        int nMaxOutbound = min(MAX_OUTBOUND_CONNECTIONS, nMaxConnections);
        semOutbound = new CSemaphore(nMaxOutbound);
    }
    if (pnodeLocalHost == NULL)
        pnodeLocalHost = new CNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), nLocalServices));
    Discover(threadGroup);
    //
    // Start threads
    //
    if (!GetBoolArg("-dnsseed", true))
        LogPrintf("DNS seeding disabled\n");
    else
        threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "dnsseed", &ThreadDNSAddressSeed));
    // Map ports with UPnP
    MapPort(GetBoolArg("-upnp", DEFAULT_UPNP));
    // Send and receive from sockets, accept connections
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "net", &ThreadSocketHandler));
    // Initiate outbound connections from -addnode
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "addcon", &ThreadOpenAddedConnections));
    // Initiate outbound connections
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "opencon", &ThreadOpenConnections));
    // Process messages
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "msghand", &ThreadMessageHandler));
    // Dump network addresses
    scheduler.scheduleEvery(&DumpData, DUMP_ADDRESSES_INTERVAL);
    // ppcoin:mint proof-of-stake blocks in the background
    if (GetBoolArg("-staking", true))
        threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "stakemint", &ThreadStakeMinter));
}
// Shuts the networking layer down: removes any UPnP mapping, wakes every
// thread blocked on the outbound semaphore so it can observe shutdown, and
// flushes addresses/banlist to disk if they were loaded.
bool StopNode()
{
    LogPrintf("StopNode()\n");
    MapPort(false);
    if (semOutbound) {
        for (int i = 0; i < MAX_OUTBOUND_CONNECTIONS; i++) {
            semOutbound->post();
        }
    }
    if (fAddressesInitialized) {
        DumpData();
        fAddressesInitialized = false;
    }
    return true;
}
// Static-destructor helper: the global instance below runs this destructor
// at process exit, closing all sockets and freeing node objects and other
// network globals (mainly to help leak detection).
class CNetCleanup
{
public:
    CNetCleanup() {}
    ~CNetCleanup()
    {
        // Close sockets
        BOOST_FOREACH (CNode* pnode, vNodes)
            if (pnode->hSocket != INVALID_SOCKET)
                CloseSocket(pnode->hSocket);
        BOOST_FOREACH (ListenSocket& hListenSocket, vhListenSocket)
            if (hListenSocket.socket != INVALID_SOCKET)
                if (!CloseSocket(hListenSocket.socket))
                    LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError()));
        // clean up some globals (to help leak detection)
        BOOST_FOREACH (CNode* pnode, vNodes)
            delete pnode;
        BOOST_FOREACH (CNode* pnode, vNodesDisconnected)
            delete pnode;
        vNodes.clear();
        vNodesDisconnected.clear();
        vhListenSocket.clear();
        delete semOutbound;
        semOutbound = NULL;
        delete pnodeLocalHost;
        pnodeLocalHost = NULL;
#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
} instance_of_cnetcleanup;
void CExplicitNetCleanup::callCleanup()
{
    // Explicit call to destructor of CNetCleanup because it's not implicitly called
    // when the wallet is restarted from within the wallet itself.
    // A block-scoped instance runs ~CNetCleanup at end of this function,
    // replacing the previous raw new/delete pair with RAII.
    CNetCleanup tmp;
    (void)tmp;
}
// Relay a transaction: serialize it once here, then delegate to the
// two-argument overload which caches and announces the bytes.
void RelayTransaction(const CTransaction& tx)
{
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    ss.reserve(10000);
    ss << tx;
    RelayTransaction(tx, ss);
}
// Caches the serialized transaction in mapRelay (15-minute expiry, served
// for later getdata requests) and pushes its inventory to every peer that
// accepts transaction relay, honouring each peer's bloom filter if set.
void RelayTransaction(const CTransaction& tx, const CDataStream& ss)
{
    CInv inv(MSG_TX, tx.GetHash());
    {
        LOCK(cs_mapRelay);
        // Expire old relay messages
        while (!vRelayExpiration.empty() && vRelayExpiration.front().first < GetTime()) {
            mapRelay.erase(vRelayExpiration.front().second);
            vRelayExpiration.pop_front();
        }
        // Save original serialized message so newer versions are preserved
        mapRelay.insert(std::make_pair(inv, ss));
        vRelayExpiration.push_back(std::make_pair(GetTime() + 15 * 60, inv));
    }
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if (!pnode->fRelayTxes)
            continue;
        LOCK(pnode->cs_filter);
        if (pnode->pfilter) {
            // Announce only when the peer's filter matches (this call also
            // updates the filter as a side effect).
            if (pnode->pfilter->IsRelevantAndUpdate(tx))
                pnode->PushInventory(inv);
        } else
            pnode->PushInventory(inv);
    }
}
// Broadcast an "ix" (transaction lock request) message to peers; when
// relayToAll is set, peers that opted out of tx relay are included too.
void RelayTransactionLockReq(const CTransaction& tx, bool relayToAll)
{
    CInv inv(MSG_TXLOCK_REQUEST, tx.GetHash());
    // broadcast the new lock
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if (relayToAll || pnode->fRelayTxes)
            pnode->PushMessage("ix", tx);
    }
}
// Push an inventory announcement to all peers speaking a recent-enough
// protocol; masternode-specific inv types are skipped for peers that
// advertise NODE_BLOOM_WITHOUT_MN.
void RelayInv(CInv& inv)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if ((pnode->nServices == NODE_BLOOM_WITHOUT_MN) && inv.IsMasterNodeType())
            continue;
        if (pnode->nVersion >= ActiveProtocol())
            pnode->PushInventory(inv);
    }
}
// Add to the process-wide received-byte counter (guarded by its own lock).
void CNode::RecordBytesRecv(uint64_t bytes)
{
    LOCK(cs_totalBytesRecv);
    nTotalBytesRecv += bytes;
}
// Add to the process-wide sent-byte counter (guarded by its own lock).
void CNode::RecordBytesSent(uint64_t bytes)
{
    LOCK(cs_totalBytesSent);
    nTotalBytesSent += bytes;
}
// Snapshot of the process-wide received-byte counter.
uint64_t CNode::GetTotalBytesRecv()
{
    LOCK(cs_totalBytesRecv);
    return nTotalBytesRecv;
}
// Snapshot of the process-wide sent-byte counter.
uint64_t CNode::GetTotalBytesSent()
{
    LOCK(cs_totalBytesSent);
    return nTotalBytesSent;
}
// Development-only message corruption (-fuzzmessagestest): with probability
// 1/nChance, randomly xor, delete, or insert one byte in the pending
// outbound stream, then recurse with nChance=2 so multiple mutations are
// possible but exponentially less likely.
void CNode::Fuzz(int nChance)
{
    if (!fSuccessfullyConnected) return; // Don't fuzz initial handshake
    if (GetRand(nChance) != 0) return;   // Fuzz 1 of every nChance messages
    switch (GetRand(3)) {
    case 0:
        // xor a random byte with a random value:
        if (!ssSend.empty()) {
            CDataStream::size_type pos = GetRand(ssSend.size());
            ssSend[pos] ^= (unsigned char)(GetRand(256));
        }
        break;
    case 1:
        // delete a random byte:
        if (!ssSend.empty()) {
            CDataStream::size_type pos = GetRand(ssSend.size());
            ssSend.erase(ssSend.begin() + pos);
        }
        break;
    case 2:
        // insert a random byte at a random position
        {
            CDataStream::size_type pos = GetRand(ssSend.size());
            char ch = (char)GetRand(256);
            ssSend.insert(ssSend.begin() + pos, ch);
        }
        break;
    }
    // Chance of more than one change half the time:
    // (more changes exponentially less likely):
    Fuzz(2);
}
//
// CAddrDB
//
// Address database: serializes CAddrMan to <datadir>/peers.dat.
CAddrDB::CAddrDB()
{
    pathAddr = GetDataDir() / "peers.dat";
}
bool CAddrDB::Write(const CAddrMan& addr)
{
// Generate random temporary filename
unsigned short randv = 0;
GetRandBytes((unsigned char*)&randv, sizeof(randv));
std::string tmpfn = strprintf("peers.dat.%04x", randv);
// serialize addresses, checksum data up to that point, then append csum
CDataStream ssPeers(SER_DISK, CLIENT_VERSION);
ssPeers << FLATDATA(Params().MessageStart());
ssPeers << addr;
uint256 hash = Hash(ssPeers.begin(), ssPeers.end());
ssPeers << hash;
// open output file, and associate with CAutoFile
boost::filesystem::path pathAddr = GetDataDir() / "peers.dat";
FILE* file = fopen(pathAddr.string().c_str(), "wb");
CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
if (fileout.IsNull())
return error("%s : Failed to open file %s", __func__, pathAddr.string());
// Write and commit header, data
try {
fileout << ssPeers;
} catch (std::exception& e) {
return error("%s : Serialize or I/O error - %s", __func__, e.what());
}
FileCommit(fileout.Get());
fileout.fclose();
return true;
}
// Load peers.dat into the address manager, verifying the appended SHA256d
// checksum and the network magic bytes. Returns false (without modifying
// addr on header/checksum failure paths) on any I/O or validation error.
//
// Fix: dataSize was initialized to fileSize - sizeof(uint256) BEFORE the
// small-file guard, so a truncated file (< 32 bytes) caused unsigned
// underflow and a huge vector resize. Initialize to 0 first, matching
// CBanDB::Read.
bool CAddrDB::Read(CAddrMan& addr)
{
    // open input file, and associate with CAutoFile
    FILE* file = fopen(pathAddr.string().c_str(), "rb");
    CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s : Failed to open file %s", __func__, pathAddr.string());
    // use file size to size memory buffer
    uint64_t fileSize = boost::filesystem::file_size(pathAddr);
    uint64_t dataSize = 0;
    // Don't try to resize to a negative number if file is small
    if (fileSize >= sizeof(uint256))
        dataSize = fileSize - sizeof(uint256);
    vector<unsigned char> vchData;
    vchData.resize(dataSize);
    uint256 hashIn;
    // read data and checksum from file
    try {
        filein.read((char*)&vchData[0], dataSize);
        filein >> hashIn;
    } catch (std::exception& e) {
        return error("%s : Deserialize or I/O error - %s", __func__, e.what());
    }
    filein.fclose();
    CDataStream ssPeers(vchData, SER_DISK, CLIENT_VERSION);
    // verify stored checksum matches input data
    uint256 hashTmp = Hash(ssPeers.begin(), ssPeers.end());
    if (hashIn != hashTmp)
        return error("%s : Checksum mismatch, data corrupted", __func__);
    unsigned char pchMsgTmp[4];
    try {
        // de-serialize file header (network specific magic number) and ..
        ssPeers >> FLATDATA(pchMsgTmp);
        // ... verify the network matches ours
        if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
            return error("%s : Invalid network magic number", __func__);
        // de-serialize address data into one CAddrMan object
        ssPeers >> addr;
    } catch (std::exception& e) {
        return error("%s : Deserialize or I/O error - %s", __func__, e.what());
    }
    return true;
}
// Maximum receive-buffer size in bytes; -maxreceivebuffer is given in KB
// (default 5000 KB).
unsigned int ReceiveFloodSize()
{
    return 1000 * GetArg("-maxreceivebuffer", 5 * 1000);
}
// Maximum send-buffer size in bytes; -maxsendbuffer is given in KB
// (default 1000 KB).
unsigned int SendBufferSize()
{
    return 1000 * GetArg("-maxsendbuffer", 1 * 1000);
}
// Construct a peer connection wrapper around an already-connected (or
// invalid, for the localhost pseudo-node) socket. Outbound peers are sent
// our version message immediately; inbound peers are not ("be shy").
CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fInboundIn) : ssSend(SER_NETWORK, INIT_PROTO_VERSION), setAddrKnown(5000)
{
    nServices = 0;
    hSocket = hSocketIn;
    nRecvVersion = INIT_PROTO_VERSION;
    nLastSend = 0;
    nLastRecv = 0;
    nSendBytes = 0;
    nRecvBytes = 0;
    nTimeConnected = GetTime();
    addr = addrIn;
    // Fall back to the ip:port string when no name was supplied.
    addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
    nVersion = 0;
    strSubVer = "";
    fWhitelisted = false;
    fOneShot = false;
    fClient = false; // set by version message
    fInbound = fInboundIn;
    fNetworkNode = false;
    fSuccessfullyConnected = false;
    fDisconnect = false;
    nRefCount = 0;
    nSendSize = 0;
    nSendOffset = 0;
    hashContinue = 0;
    nStartingHeight = -1;
    fGetAddr = false;
    fRelayTxes = false;
    setInventoryKnown.max_size(SendBufferSize() / 1000);
    // Owned; freed in ~CNode().
    pfilter = new CBloomFilter();
    nPingNonceSent = 0;
    nPingUsecStart = 0;
    nPingUsecTime = 0;
    fPingQueued = false;
    {
        LOCK(cs_nLastNodeId);
        id = nLastNodeId++;
    }
    if (fLogIPs)
        LogPrint("net", "Added connection to %s peer=%d\n", addrName, id);
    else
        LogPrint("net", "Added connection peer=%d\n", id);
    // Be shy and don't send version until we hear
    if (hSocket != INVALID_SOCKET && !fInbound)
        PushVersion();
    GetNodeSignals().InitializeNode(GetId(), this);
}
// Close the peer's socket, free the bloom filter allocated in the
// constructor, and notify the node-signal listeners.
CNode::~CNode()
{
    CloseSocket(hSocket);
    if (pfilter)
        delete pfilter;
    GetNodeSignals().FinalizeNode(GetId());
}
// Schedule a getdata request for inv. mapAskFor acts as a priority queue
// keyed by the earliest send time; repeat requests for the same inv are
// spaced at least 2 minutes apart (tracked globally in mapAlreadyAskedFor).
void CNode::AskFor(const CInv& inv)
{
    // Cap the per-peer queue to bound memory use.
    if (mapAskFor.size() > MAPASKFOR_MAX_SZ)
        return;
    // We're using mapAskFor as a priority queue,
    // the key is the earliest time the request can be sent
    int64_t nRequestTime;
    limitedmap<CInv, int64_t>::const_iterator it = mapAlreadyAskedFor.find(inv);
    if (it != mapAlreadyAskedFor.end())
        nRequestTime = it->second;
    else
        nRequestTime = 0;
    LogPrint("net", "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime / 1000000), id);
    // Make sure not to reuse time indexes to keep things in the same order
    int64_t nNow = GetTimeMicros() - 1000000;
    static int64_t nLastTime;
    ++nLastTime;
    nNow = std::max(nNow, nLastTime);
    nLastTime = nNow;
    // Each retry is 2 minutes after the last
    nRequestTime = std::max(nRequestTime + 2 * 60 * 1000000, nNow);
    if (it != mapAlreadyAskedFor.end())
        mapAlreadyAskedFor.update(it, nRequestTime);
    else
        mapAlreadyAskedFor.insert(std::make_pair(inv, nRequestTime));
    mapAskFor.insert(std::make_pair(nRequestTime, inv));
}
// Start composing an outbound message: acquires cs_vSend (released later
// by EndMessage or AbortMessage) and writes a header with a placeholder
// payload size of 0, patched in by EndMessage.
void CNode::BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend)
{
    ENTER_CRITICAL_SECTION(cs_vSend);
    assert(ssSend.size() == 0);
    ssSend << CMessageHeader(pszCommand, 0);
    LogPrint("net", "sending: %s ", SanitizeString(pszCommand));
}
// Discard the message being composed and release the lock taken by
// BeginMessage.
void CNode::AbortMessage() UNLOCK_FUNCTION(cs_vSend)
{
    ssSend.clear();
    LEAVE_CRITICAL_SECTION(cs_vSend);
    LogPrint("net", "(aborted)\n");
}
// Finalize the message started by BeginMessage: patch the payload size and
// checksum into the header, queue the bytes for sending (with an
// optimistic immediate send when the queue was empty), and release
// cs_vSend. Honours the undocumented -dropmessagestest / -fuzzmessagestest
// development options.
void CNode::EndMessage() UNLOCK_FUNCTION(cs_vSend)
{
    // The -*messagestest options are intentionally not documented in the help message,
    // since they are only used during development to debug the networking code and are
    // not intended for end-users.
    if (mapArgs.count("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 2)) == 0) {
        LogPrint("net", "dropmessages DROPPING SEND MESSAGE\n");
        AbortMessage();
        return;
    }
    if (mapArgs.count("-fuzzmessagestest"))
        Fuzz(GetArg("-fuzzmessagestest", 10));
    if (ssSend.size() == 0)
        return;
    // Set the size
    unsigned int nSize = ssSend.size() - CMessageHeader::HEADER_SIZE;
    memcpy((char*)&ssSend[CMessageHeader::MESSAGE_SIZE_OFFSET], &nSize, sizeof(nSize));
    // Set the checksum
    uint256 hash = Hash(ssSend.begin() + CMessageHeader::HEADER_SIZE, ssSend.end());
    unsigned int nChecksum = 0;
    memcpy(&nChecksum, &hash, sizeof(nChecksum));
    assert(ssSend.size() >= CMessageHeader::CHECKSUM_OFFSET + sizeof(nChecksum));
    memcpy((char*)&ssSend[CMessageHeader::CHECKSUM_OFFSET], &nChecksum, sizeof(nChecksum));
    LogPrint("net", "(%d bytes) peer=%d\n", nSize, id);
    // Move the finished message into the send queue.
    std::deque<CSerializeData>::iterator it = vSendMsg.insert(vSendMsg.end(), CSerializeData());
    ssSend.GetAndClear(*it);
    nSendSize += (*it).size();
    // If write queue empty, attempt "optimistic write"
    if (it == vSendMsg.begin())
        SocketSendData(this);
    LEAVE_CRITICAL_SECTION(cs_vSend);
}
//
// CBanDB
//
// Ban database: serializes the ban map to <datadir>/banlist.dat.
CBanDB::CBanDB()
{
    pathBanlist = GetDataDir() / "banlist.dat";
}
// Serialize the ban set (network magic + data + SHA256d checksum) to a
// random temp file, then atomically rename it over banlist.dat so a crash
// mid-write cannot corrupt the database.
bool CBanDB::Write(const banmap_t& banSet)
{
    // Generate random temporary filename
    unsigned short randv = 0;
    GetRandBytes((unsigned char*)&randv, sizeof(randv));
    std::string tmpfn = strprintf("banlist.dat.%04x", randv);
    // serialize banlist, checksum data up to that point, then append csum
    CDataStream ssBanlist(SER_DISK, CLIENT_VERSION);
    ssBanlist << FLATDATA(Params().MessageStart());
    ssBanlist << banSet;
    uint256 hash = Hash(ssBanlist.begin(), ssBanlist.end());
    ssBanlist << hash;
    // open temp output file, and associate with CAutoFile
    boost::filesystem::path pathTmp = GetDataDir() / tmpfn;
    FILE* file = fopen(pathTmp.string().c_str(), "wb");
    CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: Failed to open file %s", __func__, pathTmp.string());
    // Write and commit header, data
    try {
        fileout << ssBanlist;
    } catch (const std::exception& e) {
        return error("%s: Serialize or I/O error - %s", __func__, e.what());
    }
    FileCommit(fileout.Get());
    fileout.fclose();
    // replace existing banlist.dat, if any, with new banlist.dat.XXXX
    if (!RenameOver(pathTmp, pathBanlist))
        return error("%s: Rename-into-place failed", __func__);
    return true;
}
// Load banlist.dat into banSet, verifying the appended SHA256d checksum
// and the network magic bytes. Returns false on any I/O or validation
// error.
bool CBanDB::Read(banmap_t& banSet)
{
    // open input file, and associate with CAutoFile
    FILE* file = fopen(pathBanlist.string().c_str(), "rb");
    CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: Failed to open file %s", __func__, pathBanlist.string());
    // use file size to size memory buffer
    uint64_t fileSize = boost::filesystem::file_size(pathBanlist);
    uint64_t dataSize = 0;
    // Don't try to resize to a negative number if file is small
    if (fileSize >= sizeof(uint256))
        dataSize = fileSize - sizeof(uint256);
    vector<unsigned char> vchData;
    vchData.resize(dataSize);
    uint256 hashIn;
    // read data and checksum from file
    try {
        filein.read((char*)&vchData[0], dataSize);
        filein >> hashIn;
    } catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }
    filein.fclose();
    CDataStream ssBanlist(vchData, SER_DISK, CLIENT_VERSION);
    // verify stored checksum matches input data
    uint256 hashTmp = Hash(ssBanlist.begin(), ssBanlist.end());
    if (hashIn != hashTmp)
        return error("%s: Checksum mismatch, data corrupted", __func__);
    unsigned char pchMsgTmp[4];
    try {
        // de-serialize file header (network specific magic number) and ..
        ssBanlist >> FLATDATA(pchMsgTmp);
        // ... verify the network matches ours
        if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
            return error("%s: Invalid network magic number", __func__);
        // de-serialize address data into one CAddrMan object
        ssBanlist >> banSet;
    } catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }
    return true;
}
void DumpBanlist()
{
int64_t nStart = GetTimeMillis();
CNode::SweepBanned(); //clean unused entires (if bantime has expired)
CBanDB bandb;
banmap_t banmap;
CNode::GetBanned(banmap);
bandb.Write(banmap);
LogPrint("net", "Flushed %d banned node ips/subnets to banlist.dat %dms\n",
banmap.size(), GetTimeMillis() - nStart);
}
|
#include <bits/stdc++.h>
using namespace std;
#define LSOne(S) ((S) & -(S)) // the key operation: isolate the lowest set bit of S
typedef long long ll; // for extra flexibility (sums may exceed 32 bits)
typedef vector<ll> vll; // 1-based frequency / Fenwick arrays
typedef vector<int> vi; // raw integer samples
// 1-based Fenwick tree (binary indexed tree) for point update / prefix sum.
// Slot 0 of the internal array is never used.
class FenwickTree {
 private:
  std::vector<long long> ft;  // the internal tree array

  // Lowest set bit of x: the classic BIT step size.
  static int lowbit(int x) { return x & (-x); }

 public:
  // Empty tree over indices [1..m].
  FenwickTree(int m) { ft.assign(m + 1, 0); }

  // Build from a frequency table f (f[0] must be 0) in O(m), bottom-up:
  // each node pushes its partial sum to its parent once.
  void build(const std::vector<long long> &f) {
    int m = (int)f.size() - 1;
    ft.assign(m + 1, 0);
    for (int idx = 1; idx <= m; ++idx) {
      ft[idx] += f[idx];
      int parent = idx + lowbit(idx);
      if (parent <= m)
        ft[parent] += ft[idx];
    }
  }

  FenwickTree(const std::vector<long long> &f) { build(f); }

  // Build from raw samples s whose values lie in [1..m]: convert to a
  // frequency table in O(n), then build in O(m).
  FenwickTree(int m, const std::vector<int> &s) {
    std::vector<long long> f(m + 1, 0);
    for (std::size_t k = 0; k < s.size(); ++k)
      ++f[s[k]];
    build(f);
  }

  // Prefix sum over [1..j].
  long long rsq(int j) {
    long long total = 0;
    while (j > 0) {
      total += ft[j];
      j -= lowbit(j);
    }
    return total;
  }

  // Range sum over [i..j] by inclusion/exclusion.
  long long rsq(int i, int j) { return rsq(j) - rsq(i - 1); }

  // Point update: add v (positive or negative) at index i.
  void update(int i, long long v) {
    for (; i < (int)ft.size(); i += lowbit(i))
      ft[i] += v;
  }

  // Smallest index idx with rsq(1, idx) >= k, via binary descent in O(log m).
  int select(long long k) {
    int step = 1;
    while (step * 2 < (int)ft.size())
      step *= 2;
    int idx = 0;
    for (; step > 0; step /= 2) {
      if (k > ft[idx + step]) {
        k -= ft[idx + step];
        idx += step;
      }
    }
    return idx + 1;
  }
};
// Range Update, Point Query wrapper over a plain (PURQ) Fenwick tree.
class RUPQ {
 private:
  FenwickTree ft;  // stores difference updates; prefix sum = point value

 public:
  RUPQ(int m) : ft(m) {}

  // Add v to every index in [ui..uj] using two point updates on the
  // difference array: +v at ui, -v just past uj.
  void range_update(int ui, int uj, int v) {
    ft.update(ui, v);
    ft.update(uj + 1, -v);
  }

  // The value at index i is the prefix sum of all difference updates.
  long long point_query(int i) { return ft.rsq(i); }
};
// Range Update, Range Query: combines an RUPQ (per-index increments) with a
// second Fenwick tree of correction terms so prefix sums come out right.
class RURQ {
 private:
  RUPQ rupq;        // tracks the per-index increment
  FenwickTree purq; // cancellation terms for the optimistic j * increment

 public:
  RURQ(int m) : rupq(m), purq(m) {}

  // Apply +v over [ui..uj]; record corrections so rsq(j) below is exact.
  void range_update(int ui, int uj, int v) {
    rupq.range_update(ui, uj, v);
    purq.update(ui, v * (ui - 1)); // subtract (ui-1)*v before ui
    purq.update(uj + 1, -v * uj);  // add back (uj-ui+1)*v after uj
  }

  // Prefix sum over [1..j]: optimistic j * increment minus corrections.
  long long rsq(int j) {
    return rupq.point_query(j) * j - purq.rsq(j);
  }

  // Range sum over [i..j] by inclusion/exclusion.
  long long rsq(int i, int j) { return rsq(j) - rsq(i - 1); }
};
// Demo driver: exercises FenwickTree, RUPQ and RURQ with the textbook data.
int main() {
  // Frequency table; slot 0 is a dummy because the tree is 1-based.
  std::vector<long long> freq = {0,0,1,0,1,2,3,2,1,1,0};
  FenwickTree ft(freq);
  printf("%lld\n", ft.rsq(1, 6)); // 7 => ft[6]+ft[4] = 5+2 = 7
  printf("%d\n", ft.select(7)); // index 6, rsq(1, 6) == 7, which is >= 7
  ft.update(5, 1); // update demo
  printf("%lld\n", ft.rsq(1, 10)); // now 12
  printf("=====\n");

  RUPQ rupq(10);
  RURQ rurq(10);
  rupq.range_update(2, 9, 7); // indices in [2, 3, .., 9] updated by +7
  rurq.range_update(2, 9, 7); // same as rupq above
  rupq.range_update(6, 7, 3); // indices 6&7 are further updated by +3 (10)
  rurq.range_update(6, 7, 3); // same as rupq above
  // idx = 0 (unused) | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10
  // val = -          | 0 | 7 | 7 | 7 | 7 |10 |10 | 7 | 7 | 0
  for (int idx = 1; idx <= 10; ++idx)
    printf("%d -> %lld\n", idx, rupq.point_query(idx));
  printf("RSQ(1, 10) = %lld\n", rurq.rsq(1, 10)); // 62
  printf("RSQ(6, 7) = %lld\n", rurq.rsq(6, 7)); // 20
  return 0;
}
|
/*
* Copyright (C) 2015 David Devecsery
*/
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <map>
#include "include/Debug.h"
#include "include/ControlFlowGraph.h"
#include "include/SEG.h"
extern void T4(SEG &G, const SEG &Xp);
extern void T2(SEG &G, SEG &Xp);
extern void T7(SEG &G);
extern void T6(SEG &G);
extern void T5(SEG &G);
extern void Ramalingam(SEG &G);
extern SEG createGp(const SEG &G);
// Abort the whole test run with `msg` on stderr when `check` is false.
static void test_assert(bool check, std::string msg) {
  if (check) {
    return;
  }
  std::cerr << "ERROR: " << msg << std::endl;
  exit(EXIT_FAILURE);
}
// Unit test for createGp(): builds the CFG pictured below, computes Gp, and
// verifies that (a) every M node (capital letter) was removed and (b) the
// surviving nodes b, d, e kept exactly the expected predecessor edges.
int main(void) {
    /* Testing Gp creation
     * Nodes with captial letters are M nodes!
     *      A
     *      |
     *      V
     *  b<--
     *  /|  |
     * v V  |
     * d C  |
     *  \ | |
     *  VV  |
     *  e---
     *  |
     *  V
     *  F
     *
     */
    // First create the graph:
    SEG G;
    // test1
    auto a = G.addNode<CFG::Node>();
    auto b = G.addNode<CFG::Node>();
    auto c = G.addNode<CFG::Node>();
    auto d = G.addNode<CFG::Node>();
    auto e = G.addNode<CFG::Node>();
    auto f = G.addNode<CFG::Node>();
    // Mark a, c, f as M nodes — these are the ones createGp() must drop.
    auto &a_node = G.getNode<CFG::Node>(a);
    auto &c_node = G.getNode<CFG::Node>(c);
    auto &f_node = G.getNode<CFG::Node>(f);
    a_node.setM();
    c_node.setM();
    f_node.setM();
    // Wire the edges of the diagram above, expressed as back (pred) edges.
    // b's backedges
    G.addPred(b, a);
    G.addPred(b, e);
    // c's backedges
    G.addPred(c, b);
    // d's backedges
    G.addPred(d, b);
    // e's backedges
    G.addPred(e, d);
    G.addPred(e, c);
    // f's backedges
    G.addPred(f, e);
    // gimme Gp
    SEG Gp = createGp(G);
    // Check the creation of Gp
    // First, verify the nodes that should be dead:
    auto a_null = Gp.tryGetNode(a);
    auto c_null = Gp.tryGetNode(c);
    auto f_null = Gp.tryGetNode(f);
    test_assert(a_null == nullptr, "A was not removed from Gp");
    test_assert(c_null == nullptr, "C was not removed from Gp");
    test_assert(f_null == nullptr, "F was not removed from Gp");
    // Now, make sure that the other nodes are all fine and dandy
    auto b_node = Gp.getNode(b);
    auto d_node = Gp.getNode(d);
    auto e_node = Gp.getNode(e);
    auto b_rep = b_node.id();
    auto d_rep = d_node.id();
    auto e_rep = e_node.id();
    // Verify correct preds: each survivor may only point at the expected
    // representative; preds that no longer resolve (removed nodes) are skipped.
    for (auto pred_id : b_node.preds()) {
        auto ppred_node = Gp.tryGetNode(pred_id);
        if (ppred_node != nullptr) {
            auto pred_rep = ppred_node->id();
            test_assert(pred_rep == e_rep, "Node B has bad Pred");
        }
    }
    for (auto pred_id : d_node.preds()) {
        auto ppred_node = Gp.tryGetNode(pred_id);
        if (ppred_node != nullptr) {
            auto pred_rep = ppred_node->id();
            test_assert(pred_rep == b_rep, "Node D has bad Pred");
        }
    }
    for (auto pred_id : e_node.preds()) {
        auto ppred_node = Gp.tryGetNode(pred_id);
        if (ppred_node != nullptr) {
            auto pred_rep = ppred_node->id();
            test_assert(pred_rep == d_rep, "Node E has bad Pred");
        }
    }
    return EXIT_SUCCESS;
}
|
#include <iostream>
#include <string>
#define WITH_PLOTTING
#include "trignoclient.hpp"
using namespace trigno;
using namespace std::literals::chrono_literals;
// SIGINT (Ctrl-C) handler: acknowledge the request and exit with status 0.
// `signum` is required by the signal() signature but intentionally unused.
void interruptHandler(int signum) {
    printf("\nUser-requested shutdown\n");
    exit(0);
}
// Demo entry point: takes a data-file path on the command line and sets up
// trigno plotters. Most of the pipeline (loading, sorting and launching the
// plots) is commented-out scaffolding, so at present this only configures the
// plotter window and returns.
int main(int argc, char const *argv[]) {
    // register interrupt handler
    signal(SIGINT, interruptHandler);
    // debug/verbose output banner
    printf("##################################\n%s\n##################################\n", __FILE__);
    // parse input arguments
    if (argc < 2) {
        printf("Usage: ./plot_sequence <FILE>\n");
        return 1;
    }
    auto file_path = std::string(argv[1]); // NOTE(review): parsed but not yet consumed below
    // sort data according to timestap
    // not required, just to ensure data is time-ordered
    // data.sort();
    tools::Iterative< tools::Plotter > plotter;
    tools::Plotter plotter2;
    // configure plotter
    plotter.get().window.refresh_interval = 33ms;  // 30Hz
    plotter.get().window.autofity = false;         // keep a fixed y-axis scale
    // plotter.get().window().ymax = ;
    // plotter.get().window().ylim = { -0.002, 0.002 };
    // plotter2.run(duplicate.begin(20000));
    printf("---------------------\n");
    // plotter2.launch(duplicate);
    // plotter2.wait();
}
|
#include <stdio.h>
// Identity function: returns its argument unchanged.
int test(int value) {
    return value;
}
// Entry point: prints "5\n" (the identity of 5) and exits successfully.
int main() {
    const int result = test(5);
    printf("%d\n", result);
    return 0;
}
|
/*
* ****** COPS v7 Emulator - Open Source ******
* Copyright (C) 2012 - 2014 Jean-Philippe Boivin
*
* Please read the WARNING, DISCLAIMER and PATENTS
* sections in the LICENSE file.
*/
#include "atomic.h"
#include "script.h"
#include "client.h"
#include "player.h"
#include "allmsg.h"
#include "lua.hpp"
/* static */
// Lazily created singleton; see State::getState() for the construction race.
Script::State* Script::State::sState = nullptr;
/* static */
// Return the process-wide Lua state, creating the State singleton on first
// use. The first thread to atomically bump `protect` to 1 constructs the
// instance; losers spin-yield until sState becomes visible.
// NOTE(review): no explicit release/acquire pairing is visible here — confirm
// that atomic_inc plus the volatile spin provide the ordering needed for
// other threads to see a fully constructed State on all target platforms.
lua_State&
Script::State :: getState()
{
    static volatile atomic_t protect = 0;
    if (sState == nullptr)
    {
        if (1 == atomic_inc(&protect))
        {
            // create the instance (we won the construction race)
            sState = new State();
        }
        else
        {
            // another thread is constructing; busy-wait until it publishes
            while (sState == nullptr)
                QThread::yieldCurrentThread();
        }
    }
    return *(sState->mState);
}
// Create the Lua interpreter state and load the standard Lua libraries.
Script::State :: State()
{
    mState = luaL_newstate();
    ASSERT(mState != nullptr);
    luaL_openlibs(mState);
}
// Close the Lua state, releasing all interpreter resources.
Script::State :: ~State()
{
    lua_close(mState);
}
/* static */
// Expose the C++ script API to Lua: player stat getters, wallet/CP mutators,
// movement, and the MsgDialog building blocks (text/link/pic/create).
// Every bound function expects the Client* as its first Lua argument,
// encoded as an integer. Always returns ERROR_SUCCESS.
err_t
Script :: registerFunctions()
{
    err_t err = ERROR_SUCCESS;
    lua_State* state = Script::getState();
    // Getters / Setters Lua methods
    lua_register(state, "getName", Script::getName);
    lua_register(state, "getMate", Script::getMate);
    lua_register(state, "getLook", Script::getLook);
    lua_register(state, "getHair", Script::getHair);
    lua_register(state, "getMoney", Script::getMoney);
    lua_register(state, "getCPs", Script::getCPs);
    lua_register(state, "getExp", Script::getExp);
    lua_register(state, "getForce", Script::getForce);
    lua_register(state, "getHealth", Script::getHealth);
    lua_register(state, "getDexterity", Script::getDexterity);
    lua_register(state, "getSoul", Script::getSoul);
    lua_register(state, "getAddPoints", Script::getAddPoints);
    lua_register(state, "getCurHP", Script::getCurHP);
    lua_register(state, "getMaxHP", Script::getMaxHP);
    lua_register(state, "getCurMP", Script::getCurMP);
    lua_register(state, "getMaxMP", Script::getMaxMP);
    lua_register(state, "getPkPoints", Script::getPkPoints);
    lua_register(state, "getLevel", Script::getLevel);
    lua_register(state, "getProfession", Script::getProfession);
    lua_register(state, "gainMoney", Script::gainMoney);
    lua_register(state, "gainCPs", Script::gainCPs);
    lua_register(state, "spendMoney", Script::spendMoney);
    lua_register(state, "spendCPs", Script::spendCPs);
    lua_register(state, "move", Script::move);
    // MsgDialog Lua methods
    lua_register(state, "text", Script::text);
    lua_register(state, "link", Script::link);
    lua_register(state, "pic", Script::pic);
    lua_register(state, "create", Script::create);
    return err;
}
// Load and execute the NPC script at aPath in the shared Lua state.
// aUID identifies the owning entity; a parse/run failure is logged, not fatal.
Script :: Script(uint32_t aUID, const char* aPath)
    : mUID(aUID)
{
    ASSERT(aPath != nullptr && aPath[0] != '\0');
    lua_State* state = Script::getState();
    if (luaL_dofile(state, aPath) != 0)
    {
        // lua_tostring(-1) holds the interpreter's error message
        LOG(ERROR, "Failed to parse Lua script `%s`:\n%s",
            aPath, lua_tostring(state, -1));
    }
}
Script :: ~Script()
{
    // Nothing to release: the Lua state is shared and owned by Script::State.
}
/* static */
// Lua binding: getName(client) -> player name string.
// Arg 1 carries the Client pointer encoded as a Lua integer.
int
Script :: getName(lua_State* aState)
{
    // Validate the pointer BEFORE forming a reference: the previous
    // `T& ref = *ptr; ASSERT(&ref != nullptr);` dereferenced first (UB on a
    // null pointer) and the address-of-a-reference test is always true.
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushstring(aState, pPlayer->getName());
    return 1;
}

/* static */
// Lua binding: getMate(client) -> mate name string.
int
Script :: getMate(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr); // check before dereference
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushstring(aState, pPlayer->getMate());
    return 1;
}

/* static */
// Lua binding: getLook(client) -> look id.
int
Script :: getLook(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getLook());
    return 1;
}

/* static */
// Lua binding: getHair(client) -> hair id.
int
Script :: getHair(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getHair());
    return 1;
}

/* static */
// Lua binding: getMoney(client) -> money amount.
int
Script :: getMoney(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getMoney());
    return 1;
}

/* static */
// Lua binding: getCPs(client) -> conquer points.
int
Script :: getCPs(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getCPs());
    return 1;
}

/* static */
// Lua binding: getExp(client) -> experience.
int
Script :: getExp(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getExp());
    return 1;
}

/* static */
// Lua binding: getForce(client) -> force stat.
int
Script :: getForce(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getForce());
    return 1;
}

/* static */
// Lua binding: getHealth(client) -> health stat.
int
Script :: getHealth(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getHealth());
    return 1;
}

/* static */
// Lua binding: getDexterity(client) -> dexterity stat.
int
Script :: getDexterity(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getDexterity());
    return 1;
}

/* static */
// Lua binding: getSoul(client) -> soul stat.
int
Script :: getSoul(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getSoul());
    return 1;
}

/* static */
// Lua binding: getAddPoints(client) -> unspent attribute points.
int
Script :: getAddPoints(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getAddPoints());
    return 1;
}

/* static */
// Lua binding: getCurHP(client) -> current hit points.
int
Script :: getCurHP(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getCurHP());
    return 1;
}

/* static */
// Lua binding: getMaxHP(client) -> maximum hit points.
int
Script :: getMaxHP(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getMaxHP());
    return 1;
}

/* static */
// Lua binding: getCurMP(client) -> current mana points.
int
Script :: getCurMP(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getCurMP());
    return 1;
}

/* static */
// Lua binding: getMaxMP(client) -> maximum mana points.
int
Script :: getMaxMP(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getMaxMP());
    return 1;
}

/* static */
// Lua binding: getPkPoints(client) -> player-kill points.
int
Script :: getPkPoints(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getPkPoints());
    return 1;
}

/* static */
// Lua binding: getLevel(client) -> level.
int
Script :: getLevel(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getLevel());
    return 1;
}

/* static */
// Lua binding: getProfession(client) -> profession id.
int
Script :: getProfession(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    lua_pushinteger(aState, pPlayer->getProfession());
    return 1;
}
/* static */
// Lua binding: gainMoney(client, amount) -> boolean success.
int
Script :: gainMoney(lua_State* aState)
{
    // Check pointers BEFORE dereferencing; the previous
    // `ASSERT(&ref != nullptr)` ran after the deref (UB) and is always true.
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    int money = lua_tointeger(aState, 2);
    bool success = pPlayer->gainMoney((uint32_t)money, true);
    lua_pushboolean(aState, success);
    return 1;
}

/* static */
// Lua binding: gainCPs(client, amount) -> boolean success.
int
Script :: gainCPs(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    int cps = lua_tointeger(aState, 2);
    bool success = pPlayer->gainCPs((uint32_t)cps, true);
    lua_pushboolean(aState, success);
    return 1;
}

/* static */
// Lua binding: spendMoney(client, amount) -> boolean success.
int
Script :: spendMoney(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    int money = lua_tointeger(aState, 2);
    bool success = pPlayer->spendMoney((uint32_t)money, true);
    lua_pushboolean(aState, success);
    return 1;
}

/* static */
// Lua binding: spendCPs(client, amount) -> boolean success.
int
Script :: spendCPs(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    int cps = lua_tointeger(aState, 2);
    bool success = pPlayer->spendCPs((uint32_t)cps, true);
    lua_pushboolean(aState, success);
    return 1;
}

/* static */
// Lua binding: move(client, mapId, x, y) -> boolean success.
int
Script :: move(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    Player* pPlayer = pClient->getPlayer();
    ASSERT(pPlayer != nullptr);
    uint32_t mapId = (uint32_t)lua_tointeger(aState, 2);
    uint16_t x = (uint16_t)lua_tointeger(aState, 3);
    uint16_t y = (uint16_t)lua_tointeger(aState, 4);
    bool success = pPlayer->move(mapId, x, y);
    lua_pushboolean(aState, success);
    return 1;
}
/* static */
// Lua binding: text(client, str). Sends a text entry of the NPC dialog.
int
Script :: text(lua_State* aState)
{
    // Check the pointer BEFORE dereferencing; the previous
    // `ASSERT(&ref != nullptr)` ran after the deref (UB) and is always true.
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    const char* text = lua_tolstring(aState, 2, nullptr);
    MsgDialog msg(text, 0, 0, MsgDialog::ACTION_TEXT);
    pClient->send(&msg);
    return 0;
}

/* static */
// Lua binding: link(client, str, idx). Sends a clickable dialog option;
// idx is the option index reported back when the player selects it.
int
Script :: link(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    const char* text = lua_tolstring(aState, 2, nullptr);
    int idx = lua_tointeger(aState, 3);
    MsgDialog msg(text, 0, idx, MsgDialog::ACTION_LINK);
    pClient->send(&msg);
    return 0;
}

/* static */
// Lua binding: pic(client, picId). Sends the dialog picture to display.
int
Script :: pic(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    int pic = lua_tointeger(aState, 2);
    MsgDialog msg(0, 0, pic, 0, MsgDialog::ACTION_PIC);
    pClient->send(&msg);
    return 0;
}

/* static */
// Lua binding: create(client). Finalizes and displays the composed dialog.
int
Script :: create(lua_State* aState)
{
    Client* pClient = (Client*)lua_tointeger(aState, 1);
    ASSERT(pClient != nullptr);
    MsgDialog msg(0xFF, MsgDialog::ACTION_CREATE);
    pClient->send(&msg);
    return 0;
}
|
#include <iostream>
#include <transport/TSocket.h>
#include <transport/TBufferTransports.h>
#include <protocol/TBinaryProtocol.h>
#include <concurrency/PosixThreadFactory.h>
#include <concurrency/Thread.h>
#include <concurrency/ThreadManager.h>
#include <concurrency/Mutex.h>
#include "RegistryProxy.h" // As an example
#include "../frproxy.h"
#include "../core/ClientPool.h"
#include "../core/ZkClient.h"
#include "../utils/ArgsParser.h"
#include "../log/logger.h"
using namespace std;
using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace apache::thrift::concurrency;
using boost::shared_ptr;
using namespace FinagleRegistryProxy;
#ifndef DiffMs_
#define DiffMs(end, start) ((double) (end - start) / CLOCKS_PER_SEC * 1000)
#endif
// Thread-shared counter used to signal how many worker tasks have finished.
class Cursor {
public:
    Cursor(int seed) :
            seed(seed) {
    }
    ~Cursor() {
    }
private:
    int seed;                                       // current count
    apache::thrift::concurrency::Mutex locker;      // guards `seed`
public:
    // Increment under the mutex so concurrent add() calls don't lose updates.
    void add() {
        locker.lock();
        seed++;
        locker.unlock();
    }
    // Read under the same mutex: an unsynchronized read of an int that other
    // threads write concurrently is a data race.
    int get() {
        locker.lock();
        int value = seed;
        locker.unlock();
        return value;
    }
};
// Runnable client task issuing RPCs (get/dump/status/reset) against one
// frproxy server. Every call opens a fresh framed binary-protocol connection.
// (Unused `int i;` locals removed from each method.)
class ClientTask: public Runnable {
private:
    string host;    // frproxy host
    int port;       // frproxy port
    string name;    // service name to look up
    int count;      // get_once() iterations for run(); 0 means loop forever
    Cursor *cursor; // optional shared completion counter (not owned)
public:
    ClientTask(string host = "127.0.0.1", int port = 9009, string name = "testservice", int count = 0, Cursor *cursor = 0) :
            host(host), port(port), name(name), count(count), cursor(cursor) {
    }
    ~ClientTask() {
    }
    // Print the proxy's full internal dump to stdout.
    void dump() {
        boost::shared_ptr<TSocket> socket(new TSocket(host, port));
        boost::shared_ptr<TTransport> transport(new TFramedTransport(socket));
        boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
        RegistryProxyClient client(protocol);
        try {
            transport->open();
            std::string ret;
            client.dump(ret);
            cout << ret << endl;
        } catch (const apache::thrift::transport::TTransportException& ex) {
            cout << ex.what() << endl;
        } catch (const std::exception& ex) {
            cout << ex.what() << endl;
        }
        transport->close();
    }
    // Resolve `name` once, printing open/get timings and the result.
    void get_once() {
        boost::shared_ptr<TSocket> socket(new TSocket(host, port));
        boost::shared_ptr<TTransport> transport(new TFramedTransport(socket));
        boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
        RegistryProxyClient client(protocol);
        try {
            long start = utils::now_in_us();
            transport->open();
            long open = utils::now_in_us();
            std::string ret;
            cout << "getting" << endl;
            client.get(ret, name);
            long done = utils::now_in_us();
            cout << "client get total=" << DiffMs(done, start) << "ms. open cost=" << DiffMs(open, start) << "ms. get cost="
                    << DiffMs(done, open) << endl;
            cout << "result: " << ret << endl;
        } catch (const apache::thrift::TException& ex) {
            cout << "client get thrift excepiton: " << ex.what() << endl;
        } catch (const std::exception& ex) {
            cout << "client get unknown exception:" << ex.what() << endl;
        }
        transport->close();
    }
    // Print the proxy's health bitmask (0 = healthy; see the tip printed).
    void status() {
        boost::shared_ptr<TSocket> socket(new TSocket(host, port));
        boost::shared_ptr<TTransport> transport(new TFramedTransport(socket));
        boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
        RegistryProxyClient client(protocol);
        try {
            transport->open();
            int status = client.status();
            cout << "status : " << status << endl;
            cout << "  tips: get 0 is fine; if >0 means sth not ok. 1 zk conn fail, 2 cache empty , 4 watcher loss, 8 thread pool fail." << endl;
        } catch (const apache::thrift::transport::TTransportException& ex) {
            cout << ex.what() << endl;
        } catch (const std::exception& ex) {
            cout << ex.what() << endl;
        }
        transport->close();
    }
    // Ask the proxy to reset its internal state; prints the server's reply.
    void reset() {
        boost::shared_ptr<TSocket> socket(new TSocket(host, port));
        boost::shared_ptr<TTransport> transport(new TFramedTransport(socket));
        boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
        RegistryProxyClient client(protocol);
        try {
            transport->open();
            string ret = "";
            client.reset(ret);
            cout << "reset result : " << ret << endl;
            cout << "  tips: empty result is fine." << endl;
        } catch (const apache::thrift::transport::TTransportException& ex) {
            cout << ex.what() << endl;
        } catch (const std::exception& ex) {
            cout << ex.what() << endl;
        }
        transport->close();
    }
    // Drive `count` get_once() calls (count == 0 runs forever), then bump the
    // shared cursor so the launcher can detect completion.
    void run() {
        int c = 0;
        while (c++ < count || count == 0) {
            get_once();
        }
        if (cursor) {
            cursor->add();
        }
    }
};
// Print command-line help to stdout. Passing egg > 0 additionally documents
// the otherwise hidden -r/--reset option.
void usage(int egg = 0) {
    cout << "client version: " << FinagleRegistryProxy::FRPROXY_VERSION << endl;
    cout << "Usage: client [option [option_value]]" << endl;
    cout << "Options:" << endl;
    cout << "	" << "-v, --version: show version info. " << endl;
    cout << "	" << "-h, --help: print usage. " << endl;
    cout << "	" << "-s, --server: server of frproxy : default 127.0.0.1. eg. -s localhost" << endl;
    cout << "	" << "-p, --port: default 9009. eg. -p 9090" << endl;
    cout << "	" << "-n, --name: name of service: default testservice. eg. -n rpc.counter.thrift" << endl;
    cout << "	" << "-t, --threadcount:default 0, single thread. eg. -t 2" << endl;
    cout << "	" << "-c, --count: default 1 when single thread. or 1. eg. -c 9999" << endl;
    cout << "	" << "-m, --method: candidatation include get, remove, dump. default get" << endl;
    cout << "	" << "-l, --list: list frproxy server" << endl;
    cout << "	"
            << "-a, --status: list frproxy working status. get 0 is fine; if >0 means sth not ok. 1 zk conn fail, 2 cache empty , 4 watcher loss, 8 thread pool fail."
            << endl;
    cout << "	" << "-z, --zkhosts: zookeeper hosts. default 127.0.0.1:2181. eg. -z 192.168.2.202:2181" << endl;
    if (egg > 0) {
        cout << "	" << "-r, --reset:\t\t reset server status." << endl;
    }
    cout << "eg. " << endl;
    cout << "	" << "./client " << endl;
    cout << "	" << "./client -c 999" << endl;
    cout << "	" << "./client -c 999 -t 2" << endl;
    cout << "	" << "./client -c 999 -t 2 -s 192.168.1.111 -p 9009" << endl;
    cout << "	" << "./client -m dump" << endl;
    cout << "	" << "./client -l -z 192.168.113.212:2181" << endl;
    cout << "	" << "./client -n rpc.counter.thrift" << endl;
}
// Query ZooKeeper at `zkhosts` and print every frproxy server registered
// under /soa/proxies.
// NOTE(review): the pooled ZkClient is opened but never explicitly returned
// or closed here — confirm ClientPool's destructor reclaims it.
void listServer(string zkhosts) {
    ClientPool pool = ClientPool(new ZkClientFactory(zkhosts, 0));
    ZkClient* client = (ZkClient*) pool.open();
    string root = "/soa/proxies";
    client->get_all_services(root);
    vector<string> v = client->get_children(root);
    cout << "frproxy servers regeistered in zk: " << zkhosts << " total:" << v.size() << endl;
    for (vector<string>::iterator it = v.begin(); it != v.end(); it++) {
        cout << "  " << *it << endl;
    }
}
// CLI entry point. Parses options, then dispatches:
//   -v/-h/-l/-a/-r and -m dump are one-shot commands;
//   otherwise runs a load test: -t worker threads each issuing get() calls,
//   or a single ClientTask in the calling thread when -t is 0.
int main(int argc, char **argv) {
    string host = option_value(argc, argv, "-s", "--server", "127.0.0.1");
    int port = option_value(argc, argv, "-p", "--port", 9009);
    string service_name = option_value(argc, argv, "-n", "--name", "testservice");
    string zkhosts = option_value(argc, argv, "-z", "--zkhosts", "localhost:2181");
    int pool_size = option_value(argc, argv, "-t", "--threadcount", 0);
    // Default call count: unbounded (0) in multi-thread mode, one otherwise.
    int count = option_value(argc, argv, "-c", "--count", pool_size > 0 ? 0 : 1);
    string method = option_value(argc, argv, "-m", "--method", "");
    if (option_exists(argc, argv, "-v") || option_exists(argc, argv, "--version")) {
        cout << "frproxy client version " << FRPROXY_VERSION << endl;
        return 0;
    }
    if (option_exists(argc, argv, "-h") || option_exists(argc, argv, "--help")) {
        usage();
        return 0;
    }
    if (option_exists(argc, argv, "-l") || option_exists(argc, argv, "--list")) {
        listServer(zkhosts);
        return 0;
    }
    if (option_exists(argc, argv, "-a") || option_exists(argc, argv, "--status")) {
        ClientTask task(host, port, service_name, count);
        task.status();
        return 0;
    }
    if (option_exists(argc, argv, "-r") || option_exists(argc, argv, "--reset")) {
        ClientTask task(host, port, service_name, count);
        task.reset();
        return 0;
    }
    // cout << "connecting to port=" << port << endl;
    if (method == "dump") {
        ClientTask task(host, port, service_name, count);
        task.dump();
        return 0;
    }
    // cout << "service name=" << service_name << endl;
    if (pool_size > 0) {
        // Multi-threaded load test: split `count` calls across `pool_size`
        // workers driven by a Thrift thread manager.
        Cursor *cursor = new Cursor(pool_size);
        shared_ptr<ThreadManager> threadManager = ThreadManager::newSimpleThreadManager(pool_size);
        shared_ptr<PosixThreadFactory> threadFactory = shared_ptr<PosixThreadFactory>(new PosixThreadFactory());
        threadManager->threadFactory(threadFactory);
        threadManager->start();
        for (int i = 0; i < pool_size; i++) {
            // Each worker gets count/pool_size calls (at least 1 unless count == 0).
            int batch = count / pool_size == 0 ? (count == 0 ? 0 : 1) : count / pool_size;
            threadManager->add(shared_ptr<ClientTask>(new ClientTask(host, port, service_name, batch, cursor)), 0, 0);
        }
        // Wait until all workers have gone idle (i.e. finished their batches).
        while (threadManager->idleWorkerCount() < pool_size) {
            usleep(1000);
        }
        cout << "idle thread count " << threadManager->idleWorkerCount() << " pool_size=" << pool_size << " count=" << count
                << endl;
        // while(cursor.get() < pool_size){
        //     usleep(1000);
        // }
        usleep(100000);
        threadManager->stop();
        delete cursor;
        cursor = 0;
    } else {
        // Single-threaded mode: run the task inline.
        ClientTask task(host, port, service_name, count);
        task.run();
    }
    return 0;
}
|
#include "../includes/zs-math.h"
#include "../includes/zs-camera.h"
// --- Module-level camera state (written by the setters below) ---
ZSPROJECTIONTYPE projection_type;   // perspective vs. orthographic
float FOV;                          // field of view, used by the perspective branch
float ZNearPlane;                   // near clip distance
float ZFarPlane;                    // far clip distance
float CAMERA_PROJ_WIDTH;            // projection surface width
float CAMERA_PROJ_HEIGHT;           // projection surface height
ZSVECTOR3 camera_up = ZSVECTOR3(0.0f, 1.0f, 0.0f);     // world-up vector
ZSVECTOR3 camera_pos = ZSVECTOR3(0.0f, 0.0f, -3.0f);   // camera position
ZSVECTOR3 camera_target = ZSVECTOR3(0.0f, 0.0f, -1.0f); // view direction (not a look-at point)
ZSMATRIX4x4 PROJECTION;             // last projection matrix built
ZSMATRIX4x4 VIEW;                   // last view matrix built
ZSMATRIX4x4 CAMMATRIX;              // PROJECTION * VIEW, rebuilt by updateCameraMatrix()
// Reset camera state to defaults: 45 FOV, 0.3..1000 clip range, and an
// orthographic projection over a 640x480 surface. Matrices are NOT rebuilt
// here; call updateCameraMatrix() (directly or via a setter) afterwards.
void ZSpire::InitializeCamera(){
    FOV = 45.0f;
    ZNearPlane = 0.3f;
    ZFarPlane = 1000.0f;
    projection_type = CAMERA_PROJECTION_ORTHOGRAPHIC;
    CAMERA_PROJ_WIDTH = 640;
    CAMERA_PROJ_HEIGHT = 480;
}
// Select perspective vs. orthographic projection; takes effect on the next
// updateCameraMatrix() call.
void ZSpire::setCameraProjectionType(ZSPROJECTIONTYPE type) {
    projection_type = type;
}
// Set the near/far clip plane distances (perspective branch only).
void ZSpire::setCameraZPlanes(float znear, float zfar) {
    ZNearPlane = znear;
    ZFarPlane = zfar;
}
// Set the field of view used by the perspective projection.
void ZSpire::setCameraFOV(float nfov) {
    FOV = nfov;
}
// Rebuild PROJECTION, VIEW and the combined CAMMATRIX from current state.
// NOTE(review): the orthographic branch passes (0, W, 0, H) — presumably
// (left, right, bottom, top); confirm against getOrthogonal's signature.
void ZSpire::updateCameraMatrix() {
    if (projection_type == CAMERA_PROJECTION_ORTHOGRAPHIC) {
        PROJECTION = getOrthogonal(0, CAMERA_PROJ_WIDTH, 0, CAMERA_PROJ_HEIGHT);
    }
    else {
        PROJECTION = getPerspective(FOV, CAMERA_PROJ_WIDTH / CAMERA_PROJ_HEIGHT, ZNearPlane, ZFarPlane);
    }
    // camera_target is a direction, so the look-at point is pos + target.
    VIEW = matrixLookAt(camera_pos, camera_pos + camera_target, camera_up);
    CAMMATRIX = PROJECTION * VIEW;
}
// Set the projection surface size used for the aspect ratio / ortho bounds.
// Matrices are not rebuilt until the next updateCameraMatrix() call.
void ZSpire::setCameraProjectionResolution(float WIDTH, float HEIGHT) {
    CAMERA_PROJ_WIDTH = WIDTH;
    CAMERA_PROJ_HEIGHT = HEIGHT;
}
// Last projection matrix built by updateCameraMatrix().
ZSMATRIX4x4 ZSpire::getCameraProjectionMatrix(){
    return PROJECTION;
}
// Last view matrix built by updateCameraMatrix().
ZSMATRIX4x4 ZSpire::getCameraViewMatrix() {
    return VIEW;
}
// Move the camera and rebuild the matrices immediately.
void ZSpire::setCameraPosition(ZSVECTOR3 position) {
    camera_pos = position;
    updateCameraMatrix();
}
// Set the view direction (not a look-at point) and rebuild the matrices.
void ZSpire::setCameraFront(ZSVECTOR3 front) {
    camera_target = front;
    updateCameraMatrix();
}
ZSVECTOR3 ZSpire::getCameraPos(){
    return camera_pos;
}
ZSVECTOR3 ZSpire::getCameraFront(){
    return camera_target;
}
// Right vector as front x up. NOTE(review): handedness depends on vCross's
// convention — verify this yields "right" rather than "left" here.
ZSVECTOR3 ZSpire::getCameraRight(){
    return vCross(camera_target, camera_up);
}
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
#include "extern/beatsaber-hook/shared/utils/byref.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "extern/beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: System
namespace System {
// Forward declaring type: Array
class Array;
// Forward declaring type: Type
class Type;
}
// Forward declaring namespace: UnityEngine
namespace UnityEngine {
// Forward declaring type: Object
class Object;
}
// Forward declaring namespace: System::Collections
namespace System::Collections {
// Forward declaring type: IList
class IList;
}
// Completed forward declares
// Type namespace: UnityEngine.ResourceManagement.Util
namespace UnityEngine::ResourceManagement::Util {
// Size: 0x10
#pragma pack(push, 1)
// Autogenerated type: UnityEngine.ResourceManagement.Util.ResourceManagerConfig
// [TokenAttribute] Offset: FFFFFFFF
// Auto-generated il2cpp proxy type for UnityEngine.ResourceManagement.Util.ResourceManagerConfig.
// Exposes static helpers only; the generated templates resolve the managed method
// via FindMethod at first use and cache the result in function-local statics.
// Do not add data members: the layout must stay in sync with the game assembly.
class ResourceManagerConfig : public ::Il2CppObject {
  public:
    // Creating value type constructor for type: ResourceManagerConfig
    ResourceManagerConfig() noexcept {}
    // static System.Boolean ExtractKeyAndSubKey(System.Object keyObj, out System.String mainKey, out System.String subKey)
    // Offset: 0x166FB50
    static bool ExtractKeyAndSubKey(::Il2CppObject* keyObj, ByRef<::Il2CppString*> mainKey, ByRef<::Il2CppString*> subKey);
    // static public System.Boolean IsPathRemote(System.String path)
    // Offset: 0x166E050
    static bool IsPathRemote(::Il2CppString* path);
    // static public System.Boolean ShouldPathUseWebRequest(System.String path)
    // Offset: 0x166EEBC
    static bool ShouldPathUseWebRequest(::Il2CppString* path);
    // static public System.Array CreateArrayResult(System.Type type, UnityEngine.Object[] allAssets)
    // Offset: 0x1670928
    static System::Array* CreateArrayResult(System::Type* type, ::Array<UnityEngine::Object*>* allAssets);
    // static public TObject CreateArrayResult(UnityEngine.Object[] allAssets)
    // Offset: 0xFFFFFFFF
    // Generic wrapper: looks up the generic method definition, instantiates it with
    // TObject, then invokes it through the il2cpp runtime (throws on failure).
    template<class TObject>
    static TObject CreateArrayResult(::Array<UnityEngine::Object*>* allAssets) {
        static auto ___internal__logger = ::Logger::get().WithContext("UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateArrayResult");
        static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod("UnityEngine.ResourceManagement.Util", "ResourceManagerConfig", "CreateArrayResult", std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<TObject>::get()}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(allAssets)})));
        static auto* ___generic__method = THROW_UNLESS(::il2cpp_utils::MakeGenericMethod(___internal__method, std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<TObject>::get()}));
        return ::il2cpp_utils::RunMethodThrow<TObject, false>(static_cast<Il2CppClass*>(nullptr), ___generic__method, allAssets);
    }
    // static public System.Collections.IList CreateListResult(System.Type type, UnityEngine.Object[] allAssets)
    // Offset: 0x1670AF8
    static System::Collections::IList* CreateListResult(System::Type* type, ::Array<UnityEngine::Object*>* allAssets);
    // static public TObject CreateListResult(UnityEngine.Object[] allAssets)
    // Offset: 0xFFFFFFFF
    // Generic wrapper: same resolve-instantiate-invoke pattern as CreateArrayResult.
    template<class TObject>
    static TObject CreateListResult(::Array<UnityEngine::Object*>* allAssets) {
        static auto ___internal__logger = ::Logger::get().WithContext("UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateListResult");
        static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod("UnityEngine.ResourceManagement.Util", "ResourceManagerConfig", "CreateListResult", std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<TObject>::get()}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(allAssets)})));
        static auto* ___generic__method = THROW_UNLESS(::il2cpp_utils::MakeGenericMethod(___internal__method, std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<TObject>::get()}));
        return ::il2cpp_utils::RunMethodThrow<TObject, false>(static_cast<Il2CppClass*>(nullptr), ___generic__method, allAssets);
    }
    // static public System.Boolean IsInstance()
    // Offset: 0xFFFFFFFF
    // Generic wrapper over the two-type-parameter IsInstance<T1, T2>() managed method.
    template<class T1, class T2>
    static bool IsInstance() {
        static auto ___internal__logger = ::Logger::get().WithContext("UnityEngine::ResourceManagement::Util::ResourceManagerConfig::IsInstance");
        static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod("UnityEngine.ResourceManagement.Util", "ResourceManagerConfig", "IsInstance", std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<T1>::get(), ::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<T2>::get()}, ::std::vector<const Il2CppType*>{})));
        static auto* ___generic__method = THROW_UNLESS((::il2cpp_utils::MakeGenericMethod(___internal__method, std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<T1>::get(), ::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<T2>::get()})));
        return ::il2cpp_utils::RunMethodThrow<bool, false>(static_cast<Il2CppClass*>(nullptr), ___generic__method);
    }
}; // UnityEngine.ResourceManagement.Util.ResourceManagerConfig
#pragma pack(pop)
}
DEFINE_IL2CPP_ARG_TYPE(UnityEngine::ResourceManagement::Util::ResourceManagerConfig*, "UnityEngine.ResourceManagement.Util", "ResourceManagerConfig");
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::ExtractKeyAndSubKey
// Il2CppName: ExtractKeyAndSubKey
// Resolves the MethodInfo* for ExtractKeyAndSubKey by its exact parameter types,
// caching each Il2CppType lookup in a function-local static.
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (*)(::Il2CppObject*, ByRef<::Il2CppString*>, ByRef<::Il2CppString*>)>(&UnityEngine::ResourceManagement::Util::ResourceManagerConfig::ExtractKeyAndSubKey)> {
    static const MethodInfo* get() {
        // byval_arg for the plain object parameter; this_arg (byref) for the two out-strings.
        static auto* keyObj = &::il2cpp_utils::GetClassFromName("System", "Object")->byval_arg;
        static auto* mainKey = &::il2cpp_utils::GetClassFromName("System", "String")->this_arg;
        static auto* subKey = &::il2cpp_utils::GetClassFromName("System", "String")->this_arg;
        return ::il2cpp_utils::FindMethod(classof(UnityEngine::ResourceManagement::Util::ResourceManagerConfig*), "ExtractKeyAndSubKey", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{keyObj, mainKey, subKey});
    }
};
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::IsPathRemote
// Il2CppName: IsPathRemote
// Resolves the MethodInfo* for IsPathRemote(System.String).
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (*)(::Il2CppString*)>(&UnityEngine::ResourceManagement::Util::ResourceManagerConfig::IsPathRemote)> {
    static const MethodInfo* get() {
        static auto* path = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(UnityEngine::ResourceManagement::Util::ResourceManagerConfig*), "IsPathRemote", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{path});
    }
};
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::ShouldPathUseWebRequest
// Il2CppName: ShouldPathUseWebRequest
// Resolves the MethodInfo* for ShouldPathUseWebRequest(System.String).
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (*)(::Il2CppString*)>(&UnityEngine::ResourceManagement::Util::ResourceManagerConfig::ShouldPathUseWebRequest)> {
    static const MethodInfo* get() {
        static auto* path = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(UnityEngine::ResourceManagement::Util::ResourceManagerConfig*), "ShouldPathUseWebRequest", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{path});
    }
};
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateArrayResult
// Il2CppName: CreateArrayResult
// Resolves the MethodInfo* for the non-generic CreateArrayResult(Type, Object[])
// overload; the UnityEngine.Object[] parameter type is built via array_class_get.
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Array* (*)(System::Type*, ::Array<UnityEngine::Object*>*)>(&UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateArrayResult)> {
    static const MethodInfo* get() {
        static auto* type = &::il2cpp_utils::GetClassFromName("System", "Type")->byval_arg;
        static auto* allAssets = &il2cpp_functions::array_class_get(::il2cpp_utils::GetClassFromName("UnityEngine", "Object"), 1)->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(UnityEngine::ResourceManagement::Util::ResourceManagerConfig*), "CreateArrayResult", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{type, allAssets});
    }
};
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateArrayResult
// Il2CppName: CreateArrayResult
// Cannot write MetadataGetter for generic methods!
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateListResult
// Il2CppName: CreateListResult
// Resolves the MethodInfo* for the non-generic CreateListResult(Type, Object[])
// overload (the generic overloads cannot get MetadataGetters; see notes below).
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Collections::IList* (*)(System::Type*, ::Array<UnityEngine::Object*>*)>(&UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateListResult)> {
    static const MethodInfo* get() {
        static auto* type = &::il2cpp_utils::GetClassFromName("System", "Type")->byval_arg;
        static auto* allAssets = &il2cpp_functions::array_class_get(::il2cpp_utils::GetClassFromName("UnityEngine", "Object"), 1)->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(UnityEngine::ResourceManagement::Util::ResourceManagerConfig*), "CreateListResult", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{type, allAssets});
    }
};
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::CreateListResult
// Il2CppName: CreateListResult
// Cannot write MetadataGetter for generic methods!
// Writing MetadataGetter for method: UnityEngine::ResourceManagement::Util::ResourceManagerConfig::IsInstance
// Il2CppName: IsInstance
// Cannot write MetadataGetter for generic methods!
|
/// \file stream.cpp
///
/// Unit tests for stream file external interface
#include <vg/io/stream.hpp>
#include <vg/io/protobuf_iterator.hpp>
#include <vg/io/protobuf_emitter.hpp>
#include <vg/vg.pb.h>
#include "catch.hpp"
#include <sstream>
#include <iostream>
#include <unordered_map>
namespace vg {
namespace unittest {
using namespace std;
TEST_CASE("Protobuf messages that are all default can be stored and retrieved", "[stream]") {
    for (auto compress : {false, true}) {
        stringstream buffer;

        // Emit exactly one default-constructed Graph message.
        REQUIRE(vg::io::write<Graph>(buffer, 1, [](size_t i) {
            return Graph();
        }, compress));
        vg::io::finish(buffer, compress);

        // Count the messages that come back out of the stream.
        int observed = 0;
        vg::io::for_each<Graph>(buffer, [&](const Graph& item) {
            observed++;
        });

        // The single empty message must round-trip.
        REQUIRE(observed == 1);
    }
}
// Round-trips 10 Position messages through one write call, both with and
// without compression, and checks they come back in write order.
TEST_CASE("Protobuf messages can be written and read back", "[stream]") {
    for (auto compress : {false, true}) {
        stringstream datastream;

        // Define some functions to make and check fake Protobuf objects
        using message_t = Position;

        // Factory: encode the requested index into the message's node id.
        auto get_message = [&](size_t index) {
            message_t item;
            item.set_node_id(index);
#ifdef debug
            cerr << "Made item " << index << endl;
#endif
            return item;
        };

        // Checker: messages must arrive in exactly the order they were written.
        size_t index_expected = 0;
        auto check_message = [&](const message_t& item) {
#ifdef debug
            cerr << "Read item " << item.node_id() << endl;
#endif
            REQUIRE(item.node_id() == index_expected);
            index_expected++;
        };

        // Serialize some objects
        REQUIRE(vg::io::write<message_t>(datastream, 10, get_message, compress));
        vg::io::finish(datastream, compress);

#ifdef debug
        // Dump the possibly compressed data
        auto data = datastream.str();
        for (size_t i = 0; i < data.size(); i++) {
            ios state(nullptr);
            state.copyfmt(cerr);
            cerr << setfill('0') << setw(2) << hex << (int)(uint8_t)data[i] << " ";
            if (i % 8 == 7) {
                cerr << endl;
            }
            cerr.copyfmt(state);
        }
        cerr << endl;
#endif

        // Read them back
        vg::io::for_each<message_t>(datastream, check_message);
    }
}
TEST_CASE("Multiple write calls work correctly on the same stream", "[stream]") {
    for (auto compress : {false, true}) {
        stringstream buffer;

        using message_t = Position;

        // Producer: hand out Positions with sequential node ids, ignoring
        // the index argument the writer passes in.
        size_t next_to_write = 0;
        auto produce = [&](size_t index) {
            message_t msg;
            msg.set_node_id(next_to_write);
            next_to_write++;
            return msg;
        };

        // Consumer: ids must come back in write order.
        size_t next_expected = 0;
        auto consume = [&](const message_t& msg) {
            REQUIRE(msg.node_id() == next_expected);
            next_expected++;
        };

        // Issue ten separate one-message write calls against the same stream.
        for (size_t call = 0; call < 10; call++) {
            REQUIRE(vg::io::write<message_t>(buffer, 1, produce, compress));
        }
        vg::io::finish(buffer, compress);

        // Everything must read back, in order, as one stream.
        vg::io::for_each<message_t>(buffer, consume);
    }
}
/// Deconstruct a BGZF-style virtual offset into its two components:
/// (compressed block start, uncompressed offset within that block).
static std::pair<size_t, size_t> unvo(int64_t virtual_offset) {
    // High 48 bits address the compressed block; low 16 bits index into it.
    return std::make_pair(static_cast<size_t>(virtual_offset >> 16),
                          static_cast<size_t>(virtual_offset & 0xFFFF));
}
// Writes 20 Position messages (10 write calls, 2 per group), records each
// item's group virtual offset on a first pass, then verifies both seeking to
// a recorded offset and full in-order iteration on a fresh stream.
TEST_CASE("ProtobufIterator can read serialized data", "[stream]") {
    for (auto compress : {false, true}) {
#ifdef debug
        cerr << "Compress: " << compress << endl;
#endif
        stringstream datastream;

        // Define some functions to make and check fake Protobuf objects
        using message_t = Position;

        // Keep a map so we can look up the group offset for saved items
        unordered_map<size_t, int64_t> index_to_group;

        size_t index_to_make = 0;
        auto get_message = [&](size_t index) {
            message_t item;
            item.set_node_id(index_to_make);
            index_to_make++;
            return item;
        };

        for (size_t i = 0; i < 10; i++) {
            // Serialize some objects (20, in groups of 2)
            REQUIRE(vg::io::write<message_t>(datastream, 2, get_message, compress));
        }
        vg::io::finish(datastream, compress);

        {
            // Scan and populate the table
            vg::io::ProtobufIterator<message_t> it(datastream);
            size_t index_found = 0;
            while (it.has_current()) {
#ifdef debug
                cerr << "We wrote " << index_found << " at VO " << it.tell_group() << endl;
#endif
                index_to_group[index_found] = it.tell_group();
                index_found++;
                ++it;
            }
        }

        // Start over (rebuild the stream, since the scan consumed it)
        datastream = stringstream(datastream.str());

        SECTION("Data can be found by seeking") {
            vg::io::ProtobufIterator<message_t> it(datastream);
#ifdef debug
            cerr << "Try and load from VO " << index_to_group.at(4) << endl;
#endif
            // We know #4 should lead its group.
            bool sought = it.seek_group(index_to_group.at(4));
            REQUIRE(sought == true);
            REQUIRE((*it).node_id() == 4);
        }

        SECTION("Data can be iterated back all in a run") {
            size_t index_expected = 0;
            for (vg::io::ProtobufIterator<message_t> it(datastream); it.has_current(); it.advance()) {
                auto vo_parts = unvo(it.tell_group());
#ifdef debug
                cerr << "Found item " << (*it).node_id() << " at VO " << it.tell_group()
                     << " = " << vo_parts.first << ", " << vo_parts.second << endl;
#endif
                // Each item should be the right item
                REQUIRE((*it).node_id() == index_expected);
                // And it should be in the right group at the right place
                //REQUIRE(it.tell_group() == index_to_group.at(index_expected));
                index_expected++;
            }
        }
    }
}
TEST_CASE("We can read a tag-only GAM file with for_each_parallel", "[stream][gam][empty]") {
    stringstream ss;
    {
        // Constructing and immediately destroying an emitter leaves behind a
        // GAM stream containing only the type tag and no messages.
        vg::io::ProtobufEmitter<Alignment> empty_gam_maker(ss);
    }

    // The emitter must have written something (the tag header).
    REQUIRE(ss.str().size() != 0);

    vg::io::for_each_parallel<Alignment>(ss, [&](const Alignment& observed) {
        // No messages were written, so this callback must never fire.
        REQUIRE(false);
    });

    // Getting here without the reader throwing is the success condition.
}
}
}
|
/*************************************************************************/
/* aabb.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include <godot_cpp/variant/aabb.hpp>
#include <godot_cpp/core/defs.hpp>
#include <godot_cpp/variant/string.hpp>
namespace godot {
// Note: despite the name, this returns the product of all three size
// components, i.e. the box's volume.
real_t AABB::get_area() const {
    return size.x * size.y * size.z;
}
// Exact equality of position and size; use is_equal_approx() for
// tolerance-based comparison.
bool AABB::operator==(const AABB &p_rval) const {
    return ((position == p_rval.position) && (size == p_rval.size));
}
// Exact inequality: true if either position or size differs.
bool AABB::operator!=(const AABB &p_rval) const {
    return ((position != p_rval.position) || (size != p_rval.size));
}
void AABB::merge_with(const AABB &p_aabb) {
Vector3 beg_1, beg_2;
Vector3 end_1, end_2;
Vector3 min, max;
beg_1 = position;
beg_2 = p_aabb.position;
end_1 = Vector3(size.x, size.y, size.z) + beg_1;
end_2 = Vector3(p_aabb.size.x, p_aabb.size.y, p_aabb.size.z) + beg_2;
min.x = (beg_1.x < beg_2.x) ? beg_1.x : beg_2.x;
min.y = (beg_1.y < beg_2.y) ? beg_1.y : beg_2.y;
min.z = (beg_1.z < beg_2.z) ? beg_1.z : beg_2.z;
max.x = (end_1.x > end_2.x) ? end_1.x : end_2.x;
max.y = (end_1.y > end_2.y) ? end_1.y : end_2.y;
max.z = (end_1.z > end_2.z) ? end_1.z : end_2.z;
position = min;
size = max - min;
}
// Approximate comparison, delegating to Vector3::is_equal_approx for both
// position and size.
bool AABB::is_equal_approx(const AABB &p_aabb) const {
    return position.is_equal_approx(p_aabb.position) && size.is_equal_approx(p_aabb.size);
}
// Return the overlapping region of the two boxes, or a default (empty)
// AABB when they are disjoint on any axis.
AABB AABB::intersection(const AABB &p_aabb) const {
    const Vector3 src_min = position;
    const Vector3 src_max = position + size;
    const Vector3 dst_min = p_aabb.position;
    const Vector3 dst_max = p_aabb.position + p_aabb.size;

    Vector3 min, max;
    for (int i = 0; i < 3; i++) {
        // Disjoint on this axis means no intersection at all.
        if (src_min[i] > dst_max[i] || src_max[i] < dst_min[i]) {
            return AABB();
        }
        // Otherwise clip this axis to the shared interval.
        min[i] = (src_min[i] > dst_min[i]) ? src_min[i] : dst_min[i];
        max[i] = (src_max[i] < dst_max[i]) ? src_max[i] : dst_max[i];
    }

    return AABB(min, max - min);
}
// Slab-method ray/box test: intersect the ray with each axis-aligned slab,
// tracking the latest entry (near) and earliest exit (far) parameters.
// Optionally reports the entry point (r_clip) and an axis-aligned entry
// normal (r_normal).
bool AABB::intersects_ray(const Vector3 &p_from, const Vector3 &p_dir, Vector3 *r_clip, Vector3 *r_normal) const {
    Vector3 c1, c2;
    Vector3 end = position + size;
    real_t near = -1e20;
    real_t far = 1e20;
    int axis = 0; // axis where the ray enters the box last (defines the normal)

    for (int i = 0; i < 3; i++) {
        if (p_dir[i] == 0) {
            // Ray parallel to this slab: the origin must lie within it.
            if ((p_from[i] < position[i]) || (p_from[i] > end[i])) {
                return false;
            }
        } else { // ray not parallel to planes in this direction
            c1[i] = (position[i] - p_from[i]) / p_dir[i];
            c2[i] = (end[i] - p_from[i]) / p_dir[i];

            if (c1[i] > c2[i]) {
                // NOTE(review): SWAP exchanges the whole vectors, not just
                // component i — this matches upstream Godot; verify intent.
                SWAP(c1, c2);
            }
            if (c1[i] > near) {
                near = c1[i];
                axis = i;
            }
            if (c2[i] < far) {
                far = c2[i];
            }
            // Exit before entry, or the box lies entirely behind the origin.
            if ((near > far) || (far < 0)) {
                return false;
            }
        }
    }

    if (r_clip) {
        *r_clip = c1;
    }
    if (r_normal) {
        *r_normal = Vector3();
        // Normal on the entry axis points against the ray's direction.
        (*r_normal)[axis] = p_dir[axis] ? -1 : 1;
    }

    return true;
}
// Clip the segment p_from..p_to against the box via per-axis parametric
// interval clipping over t in [0, 1]. Returns true on hit; optionally
// reports the entry point (r_clip) and the axis-aligned entry normal
// (r_normal, pointing against the segment's travel direction).
bool AABB::intersects_segment(const Vector3 &p_from, const Vector3 &p_to, Vector3 *r_clip, Vector3 *r_normal) const {
    // [min, max] is the surviving parametric range along the segment.
    real_t min = 0, max = 1;
    int axis = 0;
    real_t sign = 0;

    for (int i = 0; i < 3; i++) {
        real_t seg_from = p_from[i];
        real_t seg_to = p_to[i];
        real_t box_begin = position[i];
        real_t box_end = box_begin + size[i];
        real_t cmin, cmax;
        real_t csign;

        if (seg_from < seg_to) {
            // Segment travels in the +axis direction on this axis.
            if (seg_from > box_end || seg_to < box_begin) {
                return false;
            }
            real_t length = seg_to - seg_from;
            cmin = (seg_from < box_begin) ? ((box_begin - seg_from) / length) : 0;
            cmax = (seg_to > box_end) ? ((box_end - seg_from) / length) : 1;
            csign = -1.0;
        } else {
            // Segment travels in the -axis direction (a degenerate axis with
            // seg_from == seg_to takes the 0/1 branches, so no divide-by-zero).
            if (seg_to > box_end || seg_from < box_begin) {
                return false;
            }
            real_t length = seg_to - seg_from;
            cmin = (seg_from > box_end) ? (box_end - seg_from) / length : 0;
            cmax = (seg_to < box_begin) ? (box_begin - seg_from) / length : 1;
            csign = 1.0;
        }

        if (cmin > min) {
            min = cmin;
            axis = i;
            sign = csign;
        }
        if (cmax < max) {
            max = cmax;
        }
        // The clipped interval became empty: no intersection.
        if (max < min) {
            return false;
        }
    }

    Vector3 rel = p_to - p_from;

    if (r_normal) {
        Vector3 normal;
        normal[axis] = sign;
        *r_normal = normal;
    }

    if (r_clip) {
        *r_clip = p_from + rel * min;
    }

    return true;
}
bool AABB::intersects_plane(const Plane &p_plane) const {
Vector3 points[8] = {
Vector3(position.x, position.y, position.z),
Vector3(position.x, position.y, position.z + size.z),
Vector3(position.x, position.y + size.y, position.z),
Vector3(position.x, position.y + size.y, position.z + size.z),
Vector3(position.x + size.x, position.y, position.z),
Vector3(position.x + size.x, position.y, position.z + size.z),
Vector3(position.x + size.x, position.y + size.y, position.z),
Vector3(position.x + size.x, position.y + size.y, position.z + size.z),
};
bool over = false;
bool under = false;
for (int i = 0; i < 8; i++) {
if (p_plane.distance_to(points[i]) > 0) {
over = true;
} else {
under = true;
}
}
return under && over;
}
// Unit vector along the axis with the largest extent (ties favor X, then Y).
Vector3 AABB::get_longest_axis() const {
    Vector3 result(1, 0, 0);
    real_t longest = size.x;

    if (size.y > longest) {
        result = Vector3(0, 1, 0);
        longest = size.y;
    }
    if (size.z > longest) {
        result = Vector3(0, 0, 1);
    }

    return result;
}
int AABB::get_longest_axis_index() const {
int axis = 0;
real_t max_size = size.x;
if (size.y > max_size) {
axis = 1;
max_size = size.y;
}
if (size.z > max_size) {
axis = 2;
}
return axis;
}
// Unit vector along the axis with the smallest extent (ties favor X, then Y).
Vector3 AABB::get_shortest_axis() const {
    Vector3 result(1, 0, 0);
    real_t shortest = size.x;

    if (size.y < shortest) {
        result = Vector3(0, 1, 0);
        shortest = size.y;
    }
    if (size.z < shortest) {
        result = Vector3(0, 0, 1);
    }

    return result;
}
int AABB::get_shortest_axis_index() const {
int axis = 0;
real_t max_size = size.x;
if (size.y < max_size) {
axis = 1;
max_size = size.y;
}
if (size.z < max_size) {
axis = 2;
}
return axis;
}
// Non-mutating counterpart of merge_with(): returns a copy enclosing both
// this box and p_with.
AABB AABB::merge(const AABB &p_with) const {
    AABB aabb = *this;
    aabb.merge_with(p_with);
    return aabb;
}
// Non-mutating counterpart of expand_to(): returns a copy grown to include
// the point p_vector (expand_to is declared elsewhere).
AABB AABB::expand(const Vector3 &p_vector) const {
    AABB aabb = *this;
    aabb.expand_to(p_vector);
    return aabb;
}
// Non-mutating counterpart of grow_by(): returns an enlarged copy
// (grow_by is declared elsewhere; presumably pads each face by p_by).
AABB AABB::grow(real_t p_by) const {
    AABB aabb = *this;
    aabb.grow_by(p_by);
    return aabb;
}
// Write the endpoints of edge p_edge (0-11) into r_from/r_to.
// Cases 0-3 bound the bottom face (y = position.y), 4-7 the top face
// (y = position.y + size.y), and 8-11 are the vertical connecting edges.
void AABB::get_edge(int p_edge, Vector3 &r_from, Vector3 &r_to) const {
    ERR_FAIL_INDEX(p_edge, 12);
    switch (p_edge) {
        // Bottom face ring.
        case 0: {
            r_from = Vector3(position.x + size.x, position.y, position.z);
            r_to = Vector3(position.x, position.y, position.z);
        } break;
        case 1: {
            r_from = Vector3(position.x + size.x, position.y, position.z + size.z);
            r_to = Vector3(position.x + size.x, position.y, position.z);
        } break;
        case 2: {
            r_from = Vector3(position.x, position.y, position.z + size.z);
            r_to = Vector3(position.x + size.x, position.y, position.z + size.z);
        } break;
        case 3: {
            r_from = Vector3(position.x, position.y, position.z);
            r_to = Vector3(position.x, position.y, position.z + size.z);
        } break;
        // Top face ring.
        case 4: {
            r_from = Vector3(position.x, position.y + size.y, position.z);
            r_to = Vector3(position.x + size.x, position.y + size.y, position.z);
        } break;
        case 5: {
            r_from = Vector3(position.x + size.x, position.y + size.y, position.z);
            r_to = Vector3(position.x + size.x, position.y + size.y, position.z + size.z);
        } break;
        case 6: {
            r_from = Vector3(position.x + size.x, position.y + size.y, position.z + size.z);
            r_to = Vector3(position.x, position.y + size.y, position.z + size.z);
        } break;
        case 7: {
            r_from = Vector3(position.x, position.y + size.y, position.z + size.z);
            r_to = Vector3(position.x, position.y + size.y, position.z);
        } break;
        // Vertical edges joining bottom to top.
        case 8: {
            r_from = Vector3(position.x, position.y, position.z + size.z);
            r_to = Vector3(position.x, position.y + size.y, position.z + size.z);
        } break;
        case 9: {
            r_from = Vector3(position.x, position.y, position.z);
            r_to = Vector3(position.x, position.y + size.y, position.z);
        } break;
        case 10: {
            r_from = Vector3(position.x + size.x, position.y, position.z);
            r_to = Vector3(position.x + size.x, position.y + size.y, position.z);
        } break;
        case 11: {
            r_from = Vector3(position.x + size.x, position.y, position.z + size.z);
            r_to = Vector3(position.x + size.x, position.y + size.y, position.z + size.z);
        } break;
    }
}
// Human-readable form: "<position> - <size>".
AABB::operator String() const {
    return position.operator String() + " - " + size.operator String();
}
} // namespace godot
|
/**
* Copyright (c) 2017-present, Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "logdevice/common/settings/Settings.h"
#include <cctype>
#include <limits>
#include <utility>
#include <zstd.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/program_options.hpp>
#include <boost/thread/thread.hpp>
#include <folly/Format.h>
#include <folly/String.h>
#include "logdevice/common/SnapshotStoreTypes.h"
#include "logdevice/common/Sockaddr.h"
#include "logdevice/common/commandline_util_chrono.h"
#include "logdevice/common/debug.h"
#include "logdevice/common/protocol/Compatibility.h"
#include "logdevice/common/protocol/MessageTypeNames.h"
#include "logdevice/common/settings/Validators.h"
using namespace facebook::logdevice::setting_validators;
namespace facebook { namespace logdevice {
// Parse the --num-workers setting and return a worker count clamped to
// [1, MAX_WORKERS]. Throws boost::program_options::error on bad input.
static int parse_num_workers(const std::string& value) {
    // Following formats are allowed:
    // 1. Number, e.g. "16"
    // 2. Number of physical cores, "ncores" or "cores"
    // 3. Number of physical cores and multiplicator, e.g. "ncores*1.4"
    std::string num_workers = value;
    int workers;
    double multiplicator = 1.0;

    std::size_t multiplier = value.find("*");
    if (multiplier != std::string::npos) {
        num_workers = value.substr(0, multiplier);
        std::string right_operand = value.substr(multiplier + 1);
        // Only "cores*<x>" / "ncores*<x>" may carry a multiplicator.
        if (num_workers != "cores" && num_workers != "ncores") {
            throw boost::program_options::error(
                std::string("Invalid value for --num-workers. Left operand must be "
                            "cores or ncores. Example: ncores*1.4"));
        }
        try {
            multiplicator = std::stod(right_operand);
        } catch (const std::invalid_argument& ex) {
            throw boost::program_options::error(
                std::string("Invalid multiplicator for --num-workers."));
        }
        if (multiplicator <= 0) {
            throw boost::program_options::error(
                std::string("Multiplicator for --num-workers must be > 0."));
        }
    }

    if (num_workers == "cores" || num_workers == "ncores") {
        workers = boost::thread::physical_concurrency();
    } else {
        try {
            // Parse into a wide type and range-check before narrowing:
            // assigning std::stol's long straight into int could silently
            // wrap (e.g. "4294967297" -> 1 on LP64) and then pass validation.
            long long parsed = std::stoll(num_workers, nullptr, 10);
            if (parsed < std::numeric_limits<int>::min() ||
                parsed > std::numeric_limits<int>::max()) {
                // out_of_range derives from logic_error, so the catch below
                // turns this into the standard error message.
                throw std::out_of_range("num-workers out of int range");
            }
            workers = static_cast<int>(parsed);
        } catch (const std::logic_error& ex) {
            throw boost::program_options::error(
                std::string("Invalid value for --num-workers."));
        }
        validate_range<int>(1, MAX_WORKERS)("num-workers", workers);
    }
    // Apply the multiplicator and clamp into [1, MAX_WORKERS].
    return std::max(std::min<int>(workers * multiplicator, MAX_WORKERS), 1);
}
// Parse "a,b" into the (a, b) coefficient pair of the test-only linear
// timestamp transform. Throws program_options::error on malformed input.
static std::pair<int64_t, int64_t>
parse_test_timestamp_linear_tranform(const std::string& value) {
    std::vector<std::string> fields;
    folly::split(",", value, fields, true);
    // Exactly two comma-separated integers are required.
    if (fields.size() != 2) {
        throw boost::program_options::error(
            std::string("Invalid value for test-timestamp-linear-tranform."));
    }
    try {
        return std::make_pair<int64_t, int64_t>(
            std::stoll(fields[0], nullptr, 10),
            std::stoll(fields[1], nullptr, 10));
    } catch (const std::logic_error& ex) {
        throw boost::program_options::error(
            std::string("Invalid value for test-timestamp-linear-tranform."));
    }
}
static std::vector<node_index_t>
parse_recipients_list(const std::string& value) {
std::vector<std::string> recipients_tmp;
std::vector<node_index_t> recipients;
folly::split(",", value, recipients_tmp, true);
try {
for (const auto& nid : recipients_tmp) {
recipients.push_back(std::stoi(nid, nullptr, 10));
}
} catch (const std::logic_error& ex) {
throw boost::program_options::error(
std::string("Invalid node ID in recipients list."));
}
return recipients;
}
static std::unordered_set<logid_t> parse_log_set(const std::string& value) {
std::unordered_set<logid_t> res;
std::vector<std::string> logs_tmp;
folly::split(",", value, logs_tmp, true);
try {
for (const auto& str : logs_tmp) {
auto rv = res.insert(logid_t(std::stoull(str)));
if (!rv.second) {
throw boost::program_options::error(
std::string("Duplicate Log ID in the list."));
}
}
} catch (const std::logic_error& ex) {
throw boost::program_options::error(
std::string("Invalid Log ID in the list."));
}
return res;
}
// Map the --test-reject-hello setting onto the Status to reject HELLOs
// with. "OK" (the default) means HELLO messages are not rejected.
static Status validate_reject_hello(const std::string& value) {
    if (value == "OK") { // default
        return E::OK; // do not reject
    }
    if (value == "ACCESS") {
        return E::ACCESS;
    }
    if (value == "PROTONOSUPPORT") {
        return E::PROTONOSUPPORT;
    }
    if (value == "INVALID_CLUSTER") {
        return E::INVALID_CLUSTER;
    }
    if (value == "DESTINATION_MISMATCH") {
        return E::DESTINATION_MISMATCH;
    }
    throw boost::program_options::error(
        "Invalid value for --test-reject-hello. "
        "Expected ACCESS, PROTONOSUPPORT, DESTINATION_MISMATCH, "
        "or INVALID_CLUSTER. Got " +
        value);
}
// Parse a comma-separated list of message type names into a set.
// "all" selects every valid type; a leading '~' on the first token inverts
// the selection (everything except the listed types). Throws
// program_options::error on unknown or duplicated names.
static std::unordered_set<MessageType>
parse_message_types(const std::string& val) {
    std::unordered_set<MessageType> res;
    if (val == "all") {
        auto types = messageTypeNames().allValidKeys();
        res.insert(types.begin(), types.end());
        return res;
    }
    std::vector<std::string> tokens;
    folly::split(",", val, tokens, true);
    // A '~' prefix on the first token marks the list as an exclusion list.
    bool inverse = false;
    if (!tokens.empty() && !tokens[0].empty() && tokens[0][0] == '~') {
        inverse = true;
        tokens[0].erase(tokens[0].begin());
    }
    for (const auto& str : tokens) {
        MessageType type = messageTypeNames().reverseLookup(str);
        if (type == MessageType::INVALID) {
            throw boost::program_options::error(
                std::string("Invalid message type in the list (\"" + str + "\")."));
        }
        auto rv = res.insert(type);
        if (!rv.second) {
            throw boost::program_options::error(
                std::string("Duplicate message type in the list (\"" + str + "\")."));
        }
    }
    if (inverse) {
        // Complement: start from all valid types, remove the listed ones.
        auto exclude = std::move(res);
        auto types = messageTypeNames().allValidKeys();
        res = std::unordered_set<MessageType>(types.begin(), types.end());
        for (MessageType t : exclude) {
            res.erase(t);
        }
    }
    return res;
}
// Parse a comma-separated list of socket addresses into a SockaddrSet.
// "unix://<path>" denotes a unix socket (a bare "unix://" records that
// anonymous unix sockets are present); anything else is tried as "ip:port"
// and then as a bare IP with port 0. Throws program_options::error on
// malformed input.
static SockaddrSet parse_sockaddrs(const std::string& val) {
    std::unordered_set<Sockaddr, Sockaddr::Hash> elements;
    bool anonymous_unix_socket_present = false;
    std::vector<std::string> tokens;
    folly::split(",", val, tokens, true);
    for (const auto& str : tokens) {
        folly::SocketAddress tmp;
        try {
            if (boost::starts_with(str, "unix://")) {
                if (str == "unix://") {
                    anonymous_unix_socket_present = true;
                    continue;
                }
                tmp.setFromPath(str.substr(7));
            } else {
                try {
                    tmp.setFromIpPort(str);
                } catch (std::exception& e) {
                    // No port given: retry with an explicit port 0.
                    tmp.setFromIpPort(str + ":0");
                }
            }
        } catch (std::invalid_argument& e) {
            throw boost::program_options::error(
                std::string("Invalid socket address \"" + str + "\""));
        } catch (std::exception& e) {
            // Fixed: the message was missing the space before the quote
            // ("...parse address\"..." -> "...parse address \"...").
            throw boost::program_options::error(
                std::string("Couldn't parse address \"" + str + "\": " + e.what()));
        }
        elements.insert(Sockaddr(std::move(tmp)));
    }
    return {elements, anonymous_unix_socket_present};
}
// Parse the --loglevel setting; throws program_options::error with a
// usage message when the value is not a recognized level name.
dbg::Level parse_log_level(const std::string& val) {
    const auto level = dbg::tryParseLoglevel(val.c_str());
    if (level.has_value()) {
        return level.value();
    }
    // Build the diagnostic only on the failure path.
    std::array<char, 1024> buf;
    snprintf(buf.data(),
             buf.size(),
             "Invalid value for --loglevel: %s. "
             "Expected one of: critical, error, warning, notify, "
             "info, debug, spew, none",
             val.c_str());
    throw boost::program_options::error(std::string(buf.data()));
}
// Parse the --logcolored setting; throws program_options::error with a
// usage message when the value is not recognized.
dbg::Colored parse_log_colored(const std::string& val) {
    const auto colored = dbg::tryParseLogColored(val.c_str());
    if (!colored.has_value()) {
        std::array<char, 1024> buf;
        // Typo fix in the user-facing message: "nerver" -> "never".
        snprintf(buf.data(),
                 buf.size(),
                 "Invalid value for --logcolored: %s. "
                 "Expected one of: always, auto, never, none",
                 val.c_str());
        throw boost::program_options::error(std::string(buf.data()));
    }
    return colored.value();
}
// Map the symbolic SCD copyset reordering setting onto its numeric value.
// Throws program_options::error on unknown values.
// NOTE(review): the error message says --scd-copyset-ordering-max while the
// function name suggests "reordering" — confirm the actual option name.
static int parse_scd_copyset_reordering(const std::string& val) {
    if (val == "none") {
        return 0;
    }
    if (val == "hash-shuffle") {
        return 1;
    }
    if (val == "hash-shuffle-client-seed") {
        return 2;
    }
    std::array<char, 1024> buf;
    snprintf(buf.data(),
             buf.size(),
             "Invalid value for --scd-copyset-ordering-max: %s. "
             "Expected one of: none, hash-shuffle, hash-shuffle-client-seed",
             val.c_str());
    throw boost::program_options::error(std::string(buf.data()));
}
// Stream extractor used by program_options to parse a NodeLocationScope.
// Matching is case-insensitive; "NONE" is accepted as an alias for ROOT.
// Sets failbit and throws program_options::error on unknown names.
std::istream& operator>>(std::istream& in, NodeLocationScope& val) {
    std::string key;
    in >> key;
    // Upper-case the token so matching is case-insensitive.
    std::transform(key.begin(), key.end(), key.begin(), ::toupper);
    NodeLocationScope e;
    if (key == "NONE") {
        // "NONE" maps to the root (outermost) scope.
        e = NodeLocationScope::ROOT;
    } else {
        e = NodeLocation::scopeNames().reverseLookup(key);
    }
    if (e == NodeLocationScope::INVALID) {
        in.setstate(std::ios::failbit);
        throw boost::program_options::error(
            "Invalid location scope name. Expected one of: " +
            rangeToString(NodeLocation::scopeNames().begin(),
                          NodeLocation::scopeNames().end()) +
            ", NONE");
    }
    val = e;
    return in;
}
std::istream& operator>>(std::istream& in, Status& val) {
std::string token;
in >> token;
Status status = errorStrings().reverseLookup<std::string>(
token,
[](const std::string& s, const ErrorCodeInfo& e) { return s == e.name; });
if (status == E::UNKNOWN) {
in.setstate(std::ios::failbit);
throw boost::program_options::error("Invalid status: " + token);
}
val = status;
return in;
}
std::istream& operator>>(std::istream& in, Durability& val) {
std::string token;
in >> token;
std::transform(token.begin(), token.end(), token.begin(), ::toupper);
Durability durability = durabilityStrings().reverseLookup(token);
if (durability == Durability::INVALID || durability == Durability::ALL) {
in.setstate(std::ios::failbit);
throw boost::program_options::error("Invalid Durability setting: " + token);
}
val = durability;
return in;
}
// Parse an optional duration setting: the empty string yields an unset
// Optional; anything else must be a valid chrono string or we throw.
folly::Optional<std::chrono::milliseconds>
parse_optional_chrono_option(const std::string& value) {
    if (value.empty()) {
        // Empty input means the option was not set at all.
        return folly::Optional<std::chrono::milliseconds>();
    }
    std::chrono::milliseconds parsed;
    if (parse_chrono_string(value, &parsed) != 0) {
        throw boost::program_options::error("Invalid timeout value: " + value);
    }
    return folly::Optional<std::chrono::milliseconds>(parsed);
}
decltype(auto)
parse_time_threshold_per_monitoring_tag(const std::string& value) {
folly::F14FastMap<std::string, std::chrono::milliseconds> res;
std::vector<std::string> tokens;
folly::split(',', value, tokens, true);
for (const auto& token : tokens) {
std::string monitoring_tag;
std::string time_threshold_str;
if (!folly::split(':', token, monitoring_tag, time_threshold_str)) {
throw boost::program_options::error(
"Invalid monitoring tag / time threshold pair: " + token);
}
std::chrono::milliseconds time_threshold;
if (parse_chrono_string(time_threshold_str, &time_threshold) != 0) {
throw boost::program_options::error("Invalid time threhold: " +
time_threshold_str);
}
res[monitoring_tag] = time_threshold;
}
return res;
}
// Parse a compression codec name into the Compression enum via
// parseCompression (defined elsewhere; returns -1 on unknown values).
// Throws program_options::error if the name is not recognized.
Compression parse_compression(const std::string& value) {
    Compression compression;
    auto rv = parseCompression(value.c_str(), &compression);
    if (rv == -1) {
        throw boost::program_options::error("Invalid compression value: " + value);
    }
    return compression;
}
// Parses a comma-separated list of authoritative-status overrides of the
// form "N<node>:S<shard>[-<shard>]:<status>", e.g.
// "N7:S2-5:UNDERREPLICATION" or "N7:S2:UNDERREPLICATION", into a map from
// ShardID to AuthoritativeStatus.
// Throws boost::program_options::error on any malformed token or when the
// same shard is overridden twice.
std::unordered_map<ShardID, AuthoritativeStatus>
parse_authoritative_status_overrides(const std::string& value) {
  std::unordered_map<ShardID, AuthoritativeStatus> res;
  std::vector<std::string> tokens;
  // Split on ',', dropping empty tokens (trailing/double commas).
  folly::split(',', value, tokens, true);
  for (const std::string& tok : tokens) {
    // `ok` stays false if any validation step below bails out via `break`;
    // the do/while(false) acts as a structured "goto error" for this token.
    bool ok = false;
    do { // while (false)
      // tok format: "N7:S2-5:UNDERREPLICATION" or "N7:S2:UNDERREPLICATION"
      std::string node_str;
      std::string shard_str;
      std::string status_str;
      // Must split into exactly three ':'-separated fields.
      if (!folly::split(':', tok, node_str, shard_str, status_str)) {
        break;
      }
      AuthoritativeStatus status;
      // Node must start with 'N', shard with 'S', and the status name must
      // be a recognized AuthoritativeStatus string.
      if (node_str.empty() || shard_str.empty() || node_str[0] != 'N' ||
          shard_str[0] != 'S' ||
          !parseAuthoritativeStatus(status_str, status)) {
        break;
      }
      // Strip the 'N'/'S' prefixes, leaving the numeric parts.
      node_str.erase(node_str.begin());
      shard_str.erase(shard_str.begin());
      try {
        // Shard part is either "<n>" or "<min>-<max>".
        std::vector<std::string> shard_tok;
        folly::split('-', shard_str, shard_tok);
        if (shard_tok.empty() || shard_tok.size() > 2) {
          break;
        }
        int min_shard = std::stoi(shard_tok[0]);
        int max_shard =
            shard_tok.size() == 2 ? std::stoi(shard_tok[1]) : min_shard;
        // Range must be well-ordered and within [0, MAX_SHARDS).
        if (max_shard < min_shard || min_shard < 0 || max_shard >= MAX_SHARDS) {
          break;
        }
        int node = std::stoi(node_str);
        // NOTE(review): upper bound uses shard_index_t's max as the node
        // index limit — presumably intentional; confirm against how
        // node_index_t is sized.
        if (node < 0 || node > (int)std::numeric_limits<shard_index_t>::max()) {
          break;
        }
        // Expand the shard range into individual ShardID entries.
        for (int shard = min_shard; shard <= max_shard; ++shard) {
          ShardID shard_id((node_index_t)node, (shard_index_t)shard);
          auto ins = res.emplace(shard_id, status);
          if (!ins.second) {
            // Duplicate shard across tokens (or within a range overlap):
            // hard error rather than silent overwrite.
            throw boost::program_options::error(
                "Duplicate authoritative status override for " +
                shard_id.toString());
          }
        }
        ok = true;
      } catch (const std::logic_error&) {
        // std::stoi throws std::invalid_argument / std::out_of_range (both
        // derive from std::logic_error) on non-numeric or oversized input.
        break;
      }
    } while (false);
    if (!ok) {
      throw boost::program_options::error(
          "Invalid authoritative status override: " + tok +
          "; expected N<number>:S<number>[-<number>]:<status>, where status is "
          "one of: " +
          toString(allAuthoritativeStatusStrings()));
    }
  }
  return res;
}
// Maps the textual snapshot-store setting onto a SnapshotStoreType.
// Throws boost::program_options::error for anything outside the supported
// set of names.
static SnapshotStoreType validate_rsm_snapshot_store(const std::string& value) {
  if (value == "local-store") {
    return SnapshotStoreType::LOCAL_STORE;
  }
  if (value == "message") {
    return SnapshotStoreType::MESSAGE;
  }
  if (value == "log") {
    return SnapshotStoreType::LOG;
  }
  if (value == "legacy") {
    return SnapshotStoreType::LEGACY;
  }
  throw boost::program_options::error(
      "Invalid value for snapshot store: " + value +
      ". Expected one of 'legacy', 'log', 'message', 'local-store'.");
}
void Settings::defineSettings(SettingEasyInit& init) {
using namespace SettingFlag;
init("server",
&server,
"false",
nullptr, // no validation
"if true, the Processor with this Settings object is running in a "
"LogDevice server. If false, it's in a LogDeviceClient. This isn't set "
"by "
"any parsed setting, but is only set directly by servers",
INTERNAL_ONLY);
init("bootstrapping",
&bootstrapping,
"false",
nullptr, // no validation
"if true, the Processor with this Settings object is only used to "
"perform bootstrapping and will be destroyed after bootstrapping is "
"completed. This isn't set by any parsed setting, but is only set "
"directly internally",
INTERNAL_ONLY);
init(
"max-incoming-connections",
&max_incoming_connections,
std::to_string(std::numeric_limits<ssize_t>::max()).c_str(),
parse_positive<ssize_t>(),
"(server-only setting) Maximum number number of incoming connections "
"this "
"server will accept. This is normally not set directly, but derived from "
"other settings (such as the fd limit).",
INTERNAL_ONLY);
init(
"connection-backlog",
&connection_backlog,
"2000",
parse_positive<ssize_t>(),
"(server-only setting) Maximum number of incoming connections that have "
"been accepted by listener (have an open FD) but have not been processed "
"by workers (made logdevice protocol handshake).",
SERVER,
SettingsCategory::Network);
init("max-external-connections",
&max_external_connections,
std::to_string(std::numeric_limits<ssize_t>::max()).c_str(),
parse_positive<ssize_t>(),
"(server-only setting) Maximum number of established incoming "
"connections, coming from outside of the cluster, with handshake "
"completed. Usually calculated from other settings.",
INTERNAL_ONLY,
SettingsCategory::Network);
init("num-workers",
&num_workers,
"cores",
parse_num_workers,
"number of worker threads to run, or \"cores\" for one thread "
"per CPU core",
SERVER | CLIENT | REQUIRES_RESTART /* used in Processor ctor */,
SettingsCategory::Execution);
init("msg-error-injection-chance",
&message_error_injection_chance_percent,
"0",
validate_range<double>(0, 100),
"percentage chance of a forced message error on a Socket. "
"Used to exercise error handling paths.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("msg-error-injection-status",
&message_error_injection_status,
"NOBUFS",
nullptr, // no validation
"Status that should be returned for a simulated message transmission "
"error. Some values are treated in special ways: CBREGISTERED pretends "
"that the message was delayed by traffic shaping (only if traffic "
"shaping is enabled); DROPPED makes Sender to pretend that the message "
"is in-flight indefinitely, without ever sending it.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Testing);
init("disable-trace-logger",
&trace_logger_disabled,
"false",
nullptr, // no validation
"If disabled, NoopTraceLogger will be used,"
" otherwise FBTraceLogger is used",
SERVER | CLIENT | REQUIRES_RESTART /* init'ed at startup */,
SettingsCategory::Monitoring);
init("outbytes-mb",
&outbufs_mb_max_per_thread,
"512",
parse_positive<ssize_t>(),
"per-thread limit on bytes pending in output evbuffers (in MB)",
SERVER | CLIENT,
SettingsCategory::Network);
init("sendbuf-kb",
&tcp_sendbuf_kb,
"-1",
parse_validate_lower_bound<ssize_t>(-1),
"TCP socket sendbuf size in KB. Changing this setting on-the-fly will "
"not "
"apply it to existing sockets, only to newly created ones",
SERVER | CLIENT,
SettingsCategory::Network);
init(
"rcvbuf-kb",
&tcp_rcvbuf_kb,
"-1",
parse_validate_lower_bound<ssize_t>(-1),
"TCP socket rcvbuf size in KB. Changing this setting on-the-fly will not "
"apply it to existing sockets, only to newly created ones",
SERVER | CLIENT,
SettingsCategory::Network);
init(
"nagle",
&nagle,
"false",
nullptr, // no validation
"enable Nagle's algorithm on TCP sockets. Changing this setting "
"on-the-fly will not apply it to existing sockets, only to newly created "
"ones",
SERVER | CLIENT,
SettingsCategory::Network);
init(
"outbuf-kb",
&outbuf_overflow_kb,
"32768",
parse_positive<ssize_t>(),
"max output buffer size (userspace extension of socket sendbuf) in KB. "
"Changing this setting on-the-fly will not apply it to existing sockets, "
"only to newly created ones", // TODO (t13429319): fix this
SERVER | CLIENT,
SettingsCategory::Network);
init("outbufs-limit-per-peer-type",
&outbufs_limit_per_peer_type_enabled,
"true",
nullptr, // no validation
"If enabled, the outbytes-mb limit is split in half between "
"client-to-server and server-to-server connections. If disabled, the "
"limit is shared; in particular, a few misbehaving clients may cause "
"the server to use up all its outbytes-mb and become unable to send to "
"other servers.",
SERVER,
SettingsCategory::Network);
init("outbuf-socket-min-kb",
&outbuf_socket_min_kb,
"1",
parse_positive<ssize_t>(),
"Minimum outstanding bytes per socket in kb. Global sender's budget "
"will be ignored if socket's outstanding bytes is less than this value. "
"Changing this setting on-the-fly will not apply it to existing "
"sockets, only to newly created ones", // TODO (t13429319): fix this
SERVER,
SettingsCategory::Network);
init("output-max-records-kb",
&output_max_records_kb,
"1024",
parse_validate_lower_bound<ssize_t>(-1),
"amount of RECORD data to push to the client at once",
SERVER | CLIENT,
SettingsCategory::ReadPath);
init("max-time-to-allow-socket-drain",
&max_time_to_allow_socket_drain,
"3min",
validate_positive<ssize_t>(),
"If a socket does not drain a complete message for "
"max-time-to-allow-socket-drain. Then the socket is closed.",
SERVER | CLIENT,
SettingsCategory::Network);
init("socket-idle-threshold",
&socket_idle_threshold,
"1000000",
validate_positive<ssize_t>(),
"A socket is considered idle if number of bytes pending in the socket "
"is below or equal to this threshold. This is used along with "
"min_socket_idle_threshold_percent to find active socket and select "
"them for health check. Check socket-health-check-period for more "
"details.",
SERVER | CLIENT,
SettingsCategory::Network);
init("min-socket-idle-threshold-percent",
&min_socket_idle_threshold_percent,
"50",
validate_positive<ssize_t>(),
"A socket is considered active if it had bytes pending in the socket "
"above socket-idle-threshold for greater than "
"min-socket-idle-threshold-percent of socket-health-check-period.",
SERVER | CLIENT,
SettingsCategory::Network);
init("min-bytes-to-drain-per-second",
&min_bytes_to_drain_per_second,
"1000000",
validate_positive<ssize_t>(),
"Refer socket-health-check-period for details.",
SERVER | CLIENT,
SettingsCategory::Network);
init("socket-health-check-period",
&socket_health_check_period,
"1min",
validate_positive<ssize_t>(),
"Time between consecutive socket health check. Every "
"socket-health-check-period, a socket is closed, if it was not draining "
"for max-time-to-allow-socket-drain or it was active but the throughput "
"during the time it was active dropped below"
"min-bytes-to-drain-per-second due to network congestion.",
SERVER | CLIENT,
SettingsCategory::Network);
init("rate-limit-socket-closed",
&rate_limit_socket_closed,
"1",
validate_positive<ssize_t>(),
"Max number of sockets closed in a socket health check period.",
SERVER | CLIENT,
SettingsCategory::Network);
init("idle-connection-keep-alive",
&idle_connection_keep_alive,
"5min",
validate_positive<ssize_t>(),
"How long inactive client-to-server connection will stay open before "
"being shut down automatically.",
CLIENT,
SettingsCategory::Network);
init("rate-limit-idle-connection-closed",
&rate_limit_idle_connection_closed,
"10",
validate_nonnegative<ssize_t>(),
"Max number of idle connections closed in single round of socket healh "
"check. Set to 0 to disable closing of idle connections completely.",
CLIENT,
SettingsCategory::Network);
init("max-cached-digest-record-queued-kb",
&max_cached_digest_record_queued_kb,
"256",
parse_positive<ssize_t>(),
"amount of RECORD data to push to the client at once for cached "
"digesting",
SERVER | REQUIRES_RESTART /* used in Worker ctor */,
SettingsCategory::Recovery);
init(
"max-active-cached-digests",
&max_active_cached_digests,
"2000",
parse_positive<ssize_t>(),
"maximum number of active cached digest streams on a storage node at the "
"same time",
SERVER | REQUIRES_RESTART /* set at startup */,
SettingsCategory::Recovery);
init("max-record-bytes-read-at-once",
&max_record_bytes_read_at_once,
"1048576", // 1MB
parse_positive<ssize_t>(),
"amount of RECORD data to read from local log store at once",
SERVER,
SettingsCategory::ReadPath);
init("max-record-read-execution-time",
&max_record_read_execution_time,
"1s",
validate_positive<ssize_t>(),
"Maximum execution time for reading records. 'max' means no limit.",
SERVER | EXPERIMENTAL,
SettingsCategory::ResourceManagement);
init("read-requests",
&requests_from_pipe,
"128",
parse_positive<ssize_t>(),
"deprecated, to be removed",
SERVER | CLIENT | DEPRECATED);
init("execute-requests",
&hi_requests_per_iteration,
"13",
parse_positive<ssize_t>(),
"number of HI_PRI requests to process per worker event loop iteration",
SERVER | CLIENT,
SettingsCategory::Execution);
init("mid_requests_per_iteration",
&mid_requests_per_iteration,
"2",
parse_positive<ssize_t>(),
"number of MID_PRI requests to process per worker event loop iteration",
SERVER | CLIENT,
SettingsCategory::Execution);
init("lo_requests_per_iteration",
&lo_requests_per_iteration,
"1",
parse_positive<ssize_t>(),
"number of LO_PRI requests to process per worker event loop iteration",
SERVER | CLIENT,
SettingsCategory::Execution);
init("worker-request-pipe-capacity",
&worker_request_pipe_capacity,
"524288",
parse_positive<ssize_t>(),
"size each worker request queue to hold this many requests",
SERVER | CLIENT | REQUIRES_RESTART /* sized at startup. This is tech
debt as an MPSCQ-based request queue can be resized at any time. */
,
SettingsCategory::Execution);
init("prioritized-task-execution",
&enable_executor_priority_queues,
"true",
nullptr,
"Enable prioritized execution of requests within CPU executor. Setting "
"this false ignores per request and per message ExecutorPriority.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Execution);
init("use-legacy-eventbase",
&use_legacy_eventbase,
"false",
nullptr,
"Use libevent2 based event base to create EventLoop threadpool in "
"logdevice. DEPRECATED as libevent2 is being removed from codebase",
SERVER | CLIENT | REQUIRES_RESTART | DEPRECATED,
SettingsCategory::Execution);
init("request-exec-threshold",
&request_execution_delay_threshold,
"10ms",
validate_positive<ssize_t>(),
"Request Execution time beyond which it is considered slow, "
"and 'worker_slow_requests' stat is bumped",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("slow-background-task-threshold",
&slow_background_task_threshold,
"100ms",
validate_positive<ssize_t>(),
"Background task execution time beyond which it is considered slow, "
"and we log it",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("flow-groups-run-yield-interval",
&flow_groups_run_yield_interval,
"2ms",
validate_positive<ssize_t>(),
"Maximum duration of Sender::runFlowGroups() before yielding to the "
"event loop.",
SERVER,
SettingsCategory::ResourceManagement);
init("flow-groups-run-deadline",
&flow_groups_run_deadline,
"5ms",
validate_positive<ssize_t>(),
"Maximum delay (plus one cycle of the event loop) between "
"a request to run FlowGroups and Sender::runFlowGroups() executing.",
SERVER,
SettingsCategory::ResourceManagement);
init("read-messages",
&incoming_messages_max_per_socket,
"128",
parse_positive<ssize_t>(),
"read up to this many incoming messages before returning to libevent",
SERVER | CLIENT,
SettingsCategory::Network);
init("incoming-messages-max-bytes-limit",
&incoming_messages_max_bytes_limit,
"524288000",
parse_positive<ssize_t>(),
"maximum byte limit of unprocessed messages within the system.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Network);
init("max-inflight-storage-tasks",
&max_inflight_storage_tasks,
"4096",
parse_validate_lower_bound<ssize_t>(2),
"max number of StorageTask instances that one worker thread may "
"have in flight to each database shard",
SERVER | REQUIRES_RESTART /* queues sized at startup */,
SettingsCategory::ResourceManagement);
init("max-concurrent-purging-for-release-per-shard",
&max_concurrent_purging_for_release_per_shard,
"4",
parse_validate_lower_bound<ssize_t>(2),
"max number of concurrently running purging state machines for RELEASE "
"messages per each storage shard for each worker",
SERVER | REQUIRES_RESTART /* used in PurgeScheduler ctor */,
SettingsCategory::Recovery);
init("enable-record-cache",
&enable_record_cache,
"true",
nullptr, // no validation
"Enable caching of unclean records on storage nodes. Used to minimize "
"local log store access during log recovery.",
SERVER | REQUIRES_RESTART /* used in LogStorageStateMap ctor*/,
SettingsCategory::Recovery);
init("record-cache-max-size",
&record_cache_max_size,
"4294967296", // 4GB
parse_nonnegative<ssize_t>(),
"Maximum size enforced for the record cache, 0 for unlimited. If "
"positive "
"and record cache size grows more than that, it will start evicting "
"records from the cache. This is also the maximum total number of bytes "
"allowed to be persisted in record cache snapshots. For snapshot limit, "
"this is enforced per-shard with each shard having its own limit of "
"(max_record_cache_snapshot_bytes / num_shards).",
SERVER,
SettingsCategory::Recovery);
init("record-cache-monitor-interval",
&record_cache_monitor_interval,
// use 2s by default since we can only receive 2.5GB in 2 sec over a
// 10Gbps link
"2s",
validate_positive<ssize_t>(),
"polling interval for the record cache eviction thread for monitoring "
"the "
"size of the record cache.",
SERVER,
SettingsCategory::Recovery);
init("abort-on-failed-check",
&abort_on_failed_check,
"true", // ClientSettingsImpl overrides this default value
nullptr, // no validation
"When an ld_check() fails, call abort(). If not, just continue "
"executing. We'll log either way.",
SERVER | CLIENT,
SettingsCategory::Testing,
"`false` in the client, `true` elsewhere");
init("abort-on-failed-catch",
&abort_on_failed_catch,
folly::kIsDebug ? "true" : "false",
nullptr, // no validation
"When an ld_catch() fails, call abort(). If not, just continue "
"executing. We'll log either way.",
SERVER | CLIENT,
SettingsCategory::Testing,
"`true` in debug builds, `false` in release builds");
init("watchdog-poll-interval",
&watchdog_poll_interval_ms,
"5000ms",
validate_positive<ssize_t>(),
"Interval after which watchdog detects stuck workers",
SERVER | CLIENT | REQUIRES_RESTART /* used in Processor ctor */,
SettingsCategory::Monitoring);
init("watchdog-abort-on-stall",
&watchdog_abort_on_stall,
"false",
nullptr, // no validation
"Should we abort logdeviced if watchdog detected stalled workers.",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("watchdog-print-bt-on-stall",
&watchdog_print_bt_on_stall,
"true",
nullptr, // no validation
"Should we print backtrace of stalled workers.",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init(
"watchdog-bt-ratelimit",
&watchdog_bt_ratelimit,
"10/120s",
[](const std::string& val) -> rate_limit_t {
rate_limit_t res;
int rv = parse_rate_limit(val.c_str(), &res);
if (rv != 0) {
throw boost::program_options::error(
"Invalid value(" + val +
") for --watchdog-bt-ratelimit."
"Expected format is <count>/<duration><unit>, e.g. 1/1s");
}
return res;
},
"Maximum allowed rate of printing backtraces.",
SERVER | CLIENT | REQUIRES_RESTART /* Passed to WatchDogThread ctor */,
SettingsCategory::Monitoring);
init("maximum-percent-unhealthy-seq-nodes-hbsp",
&maximum_percent_unhealthy_seq_nodes,
"0.5",
validate_range<double>(0, 1.0),
"Percent of UNHEALTHY nodes in the cluster at which HealthBasedHashing "
"is no longer a viable option. This value MUST be the same on client "
"and server to ensure correct conditions for health based sequencer "
"placement.",
SERVER | CLIENT /* used in ClusterState */,
SettingsCategory::Sequencer);
init("enable-health-based-sequencer-placement",
&enable_health_based_sequencer_placement,
"false",
nullptr,
"Toggle use of HealthMonitor determined node status in sequencer "
"location. This value MUST be the same on client and server to ensure "
"correct conditions for health based sequencer placement.",
SERVER | CLIENT /* used in HashBasedSequencerLocator */,
SettingsCategory::Sequencer);
init("enable-health-monitor",
&enable_health_monitor,
"true",
nullptr,
"Toggle use of HealthMonitor to determine node status on server-side.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("health-monitor-poll-interval",
&health_monitor_poll_interval_ms,
"500ms",
validate_positive<ssize_t>(),
"Interval after which health monitor detects issues on node.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("health-monitor-max-delay",
&health_monitor_max_delay,
"50ms",
validate_positive<ssize_t>(),
"Maximum tolerated delay inbetween health monitor loops.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init(
"health-monitor-max-queue-stalls-avg",
&health_monitor_max_queue_stalls_avg_ms,
"100ms",
validate_positive<ssize_t>(),
"Maximum average of queue stalls in health-monitor-poll-interval-ms that "
"are not counted towards overload even if their sum exceeds "
"health-monitor-max_queue-stall-duration.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("health-monitor-max-queue-stall-duration",
&health_monitor_max_queue_stall_duration_ms,
"200ms",
validate_positive<ssize_t>(),
"Value of summed queue stalls over a period of "
"health-monitor-poll-interval-ms that if generated by less than "
"health-monitor-max-queue-stalls queued requests results in worker "
"being counted as overloaded by health monitor ",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("health-monitor-max-overloaded-worker-percentage",
&health_monitor_max_overloaded_worker_percentage,
"0.3",
validate_range<double>(0, 1.1),
"Maximum tolarable percent of HM detected overloaded workers. If the "
"percentage of overloaded workers rises above this value the whole node "
"transitions into an overloaded state in the HM. Setting this "
"percentage to a value greater than 1.0 ensures that overloaded workers "
"are excluded from HM decision-making.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("health-monitor-max-stalls-avg",
&health_monitor_max_stalls_avg_ms,
"90ms",
validate_positive<ssize_t>(),
"Maximum average of worker stalls in health-monitor-poll-interval-ms "
"that are not counted towards UNHEALTHY.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("health-monitor-max-stalled-worker-percentage",
&health_monitor_max_stalled_worker_percentage,
"0.2",
validate_range<double>(0, 1.1),
"Maximum tolarable percent of HM detected stalled workers. If the "
"percentage of stalled workers rises above this value the whole node "
"transitions into an UNDEALTHY state in the HM. Setting this percentage "
"to a value greater than 1.0 ensures that stalled workers are excluded "
"from HM decision-making.",
SERVER | REQUIRES_RESTART /* used in ServerProcessor init */,
SettingsCategory::Monitoring);
init("worker-stall-error-injection-chance",
&worker_stall_error_injection_chance,
"0",
validate_range<double>(0, 100),
"Percentage chance of delayed request execution in a worker thread. "
"Used to exercise error handling paths in Health Monitor.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("worker-queue-stall-error-injection-chance",
&worker_queue_stall_error_injection_chance,
"0",
validate_range<double>(0, 100),
"Percentage chance of delayed request queuing in a worker thread. "
"Used to exercise error handling paths in Health Monitor.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("watchdog-detected-worker-stall-error-injection-chance",
&watchdog_detected_worker_stall_error_injection_chance,
"0",
validate_range<double>(0, 100),
"Percentage chance of detection of stalled workers in watchdog thread. "
"Used to exercise error handling paths in Health Monitor.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("block-logsconfig-rsm",
&block_logsconfig_rsm,
"false",
nullptr,
"If true, the LogsConfig replicated state machine will not publish any "
"state updates. This simulates the case where we cannot finish loading "
"the state on startup. Changing the value will cause the RSM to publish "
"the state immediately if it can.",
SERVER | CLIENT,
SettingsCategory::Testing);
init("block-eventlog-rsm",
&block_eventlog_rsm,
"false",
nullptr,
"If true, the EventLog replicated state machine will not publish any "
"state updates. This simulates the case where we cannot finish loading "
"the state on startup. Changing the value will cause the RSM to publish "
"the state immediately if it can.",
SERVER,
SettingsCategory::Testing);
init("purging-use-metadata-log-only",
&purging_use_metadata_log_only,
"false",
nullptr,
"If true, the NodeSetFinder within PurgeUncleanEpochs will use "
"only the metadata log as source for fetching historical metadata."
"used only for migration",
SERVER | DEPRECATED,
SettingsCategory::Recovery);
init(
"send-to-gossip-port",
&send_to_gossip_port,
"true",
nullptr, // no validation
"Send gossip messages to destination's gossip port (if one is specified) "
"instead of data port. This is the default. Sending gossips to data port "
"may increase gossipping delays and adversely affect the accuracy of "
"failure detection.",
SERVER | DEPRECATED,
SettingsCategory::FailureDetector);
init("ssl-on-gossip-port",
&ssl_on_gossip_port,
"false",
nullptr,
"If true, gossip port will reject all plaintext connections. Only SSL "
"connections will be accepted. WARNING: Any change to this setting "
"should only be performed while send-to-gossip-port = false, in order "
"to avoid failure detection issues while the setting change propagates "
"through the cluster.",
SERVER,
SettingsCategory::Security);
init("max-nodes",
&max_nodes,
"512",
parse_positive<ssize_t>(),
"Number of preallocated nodes in the cluster. Used for sizing "
"data structures of the failure detector.",
SERVER | REQUIRES_RESTART | DEPRECATED,
SettingsCategory::Core);
init("sbr-node-threshold",
&space_based_retention_node_threshold,
"0",
validate_range<double>(0, 1),
"threshold fraction of full nodes which triggers space-based retention, "
"if enabled (sequencer-only option), 0 means disabled",
SERVER,
SettingsCategory::LogsDB);
init("gray-list-threshold",
&gray_list_nodes_threshold,
"0.25",
validate_range<double>(0, 1),
"if the number of storage nodes graylisted on the write path of a log "
"exceeds this fraction of the log's nodeset size the gray list will be "
"cleared to make sure that copysets can still be picked",
SERVER,
SettingsCategory::WritePath);
init("store-timeout",
&store_timeout,
"10ms..1min",
validate_positive<ssize_t>(),
"timeout for attempts to store a record copy on a specific "
"storage node. This value is used by sequencers only and is NOT the "
"client request timeout.",
SERVER,
SettingsCategory::WritePath);
init("connect-throttle",
&connect_throttle,
"1ms..10s",
validate_nonnegative<ssize_t>(),
"timeout after it which two nodes retry to connect when they loose a "
"a connection. Used in ConnectThrottle to ensure we don't retry too "
"often. Needs restart to load the new values.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Network);
init("disable-chain-sending",
&disable_chain_sending,
"false",
nullptr, // no validation
"never send a wave of STORE messages through a chain",
SERVER,
SettingsCategory::WritePath);
init("sbr-low-watermark-check-interval",
&sbr_low_watermark_check_interval,
"60s",
validate_positive<ssize_t>(),
"Time after which space based trim check can be done on a nodeset",
SERVER,
SettingsCategory::LogsDB);
init("nospace-retry-interval",
&nospace_retry_interval,
"60s",
validate_positive<ssize_t>(),
"Time interval during which a sequencer will not route record copies "
"to a storage node that reported an out of disk space condition.",
SERVER,
SettingsCategory::WritePath);
init("node-health-check-retry-interval",
&node_health_check_retry_interval,
"5s",
validate_positive<ssize_t>(),
"Time interval during which a node health check probe will not be sent "
"if there is an outstanding request for the same node in the nodeset",
SERVER,
SettingsCategory::WritePath);
init("slow-node-retry-interval",
&slow_node_retry_interval,
"600s",
validate_positive<ssize_t>(),
"After a sequencer's request to store a record copy on a storage node "
"times out that sequencer will graylist that node for at least this "
"time interval. "
"The sequencer will not pick graylisted nodes for copysets unless "
"--gray-list-threshold is reached or no valid copyset can be selected "
"from nodeset nodes not yet graylisted. "
"For outlier-based graylisting increases exponentially for each "
"new graylisting up until 10x of this value and decreases "
"at linear rate down to this value when not graylisted",
SERVER,
SettingsCategory::WritePath);
init("check-node-health-request-timeout",
&check_node_health_request_timeout,
"120s",
validate_positive<ssize_t>(),
"Timeout for health check probes that sequencers send to unresponsive "
"storage nodes. If no reply arrives after timeout, another probe is "
"sent.",
SERVER,
SettingsCategory::WritePath);
init("unroutable-retry-interval",
&unroutable_retry_interval,
"60s",
validate_positive<ssize_t>(),
"Time interval during which a sequencer will not pick for copysets a "
"storage node whose IP address was reported unroutable by the socket "
"layer",
SERVER,
SettingsCategory::WritePath);
init("overloaded-retry-interval",
&overloaded_retry_interval,
"1s",
validate_positive<ssize_t>(),
"Time interval during which a sequencer will not route record copies "
"to a storage node that reported itself overloaded (storage task queue "
"too long).",
SERVER,
SettingsCategory::WritePath);
init("disabled-retry-interval",
&disabled_retry_interval,
"30s",
validate_nonnegative<ssize_t>(),
"Time interval during which a sequencer will not route record copies "
"to a storage node that reported a permanent error.",
SERVER,
SettingsCategory::WritePath);
init("nodeset-state-refresh-interval",
&nodeset_state_refresh_interval,
"1s",
validate_positive<ssize_t>(),
"Time interval that rate-limits how often a sequencer can refresh "
"the states of nodes in the nodeset in use",
SERVER,
SettingsCategory::WritePath);
init("connect-timeout",
&connect_timeout,
"100ms",
validate_nonnegative<ssize_t>(),
"connection timeout when establishing a TCP connection to a node",
SERVER | CLIENT,
SettingsCategory::Network);
init("connection-retries",
&connection_retries,
"4",
validate_nonnegative<ssize_t>(),
"the number of TCP connection retries before giving up",
SERVER | CLIENT,
SettingsCategory::Network);
init("connect-timeout-retry-multiplier",
&connect_timeout_retry_multiplier,
"3",
validate_positive<double>(),
"Multiplier that is applied to the connect timeout after every failed "
"connection attempt",
SERVER | CLIENT,
SettingsCategory::Network);
init("handshake-timeout",
&handshake_timeout,
"1s",
validate_nonnegative<ssize_t>(),
"LogDevice protocol handshake timeout",
SERVER | CLIENT,
SettingsCategory::Network);
init("inline-message-execution",
&inline_message_execution,
"false",
nullptr,
"Indicates whether message should be processed right after "
"deserialization. Usually within new worker model all messages are "
"processed after posting them into the work context. This option works "
"only when worker context is run with previous eventloop architecture.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Network);
init("per-worker-storage-task-queue-size",
&per_worker_storage_task_queue_size,
"1",
parse_positive<ssize_t>(),
"max number of StorageTask instances to buffer in each Worker for "
"each local log store shard",
SERVER | REQUIRES_RESTART /* queue is sized in Worker's ctor*/,
SettingsCategory::ResourceManagement);
init("disable-graylisting",
&disable_graylisting,
"false",
nullptr, // no validation
"setting this to true disables graylisting nodes by sequencers "
"in the write path",
SERVER,
SettingsCategory::WritePath);
init("disable-outlier-based-graylisting",
&disable_outlier_based_graylisting,
"true",
nullptr, // no validation
"setting this to true disables the outlier based graylisting nodes by "
"sequencers in the write path",
SERVER | EXPERIMENTAL,
SettingsCategory::WritePath);
init("graylisting-grace-period",
&graylisting_grace_period,
"300s",
nullptr, // no validation
"The duration through which a node need to be consistently an outlier to"
" get graylisted",
SERVER,
SettingsCategory::WritePath);
init("rsm-force-all-send-all",
&rsm_force_all_send_all,
"true",
nullptr, // no validation
"Forces ALL_SEND_ALL mode for read streams associated with RSM.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Core);
init("graylisting-monitored-period",
&graylisting_monitored_period,
"120s",
nullptr, // no validation
"The duration through which a recently ungraylisted node will be "
"monitored and graylisted as soon as it becomes an outlier",
SERVER,
SettingsCategory::WritePath);
init("graylisting-refresh-interval",
&graylisting_refresh_interval,
"30s",
nullptr, // no validation
"The interval at which the graylists are refreshed",
SERVER,
SettingsCategory::WritePath);
init("graylisting-min-latency",
&graylisting_min_latency,
"0ms",
nullptr,
"Don't graylist nodes that have p95 store latency less than this.",
SERVER,
SettingsCategory::WritePath);
init("enable-read-throttling",
&enable_read_throttling,
"false",
nullptr, // no validation
"Throttle Disk I/O due to log read streams",
SERVER,
SettingsCategory::ReadPath);
init("enable-adaptive-store-timeout",
&enable_adaptive_store_timeout,
"false",
nullptr, // no validation
"decides whether to enable an adaptive store timeout",
SERVER | EXPERIMENTAL,
SettingsCategory::WritePath);
init("write-batch-size",
&write_batch_size,
"1024",
parse_positive<ssize_t>(),
"max number of records for a storage thread to write in one batch",
SERVER,
SettingsCategory::Storage);
init("write-batch-bytes",
&write_batch_bytes,
"1048576", // 1MB
parse_positive<ssize_t>(),
"min number of payload bytes for a storage thread to write in one batch "
"unless write-batch-size is reached first",
SERVER,
SettingsCategory::Storage);
init("storage-tasks-use-drr",
&storage_tasks_use_drr,
"false",
nullptr,
"Use DRR for scheduling read IO's.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Storage);
init("storage-tasks-drr-quanta",
&storage_tasks_drr_quanta,
"1",
parse_positive<uint64_t>(),
"Default quanta per-principal. 1 implies request based scheduling. "
"Use something like 1MB for byte based scheduling.",
SERVER,
SettingsCategory::Storage);
// X-macro: storage_task_principals.inc invokes STORAGE_TASK_PRINCIPAL once per
// DRR principal, so this generates one "storage-task-<key>-share" setting per
// principal, each defaulting to that principal's declared share value.
#define STORAGE_TASK_PRINCIPAL(name, key, shareVal)                      \
  init("storage-task-" #key "-share",                                    \
       &storage_task_shares[(uint64_t)StorageTaskPrincipal::name].share, \
       shareVal,                                                         \
       parse_positive<uint64_t>(),                                       \
       "The share for principal " #key " in the DRR scheduler.",         \
       SERVER,                                                           \
       SettingsCategory::Storage);
#include "logdevice/common/storage_task_principals.inc"
#undef STORAGE_TASK_PRINCIPAL
init("max-server-read-streams",
&max_server_read_streams,
"150000",
parse_nonnegative<ssize_t>(),
"max number of read streams clients can establish to the server, "
"per worker",
SERVER,
SettingsCategory::ResourceManagement);
init("queue-drop-overload-time",
&queue_drop_overload_time,
"1s",
validate_positive<ssize_t>(),
"max time after worker's storage task queue is dropped "
"before it stops being considered overloaded",
SERVER,
SettingsCategory::ResourceManagement);
init("queue-size-overload-percentage",
&queue_size_overload_percentage,
"50",
validate_range<ssize_t>(0, 100),
"percentage of per-worker-storage-task-queue-size that can be buffered "
"before the queue is considered overloaded",
SERVER,
SettingsCategory::ResourceManagement);
init("concurrent-log-recoveries",
&concurrent_log_recoveries,
"400",
parse_positive<ssize_t>(),
"limit on the number of logs that can be in recovery at the same time",
SERVER,
SettingsCategory::Recovery);
init("appender-buffer-queue-cap",
&appender_buffer_queue_cap,
"10000",
parse_nonnegative<ssize_t>(),
"capacity of per-log queue of pending writes while sequencer "
" is initializing or activating",
SERVER | REQUIRES_RESTART /* queue sized in
Worker/AppenderBuffer ctors */
,
SettingsCategory::WritePath);
init("appender-buffer-process-batch",
&appender_buffer_process_batch,
"20",
parse_positive<ssize_t>(),
"batch size for processing per-log queue of pending writes",
SERVER,
SettingsCategory::WritePath);
init("test-appender-skip-stores",
&test_appender_skip_stores,
"false",
nullptr,
"Allow appenders to skip sending data to storage node. Currently used"
" in tests to make sure an appender state machine is running",
SERVER,
SettingsCategory::Testing);
init("time-delay-before-force-abort",
&time_delay_before_force_abort,
"400",
nullptr,
"Time delay before force abort of remaining work is attempted during "
"shutdown. The value is in 50ms time periods. The quiescence condition "
"is checked once every 50ms time period. When the timer expires for "
"the first time, all pending requests are aborted and the timer is "
"restarted. On second expiration all remaining TCP connections are "
"reset (RST packets sent).",
SERVER,
SettingsCategory::Core);
init("client-read-buffer-size",
&client_read_buffer_size,
"512",
parse_positive<ssize_t>(),
"number of records to buffer per read stream in the client object while "
"reading. If this setting is changed on-the-fly, the change will only "
"apply to new reader instances",
SERVER | CLIENT,
SettingsCategory::ReadPath);
init("client-read-flow-control-threshold",
&client_read_flow_control_threshold,
"0.7",
validate_range<double>(std::numeric_limits<double>::min(), 1),
"threshold (relative to buffer size) at which the client broadcasts "
"window update messages (less means more often)",
CLIENT | SERVER /* for event log reads */,
SettingsCategory::ReadPath);
init("client-epoch-metadata-cache-size",
&client_epoch_metadata_cache_size,
"50000",
parse_nonnegative<ssize_t>(),
"maximum number of entries in the client-side epoch metadata cache. "
"Set it to 0 to disable the epoch metadata cache.",
CLIENT | REQUIRES_RESTART,
SettingsCategory::ReadPath);
init("client-readers-flow-tracer-period",
&client_readers_flow_tracer_period,
"0s",
validate_nonnegative<ssize_t>(),
"Period for logging in logdevice_readers_flow scuba table and for "
"triggering certain sampling actions for monitoring. Set it to 0 to "
"disable feature.",
CLIENT,
SettingsCategory::Monitoring);
init("client-readers-flow-tracer-unhealthy-publish-weight",
&client_readers_flow_tracer_unhealthy_publish_weight,
"5.0",
validate_positive<ssize_t>(),
"Weight given to traces of unhealthy readers when publishing samples "
"(for improved debuggability).",
CLIENT,
SettingsCategory::Monitoring);
init("client-readers-flow-max-acceptable-time-lag-per-tag",
&client_readers_flow_max_acceptable_time_lag_per_tag,
"",
parse_time_threshold_per_monitoring_tag,
"Map that establishes the maximum acceptable time lag for each "
"monitoring tag. A reader that passes the maximum acceptable time lag "
"will be considered unhealthy for the purpose of increasing weight when "
"pushing samples. See "
"'client-readers-flow-tracer-unhealthy-publish-weight'.",
CLIENT,
SettingsCategory::Monitoring);
init("client-readers-flow-tracer-GSS-skip-remote-preemption-checks",
&client_readers_flow_tracer_GSS_skip_remote_preemption_checks,
"true",
nullptr,
"If set, skips remote preemption checks (aka CHECK SEALs) on GSSs "
"issued by ClientReadersFlowTracer.",
CLIENT,
SettingsCategory::Monitoring);
init("client-readers-flow-tracer-lagging-metric-num-sample-groups",
&client_readers_flow_tracer_lagging_metric_num_sample_groups,
"3",
validate_nonnegative<ssize_t>(),
"Maximum number of samples that are kept by ClientReadersFlowTracer for "
"computing relative reading speed in relation to writing speed. See "
"client_readers_flow_tracer_lagging_slope_threshold.",
CLIENT,
SettingsCategory::Monitoring);
init("client-readers-flow-tracer-lagging-metric-sample-group-size",
&client_readers_flow_tracer_lagging_metric_sample_group_size,
"20",
validate_nonnegative<ssize_t>(),
"Number of samples in ClientReadersFlowTracer that are aggregated and "
"recorded as one entry. See "
"client-readers-flow-tracer-lagging-metric-sample-group-size.",
CLIENT,
SettingsCategory::Monitoring);
// Relative read-vs-write speed threshold used to classify a reader as lagging.
// NOTE: the help text previously contained "x\%" — '\%' is not a valid C++
// escape sequence (compilers emit "unknown escape sequence" and fold it to
// '%'); a plain '%' is what was intended.
init(
    "client-readers-flow-tracer-lagging-slope-threshold",
    &client_readers_flow_tracer_lagging_slope_threshold,
    "-0.3",
    validate_range<double>(-100, 100),
    "If a reader's lag increase at at least this rate, the reader is "
    "considered lagging (rate given as variation of time lag per time unit). "
    "If the desired read ratio needs to be x% of the write ratio, set this "
    "threshold to be (1 - x / 100).",
    CLIENT,
    SettingsCategory::Monitoring);
init("client-readers-flow-tracer-high-pri-max-lag",
&client_readers_flow_tracer_high_pri_max_lag,
"max",
validate_nonnegative<ssize_t>(),
"Max allowed amount of lag for high priority readers.",
CLIENT,
SettingsCategory::Monitoring);
init("client-test-force-stats",
&client_test_force_stats,
"false",
nullptr, // no validation
"force instantiation of StatsHolder within ClientImpl even if stats "
"publishing is disabled",
CLIENT | REQUIRES_RESTART,
SettingsCategory::Testing);
init(
"client-is-log-empty-grace-period",
&client_is_log_empty_grace_period,
"5s",
validate_nonnegative<ssize_t>(),
"After receiving responses to an isLogEmpty() request from an f-majority "
"of nodes, wait up to this long for more nodes to chime in if there is "
"not yet consensus.",
CLIENT | DEPRECATED,
SettingsCategory::ReadPath);
init("release-retry-interval",
&release_retry_interval,
"20s",
validate_positive<ssize_t>(),
"RELEASE message retry period",
SERVER,
SettingsCategory::WritePath);
init("release-broadcast-interval",
&release_broadcast_interval,
"300s",
validate_positive<ssize_t>(),
"the time interval for periodic broadcasts of RELEASE "
"messages by sequencers of regular logs. Such broadcasts are not "
"essential for correct cluster operation. They are used as the last "
"line of defence to make sure storage nodes deliver all records "
"eventually even if a regular (point-to-point) RELEASE message "
"is lost due to a TCP connection failure. See also "
"--release-broadcast-interval-internal-logs.",
SERVER,
SettingsCategory::WritePath);
init("release-broadcast-interval-internal-logs",
&release_broadcast_interval_internal_logs,
"5s",
validate_positive<ssize_t>(),
"Same as --release-broadcast-interval but instead applies to internal "
"logs, currently the event logs and logsconfig logs",
SERVER,
SettingsCategory::WritePath);
init("recovery-grace-period",
&recovery_grace_period,
"100ms",
validate_nonnegative<ssize_t>(),
"Grace period time used by epoch recovery after it acquires an "
"authoritative incomplete digest but wants to wait more time for "
"an authoritative complete digest. Millisecond granularity. Can be 0. ",
SERVER,
SettingsCategory::Recovery);
init("event-log-grace-period",
&event_log_grace_period,
"10s",
validate_nonnegative<ssize_t>(),
"grace period before considering event log caught up",
SERVER,
SettingsCategory::Rebuilding);
init("recovery-timeout",
&recovery_timeout,
"120s",
validate_positive<ssize_t>(),
"epoch recovery timeout. Millisecond granularity.",
SERVER,
SettingsCategory::Recovery);
init("gap-grace-period",
&gap_grace_period,
"100ms",
validate_nonnegative<ssize_t>(),
"gap detection grace period for all logs, including data logs, "
"metadata logs, and internal state machine logs. Millisecond "
"granularity. "
"Can be 0.",
SERVER | CLIENT,
SettingsCategory::ReadPath);
init("data-log-gap-grace-period",
&data_log_gap_grace_period,
"0ms",
validate_nonnegative<ssize_t>(),
"When non-zero, replaces gap-grace-period for data logs.",
SERVER | CLIENT,
SettingsCategory::ReadPath);
init("metadata-log-gap-grace-period",
&metadata_log_gap_grace_period,
"0ms",
validate_nonnegative<ssize_t>(),
"When non-zero, replaces gap-grace-period for metadata logs.",
SERVER | CLIENT,
SettingsCategory::ReadPath);
init("reader-stalled-grace-period",
&reader_stalled_grace_period,
"30s",
validate_nonnegative<ssize_t>(),
"Amount of time we wait before declaring a reader stalled because we "
"can't read the metadata or data log. "
"When this grace period expires, the client "
"stat \"read_streams_stalled\" is bumped and record to scuba ",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("reader-stuck-threshold",
&reader_stuck_threshold,
"121s", // 2 min + 1 sec
validate_nonnegative<ssize_t>(),
"Amount of time we wait before we report a read stream that is "
"considered stuck.",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init(
    "log-state-recovery-interval",
    &log_state_recovery_interval,
    "500ms",
    validate_nonnegative<ssize_t>(),
    "interval between consecutive attempts by a storage node to "
    "obtain the attributes of a log residing on that storage node "
    "Such 'log state recovery' is performed independently for each log upon "
    "the first request to start delivery of records of that log. "
    "The attributes to be recovered include the LSN of the last cumulatively "
    "released record in the log, which may have to be requested from the "
    "log's sequencer over the network.",
    SERVER | REQUIRES_RESTART /* init'ed with this in Processor's ctor */,
    SettingsCategory::ReadPath);
init("seq-state-reply-timeout",
&get_seq_state_reply_timeout,
"2s",
validate_positive<ssize_t>(),
"how long to wait for a reply to a 'get sequencer state' request before "
"retrying (usually to a different node)",
SERVER | CLIENT,
SettingsCategory::Sequencer);
init(
"seq-state-backoff-time",
&seq_state_backoff_time,
"1s..10s",
validate_positive<ssize_t>(),
"how long to wait before resending a 'get sequencer state' request after "
"a timeout.",
SERVER | CLIENT,
SettingsCategory::Sequencer);
init("check-seal-req-min-timeout",
&check_seal_req_min_timeout,
"500ms",
validate_positive<ssize_t>(),
"before a sequencer returns its state in response to a 'get "
"sequencer state' request the sequencer checks that it is the most "
"recent (highest numbered) sequencer for the log. It performs the check "
"by sending a 'check seal' request to a valid copyset of nodes in the "
"nodeset of the sequencer's epoch. The 'check seal' request looks "
"for a seal record placed by a higher-numbered sequencer. This setting "
"sets the timeout for 'check seal' requests. The timeout is set to the "
"smaller of this value and half the value of --seq-state-reply-timeout.",
SERVER,
SettingsCategory::Sequencer);
init("update-metadata-map-interval",
&update_metadata_map_interval,
"1h",
nullptr,
"Sequencer has a timer for periodically reading metadata logs and "
"refreshing the in memory metadata_map_. This setting specifies the "
"interval for this timer",
SERVER,
SettingsCategory::Sequencer);
init("delete_log_metadata_request_timeout",
&delete_log_metadata_request_timeout,
"30000ms",
validate_positive<ssize_t>(),
"A timeout to wait for DELETE_LOG_METADATA_REPLY messages after a"
"DELETE_LOG_METADATA message.",
CLIENT | INTERNAL_ONLY | EXPERIMENTAL);
init(
"cluster-state-refresh-interval",
&cluster_state_refresh_interval,
"10s",
validate_positive<ssize_t>(),
"how frequently to search for the sequencer in case of an append timeout",
CLIENT,
SettingsCategory::FailureDetector);
init("enable-is-log-empty-v2",
&enable_is_log_empty_v2,
"true",
nullptr,
"When enabled, the V2 implementation will be used to process all "
"isLogEmpty requests.",
CLIENT | DEPRECATED,
SettingsCategory::Core);
init(
"enable-initial-get-cluster-state",
&enable_initial_get_cluster_state,
"true",
nullptr,
"Enable executing a GetClusterState request to retrieve the state of the "
"cluster as soon as the client is created",
CLIENT,
SettingsCategory::FailureDetector);
init("test-get-cluster-state-recipients",
&test_get_cluster_state_recipients_,
"",
parse_recipients_list,
"Force get-cluster-state recipients as a comma-separated list of node "
"ids",
CLIENT,
SettingsCategory::Testing);
init(
"checksum-bits",
&checksum_bits,
"32",
checksum_bits_notifier,
"how big a checksum to include with newly appended records (0, 32 or 64)",
SERVER | CLIENT,
SettingsCategory::WritePath);
init(
"mutation-timeout",
&mutation_timeout,
"500ms",
validate_positive<ssize_t>(),
"initial timeout used during the mutation phase of log recovery to store "
"enough copies of a record or a hole plug",
SERVER,
SettingsCategory::Recovery);
init("write-sticky-copysets",
&write_sticky_copysets_deprecated,
"true",
nullptr, // no validation
"DEPRECATED. Instead, use --enable-sticky-copysets to enable copyset "
"stickiness and --write-copyset-index to write the copyset index. If "
"set to `false`, has the same effect as --enable-sticky-copysets=false "
"--write-copyset-index=false. Otherwise has no effect.",
SERVER | DEPRECATED | REQUIRES_RESTART /* Used in CopySetManager ctor */,
SettingsCategory::WritePath);
init("enable-sticky-copysets",
&enable_sticky_copysets,
"true",
nullptr, // no validation
"If set, sequencers will enable sticky copysets. Doesn't affect the "
"copyset index.",
SERVER | REQUIRES_RESTART /* Used in CopySetManager ctor */,
SettingsCategory::WritePath);
init("sticky-copysets-block-size",
&sticky_copysets_block_size,
"33554432", // 32MB
parse_positive<ssize_t>(),
"The total size of processed appends (in bytes), after which the sticky "
"copyset manager will start a new block.",
SERVER | REQUIRES_RESTART /* Used in CopySetManager ctor */,
SettingsCategory::WritePath);
init(
"sticky-copysets-block-max-time",
&sticky_copysets_block_max_time,
"10min",
validate_positive<ssize_t>(),
"The time since starting the last block, after which the copyset manager "
"will consider it expired and start a new one.",
SERVER | REQUIRES_RESTART /* Used in CopySetManager ctor */,
SettingsCategory::WritePath);
init("write-copyset-index",
&write_copyset_index_DEPRECATED,
"true",
nullptr, // no validation
"If set, storage nodes will write the copyset index for all records. "
"Note that this won't be used until --rocksdb-use-copyset-index is "
"enabled.",
SERVER | DEPRECATED,
SettingsCategory::WritePath);
init("iterator-cache-ttl",
&iterator_cache_ttl,
"20s",
validate_positive<ssize_t>(),
"expiration time of idle RocksDB iterators in the iterator cache.",
SERVER,
SettingsCategory::RocksDB);
init("max-protocol",
&max_protocol,
std::to_string(Compatibility::MAX_PROTOCOL_SUPPORTED).c_str(),
validate_range<ssize_t>(Compatibility::MIN_PROTOCOL_SUPPORTED,
Compatibility::MAX_PROTOCOL_SUPPORTED),
"maximum version of LogDevice protocol that the server/client will "
"accept",
SERVER | CLIENT,
SettingsCategory::Network);
init("max-total-appenders-size-soft",
&max_total_appenders_size_soft,
"524288000", // 500MB
parse_positive<ssize_t>(),
"Total size in bytes of running Appenders across all workers after "
"which "
"we start taking measures to reduce the Appender residency time.",
SERVER,
SettingsCategory::ResourceManagement);
init("max-total-appenders-size-hard",
&max_total_appenders_size_hard,
"629145600", // 600MB
parse_positive<ssize_t>(),
"Total size in bytes of running Appenders across all workers after "
"which "
"we start rejecting new appends.",
SERVER,
SettingsCategory::ResourceManagement);
init("max-total-buffered-append-size",
&max_total_buffered_append_size,
"1073741824", // 1GB
parse_positive<ssize_t>(),
"Total size in bytes of payloads buffered in BufferedWriters in "
"sequencers for server-side batching and compression. Appends will "
"be rejected when this threshold is significantly exceeded.",
SERVER,
SettingsCategory::ResourceManagement);
init("no-redirect-duration",
&no_redirect_duration,
"5s",
validate_positive<ssize_t>(),
"when a sequencer activates upon request from a client, it does not "
"redirect its clients to a different sequencer node for this amount of "
"time (even if for instance the primary sequencer just started up and "
"an older sequencer may be up and running)",
SERVER,
SettingsCategory::WritePath);
init(
"reactivation-limit",
&reactivation_limit,
"5/1s",
[](const std::string& val) -> rate_limit_t {
rate_limit_t res;
int rv = parse_rate_limit(val.c_str(), &res);
if (rv != 0) {
throw boost::program_options::error(
"Invalid value for --reactivation-limit. Expected format is "
"<count>/<duration><unit>, e.g. 1/1s");
}
return res;
},
"Maximum allowed rate of sequencer reactivations. When exceeded, further "
"appends will fail.",
SERVER | REQUIRES_RESTART /* Passed to Sequencer ctor */,
SettingsCategory::Sequencer);
init("epoch-draining-timeout",
&epoch_draining_timeout,
"2s",
validate_positive<ssize_t>(),
"Maximum time allowed for sequencer to drain one epoch. Sequencer "
"will abort draining the epoch if it takes longer than the timeout. "
"A sequencer 'drains' its epoch (waits for all appenders to complete) "
"while reactivating to serve a higher epoch.",
SERVER,
SettingsCategory::Sequencer);
init("read-historical-metadata-timeout",
&read_historical_metadata_timeout,
"10s",
validate_positive<ssize_t>(),
"maximum time interval for a sequencer to get historical epoch metadata "
"through reading the metadata log before retrying.",
SERVER,
SettingsCategory::Sequencer);
init("check-metadata-log-empty-timeout",
&check_metadata_log_empty_timeout,
"300s",
validate_positive<ssize_t>(),
"Timeout for request that verifies that a metadata log does not already "
"exist for a log that is presumed new and whose metadata provisioning "
"has been initiated by a sequencer activation",
SERVER,
SettingsCategory::Configuration);
init("max-payload-size",
&max_payload_size,
"1048576", // 1MB
parse_validate_range<ssize_t>(16, MAX_PAYLOAD_SIZE_PUBLIC),
("The maximum payload size that will be accepted by the client library "
"or the server. Can't be larger than " +
folly::to<std::string>(MAX_PAYLOAD_SIZE_PUBLIC) + " bytes.")
.c_str(),
SERVER | CLIENT /* Exposed via Client::getMaxPayloadSize() */,
SettingsCategory::ResourceManagement);
init("write-find-time-index",
&write_find_time_index,
"false",
nullptr, // no validation
"Set this to true if you want findTime index to be written. "
"A findTime index speeds up findTime() requests by maintaining an index "
"from timestamps to LSNs in LogsDB data partitions.",
SERVER,
SettingsCategory::Performance);
init("on-demand-logs-config",
&on_demand_logs_config,
"false",
nullptr, // no validation
"Set this to true if you want the client to get log configuration on "
"demand from the server when log configuration is not included in the "
"main config file.",
CLIENT | REQUIRES_RESTART /* used in ClientImpl::create() */,
SettingsCategory::Configuration);
init("on-demand-logs-config-retry-delay",
&on_demand_logs_config_retry_delay,
"5ms..1s",
validate_nonnegative<ssize_t>(),
"When a client's attempt to get log configuration information from "
"server "
"on demand fails, the client waits this much before retrying.",
CLIENT,
SettingsCategory::Configuration);
init("remote-logs-config-cache-ttl",
&remote_logs_config_cache_ttl,
"60s",
validate_nonnegative<ssize_t>(),
"The TTL for cache entries for the remote logs config. If the logs "
"config is not available locally and is fetched from the server, this "
"will determine how fresh the log configuration used by the client will "
"be.",
CLIENT | REQUIRES_RESTART /* used in ClientImpl::create() */,
SettingsCategory::Configuration);
init("alternative-layout-property",
&alternative_layout_property,
"",
nullptr, // no validation
"Set this to the name of an alternate layout property if you want the "
"client to use this property of log configuration instead of standard "
"layout. This is deprecated and designed to support specific Facebook "
"use cases. Do not use.",
CLIENT | REQUIRES_RESTART /* used in ClientImpl::create() */
| DEPRECATED);
init(
"findtime-force-approximate",
&findtime_force_approximate,
"false",
nullptr, // no validation
"(server-only setting) Override the client-supplied FindKeyAccuracy with "
"FindKeyAccuracy::APPROXIMATE. This makes the resource requirements of "
"FindKey requests small and predictable, at the expense of accuracy",
SERVER,
SettingsCategory::Performance);
init("read-storage-tasks-max-mem-bytes",
&read_storage_tasks_max_mem_bytes,
"16106127360", // 15GB
parse_positive<size_t>(),
"Maximum amount of memory that can be allocated by read storage tasks.",
SERVER,
SettingsCategory::ResourceManagement);
init("append-stores-max-mem-bytes",
&append_stores_max_mem_bytes,
"2G",
parse_positive<size_t>(),
"Maximum total size of in-flight StoreStorageTasks from appenders and "
"recoveries. Evenly divided among shards.",
SERVER,
SettingsCategory::ResourceManagement);
init("rebuilding-stores-max-mem-bytes",
&rebuilding_stores_max_mem_bytes,
"2G",
parse_positive<size_t>(),
"Maximum total size of in-flight StoreStorageTasks from rebuilding. "
"Evenly divided among shards.",
SERVER,
SettingsCategory::ResourceManagement);
init("initial-config-load-timeout",
&initial_config_load_timeout,
"15s",
validate_positive<ssize_t>(),
"maximum time to wait for initial server configuration until giving up",
SERVER | REQUIRES_RESTART | CLI_ONLY,
SettingsCategory::Configuration);
init(
"zk-create-root-znodes",
&zk_create_root_znodes,
"true",
nullptr, // no validation
"If \"false\", the root znodes for a tier should be pre-created "
"externally before logdevice can do any ZooKeeper epoch store operations",
SERVER,
SettingsCategory::EpochStore);
init("epoch-store-double-write-new-serialization-format",
&epoch_store_double_write_new_serialization_format,
"false",
nullptr, // no validation
"If set, epoch stores will double write any data it modifies to its "
"corresponding znode and the data serialized with the new serialization "
"format to the parent znode",
SERVER,
SettingsCategory::EpochStore);
init("ssl-load-client-cert",
&ssl_load_client_cert,
"false",
nullptr, // no validation
"Set to include client certificate for mutual ssl authentication",
CLIENT | REQUIRES_RESTART,
SettingsCategory::Security);
init("ssl-cert-path",
&ssl_cert_path,
"",
nullptr, // no validation
"Path to LogDevice SSL certificate.",
SERVER | CLIENT | REQUIRES_RESTART /* used in Worker ctor */,
SettingsCategory::Security);
init("ssl-ca-path",
&ssl_ca_path,
"",
nullptr, // no validation
"Path to CA certificate.",
SERVER | CLIENT | REQUIRES_RESTART /* used in Worker ctor */,
SettingsCategory::Security);
init("ssl-key-path",
&ssl_key_path,
"",
nullptr, // no validation
"Path to LogDevice SSL key.",
SERVER | CLIENT | REQUIRES_RESTART /* used in Worker ctor */,
SettingsCategory::Security);
init("ssl-cert-refresh-interval",
&ssl_cert_refresh_interval,
"300s",
validate_positive<ssize_t>(),
"TTL for an SSL certificate that we have loaded from disk.",
SERVER | CLIENT | REQUIRES_RESTART /* used in Worker ctor */,
SettingsCategory::Security);
init("ssl-use-session-resumption",
&ssl_use_session_resumption,
"false",
nullptr,
"If enabled, new SSL connections will attempt to resume previously "
"cached sessions.",
SERVER | CLIENT,
SettingsCategory::Security);
init("ssl-boundary",
&ssl_boundary,
"none",
nullptr, // no validation
"Enable SSL in cross-X traffic, where X is the setting. Example: if set "
"to \"rack\", all cross-rack traffic will be sent over SSL. Can be one "
"of "
"\"none\", \"node\", \"rack\", \"row\", \"cluster\", \"data_center\" or "
"\"region\". If a value other than \"none\" or \"node\" is specified on "
"the client, --my-location has to be specified as well.",
SERVER | CLIENT,
SettingsCategory::Security);
init(
"my-location",
&client_location,
"",
[](const std::string& val) -> folly::Optional<NodeLocation> {
folly::Optional<NodeLocation> res;
if (val.empty()) {
return res;
}
res.assign(NodeLocation());
if (res->fromDomainString(val) != 0) {
throw boost::program_options::error(
"Invalid value for --my-location. Expecting valid location "
"string: \"{region}.{dc}.{cluster}.{row}.{rack}\"");
}
return res;
},
"{client-only setting}. Specifies the location of the machine running "
"the "
"client. Used for determining whether to use SSL based on "
"--ssl-boundary. Also used in local SCD reading. "
"Format: \"{region}.{dc}.{cluster}.{row}.{rack}\".",
CLIENT | REQUIRES_RESTART /* saved in Sender::initMyLocation() */,
SettingsCategory::Core);
// IO priority (ioprio class,data pair) applied to the 'slow' storage thread
// pool. Fixes: the validator's error message referred to "--low-ioprio"
// although the option is "--slow-ioprio", and the help text was missing
// spaces between concatenated sentences ("priorities.See", "values.\"any\"").
init(
    "slow-ioprio",
    &slow_ioprio,
    "",
    [](const std::string& val) -> folly::Optional<std::pair<int, int>> {
      folly::Optional<std::pair<int, int>> res;
      if (parse_ioprio(val, &res) != 0) {
        throw boost::program_options::error(
            "value of --slow-ioprio must be of the form "
            "<class>,<data> e.g. 2,6; " +
            val + " given.");
      }
      return res;
    },
    "IO priority to request for 'slow' storage threads. "
    "Storage threads in the 'slow' thread pool handle high-latency RocksDB "
    "IO requests, primarily data reads. "
    "Not all kernel IO schedulers supports IO priorities. "
    "See man ioprio_set for possible values. "
    "\"any\" or \"\" to keep the default.",
    SERVER | REQUIRES_RESTART /* used once when ExecStorageThread starts */,
    SettingsCategory::ResourceManagement);
init("checksumming-enabled",
&checksumming_enabled,
"false",
nullptr, // no validation
"A switch to turn on/off checksumming for all LogDevice protocol "
"messages."
" If false: no checksumming is done, "
"If true: checksumming-blacklisted-messages is consulted.",
SERVER | CLIENT | EXPERIMENTAL,
SettingsCategory::Network);
init(
"checksumming-blacklisted-messages",
&checksumming_blacklisted_messages,
// see message_types.inc for message mnemonics
// We can potentially leave out the following messages for performance:
// APPEND, APPENDED, STORE, STORED, LOGS_CONFIG_API, LOGS_CONFIG_API_REPLY
"",
[](const std::string& val) -> std::set<char> {
std::set<char> res;
for (char c : val) {
res.insert(c);
}
return res;
},
"Used to control what messages shouldn't be checksummed at "
"the protocol layer",
SERVER | CLIENT | REQUIRES_RESTART | EXPERIMENTAL,
SettingsCategory::Network);
init("scd-timeout",
&scd_timeout,
"300s",
validate_nonnegative<ssize_t>(),
"Timeout after which ClientReadStream considers a storage node down if "
"it does not send any data for some time but the socket to it remains "
"open.",
SERVER /* for event log */ | CLIENT,
SettingsCategory::ReaderFailover);
init("scd-all-send-all-timeout",
&scd_all_send_all_timeout,
"600s",
validate_nonnegative<ssize_t>(),
"Timeout after which ClientReadStream fails over to asking all storage "
"nodes to send everything they have if it is not able to make progress "
"for some time",
SERVER /* for event log */ | CLIENT,
SettingsCategory::ReaderFailover);
init(
"verify-checksum-before-replicating",
&verify_checksum_before_replicating,
"true",
nullptr, // no validation
"If set, sequencers and rebuilding will verify checksums of records that "
"have checksums. If there is a mismatch, sequencer will reject the "
"append. Note that this setting doesn't make storage nodes verify "
"checksums. Note that if not set, and "
"--rocksdb-verify-checksum-during-store is set, a corrupted record kills "
"write-availability for that log, as the appender keeps retrying and "
"storage nodes reject the record.",
SERVER,
SettingsCategory::WritePath);
init("server-based-nodes-configuration-store-timeout",
&server_based_nodes_configuration_store_timeout,
"60s",
validate_nonnegative<ssize_t>(),
"The timeout of the Server Based Nodes Configuration Store's "
"NODES_CONFIGURATION polling round.",
CLIENT | SERVER,
SettingsCategory::Configuration);
// Adjacent string literals concatenate; "multi-wave" needs a trailing space
// so the help text doesn't render as "multi-wavebackoff".
init(
    "server-based-nodes-configuration-polling-wave-timeout",
    &server_based_nodes_configuration_polling_wave_timeout,
    "500ms..10s",
    validate_positive<ssize_t>(),
    "timeout settings for server based Nodes Configuration Store's multi-wave "
    "backoff retry behavior",
    CLIENT | SERVER,
    SettingsCategory::Configuration);
// Adjacent string literals concatenate; "Store" needs a trailing space so
// the help text doesn't render as "Storepolling".
init(
    "server-based-nodes-configuration-store-polling-responses",
    &server_based_nodes_configuration_store_polling_responses,
    "2",
    parse_positive<ssize_t>(),
    "how many successful responses for server based Nodes Configuration Store "
    "polling to wait for each round",
    CLIENT | SERVER,
    SettingsCategory::Configuration);
init("server_based_nodes_configuration_store_polling_extra_requests",
&server_based_nodes_configuration_store_polling_extra_requests,
"1",
parse_nonnegative<ssize_t>(),
"how many extra requests to send for server based Nodes Configuration "
"Store polling in addition to the required response for each wave",
CLIENT | SERVER,
SettingsCategory::Configuration);
init("nodes-configuration-seed-servers",
&nodes_configuration_seed_servers,
"",
nullptr, // no validation
"The seed string that will be used to fetch the initial nodes "
"configuration. It can be in the form string:<server1>,<server2>,etc. "
"Or you can provide an smc tier via 'smc:<smc_tier>'. If it's empty, "
"NCM client bootstrapping is not used.",
CLIENT,
SettingsCategory::Configuration);
init("nodes-configuration-init-retry-timeout",
&nodes_configuration_init_retry_timeout,
"500ms..5s",
validate_positive<ssize_t>(),
"timeout settings for the exponential backoff retry behavior for "
"initializing Nodes Configuration for the first time",
CLIENT | SERVER,
SettingsCategory::Configuration);
init("nodes-configuration-init-timeout",
&nodes_configuration_init_timeout,
"60s",
validate_positive<ssize_t>(),
"defines the maximum time allowed on the initial nodes configuration "
"fetch.",
CLIENT | SERVER,
SettingsCategory::Configuration);
init("use-tcp-keep-alive",
&use_tcp_keep_alive,
"true",
nullptr, // no validation
"Enable TCP keepalive for all connections",
SERVER | CLIENT,
SettingsCategory::Network);
init("tcp-keep-alive-time",
&tcp_keep_alive_time,
"-1",
nullptr, // no validation
"TCP keepalive time. This is the time, in seconds, before the first "
"probe will be sent. If negative the OS default will be used.",
SERVER | CLIENT,
SettingsCategory::Network);
// Adjacent string literals concatenate; "probes." needs a trailing space so
// the help text doesn't render as "probes.If".
init("tcp-keep-alive-intvl",
     &tcp_keep_alive_intvl,
     "-1",
     nullptr, // no validation
     "TCP keepalive interval. The interval between successive probes. "
     "If negative the OS default will be used.",
     SERVER | CLIENT,
     SettingsCategory::Network);
init("tcp-keep-alive-probes",
&tcp_keep_alive_probes,
"-1",
nullptr, // no validation
"TCP keepalive probes. How many unacknowledged probes before the "
"connection is considered broken. "
"If negative the OS default will be used.",
SERVER | CLIENT,
SettingsCategory::Network);
init("tcp-user-timeout",
&tcp_user_timeout,
"300000", // 5 min
nullptr, // no validation
"The time in milliseconds that transmitted data may remain "
"unacknowledged "
"before TCP will close the connection. "
"0 for system default. "
"-1 to disable. "
"default is 5min = 300000",
SERVER | CLIENT,
SettingsCategory::Network);
init(
"include-cluster-name-on-handshake",
&include_cluster_name_on_handshake,
"true",
nullptr, // no validation
"The cluster name of the connection initiator will be included in the "
"LogDevice protocol handshake. If the cluster name of the initiator does "
"not match the actual cluster name of the destination, the connection is "
"terminated. We don't know of any good reasons to disable this option. "
"If you disable it and move some hosts from one cluster to another, you "
"may have a bad time: some clients or servers may not pick up the update "
"and keep talking to the hosts as if they weren't moved; this may "
"corrupt "
"metadata. Used for testing and internally created connections only.",
SERVER | CLIENT,
SettingsCategory::Testing);
init("isolated-sequencer-ttl",
&isolated_sequencer_ttl,
"1200s",
nullptr, // no validation
"How long we wait before disabling isolated sequencers. "
"A sequencer is declared isolated if nodes outside of the innermost "
"failure domain of the sequencer's epoch appear unreachable "
"to the failure detector. For example, a sequencer of a rack-replicated "
"log epoch is declared isolated if the failure detector can't reach "
"any nodes outside of that sequencer node's rack. A disabled sequencer "
"rejects all append requests.",
SERVER,
SettingsCategory::WritePath);
init("stats-collection-interval",
&stats_collection_interval,
"60s",
nullptr, // no validation
"How often to collect and submit stats upstream. "
"Set to <=0 to disable collection of stats.",
SERVER | CLIENT | REQUIRES_RESTART /* passed to ctor of
StatsCollectionThread */
,
SettingsCategory::Monitoring);
init(
"esn-bits",
&esn_bits,
"32",
validate_range<ssize_t>(2, 32),
"How many bits to use for sequence numbers within an epoch. LSN bits [n,"
" 32) are guaranteed to be 0. Used for testing ESN exhaustion.",
SERVER | REQUIRES_RESTART /* passed to Sequencer ctor in AllSequencers */,
SettingsCategory::Testing);
init("client-initial-redelivery-delay",
&client_initial_redelivery_delay,
"1s",
validate_positive<ssize_t>(),
"Initial delay to use when reader application rejects a record or gap",
SERVER /* event log */ | CLIENT,
SettingsCategory::ReadPath);
init("client-max-redelivery-delay",
&client_max_redelivery_delay,
"30s",
validate_positive<ssize_t>(),
"Maximum delay to use when reader application rejects a record or gap",
SERVER /* event log */ | CLIENT,
SettingsCategory::ReadPath);
init("include-destination-on-handshake",
&include_destination_on_handshake,
"true",
nullptr, // no validation
"Include the destination node ID in the LogDevice protocol handshake. "
"If the actual node ID of the connection target does not match the "
"intended destination ID, the connection is terminated.",
SERVER | CLIENT,
SettingsCategory::Network);
init("client-connect-with-fizz",
&client_connect_with_fizz,
"false",
nullptr,
"Use Fizz (TLS 1.3) when establishing secure connections to servers.",
CLIENT | REQUIRES_RESTART | DEPRECATED,
SettingsCategory::Network);
init("server-connect-with-fizz",
&server_connect_with_fizz,
"false",
nullptr,
"Use Fizz (TLS 1.3) when establishing secure connections to other"
" servers.",
SERVER | REQUIRES_RESTART | DEPRECATED,
SettingsCategory::Network);
init("sequencer-batching",
&sequencer_batching,
"false",
nullptr, // no validation
"Accumulate appends from clients and batch them together to create "
"fewer records in the system. This setting is only used when the log "
"group doesn't override it",
SERVER,
SettingsCategory::Batching);
init("socket-batching-time-trigger",
&socket_batching_time_trigger,
"0s",
nullptr, // no validation
"Socket batching allows us to batch data before flushing it to the "
"socket to save CPU. It increases the amount of "
"memory consumed. And introduces additional latency when sending "
"messages.",
SERVER | CLIENT,
SettingsCategory::Batching);
init("sequencer-batching-time-trigger",
&sequencer_batching_time_trigger,
"1s",
nullptr, // no validation
"Sequencer batching (if used) flushes buffered appends for a log when "
"the oldest buffered append is this old. When enabled, this gets "
"applied to the first new batch. This setting is only used when the log "
"group doesn't override it",
SERVER,
SettingsCategory::Batching);
init("sequencer-batching-size-trigger",
&sequencer_batching_size_trigger,
"-1",
parse_validate_lower_bound<ssize_t>(-1),
"Sequencer batching (if used) flushes buffered appends for a log when "
"the total amount of buffered uncompressed data reaches this many bytes "
"(if positive). When enabled, this gets applied to the first new batch. "
"This setting is only used when the log group doesn't override it",
SERVER,
SettingsCategory::Batching);
init("sequencer-batching-compression",
&sequencer_batching_compression,
"zstd",
parse_compression,
"Compression setting for sequencer batching (if used). It can be 'none' "
"for no compression; 'zstd' for ZSTD; 'lz4' for LZ4; or lz4_hc for LZ4 "
"High Compression. The default is ZSTD. When enabled, this gets applied "
"to the first new batch. This setting is only used when the log group "
"doesn't override it",
SERVER,
SettingsCategory::Batching);
init(
"sequencer-batching-passthru-threshold",
&sequencer_batching_passthru_threshold,
"-1",
parse_validate_lower_bound<ssize_t>(-1),
"Sequencer batching (if used) will pass through any appends with payload "
"size over this threshold (if positive). This saves us a compression "
"round trip when a large batch comes in from BufferedWriter and the "
"benefit of batching and recompressing would be small.",
SERVER,
SettingsCategory::Batching);
init("num-processor-background-threads",
&num_processor_background_threads,
"0",
nullptr, // no validation
"Number of threads in Processor's background thread pool. Background "
"threads are used by, e.g., BufferedWriter to construct/compress "
"large batches. If 0 (default), use num-workers.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Execution);
init("buffered-writer-bg-thread-bytes-threshold",
&buffered_writer_bg_thread_bytes_threshold,
"4096",
parse_nonnegative<ssize_t>(),
"BufferedWriter can send batches to a background thread. For small "
"batches, where the overhead dominates, this will just slow things "
"down. If the total size of the batch is less than this, it will "
"constructed / compressed on the Worker thread, blocking other appends "
"to all logs in that shard. If larger, it will be enqueued to a helper "
"thread.",
SERVER | CLIENT,
SettingsCategory::Batching);
init("buffered-writer-zstd-level",
&buffered_writer_zstd_level,
"1",
parse_validate_range<int>(1, ZSTD_maxCLevel()),
"Zstd compression level to use in BufferedWriter.",
SERVER | CLIENT,
SettingsCategory::Batching);
init("background-queue-size",
&background_queue_size,
"100000",
parse_positive<ssize_t>(),
"Maximum number of events we can queue to background thread. A single "
"queue is shared by all threads in a process.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Execution);
init(
"skip-recovery",
&skip_recovery,
"false",
nullptr, // no validation
"Skip recovery. For tests only. When this option is enabled, recovery "
"does not recover any data but instead immediately marks all epochs as "
"clean in the epoch store and purging immediately marks all epochs as "
"clean in the local log store. This feature should be used as a last "
"resort if a cluster's availability is hurt by recovery and it is "
"important to quickly restore availability at the cost of some "
"inconsistencies. On-the-fly changes of this setting will only apply to "
"new LogRecoveryRequests and will not affect recoveries that are already "
"in progress.",
SERVER,
SettingsCategory::Testing);
init("single-empty-erm",
&single_empty_erm,
"true",
nullptr, // no validation
"A single E:EMPTY response for an epoch is sufficient for "
"GetEpochRecoveryMetadataRequest to consider the epoch as "
"empty if this option is set.",
SERVER | EXPERIMENTAL,
SettingsCategory::Recovery);
init(
"disable-check-seals",
&disable_check_seals,
"false",
nullptr, // no validation
"if true, 'get sequencer state' requests will not be sending 'check "
"seal' "
"requests that they normally do in order to confirm that this sequencer "
"is the most recent one for the log. This saves network and CPU, but may "
"cause getSequencerState() calls to return stale results. Intended for "
"use in production emergencies only.",
SERVER,
SettingsCategory::Performance);
init("recovery-seq-metadata-timeout",
&recovery_seq_metadata_timeout,
"2s..60s",
validate_positive<ssize_t>(),
"Retry backoff timeout used for checking if the latest metadata log "
"record is fully replicated during log recovery.",
SERVER,
SettingsCategory::Recovery);
init("bridge-record-in-empty-epoch",
&bridge_record_in_empty_epoch,
"true",
nullptr, // no validation
"epoch recovery will insert bridge records for empty epoch for data "
"logs. This helps with read availability and efficiency during epoch "
"transitions.",
SERVER | DEPRECATED,
SettingsCategory::Recovery);
init(
"byte-offset-interval",
&byte_offset_interval_DEPRECATED,
"1",
parse_positive<ssize_t>(),
"DEPRECATED! How often the sequencer sends byte offsets to storage nodes."
"Measured in bytes.",
SERVER | REQUIRES_RESTART | DEPRECATED /* passed to Sequencer ctor */);
// Adjacent string literals concatenate; "feature" and "for" need trailing
// spaces so the help text doesn't render as "featurewas" / "forlogs.".
init("byte-offsets",
     &byte_offsets,
     "false",
     nullptr, // no validation
     "Enables the server-side byte offset calculation feature. "
     "NOTE: There is no guarantee of byte offsets result correctness if "
     "feature "
     "was switched on->off->on in period shorter than retention value for "
     "logs.",
     SERVER,
     SettingsCategory::WritePath);
// Adjacent string literals concatenate; "connection" and "config" need
// trailing spaces so the help text doesn't render as "connectionwill" /
// "configversion".
init("enable-config-synchronization",
     &enable_config_synchronization,
     "false",
     nullptr, // no validation
     "With config synchronization enabled, nodes on both ends of a connection "
     "will synchronize their configs if there is a mismatch in the config "
     "version.",
     SERVER | CLIENT | DEPRECATED,
     SettingsCategory::Configuration);
init("get-erm-for-empty-epoch",
&get_erm_for_empty_epoch,
"true",
nullptr,
"If true, Purging will get the EpochRecoveryMetadata "
"even if the epoch is empty locally",
SERVER | EXPERIMENTAL,
SettingsCategory::Recovery);
init(
"enable-logsconfig-manager",
&enable_logsconfig_manager,
"true",
nullptr,
"If true, logdeviced will load the logs configuration from the internal "
"replicated storage and will ignore the logs section in the config file. "
"This also enables the remote management API for logs config.",
SERVER | CLIENT,
SettingsCategory::Configuration);
init(
"logsconfig-manager-grace-period",
&logsconfig_manager_grace_period,
"0ms",
validate_nonnegative<ssize_t>(),
"Grace period before making a change to the logs config available to the "
"server.",
SERVER | CLIENT,
SettingsCategory::Configuration);
// Help-text typo fixed: "snapsthotted" -> "snapshotted".
init("logsconfig-snapshotting",
     &logsconfig_snapshotting,
     "true",
     nullptr,
     "Allow logsconfig to be snapshotted onto a snapshot log.",
     SERVER | DEPRECATED,
     SettingsCategory::Configuration);
init("disable-logsconfig-trimming",
&disable_logsconfig_trimming,
"false",
nullptr,
"Disable the trimming of logsconfig delta log. Used for testing only.",
SERVER,
SettingsCategory::Testing);
init("logsconfig-max-delta-records",
&logsconfig_max_delta_records,
"5000",
nullptr,
"How many delta records to keep in the logsconfig deltas log before we "
"snapshot it.",
SERVER,
SettingsCategory::Configuration);
init(
"logsconfig-max-delta-bytes",
&logsconfig_max_delta_bytes,
"10485760", // 10MB
nullptr,
"How many bytes of deltas to keep in the logsconfig deltas log before we "
"snapshot it.",
SERVER,
SettingsCategory::Configuration);
init("client-config-fetch-allowed",
&client_config_fetch_allowed,
"true",
nullptr, // no validation
"If true, servers will be allowed to fetch configs from the client side "
"of a connection during config synchronization.",
SERVER | DEPRECATED,
SettingsCategory::Configuration);
init("unreleased-record-detector-interval",
&unreleased_record_detector_interval,
"30s",
validate_nonnegative<ssize_t>(),
"Time interval at which to check for unreleased records in storage "
"nodes. Any log which has unreleased records, and for which no records "
"have been released for two consecutive "
"unreleased-record-detector-intervals, is suspected of having a dead "
"sequencer. Set to 0 to disable check.",
SERVER,
SettingsCategory::ReadPath);
init(
"grace-counter-limit",
&grace_counter_limit,
"2", // 3 strikes and you're out
validate_lower_bound<int>(-1),
"Maximum number of consecutive grace periods a storage node may fail to "
"send a record or gap (if in all read all mode) before it is considered "
"disgraced and client read streams no longer wait for it. If all nodes "
"are disgraced or in GAP state, a gap record is issued. May be 0. Set to "
"-1 to disable grace counters and use simpler logic: no disgraced nodes, "
"issue gap record as soon as grace period expires.",
SERVER | CLIENT,
SettingsCategory::ReadPath);
init("test-reject-hello",
&reject_hello,
"OK",
validate_reject_hello,
"if set to the name of an error code, reject all HELLOs "
"with the specified error code. Currently supported values are ACCESS "
"and PROTONOSUPPORT. Used for testing.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("force-on-demand-logs-config",
&force_on_demand_logs_config,
"false",
nullptr,
"Set this to true if you want the client to get log configuration on "
"demand from the server even when log configuration is present in the "
"main config file.",
SERVER | CLIENT | REQUIRES_RESTART | DEPRECATED,
SettingsCategory::Configuration);
init("use-dedicated-server-to-server-address",
&use_dedicated_server_to_server_address,
"false",
nullptr,
"Temporary switch to roll out dedicated server-to-server address to "
"running clusters with minor disruption. This setting will be removed "
"soon in a future release as soon as the rollout is completed.",
SERVER,
SettingsCategory::Network);
init("test-bypass-recovery",
&bypass_recovery,
"false",
nullptr,
"If set, sequencers will not automatically run recovery upon "
"activation. Recovery can be started using the 'startrecovery' admin "
"command. Note that last released lsn won't get advanced without "
"recovery.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
// Help-text typo fixed: "until the last one comes is" -> "comes in".
init("hold-store-replies",
     &hold_store_replies,
     "false",
     nullptr,
     "If set, we hold all STORED messages (which are replies to STORE "
     "messages), until the last one comes in. Has some race conditions and "
     "other down sides, so only use in tests. Used to ensure that all "
     "storage nodes have had a chance to process the STORE messages, even if "
     "one returns PREEMPTED or another error condition.",
     SERVER | REQUIRES_RESTART,
     SettingsCategory::Testing);
init("sync-metadata-log-writes",
&sync_metadata_log_writes,
"true",
nullptr,
"If set, storage nodes will wait for wal sync of metadata log "
"writes before sending the STORED ack.",
SERVER);
init("publish-single-histogram-stats",
&publish_single_histogram_stats,
"false",
nullptr, // no validation
"If true, single histogram values will be published alongside the rate "
"values.",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("event-log-snapshotting",
&event_log_snapshotting,
"true",
nullptr,
"Allow the event log to be snapshotted onto a snapshot log. This "
"requires the event log group to contain two logs, the first one being "
"the snapshot log and the second one being the delta log.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Rebuilding);
init("event-log-snapshot-compression",
&event_log_snapshot_compression,
"true",
nullptr,
"Use ZSTD compression to compress event log snapshots",
SERVER | CLIENT,
SettingsCategory::Rebuilding);
// Adjacent string literals concatenate; "Sender." needs a trailing space so
// the help text doesn't render as "Sender.Range".
init("server-default-dscp",
     &server_dscp_default,
     "0",
     parse_validate_range<uint8_t>(0, 63),
     "Use default DSCP to setup to server sockets at Sender. "
     "Range was defined by https://tools.ietf.org/html/rfc4594#section-1.4.4",
     SERVER | REQUIRES_RESTART,
     SettingsCategory::Configuration);
// Adjacent string literals concatenate; "Sender." needs a trailing space so
// the help text doesn't render as "Sender.Range".
init("client-default-dscp",
     &client_dscp_default,
     "0",
     parse_validate_range<uint8_t>(0, 63),
     "Use default DSCP to setup to client sockets at Sender. "
     "Range was defined by https://tools.ietf.org/html/rfc4594#section-1.4.4",
     SERVER | CLIENT | REQUIRES_RESTART,
     SettingsCategory::Configuration);
// Help-text typo fixed: closing quote was missing on 'high'.
init("client-default-network-priority",
     &client_default_network_priority,
     "",
     parse_network_priority,
     "Sets the default client network priority. Clients will connect to the "
     "server port associated with this priority, unless "
     "'enable-port-based-qos' is false. Value must be one of 'low','medium', "
     "or 'high'.",
     CLIENT | SERVER,
     SettingsCategory::Network);
init("enable-port-based-qos",
&enable_port_based_qos,
"false",
nullptr,
"Feature gate setting for allowing port-based QoS / connections per "
"network priority. If disabled, all addresses will resolve to the "
"default_data_address listed in nodes configuration. Note that this "
"feature does not apply to connections between servers, only client to "
"server.",
CLIENT | SERVER,
SettingsCategory::Network);
init("disable-event-log-trimming",
&disable_event_log_trimming,
"false",
nullptr,
"Disable trimming of the event log (for tests only)",
SERVER,
SettingsCategory::Testing);
init("event-log-max-delta-records",
&event_log_max_delta_records,
"5000",
nullptr,
"How many delta records to keep in the event log before we "
"snapshot it.",
SERVER,
SettingsCategory::Rebuilding);
init("event-log-max-delta-bytes",
&event_log_max_delta_bytes,
"10485760", // 10MB
parse_nonnegative<ssize_t>(),
"How many bytes of deltas to keep in the event log before "
"we snapshot it.",
SERVER,
SettingsCategory::Rebuilding);
init("event-log-retention",
&event_log_retention,
"14d",
nullptr,
"How long to keep a history of snapshots and deltas for "
"the event log. "
"Unused if the event log has never been snapshotted or "
"if event log "
"trimming is disabled with disable-event-log-trimming.",
SERVER,
SettingsCategory::Rebuilding);
init("append-store-durability",
&append_store_durability,
"async_write",
nullptr, // no validation
"The minimum guaranteed durability of record copies before a storage "
"node "
"confirms the STORE as successful. Can be one of \"memory\" if record "
"is to be stored in a RocksDB memtable only (logdeviced memory), "
"\"async_write\" if record is to be additionally written to the RocksDB "
"WAL file (kernel memory, frequently synced to disk), or \"sync_write\" "
"if the record is to be written to the memtable and WAL, and the STORE "
"acknowledged only after the WAL is synced to disk by a separate WAL "
"syncing thread using fdatasync(3).",
SERVER,
SettingsCategory::WritePath);
init(
"rebuild-store-durability",
&rebuild_store_durability,
"async_write",
nullptr, // no validation
"The minimum guaranteed durability of rebuilding writes before a storage "
"node will confirm the STORE as successful. Can be one of \"memory\", "
"\"async_write\", or \"sync_write\". See --append-store-durability for "
"a description of these options.",
SERVER,
SettingsCategory::Rebuilding);
init("rebuilding-dont-wait-for-flush-callbacks",
&rebuilding_dont_wait_for_flush_callbacks,
"false",
nullptr, // no validation
"Regardless of the value of 'rebuild-store-durability', assume "
"any successfully completed store is durable without waiting for "
"flush notifications. NOTE: Use of this setting will lead to silent "
"under-replication when 'rebuild-store-durability' is set to 'MEMORY'. "
"Use for testing and I/O characterization only.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Rebuilding);
init("rebuild-without-amends",
&rebuild_without_amends,
"false",
nullptr, // no validation
"During rebuilding, send a normal STORE rather than a STORE with "
"the "
"AMEND flag, when updating the copyset of nodes that already have a "
"copy "
"of the record. This option is used by integration tests to fully "
"divorce "
"append content from records touched by rebuilding.",
SERVER,
SettingsCategory::Testing);
// Help-text grammar fixed: "can be benefit" -> "can benefit".
init("scd-copyset-reordering-max",
     &scd_copyset_reordering_max,
     "hash-shuffle",
     parse_scd_copyset_reordering,
     "SCDCopysetReordering values that clients may ask servers to "
     "use. "
     "Currently available options: "
     "none, hash-shuffle (default), hash-shuffle-client-seed. "
     "hash-shuffle results in only one storage node reading a record "
     "block "
     "from disk, and then serving it to multiple readers from the "
     "cache. "
     "hash-shuffle-client-seed enables multiple storage nodes to "
     "participate "
     "in reading the log, which can benefit non-disk-bound "
     "workloads.",
     SERVER | CLIENT,
     SettingsCategory::ReadPath);
// Help-text grammar fixed: "can be benefit" -> "can benefit".
init("rsm-scd-copyset-reordering",
     &rsm_scd_copyset_reordering,
     "hash-shuffle",
     parse_scd_copyset_reordering,
     "SCDCopysetReordering values that clients ask servers to "
     "use. "
     "Currently available options: "
     "none, hash-shuffle (default), hash-shuffle-client-seed. "
     "hash-shuffle results in only one storage node reading a record "
     "block "
     "from disk, and then serving it to multiple readers from the "
     "cache. "
     "hash-shuffle-client-seed enables multiple storage nodes to "
     "participate "
     "in reading the log, which can benefit non-disk-bound "
     "workloads.",
     SERVER | CLIENT | REQUIRES_RESTART,
     SettingsCategory::ReadPath);
init("sequencer-metadata-log-write-retry-delay",
&sequencer_metadata_log_write_retry_delay,
"500ms..30s-2x",
nullptr, // no validation
"The retry delay for sequencer writing into its own "
"metadata log "
"during log reconfiguration.",
SERVER,
SettingsCategory::Configuration);
init("sequencer-epoch-store-write-retry-delay",
&sequencer_epoch_store_write_retry_delay,
"5s..1min-2x",
nullptr, // no validation
"The retry delay for sequencer writing log metadata "
"into the epoch store "
"during log reconfiguration.",
SERVER,
SettingsCategory::Configuration);
init("sequencer-historical-metadata-retry-delay",
&sequencer_historical_metadata_retry_delay,
"5s..1min-2x",
nullptr, // no validation
"The retry delay for sequencer reading metadata log "
"for historical "
"epoch metadata during log reconfiguration.",
SERVER,
SettingsCategory::Configuration);
init("weighted-copyset-selector",
&weighted_copyset_selector,
"true",
nullptr, // no validation
"If true, the shiny new copyset selector will "
"be used for everything. "
"If false, legacy copyset selector will be used "
"when possible. "
"There should be no reason to disable it, "
"unless it's broken in some way.",
SERVER | DEPRECATED,
SettingsCategory::WritePath);
init("copyset-locality-min-scope",
©set_locality_min_scope,
"rack",
nullptr, // no validation
"Tl;dr: if you experience data distribution "
"imbalance caused by hot "
"logs, and you have plenty of unused "
"cross-rack/cross-region bandwidth, "
"try changing this setting to \"root\"; "
"otherwise the default \"rack\" "
"is just fine. More details: let X be the "
"value of this setting, and "
"let Y be the biggest scope in log's "
"replicateAcross property; if Y < X, "
"nothing happens; if Y >= X, at least one "
"copy of each record will be "
"stored in sequencer's domain of scope Y "
"(not X), when it's possible "
"without affecting average data "
"distribution. This, combined with "
"chain-sending, typically reduces the "
"number of cross-Y hops by one per "
"record.",
SERVER,
SettingsCategory::WritePath);
init("test-do-not-pick-in-copysets",
&test_do_not_pick_in_copysets,
"",
parse_recipients_list,
"Copyset selectors won't pick these "
"nodes. Comma-separated list of node "
"indexes, e.g. '1,2,3'. Used in tests.",
SERVER,
SettingsCategory::Testing);
// Help-text typo fixed: "per-log basic" -> "per-log basis".
init("traffic-shadow-enabled",
     &traffic_shadow_enabled,
     "false", // opt-in: defaults to false
     nullptr, // no custom validation necessary
     "Controls the traffic shadowing feature. Defaults to false to disable "
     "shadowing on all clients writing to a cluster. Must be set to true to "
     "allow traffic shadowing, which will then be controlled on a per-log "
     "basis through parameters in LogsConfig.",
     CLIENT,
     SettingsCategory::Monitoring);
init("shadow-client-creation-retry-interval",
&shadow_client_creation_retry_interval,
"60s",
validate_nonnegative<ssize_t>(),
"Failed shadow appends because shadow client was not available, "
"enqueue a client recreation request. The retry mechanism retries "
"the enqueued attempt after these many seconds. See ShadowClient.cpp "
"for a detailed explanation. 0 disables the retry feature. 1 silently "
"drops all client creations so that they only get created from the "
"retry path.",
CLIENT,
SettingsCategory::WritePath);
init("shadow-client-timeout",
&shadow_client_timeout,
"30s",
validate_positive<ssize_t>(),
"Timeout to use for shadow clients. See traffic-shadow-enabled.",
CLIENT,
SettingsCategory::Monitoring);
init("enable-nodes-configuration-manager",
&enable_nodes_configuration_manager,
"true",
nullptr, // no custom validation necessary
"If set, NodesConfigurationManager and its workflow will be enabled.",
CLIENT | SERVER | REQUIRES_RESTART,
SettingsCategory::Configuration);
init("use-nodes-configuration-manager-nodes-configuration",
&use_nodes_configuration_manager_nodes_configuration,
"true",
nullptr, // no custom validation necessary
"If true and enable_nodes_configuration_manager is set, logdevice will "
"use the nodes configuration from the NodesConfigurationManager.",
CLIENT | SERVER,
SettingsCategory::Configuration);
init("nodes-configuration-manager-store-polling-interval",
&nodes_configuration_manager_store_polling_interval,
"3s",
validate_positive<ssize_t>(),
"Polling interval of NodesConfigurationManager to "
"NodesConfigurationStore to read NodesConfiguration",
CLIENT | SERVER,
SettingsCategory::Configuration);
init("nodes-configuration-manager-intermediary-shard-state-timeout",
&nodes_configuration_manager_intermediary_shard_state_timeout,
"180s", // 3 minutes
validate_positive<ssize_t>(),
"Timeout for proposing the transition for a shard from an intermediary "
"state to its 'destination' state",
CLIENT | SERVER, // available on the clients for tooling
SettingsCategory::Configuration);
// Help-text grammar fixed: "this settings shouldn't be applied to large
// number of clients" -> "this setting shouldn't be applied to a large
// number of clients".
init("admin-client-capabilities",
     &admin_client_capabilities,
     "false", // defaults to false
     nullptr, // no custom validation necessary
     "If set, the client will have the capabilities for administrative "
     "operations such as changing NodesConfiguration. Usually used by "
     "emergency tooling. Beware that admin clients use a different "
     "NodesConfigurationStore that may not support a large fan-out, so "
     "this setting shouldn't be applied to a large number of clients "
     "(e.g., through client_settings in settings config).",
     CLIENT,
     SettingsCategory::Configuration);
init("nodes-configuration-file-store-dir",
&nodes_configuration_file_store_dir,
"", // defaults to empty
nullptr,
"If set, the source of truth of nodes configuration will be under this "
"dir instead of the default (zookeeper) store. Only effective when "
"--enable-nodes-configuration-manager=true; Used by "
"integration testing.",
CLIENT | SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("shadow-client",
&shadow_client,
"false",
nullptr,
"Indicates if the Client object being created is a shadow client, "
"i.e. "
"a client used specifically to perform traffic shadowing. This "
"setting "
"allows the client constructor to disable initialization for members "
"that may not be necessary for shadow clients.",
CLIENT | INTERNAL_ONLY);
init("internal-logs-only-client",
&internal_logs_only_client,
"false",
nullptr, // no validation
"if true, LCM won't be loaded and only internal logs will be present in "
"the logs config. Only effective on clients. Used by internal tools",
CLIENT | INTERNAL_ONLY);
init("real-time-reads-enabled",
&real_time_reads_enabled,
"false", // default
nullptr, // no custom validation necessary
"Turns on the experimental real time reads feature.",
SERVER | EXPERIMENTAL,
SettingsCategory::ReadPath);
init("reject-stores-based-on-copyset",
&reject_stores_based_on_copyset,
"true",
nullptr,
"If true, logdevice will prevent writes to nodes that are being drained "
"(rebuilt in RELOCATE mode). Not recommended to set to false unless "
"you're having a production issue.",
SERVER,
SettingsCategory::Rebuilding);
init("read-stream-guaranteed-delivery-efficiency",
&read_stream_guaranteed_delivery_efficiency,
"false",
nullptr, // no validation
"In this mode, readers will prioritize making progress by "
"issuing DATALOSS gaps instead of stalling when too many nodes are "
"unavailable or taking actions that increase network bandwidth. "
"Using this mode will make readers less robust against silent "
"underreplication and copyset inconsistencies.",
CLIENT | DEPRECATED,
SettingsCategory::ReaderFailover);
init("read-streams-use-metadata-log-only",
&read_streams_use_metadata_log_only,
"true",
nullptr,
"If true, the NodeSetFinder within ClientReadStream will use "
"only the metadata log rather than the sequencer as source for fetching "
"historical metadata of the log. This option is used only for migration "
"and will be removed at some point.",
CLIENT | SERVER | DEPRECATED);
init("max-sequencer-background-activations-in-flight",
&max_sequencer_background_activations_in_flight,
"20",
nullptr, // no validation
"Max number of concurrent background sequencer activations to run. "
"Background sequencer activations perform log metadata changes "
"(reprovisioning) when the configuration attributes of a log change.",
SERVER,
SettingsCategory::Configuration);
init(
"sequencer-reactivation-delay-secs",
&sequencer_reactivation_delay_secs,
"60s..3600s",
validate_nonnegative<ssize_t>(),
"Some sequencer reactivations may be postponed when the changes that "
"triggered the reactivation are not important enough to be propagated "
"immediately. E.g., changes to replication factor or window size, need "
"to be made immediately visible on the other hand changes changes to the "
"nodeset due to say the 'exclude_from_nodeset' flag being set as part "
"of a passive drain can be postponed. If the reactivations can be "
"postponed then the delay is chosen to be a random delay seconds "
"between the above range. If 0 then don't postpone ",
SERVER,
SettingsCategory::Configuration);
init("sequencer-background-activation-retry-interval",
&sequencer_background_activation_retry_interval,
"500ms",
nullptr, // no validation
"Retry interval on failures while processing background sequencer "
"activations for reprovisioning.",
SERVER,
SettingsCategory::Configuration);
init(
"use-sequencer-affinity",
&use_sequencer_affinity,
"false",
nullptr, // no validation
"If true, the routing of append requests to sequencers will first try to "
"find a sequencer in the location given by sequencerAffinity() before "
"looking elsewhere.",
SERVER | CLIENT,
SettingsCategory::WritePath);
init("real-time-max-bytes",
&real_time_max_bytes,
"100000000",
nullptr, // no validation
"Max size (in bytes) of released records that we'll keep around to use "
"for real time reads. Includes some cache overhead, so for "
"small records, you'll store less record data than this.",
SERVER | REQUIRES_RESTART | EXPERIMENTAL,
SettingsCategory::ReadPath);
init("real-time-eviction-threshold-bytes",
&real_time_eviction_threshold_bytes,
"80000000",
nullptr, // no validation
"When the real time buffer reaches this size, we evict entries.",
SERVER | REQUIRES_RESTART | EXPERIMENTAL,
SettingsCategory::ReadPath);
init("test-timestamp-linear-transform",
&test_timestamp_linear_transform,
"1,0",
parse_test_timestamp_linear_tranform,
"Coefficients for transforming the timestamp of records for test. "
"The value should contain two integers separated by ','. For example "
"'m,c'. Records timestamp is transformed as m * now() + c."
"A default value of '1,0' makes the timestamp = now() which is expected "
"for all the normal use cases.",
SERVER | REQUIRES_RESTART,
SettingsCategory::Testing);
init("reader-reconnect-delay",
&reader_reconnect_delay,
"10ms..30s",
validate_positive<ssize_t>(),
"When a reader client loses a connection to a storage node, delay after "
"which it tries reconnecting.",
CLIENT,
SettingsCategory::ReadPath);
init("reader-started-timeout",
&reader_started_timeout,
"30s..5min",
validate_positive<ssize_t>(),
"How long a reader client waits for a STARTED reply from a storage node "
"before sending a new START message.",
CLIENT,
SettingsCategory::ReadPath);
init("reader-retry-window-delay",
&reader_retry_window_delay,
"10ms..30s",
validate_positive<ssize_t>(),
"When a reader client fails to send a WINDOW message, delay after which "
"it retries sending it.",
CLIENT,
SettingsCategory::ReadPath);
#define DEF_SETTING(requests) \
init("dont-serve-" #requests "-for-logs", \
&dont_serve_##requests##_logs, \
"", \
parse_log_set, \
"Logs for which " #requests " will not be served", \
SERVER, \
SettingsCategory::Testing); \
init("dont-serve-" #requests "-status", \
&dont_serve_##requests##_status, \
"FAILED", \
nullptr, \
"status that should be returned for logs that are in " \
"\"dont-serve-" #requests "-for-logs\"", \
SERVER, \
SettingsCategory::Testing)
DEF_SETTING(reads);
DEF_SETTING(findtimes);
DEF_SETTING(stores);
#undef DEF_SETTING
init("write-shard-id-in-copyset",
&write_shard_id_in_copyset,
"false",
nullptr,
"Serialize copysets using ShardIDs instead of node_index_t on disk. "
"TODO(T15517759): enable by default once Flexible Log Sharding is fully "
"implemented and this has been thoroughly tested.",
SERVER | EXPERIMENTAL,
SettingsCategory::WritePath);
init("epoch-metadata-use-new-storage-set-format",
&epoch_metadata_use_new_storage_set_format,
"false",
nullptr,
"Serialize copysets using ShardIDs instead of node_index_t inside "
"EpochMetaData. TODO(T15517759): enable by default once Flexible Log "
"Sharding is fully implemented and this has been thoroughly tested.",
SERVER | CLIENT | EXPERIMENTAL,
SettingsCategory::WritePath);
init("test-sequencer-corrupt-stores",
&test_sequencer_corrupt_stores,
"false",
nullptr,
"Simulates bad hardware flipping a bit in the payload of a STORE "
"message.",
SERVER,
SettingsCategory::Testing);
init("message-tracing-types",
&message_tracing_types,
"",
parse_message_types,
"Emit a log line for each sent/received message of the type(s) "
"specified. Separate different types with a comma. 'all' to trace all "
"messages. Prefix the value with '~' to trace all types except the "
"given ones, e.g. '~WINDOW,RELEASE' will trace messages of all types "
"except WINDOW and RELEASE.",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("message-tracing-peers",
&message_tracing_peers,
"",
parse_sockaddrs,
"Emit a log line for each sent/received message to/from the specified "
"address(es). Separate different addresses with a comma, prefix unix "
"socket paths with 'unix://'. An empty unix path will match all unix "
"paths",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init(
"message-tracing-log-level",
&message_tracing_log_level,
"info",
parse_log_level,
"For messages that pass the message tracing filters, emit a log line at "
"this level. One of: critical, error, warning, notify, info, debug, spew",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init(
"reader-slow-shards-detection",
&reader_slow_shards_detection,
"disabled",
[](const std::string& val) {
if (val == "disabled") {
return ReaderSlowShardDetectionState::DISABLED;
} else if (val == "observe-only") {
return ReaderSlowShardDetectionState::OBSERVE_ONLY;
} else if (val == "enabled") {
return ReaderSlowShardDetectionState::ENABLED;
} else {
char buf[1024];
snprintf(buf,
sizeof(buf),
"Invalid value for --reader-slow-shard-detection: %s. "
"Must be one of \"disabled\", \"observe-only\", \"enabled\"",
val.c_str());
throw boost::program_options::error(buf);
}
},
"If true, readers in SCD mode will detect shards that are very slow and "
"may ask the other storage shards to filter them out",
CLIENT,
SettingsCategory::ReaderFailover);
init("reader-slow-shards-detection-moving-avg-duration",
&reader_slow_shards_detection_settings.moving_avg_duration,
"30min",
nullptr,
"When slow shards detection is enabled, duration to use for the moving "
"average",
CLIENT,
SettingsCategory::ReaderFailover);
init("reader-slow-shards-detection-required-margin",
&reader_slow_shards_detection_settings.required_margin,
"10.0",
nullptr,
"When slow shards detection is enabled, sensitivity of the outlier "
"detection algorithm. For instance, if set to 3.0, only consider an "
"outlier a shard that is 300% slower than the others. The required "
"margin is adaptive and may increase or decrease but will be capped "
"at a minimum defined by this setting.",
CLIENT,
SettingsCategory::ReaderFailover);
init("reader-slow-shards-detection-required-margin-decrease-rate",
&reader_slow_shards_detection_settings.required_margin_decrease_rate,
"0.25",
nullptr,
"Rate at which we decrease the required margin when we are "
"healthy. If the value is 0.25 for instance, we will reduce the "
"required margin by 0.25 for every second spent reading.",
CLIENT,
SettingsCategory::ReaderFailover);
init("reader-slow-shards-detection-outlier-duration",
&reader_slow_shards_detection_settings.outlier_duration,
"1min..30min",
validate_positive<ssize_t>(),
"When slow shards detection is enabled, amount of time that "
"we'll "
"consider a shard an outlier if it is slow.",
CLIENT,
SettingsCategory::ReaderFailover);
init("reader-slow-shards-detection-outlier-duration-decrease-"
"rate",
&reader_slow_shards_detection_settings.outlier_duration_decrease_rate,
"0.25",
nullptr,
"When slow shards detection is enabled, rate at which "
"we decrease the "
"time after which we'll try to reinstate an outlier in "
"the read "
"set. If the value is 0.25, for each second of healthy "
"reading we will "
"decrease that time by 0.25s.",
CLIENT,
SettingsCategory::ReaderFailover);
init("rsm-include-read-pointer-in-snapshot",
&rsm_include_read_pointer_in_snapshot,
"true",
nullptr,
"Deprecated! Allow inclusion of read pointer in RSM snapshots. "
"Set to true by default always",
SERVER | CLIENT | DEPRECATED,
SettingsCategory::Core);
init("rsm-snapshot-request-timeout",
&rsm_snapshot_request_timeout,
"30s",
validate_nonnegative<ssize_t>(),
"Overall timeout for GetRsmSnapshotRequest",
CLIENT | SERVER,
SettingsCategory::Configuration);
init("rsm-snapshot-request-wave-timeout",
&rsm_snapshot_request_wave_timeout,
"2s..5s",
validate_positive<ssize_t>(),
"timeout settings for Fetching RSM snapshot via MessageBased Store",
CLIENT | SERVER,
SettingsCategory::Configuration);
init(
"rsm-snapshot-store-type",
&rsm_snapshot_store_type,
"log",
validate_rsm_snapshot_store,
"One of the following: "
"legacy (use legacy way of storing and retrieving snapshots from a log), "
"log (use Log Based snapshot store and point queries to fetch snapshots "
"instead of tailing), "
"message (Message Based for bootstrapping RSM snapshot from a Remote "
"cluster host)"
"local-store (From snapshot stored in local store)",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Core);
init("rsm-snapshot-enable-dual-writes",
&rsm_snapshot_enable_dual_writes,
"true",
nullptr, // no validation
"Decides whether snapshots should be written to log based store as "
"well(to roll back from local store to log based in case of emergency",
SERVER,
SettingsCategory::Core);
init("eventlog-snapshotting-period",
&eventlog_snapshotting_period,
"1h",
validate_positive<ssize_t>(),
"Controls time based snapshotting. New eventlog snapshot will be "
"created after this period if there are new deltas",
SERVER,
SettingsCategory::Rebuilding);
init("logsconfig-snapshotting-period",
&logsconfig_snapshotting_period,
"1h",
validate_positive<ssize_t>(),
"Controls time based snapshotting. New logsconfig snapshot will be "
"created after this period if there are new log configuration deltas",
SERVER,
SettingsCategory::Configuration);
init("get-trimpoint-interval",
&get_trimpoint_interval,
"600s",
validate_positive<ssize_t>(),
"polling interval for the sequencer getting the trim point from all "
"storage nodes",
SERVER,
SettingsCategory::Sequencer);
init("disable-trim-past-tail-check",
&disable_trim_past_tail_check,
"false",
nullptr, // no validation
"Disable check for trim past tail. Used for testing log trimming.",
CLIENT,
SettingsCategory::Testing);
init("allow-reads-on-workers",
&allow_reads_on_workers,
"true",
nullptr,
"If false, all rocksdb reads are done from storage threads. If true, "
"a cache-only reading attempt is made from worker thread first, and a "
"storage thread task is scheduled only if the cache wasn't enough to "
"fulfill the read. Disabling this can be used for: working around "
"rocksdb bugs; working around latency spikes caused by cache-only reads "
"being slow sometimes",
SERVER | EXPERIMENTAL,
SettingsCategory::Performance);
init("findkey-timeout",
&findkey_timeout,
"",
parse_optional_chrono_option,
"Findkey API call timeout. If omitted the client timeout will be used.",
CLIENT,
SettingsCategory::Core);
init("append-timeout",
&append_timeout,
"",
parse_optional_chrono_option,
"Timeout for appends. If omitted the client timeout will be used.",
CLIENT,
SettingsCategory::Core);
init("logsconfig-timeout",
&logsconfig_timeout,
"",
parse_optional_chrono_option,
"Timeout for LogsConfig API requests. "
"If omitted the client timeout will be used.",
CLIENT,
SettingsCategory::Core);
init("meta-api-timeout",
&meta_api_timeout,
"",
parse_optional_chrono_option,
"Timeout for trims/isLogEmpty/tailLSN/datasize API/etc. "
"If omitted the client timeout will be used.",
CLIENT,
SettingsCategory::Core);
init("enable-offset-map",
&enable_offset_map,
"false",
nullptr, // no validation
"Enables the server-side OffsetMap calculation feature. "
"NOTE: There is no guarantee of byte offsets result correctness if "
"feature"
"was switched on->off->on in period shorter than retention value for "
"logs.",
SERVER,
SettingsCategory::WritePath);
init("enable-hh-wheel-backed-timers",
&enable_hh_wheel_backed_timers,
"true",
nullptr, // no validation
"Enables the new version of timers which run on a different thread "
"and use HHWheelTimer backend.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Core);
init("enable-store-histograms-calculations",
&enable_store_histogram_calculations,
"false",
nullptr, // no validation
"Enables estimation of store timeouts per worker per node.",
SERVER,
SettingsCategory::Core);
init("store-histogram-min-samples-per-bucket",
&store_histogram_min_samples_per_bucket,
"30",
parse_positive<size_t>(),
"How many stores should the store histogram wait for before reporting "
"latency estimates",
SERVER,
SettingsCategory::Core);
init(
"authoritative-status-overrides",
&authoritative_status_overrides,
"",
parse_authoritative_status_overrides,
"Force the given authoritative statuses for the given shards. "
"Comma-separated list of overrides, each override of form "
"'N<node>S<shard>:<status>' or 'N<node>S<shard1>-<shard2>:<status>'. "
"E.g. 'N7:S0-15:UNDERREPLICATION,N8:S2:UNDERREPLICATION' will set status "
"of shards 0-15 of node 7 and shard 2 of node 8 to UNDERREPLICATION. "
"This is useful for recovering from situations where internal logs or "
"metadata logs are unreadable because too many nodes are unavailable or "
"lost their data. In such situation, use this setting to temporarily "
"override the state of shards that are unavailable (not running "
"logdeviced) to UNDERREPLICATION, then, optionally, write "
"SHARD_UNRECOVERABLE events for the same shards to event log.",
SERVER,
SettingsCategory::ReadPath);
init("nodeset-adjustment-period",
&nodeset_adjustment_period,
"6h",
validate_nonnegative<ssize_t>(),
"If not zero, nodeset size for each log will be periodically adjusted "
"based on logs's measured throughput. This settings controls how often "
"such adjustments will be considered. The nodeset size is chosen "
"proportionally to throughput, replication factor and backlog duration. "
"The nodeset_size log attribute acts as the minimum allowed nodeset "
"size, used for low-throughput logs and logs with infinite backlog "
"duration. If --nodeset-adjustment-period is changed from nonzero to "
"zero, all adjusted nodesets get immediately updated back to normal "
"size.",
SERVER,
SettingsCategory::Sequencer);
init("nodeset-adjustment-target-bytes-per-shard",
&nodeset_adjustment_target_bytes_per_shard,
"10G",
parse_nonnegative<size_t>(),
"When automatic nodeset size adjustment is enabled, "
"(--nodeset-adjustment-period), this setting controls the size of the "
"chosen nodesets. The size is chosen so that each log takes around this "
"much space on each shard. More precisely, "
"`nodeset_size = append_bytes_per_sec * backlog_duration * "
"replication_factor / nodeset_adjustment_target_bytes_per_shard`. "
"Appropriate value for this setting is around 0.1% - 1% of disk size.",
SERVER,
SettingsCategory::Sequencer);
init("nodeset-size-adjustment-min-factor",
&nodeset_size_adjustment_min_factor,
"2",
validate_nonnegative<double>(),
"When automatic nodeset size adjustment is enabled, we skip adjustments "
"that are smaller than this factor. E.g. if this setting is set to 2, "
"we won't bother updating nodeset if its size would increase or "
"decrease by less than a factor of 2. If set to 0, nodesets will be "
"unconditionally updated every --nodeset-adjustment-period, and will "
"also be randomized each time, as opposed to using consistent hashing.",
SERVER,
SettingsCategory::Sequencer);
init(
"nodeset-adjustment-min-window",
&nodeset_adjustment_min_window,
"1h",
validate_positive<ssize_t>(),
"When automatic nodeset size adjustment is enabled, only do the "
"adjustment if we've got append throughput information for at least this "
"period of time. More details: we choose nodeset size based on log's "
"average append throughput in a moving window of "
"size --nodeset-adjustment-period. The average is maintained by the "
"sequencer. If the sequencer was activated recently, we may not have a "
"good estimate of log's append throughput. This setting says how long "
"to wait after sequencer activation before allowing adjusting nodeset "
"size based on that sequencer's throughput.",
SERVER,
SettingsCategory::Sequencer);
init("nodeset-max-randomizations",
&nodeset_max_randomizations,
"4",
validate_positive<ssize_t>(),
"When automatic nodeset size adjustment wants to enlarge nodeset "
"to unreasonably big size N > 127, we instead set nodeset size to 127 "
"but re-randomize the nodeset min(N/127, nodeset_max_randomizations) "
"times during retention period. If you make it too big, the union of "
"historical nodesets will get big (127 * n), and findTime, isLogEmpty "
"etc may become expensive. If you set it too small, and the cluster has "
"high-throughput high-retention logs, space usage may be not very "
"balanced.",
SERVER,
SettingsCategory::Sequencer);
sequencer_boycotting.defineSettings(init);
init("require-permission-message-types",
&require_permission_message_types,
"START",
parse_message_types,
"Check permissions only for the received message of the type(s) "
"specified. Separate different types with a comma. 'all' to apply to "
"all messages. Prefix the value with '~' to include all types except "
"the given ones, e.g. '~WINDOW,RELEASE' will check permissions for "
"messages of all types except WINDOW and RELEASE.",
SERVER,
SettingsCategory::Security);
init("enable-all-read-streams-debug",
&enable_all_read_streams_debug,
"false",
nullptr, // no validation
"Enable all read streams sampling of debug info for debugging readers.",
CLIENT,
SettingsCategory::ReadPath);
init("all-read-streams-sampling-rate",
&all_read_streams_sampling_rate,
"100ms",
validate_positive<ssize_t>(),
"Rate of sampling all client read streams debug info",
CLIENT,
SettingsCategory::ReadPath);
init("all-read-streams-debug-config-path",
&all_read_streams_debug_config_path,
"",
nullptr, // no validation
"The config path for sampling all client read streams debug info",
CLIENT,
SettingsCategory::ReadPath);
init("write-streams-map-max-capacity",
&write_streams_map_max_capacity,
"1000",
validate_positive<size_t>(),
"Maximum capacity of write streams map in each epoch sequencer. Once "
"size exceeds write-streams-map-max-capacity, "
"write-streams-map-clear-size number of least-recently-used write "
"streams are evicted.",
SERVER,
SettingsCategory::Sequencer);
init("write-streams-map-clear-size",
&write_streams_map_clear_size,
"100",
validate_range<size_t>(0, write_streams_map_max_capacity),
"Clear size for write streams map in each epoch sequencer. Once size "
"exceeds write-streams-map-max-capacity, write-streams-map-clear-size "
"number of least-recently-used write streams are evicted.",
SERVER,
SettingsCategory::Sequencer);
init(
"request-queue-warning-time-limit",
&request_queue_warning_time_limit,
"20ms",
validate_positive<ssize_t>(),
"Maximum amount of time that the request can be delayed without printing "
"the warning log. After this time, the warning log will be printed",
SERVER | CLIENT,
SettingsCategory::Monitoring);
init("overload-detector-threshold",
&overload_detector_threshold,
"80",
nullptr,
"Minimum recv-q occupancy to declare socket overloaded (in percent). "
"See overload-detector-percentile.",
CLIENT,
SettingsCategory::Monitoring);
init("overload-detector-percentile",
&overload_detector_percentile,
"99",
nullptr,
"Percentile of active connections for which we compare occupancy with "
"overload-detector-threshold to do the final assessment of overload. "
"See overload-detector-freshness-factor.",
CLIENT,
SettingsCategory::Monitoring);
init("overload-detector-freshness-factor",
&overload_detector_freshness_factor,
"1.0",
nullptr,
"Multiple of recv windows that need to be read from socket since last "
"sample to consider socket for percentile analysis in OverloadDetector. "
"See overload-detector-percentile.",
CLIENT,
SettingsCategory::Monitoring);
init("overload-detector-period",
&overload_detector_period,
"60s",
validate_positive<size_t>(),
"Sampling period for OverloadDetector",
CLIENT,
SettingsCategory::Monitoring);
init("logsconfig-api-blacklist-nodes",
&logsconfig_api_blacklist_nodes,
"",
parse_recipients_list,
"Comma-separated list of indices of nodes that shouldn't be picked for "
"executing logs config API requests (e.g. makeDirectory()). Used in "
"tests.",
CLIENT,
SettingsCategory::Testing);
init("external-loglevel",
&external_loglevel,
"critical",
parse_log_level,
"One of the following: critical, error, warning, info, debug, none",
SERVER | CLIENT,
SettingsCategory::Core);
init("metadata-log-trim-interval",
&metadata_log_trim_interval,
"7200s",
validate_nonnegative<ssize_t>(),
"How often periodic trimming of metadata logs should run. Zero value "
"prevents it from running at all. ",
SERVER,
SettingsCategory::Sequencer);
init("metadata-log-trim-timeout",
&metadata_log_trim_timeout,
"120s",
validate_positive<size_t>(),
"Timeout when waiting for periodic metadata log trim request to be "
"completed",
SERVER,
SettingsCategory::Sequencer);
init("zk-vsc-max-retries",
&zk_vcs_max_retries,
"3",
validate_positive<int>(),
"Number of transient error retries for the zookeeper versioned config "
"store.",
SERVER | CLIENT | REQUIRES_RESTART,
SettingsCategory::Configuration);
init("test-same-partition-nodes",
&test_same_partition_nodes,
"",
parse_recipients_list,
"Used for isolation testing. Only nodes in this set will be addressable "
"from this node. An empty list disables this error injection.",
SERVER,
SettingsCategory::Testing);
}
}} // namespace facebook::logdevice
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "arrow/array.h"
#include "arrow/array/builder_base.h"
#include "arrow/csv/column_decoder.h"
#include "arrow/csv/converter.h"
#include "arrow/csv/inference_internal.h"
#include "arrow/csv/options.h"
#include "arrow/csv/parser.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/future.h"
#include "arrow/util/logging.h"
#include "arrow/util/task_group.h"
namespace arrow {
namespace csv {
using internal::TaskGroup;
// Common base for all column decoders: holds the memory pool, the column
// index, and a helper to prefix conversion errors with the column number.
class ConcreteColumnDecoder : public ColumnDecoder {
 public:
  explicit ConcreteColumnDecoder(MemoryPool* pool, int32_t col_index = -1)
      : ColumnDecoder(), pool_(pool), col_index_(col_index) {}

 protected:
  // The Arrow type this decoder produces (for inferring decoders, only
  // meaningful once a converter exists).
  // XXX useful?
  virtual std::shared_ptr<DataType> type() const = 0;

  // Returns `st` unchanged if OK; otherwise rewraps it with a message
  // identifying which CSV column the conversion failed in.
  Status WrapConversionError(const Status& st) {
    if (st.ok()) {
      return st;
    } else {
      std::stringstream ss;
      ss << "In CSV column #" << col_index_ << ": " << st.message();
      return st.WithMessage(ss.str());
    }
  }

  MemoryPool* pool_;
  int32_t col_index_;
  // Fix: was left uninitialized (indeterminate raw pointer); default to
  // nullptr so any read before assignment is deterministic instead of UB.
  internal::Executor* executor_ = nullptr;
};
//////////////////////////////////////////////////////////////////////////
// Null column decoder implementation (for a column not in the CSV file)
// Decoder for a column that is absent from the CSV file: every decoded
// block becomes an all-null array of the requested type.
class NullColumnDecoder : public ConcreteColumnDecoder {
 public:
  explicit NullColumnDecoder(const std::shared_ptr<DataType>& type, MemoryPool* pool)
      : ConcreteColumnDecoder(pool), type_(type) {}
  Future<std::shared_ptr<Array>> Decode(
      const std::shared_ptr<BlockParser>& parser) override;
 protected:
  std::shared_ptr<DataType> type() const override { return type_; }
  // Output type for the produced null arrays.
  std::shared_ptr<DataType> type_;
};
// Decode a parsed block into an all-null array of the configured type;
// the row count comes directly from the parser.
Future<std::shared_ptr<Array>> NullColumnDecoder::Decode(
    const std::shared_ptr<BlockParser>& parser) {
  const auto num_rows = parser->num_rows();
  DCHECK_GE(num_rows, 0);
  return MakeArrayOfNull(type_, num_rows, pool_);
}
//////////////////////////////////////////////////////////////////////////
// Pre-typed column decoder implementation
// Decoder for a column whose type is known upfront: every block is
// converted with a single converter built once in Init().
class TypedColumnDecoder : public ConcreteColumnDecoder {
 public:
  TypedColumnDecoder(const std::shared_ptr<DataType>& type, int32_t col_index,
                     const ConvertOptions& options, MemoryPool* pool)
      : ConcreteColumnDecoder(pool, col_index), type_(type), options_(options) {}
  // Builds the converter; must be called (and succeed) before Decode().
  Status Init();
  Future<std::shared_ptr<Array>> Decode(
      const std::shared_ptr<BlockParser>& parser) override;
 protected:
  std::shared_ptr<DataType> type() const override { return type_; }
  // Target Arrow type for this column.
  std::shared_ptr<DataType> type_;
  // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
  // thousands of columns), so avoid copying it in each TypedColumnDecoder.
  const ConvertOptions& options_;
  // Built in Init(); never changes afterwards.
  std::shared_ptr<Converter> converter_;
};
// Creates the converter for the configured type; fails if no converter
// exists for this type/options combination.
Status TypedColumnDecoder::Init() {
  ARROW_ASSIGN_OR_RAISE(converter_, Converter::Make(type_, options_, pool_));
  return Status::OK();
}
// Convert one parsed block synchronously and wrap the result in an
// already-finished future (conversion happens on the calling thread).
Future<std::shared_ptr<Array>> TypedColumnDecoder::Decode(
    const std::shared_ptr<BlockParser>& parser) {
  DCHECK_NE(converter_, nullptr);
  auto conversion_result = converter_->Convert(*parser, col_index_);
  return Future<std::shared_ptr<Array>>::MakeFinished(std::move(conversion_result));
}
//////////////////////////////////////////////////////////////////////////
// Type-inferring column builder implementation
// Decoder that infers the column type from the first decoded block, then
// reuses the resulting (frozen) converter for all subsequent blocks.
class InferringColumnDecoder : public ConcreteColumnDecoder {
 public:
  InferringColumnDecoder(int32_t col_index, const ConvertOptions& options,
                         MemoryPool* pool)
      : ConcreteColumnDecoder(pool, col_index),
        options_(options),
        infer_status_(options),
        type_frozen_(false) {
    first_inference_run_ = Future<>::Make();
    first_inferrer_ = 0;
  }
  // Builds the initial converter; must be called before Decode().
  Status Init();
  Future<std::shared_ptr<Array>> Decode(
      const std::shared_ptr<BlockParser>& parser) override;
 protected:
  std::shared_ptr<DataType> type() const override {
    DCHECK_NE(converter_, nullptr);
    return converter_->type();
  }
  // Rebuilds converter_ from the current inference state.
  Status UpdateType();
  // Converts the first block, loosening the inferred type until it fits.
  Result<std::shared_ptr<Array>> RunInference(const std::shared_ptr<BlockParser>& parser);
  // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
  // thousands of columns), so avoid copying it in each InferringColumnDecoder.
  const ConvertOptions& options_;
  // Current inference status
  InferStatus infer_status_;
  // True once the first block has fixed the column type.
  bool type_frozen_;
  // 0 until the first Decode() call claims the inference run (fetch_or).
  std::atomic<int> first_inferrer_;
  // Marked finished once inference on the first block completes.
  Future<> first_inference_run_;
  std::shared_ptr<Converter> converter_;
};
// Builds the initial converter from the starting inference state.
Status InferringColumnDecoder::Init() { return UpdateType(); }
// Rebuilds converter_ for the currently-inferred type. Called at Init()
// and again each time the inferred type is loosened.
Status InferringColumnDecoder::UpdateType() {
  return infer_status_.MakeConverter(pool_).Value(&converter_);
}
// Convert the first block, loosening the inferred type on each temporary
// conversion failure until conversion succeeds or the type cannot be
// loosened further. On return (success or definitive failure) the column
// type is frozen. Only invoked for the first decoded block.
Result<std::shared_ptr<Array>> InferringColumnDecoder::RunInference(
    const std::shared_ptr<BlockParser>& parser) {
  while (true) {
    // (no one else should be updating converter_ concurrently)
    auto maybe_array = converter_->Convert(*parser, col_index_);
    if (maybe_array.ok() || !infer_status_.can_loosen_type()) {
      // Conversion succeeded, or failed definitively
      DCHECK(!type_frozen_);
      type_frozen_ = true;
      return maybe_array;
    }
    // Conversion failed temporarily, try another type
    infer_status_.LoosenType(maybe_array.status());
    auto update_status = UpdateType();
    if (!update_status.ok()) {
      return update_status;
    }
  }
}
// Decode one parsed block. The first caller (claimed atomically through
// first_inferrer_) runs type inference on its block and freezes the column
// type; every other caller waits on first_inference_run_ and then converts
// with the frozen converter.
Future<std::shared_ptr<Array>> InferringColumnDecoder::Decode(
    const std::shared_ptr<BlockParser>& parser) {
  bool already_taken = first_inferrer_.fetch_or(1);
  // First block: run inference
  if (!already_taken) {
    auto maybe_array = RunInference(parser);
    first_inference_run_.MarkFinished();
    return Future<std::shared_ptr<Array>>::MakeFinished(std::move(maybe_array));
  }
  // Non-first block: wait for inference to finish on first block now,
  // without blocking a TaskGroup thread.
  return first_inference_run_.Then([this, parser] {
    DCHECK(type_frozen_);
    // Fix: convert exactly once. The previous code converted the block,
    // discarded that result, and converted it a second time.
    return converter_->Convert(*parser, col_index_);
  });
}
//////////////////////////////////////////////////////////////////////////
// Factory functions
// Factory for a type-inferring decoder: the column type is determined
// from the first decoded block.
Result<std::shared_ptr<ColumnDecoder>> ColumnDecoder::Make(
    MemoryPool* pool, int32_t col_index, const ConvertOptions& options) {
  auto decoder = std::make_shared<InferringColumnDecoder>(col_index, options, pool);
  RETURN_NOT_OK(decoder->Init());
  return decoder;
}
// Factory for a decoder whose column type is specified upfront.
Result<std::shared_ptr<ColumnDecoder>> ColumnDecoder::Make(
    MemoryPool* pool, std::shared_ptr<DataType> type, int32_t col_index,
    const ConvertOptions& options) {
  auto decoder =
      std::make_shared<TypedColumnDecoder>(std::move(type), col_index, options, pool);
  RETURN_NOT_OK(decoder->Init());
  return decoder;
}
// Factory for a decoder representing a column missing from the CSV file;
// it yields all-null arrays of `type`.
Result<std::shared_ptr<ColumnDecoder>> ColumnDecoder::MakeNull(
    MemoryPool* pool, std::shared_ptr<DataType> type) {
  return std::make_shared<NullColumnDecoder>(std::move(type), pool);
}
} // namespace csv
} // namespace arrow
|
#include <iostream>
#include <cstring>
using namespace std;
// Parent table: p[v] is the parent of node v; 0 marks the root
// (entries are reset per test case, and static storage zero-initializes).
int p[10001];

// Returns the depth of node x: the number of edges on the path from x
// up to the root (the root itself has depth 0).
inline int getLen(int x) {
    int depth = 0;
    for (int node = x; p[node] != 0; node = p[node]) {
        ++depth;
    }
    return depth;
}
// Answers LCA queries on rooted trees. Per test case: reads n, then n-1
// edges (u v meaning u is the parent of v), then a query pair; prints the
// lowest common ancestor found by equalizing depths and climbing in
// lockstep.
int main() {
    int cases;
    cin >> cases;
    for (; cases > 0; --cases) {
        int n;
        cin >> n;
        memset(p, 0, sizeof(p));
        for (int edge = 1; edge < n; ++edge) {
            int parent, child;
            cin >> parent >> child;
            p[child] = parent;
        }
        int a, b;
        cin >> a >> b;
        int depthA = getLen(a);
        int depthB = getLen(b);
        // Lift the deeper node until both sit at the same depth.
        for (; depthA > depthB; --depthA) {
            a = p[a];
        }
        for (; depthB > depthA; --depthB) {
            b = p[b];
        }
        // Climb together until the two paths meet.
        while (a != b) {
            a = p[a];
            b = p[b];
        }
        cout << a << endl;
    }
    return 0;
}
|
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include "StdAfx.h"
#include "EditorFrameworkApplication.h"
#include <time.h>
#include <AzCore/Memory/OSAllocator.h>
#include <AzCore/IO/Streamer.h>
#include <AzCore/IO/SystemFile.h>
#include <AzCore/IO/GenericStreams.h>
#include <AzCore/Serialization/ObjectStream.h>
#include <AzCore/Debug/Trace.h>
#include <AzCore/Math/Sfmt.h>
#include <AzCore/std/time.h>
#include <AzCore/Component/ComponentApplication.h>
#include <AzCore/Memory/MemoryComponent.h>
#include <AzCore/Jobs/JobManagerComponent.h>
#include <AzCore/Serialization/ObjectStreamComponent.h>
#include <AzCore/Asset/AssetManagerComponent.h>
#include <AzCore/Script/ScriptSystemComponent.h>
#include <AzCore/IO/StreamerComponent.h>
#include <AzCore/IO/StreamerRequest.h>
#include <AzFramework/CommandLine/CommandLine.h>
#include <AzToolsFramework/UI/LegacyFramework/UIFramework.hxx>
#include <AzToolsFramework/UI/LegacyFramework/CustomMenus/CustomMenusAPI.h>
#include <AzFramework/Asset/AssetCatalogComponent.h>
#include <AzFramework/Components/BootstrapReaderComponent.h>
#include <AzFramework/StringFunc/StringFunc.h>
#include <AzFramework/TargetManagement/TargetManagementComponent.h>
#include <AzFramework/Driller/RemoteDrillerInterface.h>
#include <AzCore/Driller/Driller.h>
#include <AzCore/Debug/ProfilerDriller.h>
#ifdef AZ_PLATFORM_WINDOWS
#include "shlobj.h"
#endif
#if AZ_TRAIT_OS_PLATFORM_APPLE
# include <mach-o/dyld.h>
#endif
AZ_PUSH_DISABLE_WARNING(4251, "-Wunknown-warning-option") // 'QFileInfo::d_ptr': class 'QSharedDataPointer<QFileInfoPrivate>' needs to have dll-interface to be used by clients of class 'QFileInfo'
#include <QFileInfo>
AZ_POP_DISABLE_OVERRIDE_WARNING
#include <QSharedMemory>
#include <QStandardPaths>
namespace LegacyFramework
{
// Build a descriptor with all features enabled by default; `name` (if given)
// becomes the application name, truncated to the fixed _MAX_PATH buffer.
ApplicationDesc::ApplicationDesc(const char* name, int argc, char** argv)
    : m_applicationModule(NULL)
    , m_enableGridmate(true)
    , m_enablePerforce(true)
    , m_enableGUI(true)
    , m_enableProjectManager(true)
    , m_shouldRunAssetProcessor(true)
    , m_saveUserSettings(true)
    , m_argc(argc)
    , m_argv(argv)
{
    // Empty string until a name is provided.
    m_applicationName[0] = 0;
    if (name)
    {
        azstrcpy(m_applicationName, _MAX_PATH, name);
    }
}
// Copy construction delegates to the assignment operator so the member-copy
// logic lives in exactly one place.
ApplicationDesc::ApplicationDesc(const ApplicationDesc& other)
{
    *this = other;
}
// Member-wise copy; the fixed-size name buffer is copied with azstrcpy.
// Self-assignment is a no-op.
ApplicationDesc& ApplicationDesc::operator=(const ApplicationDesc& other)
{
    if (this != &other)
    {
        m_applicationModule = other.m_applicationModule;
        m_enableGUI = other.m_enableGUI;
        m_enableGridmate = other.m_enableGridmate;
        m_enablePerforce = other.m_enablePerforce;
        azstrcpy(m_applicationName, _MAX_PATH, other.m_applicationName);
        m_enableProjectManager = other.m_enableProjectManager;
        m_shouldRunAssetProcessor = other.m_shouldRunAssetProcessor;
        m_saveUserSettings = other.m_saveUserSettings;
        m_argc = other.m_argc;
        m_argv = other.m_argv;
    }
    return *this;
}
// Default-construct with empty state; the real setup happens in Run().
Application::Application()
{
    m_isMaster = true;
    m_desiredExitCode = 0;
    m_abortRequested = false;
    m_applicationEntity = NULL;
    m_ptrSystemEntity = NULL;
    // Fixed-size path buffers start as empty C strings.
    m_applicationModule[0] = 0;
    m_appRoot[0] = 0;
}
// Module handle supplied by the host in the ApplicationDesc (may be NULL).
HMODULE Application::GetMainModule()
{
    return m_desc.m_applicationModule;
}
// Application name as given to the ApplicationDesc constructor.
const char* Application::GetApplicationName()
{
    return m_desc.m_applicationName;
}
// Full path of the running module, filled in during Run().
const char* Application::GetApplicationModule()
{
    return m_applicationModule;
}
// Directory containing the executable (alias for GetExecutableFolder()).
const char* Application::GetApplicationDirectory()
{
    return GetExecutableFolder();
}
#ifdef AZ_PLATFORM_WINDOWS
// Console control handler (Ctrl+C / Ctrl+Break): request a graceful abort via
// the framework bus instead of letting Windows terminate the process.
BOOL CTRL_BREAK_HandlerRoutine(DWORD /*dwCtrlType*/)
{
    EBUS_EVENT(FrameworkApplicationMessages::Bus, SetAbortRequested);
    return TRUE;
}
#endif
// Per-user writable data directory, as reported by Qt (AppDataLocation).
AZStd::string Application::GetApplicationGlobalStoragePath()
{
    return QStandardPaths::writableLocation(QStandardPaths::AppDataLocation).toUtf8().data();
}
// Teardown happens explicitly at the end of Run(); nothing to do here.
Application::~Application()
{
}
// Application root folder: the exe folder, unless overridden by --app-root.
const char* Application::GetAppRoot()
{
    return m_appRoot;
}
// Main entry point: bootstraps allocators, detects whether this process is
// the "master" instance (via a named shared-memory segment), creates the
// system/application entities, dispatches the Run (or RunAsAnotherInstance)
// event, then tears everything down. Returns the desired process exit code.
int Application::Run(const ApplicationDesc& desc)
{
    if (!AZ::AllocatorInstance<AZ::OSAllocator>::IsReady())
    {
        AZ::AllocatorInstance<AZ::OSAllocator>::Create();
    }
    QString appNameConcat = QStringLiteral("%1_GLOBALMUTEX").arg(desc.m_applicationName);
    {
        // If the application crashed before, it may have left behind shared memory
        // We can go ahead and do a quick attach/detach here to clean it up
        QSharedMemory fix(appNameConcat);
        fix.attach();
    }
    // First process to create the segment becomes the master instance.
    // NOTE(review): `shared` is only deleted via the aboutToQuit connection,
    // so it leaks when no QCoreApplication exists — confirm intended.
    QSharedMemory* shared = new QSharedMemory(appNameConcat);
    if (qApp)
    {
        QObject::connect(qApp, &QCoreApplication::aboutToQuit, qApp, [shared]() { delete shared; });
    }
    m_isMaster = shared->create(1);
    m_desc = desc;
    // before we connect to the app bus, find out all the various stuff we need to cache:
#ifdef AZ_PLATFORM_WINDOWS
    char szDir[_MAX_DIR];
    char szDrv[_MAX_DRIVE];
#endif
    char configFilePath[_MAX_PATH];
    CalculateExecutablePath();
    // Config file lives next to the executable: <exe dir>/<app name>.xml
#ifdef AZ_PLATFORM_WINDOWS
    GetModuleFileNameA(desc.m_applicationModule, m_applicationModule, _MAX_PATH);
    ::_splitpath_s(m_applicationModule, szDrv, _MAX_DRIVE, szDir, _MAX_DIR, NULL, 0, NULL, 0);
    ::_makepath_s(configFilePath, _MAX_PATH, szDrv, szDir, desc.m_applicationName, ".xml");
#else
    uint32_t bufSize = AZ_ARRAY_SIZE(m_applicationModule);
    _NSGetExecutablePath(m_applicationModule, &bufSize);
    QString path = QStringLiteral("%1/%2.xml").arg(m_exeDirectory, desc.m_applicationName);
    qstrcpy(configFilePath, path.toUtf8().data());
#endif
    // Enable next line to load from the last state
    // if left commented, we will start from scratch
    // THE FOLLOWING LINE CREATES THE MEMORY MANAGER SUBSYSTEM, do not allocate memory before this call!
    m_ptrSystemEntity = Create(configFilePath);
    AZ::Debug::Trace::HandleExceptions(false);
    FrameworkApplicationMessages::Handler::BusConnect();
    CoreMessageBus::Handler::BusConnect();
#ifdef AZ_PLATFORM_WINDOWS
    // if we're in console mode, listen for CTRL+C
    ::SetConsoleCtrlHandler(CTRL_BREAK_HandlerRoutine, true);
#endif
    // Initialize the app root to the exe folder
    azstrncpy(m_appRoot, AZ_MAX_PATH_LEN, m_exeDirectory, AZ_MAX_PATH_LEN);
    m_ptrCommandLineParser = aznew AzFramework::CommandLine();
    m_ptrCommandLineParser->Parse(m_desc.m_argc, m_desc.m_argv);
    // --app-root overrides the default (exe-folder) application root.
    if (m_ptrCommandLineParser->HasSwitch("app-root"))
    {
        auto appRootOverride = m_ptrCommandLineParser->GetSwitchValue("app-root", 0);
        if (!appRootOverride.empty())
        {
            azstrncpy(m_appRoot, AZ_MAX_PATH_LEN, appRootOverride.c_str(), AZ_MAX_PATH_LEN);
        }
    }
    // If we don't have one create a serialize context
    if (GetSerializeContext() == nullptr)
    {
        CreateReflectionManager();
    }
    CreateSystemComponents();
    m_ptrSystemEntity->Init();
    m_ptrSystemEntity->Activate();
    // If we aren't the master, RunAsAnotherInstance unless we are being forcestarted
    if (!m_isMaster && !m_ptrCommandLineParser->HasSwitch("forcestart"))
    {
        // Required for the application component to handle RunAsAnotherInstance
        CreateApplicationComponent();
        // if we're not the master instance, what exactly do we do? This is a generic framework - not a specific app
        // and what we do might depend on implementation specifics for each app.
        EBUS_EVENT(LegacyFramework::CoreMessageBus, RunAsAnotherInstance);
    }
    else
    {
        //if (!RequiresGameProject())
        {
            // we won't be getting the project set message
            CreateApplicationComponent();
        }
        EBUS_EVENT(LegacyFramework::CoreMessageBus, Run);
        // as a precaution here, we save our app and system entities BEFORE we destroy anything
        // so that we have the highest chance of storing the user's precious application state and preferences
        // even if someone has done something bad on their destructor or shutdown
        SaveApplicationEntity();
        if (m_ptrSystemEntity)
        {
            // Write the current state of the system components into cfg file.
            // unless its not writeable!
            QFileInfo fileAttribs(configFilePath);
            bool writeIt = true;
            if (fileAttribs.exists())
            {
                // file is found.
                if (fileAttribs.isHidden() || !fileAttribs.isWritable())
                {
                    writeIt = false;
                }
            }
            if (writeIt)
            {
                WriteApplicationDescriptor(configFilePath);
            }
        }
    }
    // Tear down the application entity before the system entity goes away.
    if (m_applicationEntity)
    {
        m_applicationEntity->Deactivate();
        delete m_applicationEntity;
        m_applicationEntity = NULL;
    }
    // Drain any events queued during shutdown.
    AZ::SystemTickBus::ExecuteQueuedEvents();
    AZ::TickBus::ExecuteQueuedEvents();
#ifdef AZ_PLATFORM_WINDOWS
    // clean up!
    ::SetConsoleCtrlHandler(CTRL_BREAK_HandlerRoutine, false);
#endif
    delete m_ptrCommandLineParser;
    m_ptrCommandLineParser = NULL;
    CoreMessageBus::Handler::BusDisconnect();
    FrameworkApplicationMessages::Handler::BusDisconnect();
    m_ptrSystemEntity->Deactivate();
    Destroy();
    return GetDesiredExitCode();
}
// Persist the application entity to disk, then deactivate and destroy it.
// Safe to call when no application entity exists.
void Application::TeardownApplicationComponent()
{
    SaveApplicationEntity();
    if (!m_applicationEntity)
    {
        return;
    }
    m_applicationEntity->Deactivate();
    delete m_applicationEntity;
    m_applicationEntity = NULL;
}
// Parsed command line; valid between the start and end of Run() (NULL otherwise).
const AzFramework::CommandLine* Application::GetCommandLineParser()
{
    return m_ptrCommandLineParser;
}
// returns TRUE if the component already existed, FALSE if it had to create one.
// The component is added to the application entity when one exists, otherwise
// to the system entity; creation is refused (with a warning) once the target
// entity has been activated.
bool Application::EnsureComponentCreated(AZ::Uuid componentCRC)
{
    if (m_applicationEntity)
    {
        // if the component already exists on the system entity, this is an error.
        // NOTE(review): this calls comp->RTTI_TypeName() while the removal path
        // uses RTTI_GetTypeName() — verify which accessor is correct here.
        if (auto comp = m_ptrSystemEntity->FindComponent(componentCRC))
        {
            AZ_Warning("EditorFramework", 0, "Attempt to add a component that already exists on the system entity: %s\n", comp->RTTI_TypeName());
            return true;
        }
        if (!m_applicationEntity->FindComponent(componentCRC))
        {
            if (m_applicationEntity->GetState() != AZ::Entity::ES_CONSTRUCTED)
            {
                AZ_Warning("EditorFramework", 0, "Attempt to add a component 0x%08x to the application entity when the application entity has already been activated\n", componentCRC);
                return false;
            }
            m_applicationEntity->CreateComponent(componentCRC);
        }
        return true;
    }
    // No application entity yet: operate on the system entity instead.
    if (m_ptrSystemEntity->GetState() != AZ::Entity::ES_CONSTRUCTED)
    {
        AZ_Warning("EditorFramework", 0, "Attempt to add a component 0x%08x to the system entity when the system entity has already been activated\n", componentCRC);
        return false;
    }
    if (!m_ptrSystemEntity->FindComponent(componentCRC))
    {
        m_ptrSystemEntity->CreateComponent(componentCRC);
        return false;
    }
    return true;
}
// returns TRUE if the component existed, FALSE if the component did not exist.
// Removal is attempted on the application entity first, then the system
// entity; it is refused (with a warning) once the owning entity is activated.
bool Application::EnsureComponentRemoved(AZ::Uuid componentCRC)
{
    if (m_applicationEntity)
    {
        if (auto comp = m_applicationEntity->FindComponent(componentCRC))
        {
            if (m_applicationEntity->GetState() != AZ::Entity::ES_CONSTRUCTED)
            {
                AZ_Warning("EditorFramework", 0, "Attempt to remove a component %s (0x%08x) from the application entity when the application entity has already been activated\n", comp->RTTI_GetTypeName(), componentCRC);
                return true;
            }
            // Entity gives up ownership on RemoveComponent, so delete here.
            m_applicationEntity->RemoveComponent(comp);
            delete comp;
            return true;
        }
        return false;
    }
    if (m_ptrSystemEntity)
    {
        if (auto comp = m_ptrSystemEntity->FindComponent(componentCRC))
        {
            if (m_ptrSystemEntity->GetState() != AZ::Entity::ES_CONSTRUCTED)
            {
                AZ_Warning("EditorFramework", 0, "Attempt to remove a component %s (0x%08x) from the system entity when the entity entity has already been activated\n", comp->RTTI_GetTypeName(), componentCRC);
                return true;
            }
            m_ptrSystemEntity->RemoveComponent(comp);
            delete comp;
            return true;
        }
    }
    return false;
}
// Create (or load from <exe dir>/<app name>_app.xml) the application entity,
// populate it with application-level components, and activate it.
// Idempotent: returns immediately if the entity already exists.
void Application::CreateApplicationComponent()
{
    // create the application entity:
    if (m_applicationEntity)
    {
        return;
    }
    AZ_TracePrintf("EditorFramework", "Application::OnProjectSet -- Creating Application Entity.\n");
    AZ_Assert(m_applicationEntity == nullptr, "Attempt to set a project while the project is still set.");
    AZStd::string applicationFilePath;
    AzFramework::StringFunc::Path::Join(GetExecutableFolder(), appName(), applicationFilePath);
    applicationFilePath.append("_app.xml");
    AZ_Assert(applicationFilePath.size() <= _MAX_PATH, "Application path longer than expected");
    qstrcpy(m_applicationFilePath, applicationFilePath.c_str());
    // load all application entities, if present:
    AZ::IO::SystemFile cfg;
    if (cfg.Open(m_applicationFilePath, AZ::IO::SystemFile::SF_OPEN_READ_ONLY))
    {
        AZ::IO::SystemFileStream stream(&cfg, false);
        stream.Seek(0, AZ::IO::GenericStream::ST_SEEK_BEGIN);
        // The callback keeps the last AZ::Entity found in the stream.
        AZ::ObjectStream::LoadBlocking(&stream, *GetSerializeContext(),
            [this](void* classPtr, const AZ::Uuid& classId, const AZ::SerializeContext* sc)
            {
                AZ::Entity* entity = sc->Cast<AZ::Entity*>(classPtr, classId);
                if (entity)
                {
                    m_applicationEntity = entity;
                }
            });
        cfg.Close();
    }
    // Fall back to a fresh entity when nothing was loaded from disk.
    if (!m_applicationEntity)
    {
        m_applicationEntity = aznew AZ::Entity("WoodpeckerApplicationEntity");
    }
    CreateApplicationComponents();
    m_applicationEntity->InvalidateDependencies();
    m_applicationEntity->Init();
    m_applicationEntity->Activate();
    OnApplicationEntityActivated();
}
// Hook for derived applications; called after the application entity activates.
void Application::OnApplicationEntityActivated()
{
}
// The app config is writable when the file does not exist yet, or when the
// existing file is writable.
bool Application::IsAppConfigWritable()
{
    const bool fileExists = AZ::IO::SystemFile::Exists(m_applicationFilePath);
    return !fileExists || AZ::IO::SystemFile::IsWritable(m_applicationFilePath);
}
// Project selection notification: the project path itself is unused here;
// we only ensure the application entity exists.
void Application::OnProjectSet(const char* projectPath)
{
    (void)projectPath;
    CreateApplicationComponent();
}
// Intentionally a no-op in this framework; derived applications may override.
void Application::RunAssetProcessor()
{
    return;
}
// Serialize the application entity to <exe dir>/<app name>_app.xml.
// Writes to a .tmp file first and renames over the target so a failed write
// never corrupts an existing config. Silently skips hidden/read-only files.
void Application::SaveApplicationEntity()
{
    if (!m_applicationEntity)
    {
        return;
    }
    // write the current application entity:
    AZStd::string applicationFilePath;
    AzFramework::StringFunc::Path::Join(GetExecutableFolder(), appName(), applicationFilePath);
    applicationFilePath += "_app.xml";
    QFileInfo fileAttribs(applicationFilePath.c_str());
    bool writeIt = true;
    if (fileAttribs.exists())
    {
        // file is found.
        if (fileAttribs.isHidden() || !fileAttribs.isWritable())
        {
            writeIt = false;
        }
    }
    if (writeIt)
    {
        using namespace AZ;
        AZStd::string tmpFileName(applicationFilePath);
        tmpFileName += ".tmp";
        IO::FileIOStream stream(tmpFileName.c_str(), IO::OpenMode::ModeWrite);
        if (!stream.IsOpen())
        {
            return;
        }
        AZ::SerializeContext* serializeContext = GetSerializeContext();
        AZ_Assert(serializeContext, "ComponentApplication::m_serializeContext is NULL!");
        ObjectStream* objStream = ObjectStream::Create(&stream, *serializeContext, ObjectStream::ST_XML);
        bool entityWriteOk = objStream->WriteClass(m_applicationEntity);
        AZ_Warning("ComponentApplication", entityWriteOk, "Failed to write application entity to application file %s!", applicationFilePath.c_str());
        bool flushOk = objStream->Finalize();
        AZ_Warning("ComponentApplication", flushOk, "Failed finalizing application file %s!", applicationFilePath.c_str());
        if (entityWriteOk && flushOk)
        {
            // Atomically replace the old config with the freshly written one.
            if (IO::SystemFile::Rename(tmpFileName.c_str(), applicationFilePath.c_str(), true))
            {
                return;
            }
            AZ_Warning("ComponentApplication", false, "Failed to rename %s to %s.", tmpFileName.c_str(), applicationFilePath.c_str());
        }
    }
}
// Attach the default application-level components (target management,
// driller networking, bootstrap reader) to the application entity.
void Application::CreateApplicationComponents()
{
    EnsureComponentCreated(AzFramework::TargetManagementComponent::RTTI_Type());
    EnsureComponentCreated(AzFramework::DrillerNetworkConsoleComponent::RTTI_Type());
    EnsureComponentCreated(AzFramework::DrillerNetworkAgentComponent::RTTI_Type());
    EnsureComponentCreated(AzFramework::BootstrapReaderComponent::RTTI_Type());
}
// Attach the core system-level components (memory, jobs, streaming,
// object-stream serialization) plus the UI framework component.
void Application::CreateSystemComponents()
{
    EnsureComponentCreated(AZ::MemoryComponent::RTTI_Type());
    EnsureComponentCreated(AZ::JobManagerComponent::RTTI_Type());
    EnsureComponentCreated(AZ::StreamerComponent::RTTI_Type());
    EnsureComponentCreated(AZ::ObjectStreamComponent::RTTI_Type());
    AZ_Assert(!m_desc.m_enableProjectManager || m_desc.m_enableGUI, "Enabling the project manager in the application settings requires enabling the GUI as well.");
    // The UI Framework component is created for both GUI and non-GUI
    // applications. (The previous if/else on m_enableGUI had byte-identical
    // branches, so the branch was dead code and has been collapsed.)
    EnsureComponentCreated(AzToolsFramework::Framework::RTTI_Type());
}
// Register serialization reflection; as an editor-side application we also
// ensure an edit context exists before reflecting.
void Application::ReflectSerialize()
{
    AZ::SerializeContext* serializeContext = GetSerializeContext();
    if (serializeContext && serializeContext->GetEditContext() == nullptr)
    {
        serializeContext->CreateEditContext(); // we are the editor make a serialize context.
    }
    AZ::ComponentApplication::ReflectSerialize();
    ReflectSerializeDeprecated();
}
//=========================================================================
// Create
// [6/18/2012]
//=========================================================================
// Create the system entity from the given descriptor file when it exists;
// otherwise fall back to a default-constructed descriptor.
AZ::Entity*
Application::Create(const char* systemEntityFileName, const StartupParameters& startupParameters)
{
    if ((systemEntityFileName) && AZ::IO::SystemFile::Exists(systemEntityFileName))
    {
        // check if filename is valid
        return ComponentApplication::Create(systemEntityFileName, startupParameters);
    }
    else
    {
        AZ::ComponentApplication::Descriptor appDesc;
        AZ::Entity* success = ComponentApplication::Create(appDesc, startupParameters);
        return success;
    }
}
// Forward teardown to the base ComponentApplication.
void Application::Destroy()
{
    AZ::ComponentApplication::Destroy();
}
//=========================================================================
// RegisterCoreComponents
// [6/18/2012]
//=========================================================================
// Register descriptors for every component type this framework can create,
// on top of the base ComponentApplication set.
void Application::RegisterCoreComponents()
{
    ComponentApplication::RegisterCoreComponents();
    RegisterComponentDescriptor(AzFramework::BootstrapReaderComponent::CreateDescriptor());
    RegisterComponentDescriptor(AzFramework::TargetManagementComponent::CreateDescriptor());
    RegisterComponentDescriptor(AzFramework::DrillerNetworkConsoleComponent::CreateDescriptor());
    RegisterComponentDescriptor(AzFramework::DrillerNetworkAgentComponent::CreateDescriptor());
    RegisterComponentDescriptor(AzToolsFramework::Framework::CreateDescriptor());
}
//=========================================================================
// ReflectSerializeDeprecated
// [11/12/2012]
//=========================================================================
void Application::ReflectSerializeDeprecated()
{
    // A "convenient" function to place all high level (no other place to put) deprecated reflections.
    // IMPORTANT: Please to time stamp each deprecation so we can remove after some time (we know all
    // old data had been converted)
    GetSerializeContext()->ClassDeprecate("AssetDBComponent", "{397BAF44-3504-4a3e-BF96-C60F377313C7}"); // 5/21/2014
}
}
|
/*
Copyright 2014 Kristina Simpson <sweet.kristas@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include <cstdint>
#include <string>
#include <iomanip>
#include <sstream>
#include <vector>
// Lightweight string builder around std::ostringstream.
// Typical use: std::string s = formatter() << "x = " << 42;
struct formatter
{
    /// Stream any value into the internal buffer; returns *this for chaining.
    template<typename T>
    formatter& operator<<(const T& o) {
        stream << o;
        return *this;
    }
    /// Current buffer contents as a std::string.
    const std::string str() {
        return stream.str();
    }
    /// Current buffer contents as a C string.
    /// BUGFIX: the previous implementation returned str().c_str(), a pointer
    /// into a temporary std::string destroyed before the caller could read it
    /// (dangling pointer / UB). The string is now cached in a member so the
    /// pointer stays valid until the next c_str() call or destruction.
    const char* c_str() {
        cached = stream.str();
        return cached.c_str();
    }
    /// Implicit conversion so a formatter can be assigned to a std::string.
    operator std::string() {
        return stream.str();
    }
    std::ostringstream stream;
    std::string cached;  // backing storage for c_str()
};
// Byte-vector specialization: printable ASCII (32..127) is emitted verbatim,
// anything else is rendered as a bracketed two-digit hex escape, e.g. "[0a]".
template<> inline formatter& formatter::operator<<(const std::vector<uint8_t>& o) {
    for (std::vector<uint8_t>::const_iterator it = o.begin(); it != o.end(); ++it) {
        const uint8_t byte = *it;
        if (byte >= 32 && byte <= 127) {
            stream << char(byte);
        } else {
            stream << "[" << std::setw(2) << std::setfill('0') << std::hex
                   << int(byte) << std::dec << "]";
        }
    }
    return *this;
}
|
/**
* @brief RTF files into HTML сonverter
* @package rtf
* @file table.cpp
* @author dmryutov (dmryutov@gmail.com)
* @copyright lvu (https://github.com/lvu/rtf2html)
* @date 31.07.2016 -- 18.10.2017
*/
#include <algorithm>
#include <set>
#include "../../tools.hpp"
#include "table.hpp"
namespace rtf {
// TableCellDef
// True when this cell definition's right edge sits at the given position.
bool TableCellDef::rightEquals(int right) {
    return m_right == right;
}
// Table public:
// mergingMode controls how merged cells are emitted in make():
// 0 = colspan/rowspan attributes, otherwise duplicate/empty <td> cells.
Table::Table(char mergingMode)
	: m_mergingMode(mergingMode) {}
// Build an HTML <table> tree from the collected rows/cell definitions and
// append it under `node`.
// Pass 1 drops empty rows and gathers every distinct column boundary.
// Pass 2 computes each cell's left edge and rowspan (for merged cells).
// Pass 3 emits <tr>/<td> elements, translating boundary distances into
// colspan values according to m_mergingMode.
void Table::make(pugi::xml_node& node) {
	std::set<int> pts;
	std::set<int>::iterator pt, ptp;
	iterator spanRow;
	PtrVec<TableCellDef>::iterator cellDef, prevCellDef;
	PtrVec<TableCell>::iterator cell;
	int colspan;
	// Pass 1: prune empty rows, collect all vertical boundary positions.
	for (auto row = begin(); row != end(); ) {
		if ((*row)->m_cellList.empty()) {
			delete *row;
			row = erase(row);
		}
		else {
			pts.insert((*row)->m_left);
			for (cellDef = (*row)->m_cellDefList->begin();
				 cellDef != (*row)->m_cellDefList->end(); ++cellDef
			)
				pts.insert((*cellDef)->m_right);
			++row;
		}
	}
	auto tbl = m_tree.append_child("table");
	// Determine all rowspans and leftsides
	for (auto row = begin(); row != end(); ++row) {
		for (cellDef = (*row)->m_cellDefList->begin(), cell = (*row)->m_cellList.begin();
			 cell != (*row)->m_cellList.end();
			 ++cell, prevCellDef = cellDef++
		) {
			if (cellDef == (*row)->m_cellDefList->begin())
				(*cellDef)->m_left = (*row)->m_left;
			else
				(*cellDef)->m_left = (*prevCellDef)->m_right;
			if ((*cellDef)->m_isFirstMerged) {
				// Count how many following rows keep a merged cell with the
				// same right edge; that run length is the rowspan.
				for (spanRow = row, ++spanRow; spanRow != end(); ++spanRow) {
					// FIX: std::bind2nd/std::mem_fun were removed in C++17;
					// a lambda expresses the same predicate.
					auto cellDef2 = std::find_if(
						(*spanRow)->m_cellDefList->begin(),
						(*spanRow)->m_cellDefList->end(),
						[&cellDef](TableCellDef* def) {
							return def->rightEquals((*cellDef)->m_right);
						}
					);
					if (cellDef2 == (*spanRow)->m_cellDefList->end())
						break;
					if (!(*cellDef2)->m_isMerged)
						break;
				}
				(*cell)->m_rowspan = static_cast<int>(spanRow - row);
			}
		}
	}
	// Create rows
	for (auto row = begin(); row != end(); ++row) {
		auto tr = tbl.append_child("tr");
		// Rows starting right of the leftmost boundary get a spacer cell.
		pt = pts.find((*row)->m_left);
		if (pt != pts.begin()) {
			auto td = tr.append_child("td");
			td.append_attribute("colspan") = std::distance(pts.begin(), pt);
		}
		int colIndex = 0;
		for (cell = (*row)->m_cellList.begin(), cellDef = (*row)->m_cellDefList->begin();
			 cell != (*row)->m_cellList.end() && cellDef != (*row)->m_cellDefList->end();
			 ++cell, ++cellDef
		) {
			// Number of boundary intervals covered = colspan of this cell.
			ptp = pts.find((*cellDef)->m_right);
			colspan = static_cast<int>(std::distance(pt, ptp));
			pt = ptp;
			if (!(*cellDef)->m_isMerged) {
				auto td = tr.append_child("td");
				addSubtree((*cell)->m_node, td);
				if (colspan > 1) {
					if (m_mergingMode == 0) {
						td.append_attribute("colspan") = colspan;
					}
					else {
						// Merging modes 1/2: emit extra cells instead of a
						// colspan attribute (mode 1 duplicates the content).
						for (int i = 1; i < colspan; ++i) {
							td = tr.append_child("td");
							if (m_mergingMode == 1)
								addSubtree((*cell)->m_node, td);
						}
					}
					colIndex += colspan - 1;
				}
				if ((*cellDef)->m_isFirstMerged && m_mergingMode == 0) {
					td.append_attribute("rowspan") = (*cell)->m_rowspan;
				}
			}
			else if (m_mergingMode != 0) {
				// Vertically merged continuation cell: mode 1 copies the cell
				// directly above, mode 2 leaves it empty.
				auto td = tr.append_child("td");
				if (m_mergingMode == 1) {
					auto prevTr = tr.previous_sibling();
					auto prevTd = std::next(prevTr.children("td").begin(), colIndex);
					addSubtree(*prevTd, td);
				}
			}
			colIndex++;
		}
	}
	addSubtree(m_tree, node);
}
// Table private:
// Copy the children of `from` (or of its "parent" child node, when present)
// into `to`, carrying over any inline style attribute. A lone <span> wrapper
// is unwrapped so its children are copied directly.
void Table::addSubtree(const pugi::xml_node& from, pugi::xml_node& to) const {
	std::string style;
	auto parentNode = (from.child("parent")) ? from.child("parent") : from;
	if (parentNode.attribute("style"))
		style = parentNode.attribute("style").value();
	if (tools::xmlChildrenCount(parentNode) == 1 && parentNode.child("span"))
		parentNode = parentNode.child("span");
	if (!style.empty())
		to.append_attribute("style") = style.c_str();
	for (const auto& child : parentNode)
		to.append_copy(child);
}
} // End namespace
|
#include "../config.h"
#ifdef HAVE_LIBUNRAR
#include "rar.h"
using namespace AhoViewer;
#ifndef _UNIX
#define _UNIX
#endif // _UNIX
#if defined(HAVE_LIBUNRAR_DLL_HPP)
#include <libunrar/dll.hpp>
#elif defined(HAVE_UNRAR_DLL_HPP)
#include <unrar/dll.hpp>
#endif
// RAR file signature bytes used to detect the archive format.
const char Rar::Magic[Rar::MagicSize] = { 'R', 'a', 'r', '!', 0x1A, 0x07 };
// Construct a RAR archive handle for `path`, extracting into `exDir`.
Rar::Rar(const Glib::ustring &path, const Glib::ustring &exDir)
  : Archive::Archive(path, exDir)
{
}
// Extract a single named entry into m_ExtractedPath.
// Walks the archive headers, extracting the matching entry and skipping the
// rest. Returns true when the entry was found (and extraction attempted).
bool Rar::extract(const Glib::ustring &file) const
{
    bool found = false;
    RAROpenArchiveData archive;
    RARHeaderData header;
    memset(&archive, 0, sizeof(archive));
    archive.ArcName  = const_cast<char*>(m_Path.c_str());
    archive.OpenMode = RAR_OM_EXTRACT;
    HANDLE rar = RAROpenArchive(&archive);
    if (rar)
    {
        // RARReadHeader returns 0 while entries remain.
        while (RARReadHeader(rar, &header) == 0)
        {
            if (header.FileName == file)
            {
                RARProcessFile(rar, RAR_EXTRACT, const_cast<char*>(m_ExtractedPath.c_str()), NULL);
                found = true;
                break;
            }
            else
            {
                // Must still process the entry to advance to the next header.
                RARProcessFile(rar, RAR_SKIP, NULL, NULL);
            }
        }
        RARCloseArchive(rar);
    }
    return found;
}
bool Rar::has_valid_files(const FileType t) const
{
return !get_entries(t).empty();
}
// List archive entry names whose extension matches the requested type mask
// (IMAGES and/or ARCHIVES). Returns an empty vector if the archive cannot
// be opened.
std::vector<std::string> Rar::get_entries(const FileType t) const
{
    std::vector<std::string> entries;
    RAROpenArchiveData archive;
    RARHeaderData header;
    memset(&archive, 0, sizeof(archive));
    archive.ArcName  = const_cast<char*>(m_Path.c_str());
    archive.OpenMode = RAR_OM_LIST;
    HANDLE rar = RAROpenArchive(&archive);
    if (rar)
    {
        while (RARReadHeader(rar, &header) == 0)
        {
            if (((t & IMAGES) && Image::is_valid_extension(header.FileName)) ||
                ((t & ARCHIVES) && Archive::is_valid_extension(header.FileName)))
                entries.emplace_back(header.FileName);
            // Advance to the next header without extracting.
            RARProcessFile(rar, RAR_SKIP, NULL, NULL);
        }
        RARCloseArchive(rar);
    }
    return entries;
}
#endif // HAVE_LIBUNRAR
|
/**
* \file
* \author Karsten Rink
* \date 2012-01-04
* \brief Implementation of the DirectConditionGenerator class.
*
* \copyright
* Copyright (c) 2012-2018, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*
*/
#include <fstream>
#include <memory>
#include <logog/include/logog.hpp>
#include "DirectConditionGenerator.h"
#include "Applications/FileIO/AsciiRasterInterface.h"
#include "Raster.h"
#include "MeshSurfaceExtraction.h"
#include "Mesh.h"
#include "MeshLib/Node.h"
#include <cmath>
#include <limits>
/// Assigns each surface node of \c mesh the raster value at its position
/// (no-data cells map to 0) and caches the (node id, value) pairs in
/// _direct_values. Only runs when the cache is empty; otherwise warns and
/// returns the stale cache.
const std::vector<std::pair<std::size_t, double>>&
DirectConditionGenerator::directToSurfaceNodes(const MeshLib::Mesh& mesh,
                                               const std::string& filename)
{
    if (_direct_values.empty())
    {
        // unique_ptr for exception safety and consistency with
        // directWithSurfaceIntegration() (was a raw pointer + manual delete).
        std::unique_ptr<GeoLib::Raster> raster(
            FileIO::AsciiRasterInterface::readRaster(filename));
        if (!raster)
        {
            ERR("Error in DirectConditionGenerator::directToSurfaceNodes() - "
                "could not load raster file.");
            return _direct_values;
        }

        // Surface = faces whose normal deviates at most 90° from (0,0,-1).
        const MathLib::Vector3 dir(0, 0, -1);
        const std::vector<MeshLib::Node*> surface_nodes(
            MeshLib::MeshSurfaceExtraction::getSurfaceNodes(mesh, dir, 90));
        const double no_data(raster->getHeader().no_data);
        _direct_values.reserve(surface_nodes.size());
        for (auto const* surface_node : surface_nodes)
        {
            double val(raster->getValueAtPoint(*surface_node));
            val = (val == no_data) ? 0 : val;
            _direct_values.emplace_back(surface_node->getID(), val);
        }

        // getSurfaceNodes() returns owned copies; release them here.
        std::for_each(surface_nodes.begin(), surface_nodes.end(),
                      std::default_delete<MeshLib::Node>());
    }
    else
        ERR("Error in DirectConditionGenerator::directToSurfaceNodes() - Data "
            "vector contains outdated values.");

    return _direct_values;
}
/// Like directToSurfaceNodes(), but weights each node's raster value by the
/// node's share of the surface area and divides by \c scaling. Node ids refer
/// to the original (subsurface) mesh via the "OriginalSubsurfaceNodeIDs"
/// property written by the surface extraction.
const std::vector< std::pair<std::size_t,double> >& DirectConditionGenerator::directWithSurfaceIntegration(MeshLib::Mesh &mesh, const std::string &filename, double scaling)
{
    if (!_direct_values.empty()) {
        ERR("Error in DirectConditionGenerator::directWithSurfaceIntegration()"
            "- Data vector contains outdated values...");
        return _direct_values;
    }

    std::unique_ptr<GeoLib::Raster> raster(
        FileIO::AsciiRasterInterface::readRaster(filename));
    if (!raster) {
        ERR("Error in DirectConditionGenerator::directWithSurfaceIntegration()"
            "- could not load raster file.");
        return _direct_values;
    }

    // Extract the top surface (faces within 90° of straight down) and keep a
    // mapping back to the subsurface node ids.
    MathLib::Vector3 const dir(0.0, 0.0, -1.0);
    double const angle(90);
    std::string const prop_name("OriginalSubsurfaceNodeIDs");
    std::unique_ptr<MeshLib::Mesh> surface_mesh(
        MeshLib::MeshSurfaceExtraction::getMeshSurface(
            mesh, dir, angle, prop_name));

    std::vector<double> node_area_vec =
        MeshLib::MeshSurfaceExtraction::getSurfaceAreaForNodes(*surface_mesh);
    const std::vector<MeshLib::Node*> &surface_nodes(surface_mesh->getNodes());
    const std::size_t nNodes(surface_mesh->getNumberOfNodes());
    const double no_data(raster->getHeader().no_data);

    auto const* const node_id_pv =
        surface_mesh->getProperties().getPropertyVector<int>(prop_name);
    if (!node_id_pv)
    {
        ERR(
            "Need subsurface node ids, but the property \"%s\" is not "
            "available.",
            prop_name.c_str());
        return _direct_values;
    }

    // Area-weighted, scaled value per node; no-data cells contribute 0.
    _direct_values.reserve(nNodes);
    for (std::size_t i=0; i<nNodes; ++i)
    {
        double val(raster->getValueAtPoint(*surface_nodes[i]));
        val = (val == no_data) ? 0 : ((val*node_area_vec[i])/scaling);
        _direct_values.emplace_back((*node_id_pv)[i], val);
    }

    return _direct_values;
}
int DirectConditionGenerator::writeToFile(const std::string &name) const
{
std::ofstream out( name.c_str(), std::ios::out );
if (out)
{
for (const auto& direct_value : _direct_values)
out << direct_value.first << "\t" << direct_value.second << "\n";
out.close();
}
return 0;
}
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/exo/keyboard.h"
#include "ash/accessibility/accessibility_controller_impl.h"
#include "ash/constants/app_types.h"
#include "ash/constants/ash_pref_names.h"
#include "ash/public/cpp/external_arc/overlay/arc_overlay_manager.h"
#include "ash/shell.h"
#include "ash/test/ash_test_helper.h"
#include "ash/test/test_widget_builder.h"
#include "ash/test/test_window_builder.h"
#include "ash/wm/desks/desks_controller.h"
#include "ash/wm/desks/desks_test_util.h"
#include "ash/wm/tablet_mode/tablet_mode_controller.h"
#include "base/macros.h"
#include "base/run_loop.h"
#include "components/exo/buffer.h"
#include "components/exo/keyboard_delegate.h"
#include "components/exo/keyboard_device_configuration_delegate.h"
#include "components/exo/keyboard_modifiers.h"
#include "components/exo/keyboard_observer.h"
#include "components/exo/seat.h"
#include "components/exo/shell_surface.h"
#include "components/exo/surface.h"
#include "components/exo/test/exo_test_base.h"
#include "components/exo/test/exo_test_helper.h"
#include "components/exo/test/shell_surface_builder.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "ui/aura/client/aura_constants.h"
#include "ui/aura/client/focus_client.h"
#include "ui/base/ime/dummy_text_input_client.h"
#include "ui/events/devices/device_data_manager.h"
#include "ui/events/event_constants.h"
#include "ui/events/keycodes/dom/dom_code.h"
#include "ui/events/test/event_generator.h"
#include "ui/events/types/event_type.h"
#include "ui/views/controls/textfield/textfield.h"
namespace exo {
namespace {
// XKB mod masks for the default keymap.
// NOTE(review): bit indices assume the default xkb modifier ordering
// (shift=0, control=2, alt/mod1=3, numlock/mod2=4) — verify against the
// keymap actually loaded in these tests.
constexpr uint32_t kShiftMask = 1 << 0;
constexpr uint32_t kControlMask = 1 << 2;
constexpr uint32_t kAltMask = 1 << 3;
constexpr uint32_t kNumLockMask = 1 << 4;
// These tests run on the standard exo test fixture.
using KeyboardTest = test::ExoTestBase;
// GMock implementation of KeyboardDelegate, used to verify which keyboard
// events (enter/leave, keys, modifiers, layout/repeat changes) are dispatched
// to the client side.
class MockKeyboardDelegate : public KeyboardDelegate {
 public:
  MockKeyboardDelegate() = default;

  // Overridden from KeyboardDelegate:
  MOCK_METHOD(bool, CanAcceptKeyboardEventsForSurface, (Surface*), (const));
  MOCK_METHOD(void,
              OnKeyboardEnter,
              (Surface*, (const base::flat_map<ui::DomCode, KeyState>&)));
  MOCK_METHOD(void, OnKeyboardLeave, (Surface*));
  MOCK_METHOD(uint32_t, OnKeyboardKey, (base::TimeTicks, ui::DomCode, bool));
  MOCK_METHOD(void, OnKeyboardModifiers, (const KeyboardModifiers&));
  MOCK_METHOD(void,
              OnKeyRepeatSettingsChanged,
              (bool, base::TimeDelta, base::TimeDelta));
  MOCK_METHOD(void, OnKeyboardLayoutUpdated, (base::StringPiece));
};
// NiceMock variant: uninteresting calls are silently ignored.
using NiceMockKeyboardDelegate = ::testing::NiceMock<MockKeyboardDelegate>;
// GMock implementation used to observe keyboard device type changes
// (e.g. physical vs. on-screen keyboard).
class MockKeyboardDeviceConfigurationDelegate
    : public KeyboardDeviceConfigurationDelegate {
 public:
  MockKeyboardDeviceConfigurationDelegate() = default;

  // Overridden from KeyboardDeviceConfigurationDelegate:
  MOCK_METHOD(void, OnKeyboardTypeChanged, (bool));
};
// GMock observer used to verify KeyboardObserver notifications.
class MockKeyboardObserver : public KeyboardObserver {
 public:
  MockKeyboardObserver() = default;

  // Overridden from KeyboardObserver:
  MOCK_METHOD(void, OnKeyboardDestroying, (Keyboard*));
  MOCK_METHOD(void, OnKeyboardKey, (base::TimeTicks, ui::DomCode, bool));
};
// NiceMock variant: uninteresting calls are silently ignored.
using NiceMockKeyboardObserver = ::testing::NiceMock<MockKeyboardObserver>;
// ShellSurface with a mockable AcceleratorPressed, so tests can check whether
// key events are consumed as accelerators.
class TestShellSurface : public ShellSurface {
 public:
  explicit TestShellSurface(Surface* surface) : ShellSurface(surface) {}

  MOCK_METHOD(bool, AcceleratorPressed, (const ui::Accelerator& accelerator));
};
// Event handler that reacts to any key event by moving the window focus to a
// preconfigured window.
class TestEventHandler : public ui::EventHandler {
 public:
  explicit TestEventHandler(aura::Window* focus_window)
      : focus_window_(focus_window) {}
  TestEventHandler(const TestEventHandler&) = delete;
  TestEventHandler& operator=(const TestEventHandler&) = delete;

  // ui::EventHandler:
  void OnKeyEvent(ui::KeyEvent* event) override {
    auto* focus_client =
        aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
    focus_client->FocusWindow(focus_window_);
  }

  // Window that receives focus on every key event. Not owned.
  aura::Window* focus_window_;
};
// Verifies that switching desks via alt-tab doesn't prevent Seat from receiving
// key events. https://crbug.com/1008574.
TEST_F(KeyboardTest, CorrectSeatPressedKeysOnSwitchingDesks) {
  Seat seat;
  Keyboard keyboard(std::make_unique<NiceMockKeyboardDelegate>(), &seat);

  // Set up a second desk.
  auto* desks_controller = ash::DesksController::Get();
  desks_controller->NewDesk(ash::DesksCreationRemovalSource::kButton);
  ASSERT_EQ(2u, desks_controller->desks().size());
  ash::Desk* desk_1 = desks_controller->desks()[0].get();
  const ash::Desk* desk_2 = desks_controller->desks()[1].get();

  // Desk 1 hosts a regular app window.
  auto desk_1_window = CreateAppWindow(gfx::Rect(0, 0, 250, 100));

  // Desk 2 hosts an exo surface window.
  ash::ActivateDesk(desk_2);
  auto surface = std::make_unique<Surface>();
  auto shell_surface = std::make_unique<ShellSurface>(surface.get());
  gfx::Size buffer_size(10, 10);
  auto buffer = std::make_unique<Buffer>(
      exo_test_helper()->CreateGpuMemoryBuffer(buffer_size));
  surface->Attach(buffer.get());
  surface->Commit();

  // Return to desk 1 and alt-tab (releasing alt before tab). Activating the
  // exo surface window on desk 2 kicks off a desk switch animation; while it
  // runs, Seat must still see every key in `OnKeyEvent()` so that
  // |pressed_keys_| stays consistent.
  ash::ActivateDesk(desk_1);

  auto dispatch_key_event = [&](ui::EventType type, ui::KeyboardCode key_code,
                                ui::DomCode code, int flags) {
    ui::KeyEvent key_event{type, key_code, code, flags};
    seat.WillProcessEvent(&key_event);
    GetEventGenerator()->Dispatch(&key_event);

    // A pressed key stays tracked until its release has been processed.
    const bool expect_tracked = type != ui::ET_KEY_RELEASED;
    EXPECT_EQ(expect_tracked, seat.pressed_keys().count(code));

    seat.DidProcessEvent(&key_event);
  };

  ash::DeskSwitchAnimationWaiter waiter;
  dispatch_key_event(ui::ET_KEY_PRESSED, ui::VKEY_MENU, ui::DomCode::ALT_LEFT,
                     /*flags=*/0);
  dispatch_key_event(ui::ET_KEY_PRESSED, ui::VKEY_TAB, ui::DomCode::TAB,
                     /*flags=*/ui::EF_ALT_DOWN);
  dispatch_key_event(ui::ET_KEY_RELEASED, ui::VKEY_MENU, ui::DomCode::ALT_LEFT,
                     /*flags=*/0);
  dispatch_key_event(ui::ET_KEY_RELEASED, ui::VKEY_TAB, ui::DomCode::TAB,
                     /*flags=*/0);
  EXPECT_TRUE(seat.pressed_keys().empty());
  EXPECT_EQ(desk_2, desks_controller->GetTargetActiveDesk());
  waiter.Wait();
}
// Verifies that OnKeyboardEnter carries the Seat's pressed-key state —
// including a key pressed before the Keyboard existed — and that
// CanAcceptKeyboardEventsForSurface() gates whether the enter is delivered.
TEST_F(KeyboardTest, OnKeyboardEnter) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  Seat seat;
  // Pressing key before Keyboard instance is created and surface has
  // received focus.
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_A);
  generator.PressKey(ui::VKEY_A, ui::EF_SHIFT_DOWN);
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(surface->window());
  // Keyboard should try to set initial focus to surface.
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  // The delegate rejects the surface here, so no enter is delivered yet.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(false));
  Keyboard keyboard(std::move(delegate), &seat);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Set up expectation for the key release.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kShiftMask | kNumLockMask, 0, 0, 0}));
  // Entering while US_A is still held: the key map handed to OnKeyboardEnter
  // contains the pre-existing pressed key.
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(
          surface.get(),
          base::flat_map<ui::DomCode, KeyState>(
              {{ui::DomCode::US_A, KeyState{ui::DomCode::US_A, false}}})));
  focus_client->FocusWindow(nullptr);
  focus_client->FocusWindow(surface->window());
  // Surface should maintain keyboard focus when moved to top-level window.
  focus_client->FocusWindow(surface->window()->GetToplevelWindow());
  // Release key after surface lost focus.
  focus_client->FocusWindow(nullptr);
  generator.ReleaseKey(ui::VKEY_A, ui::EF_SHIFT_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Key should no longer be pressed when focus returns.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kShiftMask | kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window()->GetToplevelWindow());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Verifies that OnKeyboardLeave fires both when focus moves away from the
// surface and when the focused surface itself is destroyed.
TEST_F(KeyboardTest, OnKeyboardLeave) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  Seat seat;
  auto keyboard = std::make_unique<Keyboard>(std::move(delegate), &seat);
  // Accept keyboard focus unconditionally throughout this test.
  ON_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillByDefault(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Leave is delivered when focus moves to nothing.
  EXPECT_CALL(*delegate_ptr, OnKeyboardLeave(surface.get()));
  focus_client->FocusWindow(nullptr);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Leave is also delivered when the focused surface is destroyed.
  EXPECT_CALL(*delegate_ptr, OnKeyboardLeave(surface.get()));
  shell_surface.reset();
  surface.reset();
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Verifies key delivery to the delegate and observers: duplicate presses are
// suppressed, a rewritten key (alt+down -> end) is released as the rewritten
// code, an accelerator pressed while unfocused shows up in the next enter's
// key map, and keys are dropped while a non-exo window has focus.
TEST_F(KeyboardTest, OnKeyboardKey) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  NiceMockKeyboardObserver observer;
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  keyboard.AddObserver(&observer);
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  // This should only generate a press event for KEY_A.
  // Observer is notified before the delegate (InSequence enforces the order).
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_A, true));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::US_A, true));
  }
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_A);
  generator.PressKey(ui::VKEY_A, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // This should not generate another press event for KEY_A.
  generator.PressKey(ui::VKEY_A, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // This should only generate a single release event for KEY_A.
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_A, false));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::US_A, false));
  }
  generator.ReleaseKey(ui::VKEY_A, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Test key event rewriting. In this case, ARROW_DOWN is rewritten to KEY_END
  // as a result of ALT being pressed.
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::END, true));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::END, true));
  }
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kAltMask | kNumLockMask, 0, 0, 0}));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::ARROW_DOWN);
  generator.PressKey(ui::VKEY_END, ui::EF_ALT_DOWN);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // This should generate a release event for KEY_END as that is the key
  // associated with the key press.
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::END, false));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::END, false));
  }
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  generator.ReleaseKey(ui::VKEY_DOWN, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Press accelerator after surface lost focus.
  EXPECT_CALL(*delegate_ptr, OnKeyboardLeave(surface.get()));
  focus_client->FocusWindow(nullptr);
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_W);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Key should be pressed when focus returns.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kControlMask | kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(
          surface.get(),
          base::flat_map<ui::DomCode, KeyState>(
              {{ui::DomCode::US_W, KeyState{ui::DomCode::US_W, false}}})));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Releasing accelerator when surface has focus should generate event.
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_W, false));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::US_W, false));
  }
  generator.ReleaseKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Key events should be ignored when the focused window is not an
  // exo::Surface.
  std::unique_ptr<aura::Window> window =
      ash::ChildTestWindowBuilder(shell_surface->GetWidget()->GetNativeWindow(),
                                  gfx::Rect(buffer_size))
          .Build();
  // Moving the focus away will reset the focused surface.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .Times(0);
  focus_client->FocusWindow(window.get());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  EXPECT_FALSE(seat.GetFocusedSurface());
  EXPECT_FALSE(keyboard.focused_surface_for_testing());
  EXPECT_CALL(observer,
              OnKeyboardKey(testing::_, ui::DomCode::ARROW_LEFT, true))
      .Times(0);
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::ARROW_LEFT, true))
      .Times(0);
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::ARROW_LEFT);
  generator.PressKey(ui::VKEY_LEFT, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  EXPECT_CALL(observer,
              OnKeyboardKey(testing::_, ui::DomCode::ARROW_LEFT, false))
      .Times(0);
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::ARROW_LEFT, false))
      .Times(0);
  generator.ReleaseKey(ui::VKEY_LEFT, 0);
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Verifies that a key consumed by the IME is withheld from the delegate while
// observers still receive it, and that aura::client::kSkipImeProcessing makes
// all keys bypass the IME and reach the delegate again.
TEST_F(KeyboardTest, OnKeyboardKey_NotSendKeyIfConsumedByIme) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  NiceMockKeyboardObserver observer;
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  keyboard.AddObserver(&observer);
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  // Focus a text input client so the IME consumes key events.
  views::Widget* widget =
      views::Widget::GetTopLevelWidgetForNativeView(surface->window());
  ui::InputMethod* input_method = widget->GetInputMethod();
  ui::DummyTextInputClient client{ui::TEXT_INPUT_TYPE_TEXT};
  input_method->SetFocusedTextInputClient(&client);
  // If a text field is focused, a pressed key event is not sent to a client
  // because a key event should be consumed by the IME.
  // However, the observer should receive OnKeyboardKey, always.
  EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_A, true));
  EXPECT_CALL(*delegate_ptr, OnKeyboardKey(testing::_, ui::DomCode::US_A, true))
      .Times(0);
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_A);
  generator.PressKey(ui::VKEY_A, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // TODO(yhanada): The below EXPECT_CALL fails because exo::Keyboard currently
  // sends a key release event for the keys which exo::Keyboard sent a pressed
  // event for. It might cause a never-ending key repeat in the client.
  // EXPECT_CALL(delegate, OnKeyboardKey(testing::_, ui::DomCode::US_A, false));
  EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_A, false));
  generator.ReleaseKey(ui::VKEY_A, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  // Any key event should be sent to a client if a key event skips IME.
  surface->window()->SetProperty(aura::client::kSkipImeProcessing, true);
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_C, true));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::US_C, true));
  }
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_C);
  generator.PressKey(ui::VKEY_C, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  {
    testing::InSequence s;
    EXPECT_CALL(observer, OnKeyboardKey(testing::_, ui::DomCode::US_C, false));
    EXPECT_CALL(*delegate_ptr,
                OnKeyboardKey(testing::_, ui::DomCode::US_C, false));
  }
  generator.ReleaseKey(ui::VKEY_C, 0);
  testing::Mock::VerifyAndClearExpectations(&observer);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  input_method->SetFocusedTextInputClient(nullptr);
}
// Verifies keyboard focus tracking for an exo surface hosted as an ARC
// overlay: the overlay takes focus on creation, loses it to another widget,
// regains it when the host widget is activated, and keeps it across tab.
TEST_F(KeyboardTest, FocusWithArcOverlay) {
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  // Just allow any surface to receive focus.
  EXPECT_CALL(*delegate, CanAcceptKeyboardEventsForSurface(::testing::_))
      .WillRepeatedly(testing::Return(true));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  // TODO(oshima): Create a TestExoWindowBuilder.
  // Marks every new exo window as an ARC app so the overlay manager treats it
  // as one.
  class TestPropertyResolver : public exo::WMHelper::AppPropertyResolver {
   public:
    TestPropertyResolver() = default;
    ~TestPropertyResolver() override = default;
    void PopulateProperties(
        const Params& params,
        ui::PropertyHandler& out_properties_container) override {
      out_properties_container.SetProperty(
          aura::client::kAppType, static_cast<int>(ash::AppType::ARC_APP));
    }
  };
  WMHelper::GetInstance()->RegisterAppPropertyResolver(
      std::make_unique<TestPropertyResolver>());
  ash::ArcOverlayManager arc_overlay_manager_;
  auto* widget1 = ash::TestWidgetBuilder()
                      .SetBounds(gfx::Rect(200, 200))
                      .BuildOwnedByNativeWidget();
  views::Textfield* textfield1 = new views::Textfield();
  widget1->GetContentsView()->AddChildView(textfield1);
  textfield1->SetBounds(0, 0, 100, 100);
  auto* widget2 = ash::TestWidgetBuilder()
                      .SetBounds(gfx::Rect(200, 200))
                      .BuildOwnedByNativeWidget();
  auto hold = arc_overlay_manager_.RegisterHostWindow(
      "test", widget1->GetNativeWindow());
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  // NOTE(review): the "test" suffix presumably ties this surface to the host
  // window registered above under token "test" — confirm with
  // ArcOverlayManager's client-surface-id handling.
  surface->SetClientSurfaceId("billing_id:test");
  surface->Attach(buffer.get());
  surface->Commit();
  EXPECT_TRUE(shell_surface->GetWidget());
  // The overlay should have the focus when created.
  EXPECT_EQ(keyboard.focused_surface_for_testing(), surface.get());
  widget2->Activate();
  EXPECT_FALSE(keyboard.focused_surface_for_testing());
  // Activating the host widget should set the focus back to the overlay.
  widget1->Activate();
  EXPECT_EQ(keyboard.focused_surface_for_testing(), surface.get());
  constexpr char kFocusedViewClassName[] = "OverlayNativeViewHost";
  EXPECT_STREQ(kFocusedViewClassName,
               widget1->GetFocusManager()->GetFocusedView()->GetClassName());
  // Tabbing should not move the focus away from the overlay.
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  generator.PressKey(ui::VKEY_TAB, 0);
  EXPECT_STREQ(kFocusedViewClassName,
               widget1->GetFocusManager()->GetFocusedView()->GetClassName());
  EXPECT_EQ(keyboard.focused_surface_for_testing(), surface.get());
  widget1->CloseNow();
}
// Verifies that OnKeyboardModifiers is sent whenever the modifier set changes
// alongside key events (shift, shift+alt, back to none).
TEST_F(KeyboardTest, OnKeyboardModifiers) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  // This should generate a modifier event.
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::US_A, true));
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kShiftMask | kNumLockMask, 0, 0, 0}));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_A);
  generator.PressKey(ui::VKEY_A, ui::EF_SHIFT_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // This should generate another modifier event.
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::US_B, true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{
                  kShiftMask | kAltMask | kNumLockMask, 0, 0, 0}));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_B);
  generator.PressKey(ui::VKEY_B, ui::EF_SHIFT_DOWN | ui::EF_ALT_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // This should generate a third modifier event.
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::US_B, false));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  generator.ReleaseKey(ui::VKEY_B, 0);
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Verifies that in tablet mode the device-configuration delegate is told
// whether a physical keyboard is present as keyboards are removed/re-added.
TEST_F(KeyboardTest, OnKeyboardTypeChanged) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  ui::DeviceHotplugEventObserver* device_data_manager =
      ui::DeviceDataManager::GetInstance();
  ASSERT_TRUE(device_data_manager != nullptr);
  // Make sure that DeviceDataManager has one external keyboard...
  const std::vector<ui::InputDevice> keyboards{
      ui::InputDevice(2, ui::InputDeviceType::INPUT_DEVICE_USB, "keyboard")};
  device_data_manager->OnKeyboardDevicesUpdated(keyboards);
  // and a touch screen.
  const std::vector<ui::TouchscreenDevice> touch_screen{
      ui::TouchscreenDevice(3, ui::InputDeviceType::INPUT_DEVICE_INTERNAL,
                            "touch", gfx::Size(600, 400), 1)};
  device_data_manager->OnTouchscreenDevicesUpdated(touch_screen);
  ash::TabletModeController* tablet_mode_controller =
      ash::Shell::Get()->tablet_mode_controller();
  tablet_mode_controller->SetEnabledForTest(true);
  Seat seat;
  auto keyboard = std::make_unique<Keyboard>(
      std::make_unique<NiceMockKeyboardDelegate>(), &seat);
  MockKeyboardDeviceConfigurationDelegate configuration_delegate;
  // Setting the delegate reports the current state (keyboard present).
  EXPECT_CALL(configuration_delegate, OnKeyboardTypeChanged(true));
  keyboard->SetDeviceConfigurationDelegate(&configuration_delegate);
  EXPECT_TRUE(keyboard->HasDeviceConfigurationDelegate());
  testing::Mock::VerifyAndClearExpectations(&configuration_delegate);
  // Removing all keyboard devices in tablet mode calls
  // OnKeyboardTypeChanged() with false.
  EXPECT_CALL(configuration_delegate, OnKeyboardTypeChanged(false));
  device_data_manager->OnKeyboardDevicesUpdated(
      std::vector<ui::InputDevice>({}));
  testing::Mock::VerifyAndClearExpectations(&configuration_delegate);
  // Re-adding keyboards calls OnKeyboardTypeChanged() with true.
  EXPECT_CALL(configuration_delegate, OnKeyboardTypeChanged(true));
  device_data_manager->OnKeyboardDevicesUpdated(keyboards);
  testing::Mock::VerifyAndClearExpectations(&configuration_delegate);
  keyboard.reset();
  tablet_mode_controller->SetEnabledForTest(false);
}
// Verifies that enabling/disabling the accessibility virtual keyboard toggles
// the reported keyboard type even though a physical keyboard stays attached.
TEST_F(KeyboardTest, OnKeyboardTypeChanged_AccessibilityKeyboard) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  ui::DeviceHotplugEventObserver* device_data_manager =
      ui::DeviceDataManager::GetInstance();
  ASSERT_TRUE(device_data_manager != nullptr);
  // Make sure that DeviceDataManager has one external keyboard.
  const std::vector<ui::InputDevice> keyboards{
      ui::InputDevice(2, ui::InputDeviceType::INPUT_DEVICE_USB, "keyboard")};
  device_data_manager->OnKeyboardDevicesUpdated(keyboards);
  Seat seat;
  Keyboard keyboard(std::make_unique<NiceMockKeyboardDelegate>(), &seat);
  MockKeyboardDeviceConfigurationDelegate configuration_delegate;
  // Setting the delegate reports the current state (keyboard present).
  EXPECT_CALL(configuration_delegate, OnKeyboardTypeChanged(true));
  keyboard.SetDeviceConfigurationDelegate(&configuration_delegate);
  EXPECT_TRUE(keyboard.HasDeviceConfigurationDelegate());
  testing::Mock::VerifyAndClearExpectations(&configuration_delegate);
  ash::AccessibilityControllerImpl* accessibility_controller =
      ash::Shell::Get()->accessibility_controller();
  // Enable a11y keyboard calls OnKeyboardTypeChanged() with false.
  EXPECT_CALL(configuration_delegate, OnKeyboardTypeChanged(false));
  accessibility_controller->virtual_keyboard().SetEnabled(true);
  testing::Mock::VerifyAndClearExpectations(&configuration_delegate);
  // Disable a11y keyboard calls OnKeyboardTypeChanged() with true.
  EXPECT_CALL(configuration_delegate, OnKeyboardTypeChanged(true));
  accessibility_controller->virtual_keyboard().SetEnabled(false);
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(&configuration_delegate);
}
// Repeat delay/interval values used by the key-repeat tests below. 500ms/50ms
// are the expected defaults (see KeyRepeatSettingsLoadDefaults); 1000ms is the
// value the tests write into the user prefs.
constexpr base::TimeDelta kDelta50Ms = base::Milliseconds(50);
constexpr base::TimeDelta kDelta500Ms = base::Milliseconds(500);
constexpr base::TimeDelta kDelta1000Ms = base::Milliseconds(1000);
// Key repeat settings are pushed to the delegate on Keyboard construction,
// with the default values when no prefs are set.
TEST_F(KeyboardTest, KeyRepeatSettingsLoadDefaults) {
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  EXPECT_CALL(*delegate,
              OnKeyRepeatSettingsChanged(true, kDelta500Ms, kDelta50Ms));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
}
// Key repeat settings already present in the active user's prefs are pushed
// to the delegate on Keyboard construction.
TEST_F(KeyboardTest, KeyRepeatSettingsLoadInitially) {
  std::string email = "user0@tray";
  SetUserPref(email, ash::prefs::kXkbAutoRepeatEnabled, base::Value(true));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatDelay, base::Value(1000));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatInterval, base::Value(1000));
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(true, kDelta1000Ms, kDelta1000Ms));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Each key-repeat pref change triggers a delegate update carrying the full
// (enabled, delay, interval) triple as accumulated so far.
TEST_F(KeyboardTest, KeyRepeatSettingsUpdateAtRuntime) {
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  // Initially load defaults.
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(testing::_, testing::_, testing::_));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Make sure that setting prefs triggers the corresponding delegate calls.
  const std::string email = "user0@tray";
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(false, testing::_, testing::_));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatEnabled, base::Value(false));
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(false, kDelta1000Ms, testing::_));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatDelay, base::Value(1000));
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(false, kDelta1000Ms, kDelta1000Ms));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatInterval, base::Value(1000));
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Pref changes for a user who is not the active session must not produce
// delegate updates.
TEST_F(KeyboardTest, KeyRepeatSettingsIgnoredForNonActiveUser) {
  // Simulate two users, with the first user as active.
  CreateUserSessions(2);
  // Key repeat settings should be sent exactly once, for the default values.
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(true, kDelta500Ms, kDelta50Ms));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Set prefs for non-active user; no calls should result.
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(testing::_, testing::_, testing::_))
      .Times(0);
  const std::string email = "user1@tray";
  SetUserPref(email, ash::prefs::kXkbAutoRepeatEnabled, base::Value(true));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatDelay, base::Value(1000));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatInterval, base::Value(1000));
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Switching the active user reloads that user's key-repeat prefs and pushes
// them to the delegate.
TEST_F(KeyboardTest, KeyRepeatSettingsUpdateOnProfileChange) {
  // Simulate two users, with the first user as active.
  CreateUserSessions(2);
  // Second user has different preferences.
  std::string email = "user1@tray";
  SetUserPref(email, ash::prefs::kXkbAutoRepeatEnabled, base::Value(true));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatDelay, base::Value(1000));
  SetUserPref(email, ash::prefs::kXkbAutoRepeatInterval, base::Value(1000));
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  // Initially, load default prefs for first user.
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(true, kDelta500Ms, kDelta50Ms));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Switching user should load new prefs.
  EXPECT_CALL(*delegate_ptr,
              OnKeyRepeatSettingsChanged(true, kDelta1000Ms, kDelta1000Ms));
  SimulateUserLogin(email, user_manager::UserType::USER_TYPE_REGULAR);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// The keyboard layout is sent to the delegate on construction and again
// whenever the layout name changes.
TEST_F(KeyboardTest, KeyboardLayout) {
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  // Initially, update to the current keyboard layout.
  EXPECT_CALL(*delegate_ptr, OnKeyboardLayoutUpdated(testing::_));
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Updating the keyboard layout should trigger the delegate call.
  EXPECT_CALL(*delegate_ptr, OnKeyboardLayoutUpdated(testing::_));
  keyboard.OnKeyboardLayoutNameChanged("ja-jp");
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Exercises observer registration/removal and the OnKeyboardDestroying
// notification emitted from Keyboard's destructor.
TEST_F(KeyboardTest, KeyboardObserver) {
  // Declared before the keyboard so gmock verifies the destruction
  // expectations only after the keyboard itself has been destroyed.
  MockKeyboardObserver removed_observer;
  MockKeyboardObserver retained_observer;

  Seat seat;
  Keyboard keyboard(std::make_unique<NiceMockKeyboardDelegate>(), &seat);

  keyboard.AddObserver(&removed_observer);
  keyboard.AddObserver(&retained_observer);
  EXPECT_TRUE(keyboard.HasObserver(&removed_observer));
  EXPECT_TRUE(keyboard.HasObserver(&retained_observer));
  testing::Mock::VerifyAndClearExpectations(&removed_observer);
  testing::Mock::VerifyAndClearExpectations(&retained_observer);

  keyboard.RemoveObserver(&removed_observer);
  EXPECT_FALSE(keyboard.HasObserver(&removed_observer));
  EXPECT_TRUE(keyboard.HasObserver(&retained_observer));
  testing::Mock::VerifyAndClearExpectations(&removed_observer);
  testing::Mock::VerifyAndClearExpectations(&retained_observer);

  // Only the observer still registered is notified from Keyboard's
  // destructor.
  EXPECT_CALL(removed_observer, OnKeyboardDestroying(&keyboard)).Times(0);
  EXPECT_CALL(retained_observer, OnKeyboardDestroying(&keyboard));
}
// Verifies the SetNeedKeyboardKeyAcks / AreKeyboardKeyAcksNeeded flag
// round-trips, starting from its default of false.
TEST_F(KeyboardTest, NeedKeyboardKeyAcks) {
  std::unique_ptr<Surface> surface(new Surface);
  std::unique_ptr<ShellSurface> shell_surface(new ShellSurface(surface.get()));
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  // Clear focus so the keyboard starts without an entered surface.
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  Seat seat;
  Keyboard keyboard(std::make_unique<NiceMockKeyboardDelegate>(), &seat);
  EXPECT_FALSE(keyboard.AreKeyboardKeyAcksNeeded());
  keyboard.SetNeedKeyboardKeyAcks(true);
  EXPECT_TRUE(keyboard.AreKeyboardKeyAcksNeeded());
  keyboard.SetNeedKeyboardKeyAcks(false);
  EXPECT_FALSE(keyboard.AreKeyboardKeyAcksNeeded());
}
// Exercises the key-ack protocol: without acks enabled, accelerators go
// straight to the ShellSurface; with acks enabled, key events go to the
// delegate first and only unhandled acks fall through to the accelerator.
// Repeat/duplicate presses while a key is held must not re-fire it.
TEST_F(KeyboardTest, AckKeyboardKey) {
  std::unique_ptr<Surface> surface(new Surface);
  auto shell_surface = std::make_unique<TestShellSurface>(surface.get());
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Focusing the surface should produce enter + modifier callbacks.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // If we don't set NeedKeyboardAckKeys to true, accelerators are always passed
  // to ShellSurface.
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  // Press KEY_W with Ctrl.
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kControlMask | kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(*shell_surface.get(), AcceleratorPressed(ui::Accelerator(
                                        ui::VKEY_W, ui::EF_CONTROL_DOWN,
                                        ui::Accelerator::KeyState::PRESSED)))
      .WillOnce(testing::Return(true));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_W);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  // Release KEY_W.
  generator.ReleaseKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  testing::Mock::VerifyAndClearExpectations(shell_surface.get());
  // If we set NeedKeyboardAckKeys to true, only unhandled accelerators are
  // passed to ShellSurface.
  keyboard.SetNeedKeyboardKeyAcks(true);
  // Press KEY_W with Ctrl. The delegate returns serial 1 for the event.
  EXPECT_CALL(*delegate_ptr, OnKeyboardKey(testing::_, ui::DomCode::US_W, true))
      .WillOnce(testing::Return(1));
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Send ack for the key press. "Not handled" means the accelerator fires.
  EXPECT_CALL(*shell_surface.get(), AcceleratorPressed(ui::Accelerator(
                                        ui::VKEY_W, ui::EF_CONTROL_DOWN,
                                        ui::Accelerator::KeyState::PRESSED)))
      .WillOnce(testing::Return(true));
  keyboard.AckKeyboardKey(1, false /* handled */);
  testing::Mock::VerifyAndClearExpectations(shell_surface.get());
  // Release KEY_W.
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::US_W, false))
      .WillOnce(testing::Return(2));
  generator.ReleaseKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Send ack for the key release.
  keyboard.AckKeyboardKey(2, false /* handled */);
  // Press KEY_W with Ctrl again.
  EXPECT_CALL(*delegate_ptr, OnKeyboardKey(testing::_, ui::DomCode::US_W, true))
      .WillOnce(testing::Return(3));
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Send ack for the key press.
  // AcceleratorPressed is not called when the accelerator is already handled.
  keyboard.AckKeyboardKey(3, true /* handled */);
  // A repeat key event should not be sent to the client and also should not
  // invoke the accelerator.
  EXPECT_CALL(*shell_surface.get(), AcceleratorPressed(ui::Accelerator(
                                        ui::VKEY_W, ui::EF_CONTROL_DOWN,
                                        ui::Accelerator::KeyState::PRESSED)))
      .Times(0);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN | ui::EF_IS_REPEAT);
  testing::Mock::VerifyAndClearExpectations(shell_surface.get());
  // Another key press event while holding the key is also ignored and should
  // not invoke the accelerator.
  EXPECT_CALL(*shell_surface.get(), AcceleratorPressed(ui::Accelerator(
                                        ui::VKEY_W, ui::EF_CONTROL_DOWN,
                                        ui::Accelerator::KeyState::PRESSED)))
      .Times(0);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(shell_surface.get());
  // Release the key and reset modifier_flags.
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::US_W, false));
  generator.ReleaseKey(ui::VKEY_W, 0);
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Verifies that a pending key ack is dropped when keyboard focus leaves the
// surface: acking afterwards must not invoke AcceleratorPressed.
TEST_F(KeyboardTest, AckKeyboardKeyMoveFocus) {
  std::unique_ptr<Surface> surface(new Surface);
  auto shell_surface = std::make_unique<TestShellSurface>(surface.get());
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  // Focusing the surface produces enter + modifier callbacks.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  keyboard.SetNeedKeyboardKeyAcks(true);
  // Press KEY_W with Ctrl. The delegate returns serial 1 for the event.
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kControlMask | kNumLockMask, 0, 0, 0}))
      .Times(1);
  EXPECT_CALL(*delegate_ptr, OnKeyboardKey(testing::_, ui::DomCode::US_W, true))
      .WillOnce(testing::Return(1));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_W);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Move focus from the window
  EXPECT_CALL(*delegate_ptr, OnKeyboardLeave(surface.get()));
  focus_client->FocusWindow(nullptr);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Send ack for the key press. |AcceleratorPressed()| should not be called.
  keyboard.AckKeyboardKey(1, false /* handled */);
}
// Verifies that a key ack arriving after the ack timeout is ignored: the
// pending entry is already treated as handled and removed, so a late
// "unhandled" ack must not fire the accelerator.
TEST_F(KeyboardTest, AckKeyboardKeyExpired) {
  std::unique_ptr<Surface> surface(new Surface);
  auto shell_surface = std::make_unique<TestShellSurface>(surface.get());
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  // Focusing the surface produces enter + modifier callbacks.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  keyboard.SetNeedKeyboardKeyAcks(true);
  // Press KEY_W with Ctrl. The delegate returns serial 1 for the event.
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kControlMask | kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(*delegate_ptr, OnKeyboardKey(testing::_, ui::DomCode::US_W, true))
      .WillOnce(testing::Return(1));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_W);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Keyboard processes pending events as if it is handled when it expires,
  // so |AcceleratorPressed()| should not be called.
  EXPECT_CALL(*shell_surface.get(), AcceleratorPressed(ui::Accelerator(
                                        ui::VKEY_W, ui::EF_CONTROL_DOWN,
                                        ui::Accelerator::KeyState::PRESSED)))
      .Times(0);
  // Wait until |ProcessExpiredPendingKeyAcks| is fired.
  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, run_loop.QuitClosure(), base::Milliseconds(1000));
  run_loop.Run();
  base::RunLoop().RunUntilIdle();
  // Send ack for the key press as if it was not handled. In the normal case,
  // |AcceleratorPressed()| should be called, but since the timeout passed, the
  // key should have been treated as handled already and removed from the
  // pending_key_acks_ map. Since the event is no longer in the map,
  // |AcceleratorPressed()| should not be called.
  keyboard.AckKeyboardKey(1, false /* handled */);
  // Release the key and reset modifier_flags.
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardKey(testing::_, ui::DomCode::US_W, false));
  generator.ReleaseKey(ui::VKEY_W, 0);
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Test for crbug.com/753539. If action for an accelerator moves the focus to
// another window, it causes clearing the map of pending key acks in Keyboard.
// We can't assume that an iterator of the map is valid after processing an
// accelerator.
// A ShellSurface whose accelerator handler clears keyboard focus as a side
// effect. Used to reproduce crbug.com/753539, where running an accelerator
// invalidated the pending-key-ack map while it was being iterated.
class TestShellSurfaceWithMovingFocusAccelerator : public ShellSurface {
 public:
  explicit TestShellSurfaceWithMovingFocusAccelerator(Surface* surface)
      : ShellSurface(surface) {}
  // Always reports the accelerator as handled after moving focus away.
  bool AcceleratorPressed(const ui::Accelerator& accelerator) override {
    aura::client::FocusClient* focus_client =
        aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
    focus_client->FocusWindow(nullptr);
    return true;
  }
};
// Regression test for crbug.com/753539: an accelerator whose action moves
// focus clears the pending key-ack map; the subsequent expiry pass must not
// use a stale iterator into that map.
TEST_F(KeyboardTest, AckKeyboardKeyExpiredWithMovingFocusAccelerator) {
  std::unique_ptr<Surface> surface(new Surface);
  auto shell_surface =
      std::make_unique<TestShellSurfaceWithMovingFocusAccelerator>(
          surface.get());
  gfx::Size buffer_size(10, 10);
  std::unique_ptr<Buffer> buffer(
      new Buffer(exo_test_helper()->CreateGpuMemoryBuffer(buffer_size)));
  surface->Attach(buffer.get());
  surface->Commit();
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  // Focusing the surface produces enter + modifier callbacks.
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface.get()))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface.get(), base::flat_map<ui::DomCode, KeyState>()));
  focus_client->FocusWindow(surface->window());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  keyboard.SetNeedKeyboardKeyAcks(true);
  // Press KEY_W with Ctrl. The delegate returns serial 1 for the event.
  EXPECT_CALL(*delegate_ptr, OnKeyboardModifiers(KeyboardModifiers{
                                 kControlMask | kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(*delegate_ptr, OnKeyboardKey(testing::_, ui::DomCode::US_W, true))
      .WillOnce(testing::Return(1));
  seat.set_physical_code_for_currently_processing_event_for_testing(
      ui::DomCode::US_W);
  generator.PressKey(ui::VKEY_W, ui::EF_CONTROL_DOWN);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  EXPECT_CALL(*delegate_ptr, OnKeyboardLeave(surface.get()));
  // Send ack as unhandled. This will call |AcceleratorPressed| and move the
  // focus.
  keyboard.AckKeyboardKey(1, false /* handled */);
  // Wait until |ProcessExpiredPendingKeyAcks| is fired.
  base::RunLoop run_loop;
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, run_loop.QuitClosure(), base::Milliseconds(1000));
  run_loop.Run();
  base::RunLoop().RunUntilIdle();
  // Verify before destroying keyboard to make sure the expected call
  // is made on the methods above, rather than in the destructor.
  // Fix: verify the mock object via |delegate_ptr|; the original passed
  // |&delegate| — the address of the moved-from unique_ptr — which is not
  // the mock and made this verification a no-op.
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
}
// Verifies that when a pre-target handler changes focus while a key event is
// in flight, the keyboard reports enter for the newly-focused surface but
// does not deliver OnKeyboardKey for the event targeted at the old window.
TEST_F(KeyboardTest, OnKeyboardKey_ChangeFocusInPreTargetHandler) {
  auto shell_surface = test::ShellSurfaceBuilder({10, 10}).BuildShellSurface();
  auto* surface = shell_surface->surface_for_testing();
  auto normal_window = CreateAppWindow(gfx::Rect(0, 0, 100, 100));
  // Handler that re-focuses the shell surface's window on key events.
  TestEventHandler handler{shell_surface->GetWidget()->GetNativeView()};
  aura::client::FocusClient* focus_client =
      aura::client::GetFocusClient(ash::Shell::GetPrimaryRootWindow());
  focus_client->FocusWindow(nullptr);
  auto delegate = std::make_unique<NiceMockKeyboardDelegate>();
  auto* delegate_ptr = delegate.get();
  NiceMockKeyboardObserver observer;
  Seat seat;
  Keyboard keyboard(std::move(delegate), &seat);
  keyboard.AddObserver(&observer);
  // Focus the non-exo window.
  focus_client->FocusWindow(normal_window.get());
  ui::test::EventGenerator generator(ash::Shell::GetPrimaryRootWindow());
  // Keyboard should not get a key event sent to the non-exo window.
  generator.PressKey(ui::VKEY_A, 0);
  generator.ReleaseKey(ui::VKEY_A, 0);
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  // Sending a key event causes a focus change.
  // It calls OnKeyboardEnter, but OnKeyboardKey should not be called because
  // the event's target is |normal_window|.
  wm_helper()->AddPreTargetHandler(&handler);
  EXPECT_CALL(*delegate_ptr, CanAcceptKeyboardEventsForSurface(surface))
      .WillOnce(testing::Return(true));
  EXPECT_CALL(*delegate_ptr,
              OnKeyboardModifiers(KeyboardModifiers{kNumLockMask, 0, 0, 0}));
  EXPECT_CALL(
      *delegate_ptr,
      OnKeyboardEnter(surface, base::flat_map<ui::DomCode, KeyState>()));
  generator.PressKey(ui::VKEY_A, 0);
  EXPECT_EQ(shell_surface->GetWidget()->GetNativeView(),
            focus_client->GetFocusedWindow());
  testing::Mock::VerifyAndClearExpectations(delegate_ptr);
  wm_helper()->RemovePreTargetHandler(&handler);
}
} // namespace
} // namespace exo
|
//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements hazard recognizers for scheduling on GCN processors.
//
//===----------------------------------------------------------------------===//
#include "GCNHazardRecognizer.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/TargetParser.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
// Hazard Recoginizer Implementation
//===----------------------------------------------------------------------===//
static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
const GCNSubtarget &ST);
// Constructs a hazard recognizer for \p MF. ClauseUses/ClauseDefs are sized
// by register units so soft-clause tracking covers sub-register overlap.
GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
  IsHazardRecognizerMode(false),
  CurrCycleInstr(nullptr),
  MF(MF),
  ST(MF.getSubtarget<GCNSubtarget>()),
  TII(*ST.getInstrInfo()),
  TRI(TII.getRegisterInfo()),
  ClauseUses(TRI.getNumRegUnits()),
  ClauseDefs(TRI.getNumRegUnits()) {
  // Functions that touch AGPRs (AGPR0 used as the sentinel) need a deeper
  // lookahead window than the default of 5.
  MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5;
  TSchedModel.init(&ST);
  RunLdsBranchVmemWARHazardFixup = shouldRunLdsBranchVmemWARHazardFixup(MF, ST);
}
// Forget all previously emitted instructions (e.g. at region boundaries).
void GCNHazardRecognizer::Reset() {
  EmittedInstrs.clear();
}
// Scheduler entry point: forward the SUnit's MachineInstr to the MI overload.
void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
  EmitInstruction(SU->getInstr());
}
// Record \p MI as the instruction issued this cycle; bookkeeping into
// EmittedInstrs happens later in AdvanceCycle()/processBundle().
void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
  CurrCycleInstr = MI;
}
// True for the f32/f64 V_DIV_FMAS opcodes.
static bool isDivFMas(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_DIV_FMAS_F32_e64:
  case AMDGPU::V_DIV_FMAS_F64_e64:
    return true;
  default:
    return false;
  }
}
// True only for S_GETREG_B32.
static bool isSGetReg(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_GETREG_B32:
    return true;
  default:
    return false;
  }
}
// True for every S_SETREG variant (register or immediate source, with or
// without the _mode suffix).
static bool isSSetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_SETREG_B32 ||
         Opcode == AMDGPU::S_SETREG_B32_mode ||
         Opcode == AMDGPU::S_SETREG_IMM32_B32 ||
         Opcode == AMDGPU::S_SETREG_IMM32_B32_mode;
}
// True for the lane read/write opcodes V_READLANE_B32 / V_WRITELANE_B32.
static bool isRWLane(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_READLANE_B32:
  case AMDGPU::V_WRITELANE_B32:
    return true;
  default:
    return false;
  }
}
// True only for S_RFE_B64 (return from exception).
static bool isRFE(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_RFE_B64:
    return true;
  default:
    return false;
  }
}
// True for the scalar relative-move opcodes (S_MOVRELS/S_MOVRELD, 32/64-bit),
// all of which read M0.
static bool isSMovRel(unsigned Opcode) {
  return Opcode == AMDGPU::S_MOVRELS_B32 ||
         Opcode == AMDGPU::S_MOVRELS_B64 ||
         Opcode == AMDGPU::S_MOVRELD_B32 ||
         Opcode == AMDGPU::S_MOVRELD_B64;
}
// True for the double-precision MFMA (DGEMM) opcodes, in both the plain and
// vgprcd encodings.
static bool isDGEMM(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
  case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
  case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
  case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
    return true;
  default:
    return false;
  }
}
// An instruction counts as XDL when it is an MAI instruction that is neither
// a DGEMM nor one of the plain ACCVGPR read/write moves.
static bool isXDL(const GCNSubtarget &ST, const MachineInstr &MI) {
  if (!SIInstrInfo::isMAI(MI))
    return false;
  const unsigned Opc = MI.getOpcode();
  if (isDGEMM(Opc))
    return false;
  return Opc != AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
         Opc != AMDGPU::V_ACCVGPR_READ_B32_e64;
}
// True for instructions that read M0 for message/trace/GDS purposes:
// always-GDS opcodes, SENDMSG/TTRACEDATA, and DS ops with the gds bit set.
static bool isSendMsgTraceDataOrGDS(const SIInstrInfo &TII,
                                    const MachineInstr &MI) {
  if (TII.isAlwaysGDS(MI.getOpcode()))
    return true;
  switch (MI.getOpcode()) {
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT:
  case AMDGPU::S_TTRACEDATA:
    return true;
  // These DS opcodes don't support GDS.
  case AMDGPU::DS_NOP:
  case AMDGPU::DS_PERMUTE_B32:
  case AMDGPU::DS_BPERMUTE_B32:
    return false;
  default:
    if (TII.isDS(MI.getOpcode())) {
      // For other DS instructions, consult the gds operand directly.
      int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::gds);
      if (MI.getOperand(GDS).getImm())
        return true;
    }
    return false;
  }
}
// True for the V_PERMLANE16 / V_PERMLANEX16 lane-permute opcodes.
static bool isPermlane(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::V_PERMLANE16_B32_e64:
  case AMDGPU::V_PERMLANEX16_B32_e64:
    return true;
  default:
    return false;
  }
}
// Extracts the hardware register ID from a GETREG/SETREG instruction by
// masking the simm16 operand with the hwreg ID field mask.
static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
  const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
                                                     AMDGPU::OpName::simm16);
  return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
}
// Scheduler query: classify the hazard (if any) for issuing SU now. The
// check order mirrors PreEmitNoopsCommon; each check scans recently emitted
// instructions and returns a positive wait-state count when a hazard exists.
ScheduleHazardRecognizer::HazardType
GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
  MachineInstr *MI = SU->getInstr();
  // If we are not in "HazardRecognizerMode" and therefore not being run from
  // the scheduler, track possible stalls from hazards but don't insert noops.
  auto HazardType = IsHazardRecognizerMode ? NoopHazard : Hazard;
  if (MI->isBundle())
   return NoHazard;
  if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
    return HazardType;
  if (ST.hasNSAtoVMEMBug() && checkNSAtoVMEMHazard(MI) > 0)
    return HazardType;
  if (checkFPAtomicToDenormModeHazard(MI) > 0)
    return HazardType;
  // Subtargets without data-dependency hazards only need the checks above.
  if (ST.hasNoDataDepHazard())
    return NoHazard;
  // FIXME: Should flat be considered vmem?
  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI))
      && checkVMEMHazards(MI) > 0)
    return HazardType;
  if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
    return HazardType;
  if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
    return HazardType;
  if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
    return HazardType;
  if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
    return HazardType;
  if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
       SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
    return HazardType;
  if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
    return HazardType;
  if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
    return HazardType;
  if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
    return HazardType;
  // M0-reading hazards (VINTRP/MOVREL and SENDMSG/trace/GDS variants).
  if (ST.hasReadM0MovRelInterpHazard() &&
      (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode())) &&
      checkReadM0Hazards(MI) > 0)
    return HazardType;
  if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI) &&
      checkReadM0Hazards(MI) > 0)
    return HazardType;
  if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
    return HazardType;
  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) ||
       SIInstrInfo::isDS(*MI)) && checkMAILdStHazards(MI) > 0)
    return HazardType;
  if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
    return HazardType;
  return NoHazard;
}
// Emit S_NOP instructions before \p MI totalling \p Quantity wait states.
// Each S_NOP encodes 1..8 wait states (immediate is count - 1), so the total
// is emitted in chunks of at most 8.
static void insertNoopsInBundle(MachineInstr *MI, const SIInstrInfo &TII,
                                unsigned Quantity) {
  for (unsigned Remaining = Quantity; Remaining != 0;) {
    const unsigned Chunk = std::min(Remaining, 8u);
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP))
        .addImm(Chunk - 1);
    Remaining -= Chunk;
  }
}
// Walk the instructions bundled under CurrCycleInstr, checking each for
// hazards and (in hazard-recognizer mode) fixing them / inserting noops,
// while keeping EmittedInstrs up to date.
void GCNHazardRecognizer::processBundle() {
  MachineBasicBlock::instr_iterator MI = std::next(CurrCycleInstr->getIterator());
  MachineBasicBlock::instr_iterator E = CurrCycleInstr->getParent()->instr_end();
  // Check bundled MachineInstr's for hazards.
  for (; MI != E && MI->isInsideBundle(); ++MI) {
    CurrCycleInstr = &*MI;
    unsigned WaitStates = PreEmitNoopsCommon(CurrCycleInstr);
    if (IsHazardRecognizerMode) {
      fixHazards(CurrCycleInstr);
      insertNoopsInBundle(CurrCycleInstr, TII, WaitStates);
    }
    // It’s unnecessary to track more than MaxLookAhead instructions. Since we
    // include the bundled MI directly after, only add a maximum of
    // (MaxLookAhead - 1) noops to EmittedInstrs.
    for (unsigned i = 0, e = std::min(WaitStates, MaxLookAhead - 1); i < e; ++i)
      EmittedInstrs.push_front(nullptr);
    EmittedInstrs.push_front(CurrCycleInstr);
    // Cap the history at the lookahead window.
    EmittedInstrs.resize(MaxLookAhead);
  }
  CurrCycleInstr = nullptr;
}
// Hazard-recognizer-mode entry point: compute the wait states needed before
// \p MI, apply any hazard fixups, and return the wait-state count.
unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
  IsHazardRecognizerMode = true;
  CurrCycleInstr = MI;
  const unsigned WaitStates = PreEmitNoopsCommon(MI);
  fixHazards(MI);
  CurrCycleInstr = nullptr;
  return WaitStates;
}
// Returns the number of wait states that must precede \p MI. Mirrors the
// check order in getHazardType(); checks that are mutually exclusive by
// instruction class return immediately, the rest accumulate via std::max.
unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
  if (MI->isBundle())
    return 0;
  int WaitStates = 0;
  if (SIInstrInfo::isSMRD(*MI))
    return std::max(WaitStates, checkSMRDHazards(MI));
  if (ST.hasNSAtoVMEMBug())
    WaitStates = std::max(WaitStates, checkNSAtoVMEMHazard(MI));
  WaitStates = std::max(WaitStates, checkFPAtomicToDenormModeHazard(MI));
  // Subtargets without data-dependency hazards only need the checks above.
  if (ST.hasNoDataDepHazard())
    return WaitStates;
  if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
    WaitStates = std::max(WaitStates, checkVMEMHazards(MI));
  if (SIInstrInfo::isVALU(*MI))
    WaitStates = std::max(WaitStates, checkVALUHazards(MI));
  if (SIInstrInfo::isDPP(*MI))
    WaitStates = std::max(WaitStates, checkDPPHazards(MI));
  if (isDivFMas(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));
  if (isRWLane(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));
  // Fix: the original evaluated checkMAIVALUHazards(MI) twice (once in the
  // guard, once in the max). A non-positive result leaves WaitStates
  // unchanged under std::max, so a single unconditional call is equivalent
  // and avoids a redundant backward hazard scan.
  if (SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
      SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
      SIInstrInfo::isEXP(*MI))
    WaitStates = std::max(WaitStates, checkMAIVALUHazards(MI));
  if (MI->isInlineAsm())
    return std::max(WaitStates, checkInlineAsmHazards(MI));
  if (isSGetReg(MI->getOpcode()))
    return std::max(WaitStates, checkGetRegHazards(MI));
  if (isSSetReg(MI->getOpcode()))
    return std::max(WaitStates, checkSetRegHazards(MI));
  if (isRFE(MI->getOpcode()))
    return std::max(WaitStates, checkRFEHazards(MI));
  // M0-reading hazards (VINTRP/MOVREL and SENDMSG/trace/GDS variants).
  if (ST.hasReadM0MovRelInterpHazard() && (TII.isVINTRP(*MI) ||
                                           isSMovRel(MI->getOpcode())))
    return std::max(WaitStates, checkReadM0Hazards(MI));
  if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI))
    return std::max(WaitStates, checkReadM0Hazards(MI));
  if (SIInstrInfo::isMAI(*MI))
    return std::max(WaitStates, checkMAIHazards(MI));
  if (SIInstrInfo::isVMEM(*MI) ||
      SIInstrInfo::isFLAT(*MI) ||
      SIInstrInfo::isDS(*MI))
    return std::max(WaitStates, checkMAILdStHazards(MI));
  return WaitStates;
}
// Record one idle cycle (no instruction issued) in the history.
void GCNHazardRecognizer::EmitNoop() {
  EmittedInstrs.push_front(nullptr);
}
// Advance the simulated cycle: push the instruction issued this cycle (plus
// nullptr padding for its extra wait states) onto EmittedInstrs, bounded by
// the lookahead window.
void GCNHazardRecognizer::AdvanceCycle() {
  // When the scheduler detects a stall, it will call AdvanceCycle() without
  // emitting any instructions.
  if (!CurrCycleInstr) {
    EmittedInstrs.push_front(nullptr);
    return;
  }
  // Bundles are expanded member-by-member.
  if (CurrCycleInstr->isBundle()) {
    processBundle();
    return;
  }
  unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
  if (!NumWaitStates) {
    CurrCycleInstr = nullptr;
    return;
  }
  // Keep track of emitted instructions
  EmittedInstrs.push_front(CurrCycleInstr);
  // Add a nullptr for each additional wait state after the first. Make sure
  // not to add more than getMaxLookAhead() items to the list, since we
  // truncate the list to that size right after this loop.
  for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
       i < e; ++i) {
    EmittedInstrs.push_front(nullptr);
  }
  // getMaxLookahead() is the largest number of wait states we will ever need
  // to insert, so there is no point in keeping track of more than that many
  // wait states.
  EmittedInstrs.resize(getMaxLookAhead());
  CurrCycleInstr = nullptr;
}
// Bottom-up scheduling is unsupported by this recognizer.
void GCNHazardRecognizer::RecedeCycle() {
  llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
}
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
typedef function_ref<bool(const MachineInstr &, int WaitStates)> IsExpiredFn;
// Returns a minimum wait states since \p I walking all predecessors.
// Only scans until \p IsExpired does not return true.
// Can only be run in a hazard recognizer mode.
// Returns a minimum wait states since \p I walking all predecessors.
// Only scans until \p IsExpired does not return true.
// Can only be run in a hazard recognizer mode.
static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
                              const MachineBasicBlock *MBB,
                              MachineBasicBlock::const_reverse_instr_iterator I,
                              int WaitStates, IsExpiredFn IsExpired,
                              DenseSet<const MachineBasicBlock *> &Visited) {
  // Walk this block backwards from I.
  for (auto E = MBB->instr_rend(); I != E; ++I) {
    // Don't add WaitStates for parent BUNDLE instructions.
    if (I->isBundle())
      continue;
    if (IsHazard(*I))
      return WaitStates;
    // Inline asm contributes no wait states.
    if (I->isInlineAsm())
      continue;
    WaitStates += SIInstrInfo::getNumWaitStates(*I);
    // Once expired, no hazard on this path; report "infinitely far away".
    if (IsExpired(*I, WaitStates))
      return std::numeric_limits<int>::max();
  }
  // Block exhausted: recurse into unvisited predecessors and take the
  // minimum distance over all incoming paths (conservative for hazards).
  int MinWaitStates = std::numeric_limits<int>::max();
  for (MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!Visited.insert(Pred).second)
      continue;
    int W = getWaitStatesSince(IsHazard, Pred, Pred->instr_rbegin(),
                               WaitStates, IsExpired, Visited);
    MinWaitStates = std::min(MinWaitStates, W);
  }
  return MinWaitStates;
}
// Convenience overload: start the backward CFG walk just before \p MI with a
// fresh visited-set and zero accumulated wait states.
static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
                              const MachineInstr *MI, IsExpiredFn IsExpired) {
  DenseSet<const MachineBasicBlock *> SeenBlocks;
  auto Start = std::next(MI->getReverseIterator());
  return getWaitStatesSince(IsHazard, MI->getParent(), Start,
                            /*WaitStates=*/0, IsExpired, SeenBlocks);
}
// Distance (in wait states, capped at Limit) back to the nearest instruction
// matching IsHazard, or INT_MAX if none within range. In hazard-recognizer
// mode this walks the CFG; otherwise it scans the EmittedInstrs history.
int GCNHazardRecognizer::getWaitStatesSince(IsHazardFn IsHazard, int Limit) {
  if (IsHazardRecognizerMode) {
    auto IsExpiredFn = [Limit](const MachineInstr &, int WaitStates) {
      return WaitStates >= Limit;
    };
    return ::getWaitStatesSince(IsHazard, CurrCycleInstr, IsExpiredFn);
  }
  int WaitStates = 0;
  for (MachineInstr *MI : EmittedInstrs) {
    // nullptr entries represent idle cycles and still count one wait state.
    if (MI) {
      if (IsHazard(*MI))
        return WaitStates;
      // Inline asm contributes no wait states.
      if (MI->isInlineAsm())
        continue;
    }
    ++WaitStates;
    if (WaitStates >= Limit)
      break;
  }
  return std::numeric_limits<int>::max();
}
int GCNHazardRecognizer::getWaitStatesSinceDef(unsigned Reg,
IsHazardFn IsHazardDef,
int Limit) {
const SIRegisterInfo *TRI = ST.getRegisterInfo();
auto IsHazardFn = [IsHazardDef, TRI, Reg](const MachineInstr &MI) {
return IsHazardDef(MI) && MI.modifiesRegister(Reg, TRI);
};
return getWaitStatesSince(IsHazardFn, Limit);
}
// Distance back to the nearest S_SETREG that also satisfies the caller's
// predicate (capped at \p Limit).
int GCNHazardRecognizer::getWaitStatesSinceSetReg(IsHazardFn IsHazard,
                                                  int Limit) {
  auto IsSetRegHazard = [IsHazard](const MachineInstr &MI) {
    return isSSetReg(MI.getOpcode()) && IsHazard(MI);
  };
  return getWaitStatesSince(IsSetRegHazard, Limit);
}
//===----------------------------------------------------------------------===//
// No-op Hazard Detection
//===----------------------------------------------------------------------===//
// Set the bit for every register unit of \p Reg in \p BV, so overlapping
// registers (sub/super) are tracked uniformly.
static void addRegUnits(const SIRegisterInfo &TRI, BitVector &BV,
                        MCRegister Reg) {
  for (MCRegUnitIterator RUI(Reg, &TRI); RUI.isValid(); ++RUI)
    BV.set(*RUI);
}
// Add the register units of every register operand in \p Ops to \p Set;
// non-register operands are skipped.
static void addRegsToSet(const SIRegisterInfo &TRI,
                         iterator_range<MachineInstr::const_mop_iterator> Ops,
                         BitVector &Set) {
  for (const MachineOperand &MO : Ops)
    if (MO.isReg())
      addRegUnits(TRI, Set, MO.getReg().asMCReg());
}
// Accumulate MI's defs and uses into the current soft-clause tracking sets.
void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
  // XXX: Do we need to worry about implicit operands
  addRegsToSet(TRI, MI.defs(), ClauseDefs);
  addRegsToSet(TRI, MI.uses(), ClauseUses);
}
// Any non-SMRD instruction terminates an SMEM soft clause.
static bool breaksSMEMSoftClause(MachineInstr *MI) {
  return !SIInstrInfo::isSMRD(*MI);
}
// Any instruction that is neither VMEM nor FLAT terminates a VMEM soft clause.
static bool breaksVMEMSoftClause(MachineInstr *MI) {
  return !SIInstrInfo::isVMEM(*MI) && !SIInstrInfo::isFLAT(*MI);
}
// Returns 1 if adding \p MEM to the current soft clause would create a
// def/use conflict within the clause (requiring a clause break), 0 otherwise.
int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
  // SMEM soft clause are only present on VI+, and only matter if xnack is
  // enabled.
  if (!ST.isXNACKEnabled())
    return 0;
  bool IsSMRD = TII.isSMRD(*MEM);
  resetClause();
  // A soft-clause is any group of consecutive SMEM instructions. The
  // instructions in this group may return out of order and/or may be
  // replayed (i.e. the same instruction issued more than once).
  //
  // In order to handle these situations correctly we need to make sure that
  // when a clause has more than one instruction, no instruction in the clause
  // writes to a register that is read by another instruction in the clause
  // (including itself). If we encounter this situaion, we need to break the
  // clause by inserting a non SMEM instruction.
  for (MachineInstr *MI : EmittedInstrs) {
    // When we hit a non-SMEM instruction then we have passed the start of the
    // clause and we can stop.
    if (!MI)
      break;
    if (IsSMRD ? breaksSMEMSoftClause(MI) : breaksVMEMSoftClause(MI))
      break;
    addClauseInst(*MI);
  }
  // No defs so far means no possible conflict with MEM's uses.
  if (ClauseDefs.none())
    return 0;
  // We need to make sure not to put loads and stores in the same clause if they
  // use the same address. For now, just start a new clause whenever we see a
  // store.
  if (MEM->mayStore())
    return 1;
  addClauseInst(*MEM);
  // If the set of defs and uses intersect then we cannot add this instruction
  // to the clause, so we have a hazard.
  return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
}
// Wait states required before \p SMRD: soft-clause breaks plus (on SI) the
// SGPR-written-by-VALU hazard, and the undocumented buffer-SMRD/SALU case.
int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
  int WaitStatesNeeded = 0;
  WaitStatesNeeded = checkSoftClauseHazards(SMRD);
  // This SMRD hazard only affects SI.
  if (!ST.hasSMRDReadVALUDefHazard())
    return WaitStatesNeeded;
  // A read of an SGPR by SMRD instruction requires 4 wait states when the
  // SGPR was written by a VALU instruction.
  int SmrdSgprWaitStates = 4;
  auto IsHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isVALU(MI);
  };
  auto IsBufferHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isSALU(MI);
  };
  bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);
  for (const MachineOperand &Use : SMRD->uses()) {
    if (!Use.isReg())
      continue;
    // Needed = budget minus the distance back to the hazardous def.
    int WaitStatesNeededForUse =
        SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
                                                   SmrdSgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
    // This fixes what appears to be undocumented hardware behavior in SI where
    // s_mov writing a descriptor and s_buffer_load_dword reading the descriptor
    // needs some number of nops in between. We don't know how many we need, but
    // let's use 4. This wasn't discovered before probably because the only
    // case when this happens is when we expand a 64-bit pointer into a full
    // descriptor and use s_buffer_load_dword instead of s_load_dword, which was
    // probably never encountered in the closed-source land.
    if (IsBufferSMRD) {
      int WaitStatesNeededForUse =
        SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
                                                   IsBufferHazardDefFn,
                                                   SmrdSgprWaitStates);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
    }
  }
  return WaitStatesNeeded;
}
// Compute the wait states required before a VMEM instruction, covering
// soft-clause handling and the VMEM-reads-VALU-written-SGPR hazard.
int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
  if (!ST.hasVMEMReadSGPRVALUDefHazard())
    return 0;

  int Needed = checkSoftClauseHazards(VMEM);

  // A VMEM read of an SGPR written by a VALU instruction requires 5 wait
  // states.
  const int VmemSgprWaitStates = 5;
  auto DefIsVALU = [this](const MachineInstr &I) { return TII.isVALU(I); };

  for (const MachineOperand &MO : VMEM->uses()) {
    // Vector registers are not subject to this hazard.
    if (!MO.isReg() || TRI.isVectorRegister(MF.getRegInfo(), MO.getReg()))
      continue;

    Needed = std::max(Needed,
                      VmemSgprWaitStates -
                          getWaitStatesSinceDef(MO.getReg(), DefIsVALU,
                                                VmemSgprWaitStates));
  }
  return Needed;
}
// Compute wait states for a DPP instruction: a VGPR read after any write of
// that VGPR needs 2 states; an EXEC write by a VALU needs 5.
int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  const int DppVgprWaitStates = 2;
  const int DppExecWaitStates = 5;
  int Needed = 0;
  auto ExecDefIsVALU = [TII](const MachineInstr &I) { return TII->isVALU(I); };

  for (const MachineOperand &MO : DPP->uses()) {
    if (!MO.isReg() || !TRI->isVGPR(MF.getRegInfo(), MO.getReg()))
      continue;
    // Any prior writer of the VGPR counts for this hazard.
    Needed = std::max(
        Needed,
        DppVgprWaitStates -
            getWaitStatesSinceDef(MO.getReg(),
                                  [](const MachineInstr &) { return true; },
                                  DppVgprWaitStates));
  }

  Needed = std::max(Needed,
                    DppExecWaitStates -
                        getWaitStatesSinceDef(AMDGPU::EXEC, ExecDefIsVALU,
                                              DppExecWaitStates));
  return Needed;
}
// v_div_fmas requires 4 wait states after a write to VCC from a VALU
// instruction.
int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
  const SIInstrInfo *TII = ST.getInstrInfo();

  const int DivFMasWaitStates = 4;
  auto VccDefIsVALU = [TII](const MachineInstr &I) { return TII->isVALU(I); };
  return DivFMasWaitStates - getWaitStatesSinceDef(AMDGPU::VCC, VccDefIsVALU,
                                                   DivFMasWaitStates);
}
// s_getreg needs 2 wait states after an s_setreg to the same HW register.
int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const unsigned HWReg = getHWReg(TII, *GetRegInstr);

  const int GetRegWaitStates = 2;
  auto SameHWReg = [TII, HWReg](const MachineInstr &I) {
    return HWReg == getHWReg(TII, I);
  };
  return GetRegWaitStates -
         getWaitStatesSinceSetReg(SameHWReg, GetRegWaitStates);
}
// Back-to-back s_setreg to the same HW register needs a subtarget-specific
// number of wait states.
int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const unsigned HWReg = getHWReg(TII, *SetRegInstr);

  const int SetRegWaitStates = ST.getSetRegWaitStates();
  auto SameHWReg = [TII, HWReg](const MachineInstr &I) {
    return HWReg == getHWReg(TII, I);
  };
  return SetRegWaitStates -
         getWaitStatesSinceSetReg(SameHWReg, SetRegWaitStates);
}
// Return the operand index of MI's store-data operand if this store can
// participate in the "wide VMEM store data overwritten by the next
// instruction" hazard, or -1 if MI cannot create that hazard.
int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
  // Only stores can expose their data operand to the hazard.
  if (!MI.mayStore())
    return -1;

  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
  int VDataRCID = -1;
  if (VDataIdx != -1)
    VDataRCID = Desc.OpInfo[VDataIdx].RegClass;

  if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
    // There is no hazard if the instruction does not use vector regs
    // (like wbinvl1)
    if (VDataIdx == -1)
      return -1;
    // For MUBUF/MTBUF instructions this hazard only exists if the
    // instruction is not using a register in the soffset field.
    const MachineOperand *SOffset =
        TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
    // If we have no soffset operand, then assume this field has been
    // hardcoded to zero.
    if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
        (!SOffset || !SOffset->isReg()))
      return VDataIdx;
  }

  // MIMG instructions create a hazard if they don't use a 256-bit T# and
  // the store size is greater than 8 bytes and they have more than two bits
  // of their dmask set.
  // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
  if (TII->isMIMG(MI)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
    assert(SRsrcIdx != -1 &&
           AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
    (void)SRsrcIdx;
  }

  // FLAT stores wider than 64 bits expose their data operand to the hazard.
  if (TII->isFLAT(MI)) {
    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
    if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
      return DataIdx;
  }

  return -1;
}
// Shared helper for checkVALUHazards()/checkInlineAsmHazards(): given a def
// operand of the candidate instruction, compute the wait states needed so a
// preceding wide VMEM store does not have its store data overwritten.
int
GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
                                            const MachineRegisterInfo &MRI) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const int VALUWaitStates = 1;

  // Only vector-register defs can clobber VMEM store data.
  if (!TRI->isVectorRegister(MRI, Def.getReg()))
    return 0;

  Register Reg = Def.getReg();
  auto OverlapsStoreData = [this, Reg, TRI](const MachineInstr &I) {
    int DataIdx = createsVALUHazard(I);
    return DataIdx >= 0 &&
           TRI->regsOverlap(I.getOperand(DataIdx).getReg(), Reg);
  };

  return std::max(0, VALUWaitStates - getWaitStatesSince(OverlapsStoreData,
                                                         VALUWaitStates));
}
// VMEM instructions that store more than 8 bytes can have their store data
// overwritten by the next instruction; check each def of this VALU.
int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int Needed = 0;
  for (const MachineOperand &Def : VALU->defs())
    Needed = std::max(Needed, checkVALUHazardsHelper(Def, MRI));
  return Needed;
}
// Check hazards associated with inline asm statements. Inline asm can
// contain just about anything, so this leverages the other check*Hazard
// routines for the register defs it produces. It does not attempt to cover
// every possible inline-asm hazard (good luck), only what has been
// problematic so far. See checkVALUHazards().
int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int Needed = 0;

  for (unsigned Idx = InlineAsm::MIOp_FirstOperand,
                End = IA->getNumOperands();
       Idx != End; ++Idx) {
    const MachineOperand &MO = IA->getOperand(Idx);
    if (MO.isReg() && MO.isDef())
      Needed = std::max(Needed, checkVALUHazardsHelper(MO, MRI));
  }
  return Needed;
}
// v_readlane/v_writelane with an SGPR lane-select need 4 wait states after
// a VALU write of that SGPR.
int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Only a lane-select held in an SGPR creates the hazard.
  const MachineOperand *LaneSelect =
      TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);
  if (!LaneSelect->isReg() || !TRI->isSGPRReg(MRI, LaneSelect->getReg()))
    return 0;

  const int RWLaneWaitStates = 4;
  auto DefIsVALU = [TII](const MachineInstr &I) { return TII->isVALU(I); };
  return RWLaneWaitStates -
         getWaitStatesSinceDef(LaneSelect->getReg(), DefIsVALU,
                               RWLaneWaitStates);
}
// s_rfe needs one wait state after an s_setreg to TRAPSTS on affected
// subtargets.
int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
  if (!ST.hasRFEHazards())
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();

  const int RFEWaitStates = 1;
  auto SetsTrapSts = [TII](const MachineInstr &I) {
    return getHWReg(TII, I) == AMDGPU::Hwreg::ID_TRAPSTS;
  };
  return RFEWaitStates - getWaitStatesSinceSetReg(SetsTrapSts, RFEWaitStates);
}
// Reading M0 requires one wait state after it was written by a SALU
// instruction.
int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const int SMovRelWaitStates = 1;
  auto DefIsSALU = [TII](const MachineInstr &I) { return TII->isSALU(I); };
  return SMovRelWaitStates -
         getWaitStatesSinceDef(AMDGPU::M0, DefIsSALU, SMovRelWaitStates);
}
// Try to insert workaround instructions for every hazard created by MI.
// Each fixer checks its own applicability and inserts its own mitigation;
// all are invoked unconditionally.
void GCNHazardRecognizer::fixHazards(MachineInstr *MI) {
  fixVMEMtoScalarWriteHazards(MI);
  fixVcmpxPermlaneHazards(MI);
  fixSMEMtoVectorWriteHazards(MI);
  fixVcmpxExecWARHazard(MI);
  fixLdsBranchVmemWARHazard(MI);
}
// Mitigate the VCMPX-permlane hazard: a v_permlane* preceded by a VOPC
// instruction (with no intervening non-nop VALU) needs a VALU inserted
// before it. Returns true if a workaround instruction was inserted.
bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) {
  if (!ST.hasVcmpxPermlaneHazard() || !isPermlane(*MI))
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  // The hazard source is any prior VOPC instruction.
  auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isVOPC(MI); };

  // Any VALU other than a V_NOP breaks the hazard window.
  auto IsExpiredFn = [](const MachineInstr &MI, int) {
    unsigned Opc = MI.getOpcode();
    return SIInstrInfo::isVALU(MI) && Opc != AMDGPU::V_NOP_e32 &&
           Opc != AMDGPU::V_NOP_e64 && Opc != AMDGPU::V_NOP_sdwa;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  // V_NOP will be discarded by SQ.
  // Use V_MOV_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
  // which is always a VGPR and available.
  auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
  Register Reg = Src0->getReg();
  bool IsUndef = Src0->isUndef();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::V_MOV_B32_e32))
      .addReg(Reg, RegState::Define | (IsUndef ? RegState::Dead : 0))
      .addReg(Reg, IsUndef ? RegState::Undef : RegState::Kill);

  return true;
}
// Mitigate the VMEM-to-scalar-write hazard: if a preceding VMEM/DS/FLAT
// instruction still reads a register that this SALU/SMRD instruction
// writes, insert an s_waitcnt_depctr to force the read to complete first.
// Returns true if a workaround instruction was inserted.
bool GCNHazardRecognizer::fixVMEMtoScalarWriteHazards(MachineInstr *MI) {
  if (!ST.hasVMEMtoScalarWriteHazard())
    return false;

  if (!SIInstrInfo::isSALU(*MI) && !SIInstrInfo::isSMRD(*MI))
    return false;

  if (MI->getNumDefs() == 0)
    return false;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  // The hazard source is a VMEM/DS/FLAT instruction that uses any register
  // this instruction defines.
  auto IsHazardFn = [TRI, MI](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isDS(I) &&
        !SIInstrInfo::isFLAT(I))
      return false;

    for (const MachineOperand &Def : MI->defs()) {
      const MachineOperand *Op =
          I.findRegisterUseOperand(Def.getReg(), false, TRI);
      if (!Op)
        continue;
      return true;
    }
    return false;
  };

  // The hazard expires at any VALU, a zero s_waitcnt, or an
  // s_waitcnt_depctr carrying the mitigating 0xffe3 mask.
  auto IsExpiredFn = [](const MachineInstr &MI, int) {
    return SIInstrInfo::isVALU(MI) ||
           (MI.getOpcode() == AMDGPU::S_WAITCNT &&
            !MI.getOperand(0).getImm()) ||
           (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
            MI.getOperand(0).getImm() == 0xffe3);
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(0xffe3);
  return true;
}
// Mitigate the SMEM-to-vector-write hazard: a VALU writing an SGPR that a
// preceding SMRD still reads needs a mitigating SALU in between; if none is
// found, insert "s_mov_b32 null, 0". Returns true if a workaround
// instruction was inserted.
bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
  if (!ST.hasSMEMtoVectorWriteHazard())
    return false;

  if (!SIInstrInfo::isVALU(*MI))
    return false;

  // v_readlane/v_readfirstlane carry their scalar result in vdst; everything
  // else uses sdst.
  unsigned SDSTName;
  switch (MI->getOpcode()) {
  case AMDGPU::V_READLANE_B32:
  case AMDGPU::V_READFIRSTLANE_B32:
    SDSTName = AMDGPU::OpName::vdst;
    break;
  default:
    SDSTName = AMDGPU::OpName::sdst;
    break;
  }

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
  const MachineOperand *SDST = TII->getNamedOperand(*MI, SDSTName);
  if (!SDST) {
    // No named SGPR dst; fall back to the first implicit SGPR def.
    for (const auto &MO : MI->implicit_operands()) {
      if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegClass(MO.getReg()))) {
        SDST = &MO;
        break;
      }
    }
  }

  if (!SDST)
    return false;

  const Register SDSTReg = SDST->getReg();
  // The hazard source is a prior SMRD that reads the SGPR we write.
  auto IsHazardFn = [SDSTReg, TRI](const MachineInstr &I) {
    return SIInstrInfo::isSMRD(I) && I.readsRegister(SDSTReg, TRI);
  };

  auto IsExpiredFn = [TII, IV](const MachineInstr &MI, int) {
    if (TII->isSALU(MI)) {
      switch (MI.getOpcode()) {
      case AMDGPU::S_SETVSKIP:
      case AMDGPU::S_VERSION:
      case AMDGPU::S_WAITCNT_VSCNT:
      case AMDGPU::S_WAITCNT_VMCNT:
      case AMDGPU::S_WAITCNT_EXPCNT:
        // These instructions cannot mitigate the hazard.
        return false;
      case AMDGPU::S_WAITCNT_LGKMCNT:
        // Reducing lgkmcnt count to 0 always mitigates the hazard.
        return (MI.getOperand(1).getImm() == 0) &&
               (MI.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
      case AMDGPU::S_WAITCNT: {
        const int64_t Imm = MI.getOperand(0).getImm();
        AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm);
        return (Decoded.LgkmCnt == 0);
      }
      default:
        // SOPP instructions cannot mitigate the hazard.
        if (TII->isSOPP(MI))
          return false;
        // At this point the SALU can be assumed to mitigate the hazard
        // because either:
        // (a) it is independent of the at risk SMEM (breaking chain),
        // or
        // (b) it is dependent on the SMEM, in which case an appropriate
        //     s_waitcnt lgkmcnt _must_ exist between it and the at risk
        //     SMEM instruction.
        return true;
      }
    }
    return false;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_MOV_B32), AMDGPU::SGPR_NULL)
      .addImm(0);
  return true;
}
// Mitigate the VCMPX exec WAR hazard: a VALU that writes EXEC after a
// non-VALU read of EXEC needs an s_waitcnt_depctr (mask 0xfffe) unless an
// intervening VALU SGPR write already resolves it. Returns true if a
// workaround instruction was inserted.
bool GCNHazardRecognizer::fixVcmpxExecWARHazard(MachineInstr *MI) {
  if (!ST.hasVcmpxExecWARHazard() || !SIInstrInfo::isVALU(*MI))
    return false;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  if (!MI->modifiesRegister(AMDGPU::EXEC, TRI))
    return false;

  // The hazard source is a non-VALU reader of EXEC.
  auto IsHazardFn = [TRI](const MachineInstr &I) {
    if (SIInstrInfo::isVALU(I))
      return false;
    return I.readsRegister(AMDGPU::EXEC, TRI);
  };

  const SIInstrInfo *TII = ST.getInstrInfo();
  auto IsExpiredFn = [TII, TRI](const MachineInstr &MI, int) {
    if (SIInstrInfo::isVALU(MI)) {
      // A VALU writing an SGPR (explicit sdst or any implicit SGPR def)
      // resolves the hazard.
      if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst))
        return true;
      // Iterate by const reference: copying each MachineOperand in this
      // per-instruction scan is needless overhead.
      for (const MachineOperand &MO : MI.implicit_operands())
        if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegClass(MO.getReg())))
          return true;
    }
    // An existing s_waitcnt_depctr covering the sa_sdst bit also expires it.
    if (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
        (MI.getOperand(0).getImm() & 0xfffe) == 0xfffe)
      return true;
    return false;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(0xfffe);
  return true;
}
// Returns true when the LDS-branch-VMEM WAR hazard workaround must run:
// the subtarget has the hazard and the function contains both an LDS and a
// VMEM (or segment-specific FLAT) instruction.
static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
                                                 const GCNSubtarget &ST) {
  if (!ST.hasLdsBranchVmemWARHazard())
    return false;

  bool SawLds = false;
  bool SawVmem = false;
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (SIInstrInfo::isDS(MI))
        SawLds = true;
      if (SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI))
        SawVmem = true;
      // Stop scanning as soon as both kinds have been seen.
      if (SawLds && SawVmem)
        return true;
    }
  }
  return false;
}
// Mitigate the LDS-branch-VMEM WAR hazard: an LDS (resp. VMEM) access,
// followed by a branch, followed by an access of the other kind requires an
// "s_waitcnt_vscnt null, 0" before this instruction. Returns true if a
// workaround instruction was inserted.
bool GCNHazardRecognizer::fixLdsBranchVmemWARHazard(MachineInstr *MI) {
  if (!RunLdsBranchVmemWARHazardFixup)
    return false;

  assert(ST.hasLdsBranchVmemWARHazard());

  // Classify an instruction: 1 = LDS, 2 = VMEM/segment-FLAT, 0 = neither.
  auto IsHazardInst = [](const MachineInstr &MI) {
    if (SIInstrInfo::isDS(MI))
      return 1;
    if (SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI))
      return 2;
    return 0;
  };

  auto InstType = IsHazardInst(*MI);
  if (!InstType)
    return false;

  // The outer search expires at another LDS/VMEM instruction or at an
  // existing "s_waitcnt_vscnt null, 0" (the workaround itself).
  auto IsExpiredFn = [&IsHazardInst](const MachineInstr &I, int) {
    return IsHazardInst(I) || (I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
                               I.getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
                               !I.getOperand(1).getImm());
  };

  // The hazard is a branch behind which an instruction of the *other* kind
  // (LDS vs VMEM) appears without an intervening workaround; the inner
  // lambdas intentionally shadow the outer ones for that nested search.
  auto IsHazardFn = [InstType, &IsHazardInst](const MachineInstr &I) {
    if (!I.isBranch())
      return false;

    auto IsHazardFn = [InstType, IsHazardInst](const MachineInstr &I) {
      auto InstType2 = IsHazardInst(I);
      return InstType2 && InstType != InstType2;
    };

    auto IsExpiredFn = [InstType, &IsHazardInst](const MachineInstr &I, int) {
      auto InstType2 = IsHazardInst(I);
      if (InstType == InstType2)
        return true;
      return I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
             I.getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
             !I.getOperand(1).getImm();
    };

    return ::getWaitStatesSince(IsHazardFn, &I, IsExpiredFn) !=
           std::numeric_limits<int>::max();
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_VSCNT))
      .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
      .addImm(0);

  return true;
}
// A MUBUF/MTBUF with an offset whose bits 1-2 are set needs one wait state
// after a large (>= 16 byte) NSA-encoded MIMG instruction on subtargets
// with the NSA-to-VMEM bug.
int GCNHazardRecognizer::checkNSAtoVMEMHazard(MachineInstr *MI) {
  const int NSAtoVMEMWaitStates = 1;

  if (!ST.hasNSAtoVMEMBug())
    return 0;

  if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isMTBUF(*MI))
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();
  const auto *Offset = TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
  if (!Offset || (Offset->getImm() & 6) == 0)
    return 0;

  auto IsWideNSAMIMG = [TII](const MachineInstr &I) {
    if (!SIInstrInfo::isMIMG(I))
      return false;
    const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(I.getOpcode());
    return Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA &&
           TII->getInstSizeInBytes(I) >= 16;
  };

  return NSAtoVMEMWaitStates - getWaitStatesSince(IsWideNSAMIMG, 1);
}
// s_denorm_mode needs 3 wait states after an FP atomic VMEM/FLAT
// instruction unless a VALU or one of the wait instructions below
// intervenes.
int GCNHazardRecognizer::checkFPAtomicToDenormModeHazard(MachineInstr *MI) {
  const int FPAtomicToDenormModeWaitStates = 3;

  if (MI->getOpcode() != AMDGPU::S_DENORM_MODE)
    return 0;

  // The hazard source is any FP atomic memory instruction.
  auto IsHazardFn = [](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I))
      return false;
    return SIInstrInfo::isFPAtomic(I);
  };

  // Capture the named limit instead of duplicating it as a magic "3" so
  // the expiry threshold cannot drift apart from the returned wait count.
  auto IsExpiredFn = [FPAtomicToDenormModeWaitStates](const MachineInstr &MI,
                                                      int WaitStates) {
    if (WaitStates >= FPAtomicToDenormModeWaitStates ||
        SIInstrInfo::isVALU(MI))
      return true;

    switch (MI.getOpcode()) {
    case AMDGPU::S_WAITCNT:
    case AMDGPU::S_WAITCNT_VSCNT:
    case AMDGPU::S_WAITCNT_VMCNT:
    case AMDGPU::S_WAITCNT_EXPCNT:
    case AMDGPU::S_WAITCNT_LGKMCNT:
    case AMDGPU::S_WAIT_IDLE:
      return true;
    default:
      break;
    }
    return false;
  };

  return FPAtomicToDenormModeWaitStates -
         ::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn);
}
// Dispatch MAI hazard checking to the subtarget-appropriate implementation.
int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
  assert(SIInstrInfo::isMAI(*MI));
  if (ST.hasGFX90AInsts())
    return checkMAIHazards90A(MI);
  return checkMAIHazards908(MI);
}
// Check MFMA-related hazards on gfx908: VALU writes of EXEC/VGPRs before an
// MFMA, MFMA AGPR writes overlapping later AGPR reads, and v_accvgpr_read/
// write interactions. Returns the number of wait states needed.
int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
  int WaitStatesNeeded = 0;
  unsigned Opc = MI->getOpcode();

  auto IsVALUFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI);
  };

  if (Opc != AMDGPU::V_ACCVGPR_READ_B32_e64) { // MFMA or v_accvgpr_write
    const int LegacyVALUWritesVGPRWaitStates = 2;
    const int VALUWritesExecWaitStates = 4;
    const int MaxWaitStates = 4;

    // A VALU write of EXEC shortly before the MFMA needs wait states.
    int WaitStatesNeededForUse = VALUWritesExecWaitStates -
      getWaitStatesSinceDef(AMDGPU::EXEC, IsVALUFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded < MaxWaitStates) {
      // Likewise a legacy VALU write of any VGPR this instruction reads.
      for (const MachineOperand &Use : MI->explicit_uses()) {
        const int MaxWaitStates = 2;

        if (!Use.isReg() || !TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
          continue;

        int WaitStatesNeededForUse = LegacyVALUWritesVGPRWaitStates -
          getWaitStatesSinceDef(Use.getReg(), IsVALUFn, MaxWaitStates);
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

        if (WaitStatesNeeded == MaxWaitStates)
          break;
      }
    }
  }

  // True MFMA instructions, excluding the accvgpr read/write pseudos.
  auto IsMFMAFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isMAI(MI) &&
           MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
           MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64;
  };

  for (const MachineOperand &Op : MI->explicit_operands()) {
    if (!Op.isReg() || !TRI.isAGPR(MF.getRegInfo(), Op.getReg()))
      continue;

    if (Op.isDef() && Opc != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
      continue;

    const int MFMAWritesAGPROverlappedSrcABWaitStates = 4;
    const int MFMAWritesAGPROverlappedSrcCWaitStates = 2;
    const int MFMA4x4WritesAGPRAccVgprReadWaitStates = 4;
    const int MFMA16x16WritesAGPRAccVgprReadWaitStates = 10;
    const int MFMA32x32WritesAGPRAccVgprReadWaitStates = 18;
    const int MFMA4x4WritesAGPRAccVgprWriteWaitStates = 1;
    const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7;
    const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15;
    const int MaxWaitStates = 18;
    Register Reg = Op.getReg();
    unsigned HazardDefLatency = 0;

    // Match a prior MFMA whose dst overlaps (but is not identical to) Reg,
    // recording the largest latency seen among candidates.
    auto IsOverlappedMFMAFn = [Reg, &IsMFMAFn, &HazardDefLatency,
                               this](const MachineInstr &MI) {
      if (!IsMFMAFn(MI))
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      if (DstReg == Reg)
        return false;
      HazardDefLatency =
          std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
      return TRI.regsOverlap(DstReg, Reg);
    };

    int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn,
                                                   MaxWaitStates);
    int NeedWaitStates = MFMAWritesAGPROverlappedSrcABWaitStates;
    int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    int OpNo = MI->getOperandNo(&Op);
    if (OpNo == SrcCIdx) {
      NeedWaitStates = MFMAWritesAGPROverlappedSrcCWaitStates;
    } else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) {
      // Latency of the writer selects the required wait count (2 = 4x4,
      // 8 = 16x16, 16 = 32x32 MFMA).
      switch (HazardDefLatency) {
      case 2:  NeedWaitStates = MFMA4x4WritesAGPRAccVgprReadWaitStates;
        break;
      case 8:  NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
        break;
      case 16: LLVM_FALLTHROUGH;
      default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
        break;
      }
    } else if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
      switch (HazardDefLatency) {
      case 2:  NeedWaitStates = MFMA4x4WritesAGPRAccVgprWriteWaitStates;
        break;
      case 8:  NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
        break;
      case 16: LLVM_FALLTHROUGH;
      default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
        break;
      }
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.

    // Also account for a v_accvgpr_write whose dst overlaps this operand.
    auto IsAccVgprWriteFn = [Reg, this](const MachineInstr &MI) {
      if (MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      return TRI.regsOverlap(Reg, DstReg);
    };

    const int AccVGPRWriteMFMAReadSrcCWaitStates = 1;
    const int AccVGPRWriteMFMAReadSrcABWaitStates = 3;
    const int AccVGPRWriteAccVgprReadWaitStates = 3;
    NeedWaitStates = AccVGPRWriteMFMAReadSrcABWaitStates;
    if (OpNo == SrcCIdx)
      NeedWaitStates = AccVGPRWriteMFMAReadSrcCWaitStates;
    else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64)
      NeedWaitStates = AccVGPRWriteAccVgprReadWaitStates;

    WaitStatesNeededForUse = NeedWaitStates -
      getWaitStatesSinceDef(Reg, IsAccVgprWriteFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.
  }

  if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
    const int MFMA4x4ReadSrcCAccVgprWriteWaitStates = 0;
    const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5;
    const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13;
    const int MaxWaitStates = 13;
    Register DstReg = MI->getOperand(0).getReg();
    unsigned HazardDefLatency = 0;

    // Match a prior MFMA whose src2 (SrcC) overlaps our dst, recording the
    // largest latency seen among candidates.
    auto IsSrcCMFMAFn = [DstReg, &IsMFMAFn, &HazardDefLatency,
                         this](const MachineInstr &MI) {
      if (!IsMFMAFn(MI))
        return false;
      Register Reg = TII.getNamedOperand(MI, AMDGPU::OpName::src2)->getReg();
      HazardDefLatency =
          std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
      return TRI.regsOverlap(Reg, DstReg);
    };

    int WaitStatesSince = getWaitStatesSince(IsSrcCMFMAFn, MaxWaitStates);
    int NeedWaitStates;
    switch (HazardDefLatency) {
    case 2:  NeedWaitStates = MFMA4x4ReadSrcCAccVgprWriteWaitStates;
      break;
    case 8:  NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
      break;
    case 16: LLVM_FALLTHROUGH;
    default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
      break;
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSince;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}
// Check MFMA hazards on gfx90a: EXEC written by legacy VALU, and MFMA
// source operands overlapping the destination of an earlier DGEMM/XDL MFMA
// or legacy VALU write. Returns the number of wait states needed.
int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
  int WaitStatesNeeded = 0;
  unsigned Opc = MI->getOpcode();

  // True MFMA instructions, excluding the accvgpr read/write pseudos.
  auto IsMFMAFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isMAI(MI) &&
           MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
           MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64;
  };

  auto IsLegacyVALUFn = [&IsMFMAFn](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) && !IsMFMAFn(MI);
  };

  auto IsLegacyVALUNotDotFn = [&IsMFMAFn](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) && !IsMFMAFn(MI) && !SIInstrInfo::isDOT(MI);
  };

  if (!IsMFMAFn(*MI))
    return WaitStatesNeeded;

  // A legacy VALU write of EXEC shortly before the MFMA needs wait states.
  const int VALUWritesExecWaitStates = 4;
  int WaitStatesNeededForUse = VALUWritesExecWaitStates -
    getWaitStatesSinceDef(AMDGPU::EXEC, IsLegacyVALUFn,
                          VALUWritesExecWaitStates);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

  int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);

  // Loop for both DGEMM and S/HGEMM 2nd instruction.
  for (const MachineOperand &Use : MI->explicit_uses()) {
    const int LegacyVALUNotDotWritesVGPRWaitStates = 2;
    const int SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates = 2;
    const int SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates = 8;
    const int SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates = 16;
    const int SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates = 3;
    const int SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates = 9;
    const int SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates = 17;
    const int DMFMA16x16WritesVGPROverlappedSrcCWaitStates = 9;
    const int DMFMA4x4WritesVGPROverlappedSrcCWaitStates = 4;
    const int SMFMA4x4WritesVGPROverlappedSrcABWaitStates = 5;
    const int SMFMA16x16WritesVGPROverlappedSrcABWaitStates = 11;
    const int SMFMA32x32WritesVGPROverlappedSrcABWaitStates = 19;
    const int DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates = 6;
    const int DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates = 11;
    const int DMFMA4x4WritesVGPRFullSrcCWaitStates = 4;
    const int MaxWaitStates = 19;

    if (!Use.isReg())
      continue;
    unsigned Reg = Use.getReg();
    bool FullReg;
    const MachineInstr *MI1;

    // Match a prior DGEMM/XDL MFMA whose dst overlaps Reg; record the match
    // and whether the dst equals Reg exactly.
    auto IsOverlappedDGEMMorXDLFn = [Reg, &IsMFMAFn, &FullReg, &MI1,
                                     this](const MachineInstr &MI) {
      if (!IsMFMAFn(MI))
        return false;
      if (!isDGEMM(MI.getOpcode()) && !isXDL(ST, MI))
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      FullReg = (DstReg == Reg);
      MI1 = &MI;
      return TRI.regsOverlap(DstReg, Reg);
    };

    WaitStatesNeededForUse = LegacyVALUNotDotWritesVGPRWaitStates -
      getWaitStatesSinceDef(Reg, IsLegacyVALUNotDotFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    int NumWaitStates = getWaitStatesSinceDef(Reg, IsOverlappedDGEMMorXDLFn,
                                              MaxWaitStates);
    if (NumWaitStates == std::numeric_limits<int>::max())
      continue;

    int OpNo = MI->getOperandNo(&Use);
    unsigned Opc1 = MI1->getOpcode();
    int NeedWaitStates = 0;
    if (OpNo == SrcCIdx) {
      if (!isDGEMM(Opc) && isDGEMM(Opc1)) {
        NeedWaitStates = 0;
      } else if (FullReg) {
        // Full-register SrcC reuse between 4x4 DGEMMs has its own count.
        if ((Opc == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
             Opc == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64) &&
            (Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
             Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64))
          NeedWaitStates = DMFMA4x4WritesVGPRFullSrcCWaitStates;
      } else {
        switch (Opc1) {
        case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
          if (!isXDL(ST, *MI))
            NeedWaitStates = DMFMA16x16WritesVGPROverlappedSrcCWaitStates;
          break;
        case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
        case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
          if (!isXDL(ST, *MI))
            NeedWaitStates = DMFMA4x4WritesVGPROverlappedSrcCWaitStates;
          break;
        default:
          // Writer latency selects the class (2 = 4x4, 8 = 16x16,
          // 16 = 32x32 MFMA).
          switch (TSchedModel.computeInstrLatency(MI1)) {
          case 2:
            NeedWaitStates = isDGEMM(Opc)
              ? SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates
              : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 8:
            NeedWaitStates = isDGEMM(Opc)
              ? SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates
              : SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 16: LLVM_FALLTHROUGH;
          default:
            NeedWaitStates = isDGEMM(Opc)
              ? SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates
              : SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates;
          }
        }
      }
    } else {
      // SrcA/SrcB operand of this MFMA.
      switch (Opc1) {
      case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
        NeedWaitStates = DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates;
        break;
      case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
      case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
        NeedWaitStates = DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates;
        break;
      default:
        switch (TSchedModel.computeInstrLatency(MI1)) {
        case 2:
          NeedWaitStates = SMFMA4x4WritesVGPROverlappedSrcABWaitStates;
          break;
        case 8:
          NeedWaitStates = SMFMA16x16WritesVGPROverlappedSrcABWaitStates;
          break;
        case 16: LLVM_FALLTHROUGH;
        default:
          NeedWaitStates = SMFMA32x32WritesVGPROverlappedSrcABWaitStates;
        }
      }
    }
    if (WaitStatesNeeded >= NeedWaitStates)
      continue;

    WaitStatesNeededForUse = NeedWaitStates - NumWaitStates;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      break;
  }

  return WaitStatesNeeded;
}
// Check gfx908 hazards for VGPR loads/stores whose address/data registers
// interact with v_accvgpr_read/write. Returns the wait states needed.
int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
  // On gfx90a+ relevant hazards are checked in checkMAIVALUHazards()
  if (!ST.hasMAIInsts() || ST.hasGFX90AInsts())
    return 0;

  int WaitStatesNeeded = 0;

  auto IsAccVgprReadFn = [](const MachineInstr &MI) {
    return MI.getOpcode() == AMDGPU::V_ACCVGPR_READ_B32_e64;
  };

  for (const MachineOperand &Op : MI->explicit_uses()) {
    if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg()))
      continue;

    Register Reg = Op.getReg();

    const int AccVgprReadLdStWaitStates = 2;
    const int VALUWriteAccVgprRdWrLdStDepVALUWaitStates = 1;
    const int MaxWaitStates = 2;

    // A v_accvgpr_read writing Reg shortly before this load/store.
    int WaitStatesNeededForUse = AccVgprReadLdStWaitStates -
      getWaitStatesSinceDef(Reg, IsAccVgprReadFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.

    // A v_accvgpr_read/write that itself depends on a recent non-MAI VALU
    // write of Reg also delays this load/store.
    auto IsVALUAccVgprRdWrCheckFn = [Reg, this](const MachineInstr &MI) {
      if (MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64 &&
          MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
        return false;
      auto IsVALUFn = [](const MachineInstr &MI) {
        return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMAI(MI);
      };
      return getWaitStatesSinceDef(Reg, IsVALUFn, 2 /*MaxWaitStates*/) <
             std::numeric_limits<int>::max();
    };

    WaitStatesNeededForUse = VALUWriteAccVgprRdWrLdStDepVALUWaitStates -
      getWaitStatesSince(IsVALUAccVgprRdWrCheckFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}
// Compute the number of wait states that must be inserted before MI to avoid
// hazards between MAI producers (MFMA / DGEMM / XDL / DOT) and VALU, memory,
// or export consumers on gfx90a+.  Returns 0 when no wait states are needed.
int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
  if (!ST.hasGFX90AInsts())
    return 0;

  // Matches MFMA instructions, excluding the plain accvgpr read/write moves.
  auto IsMFMAFn = [](const MachineInstr &MI) -> bool {
    return SIInstrInfo::isMAI(MI) &&
           MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
           MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64;
  };

  auto IsDGEMMFn = [](const MachineInstr &MI) -> bool {
    return isDGEMM(MI.getOpcode());
  };

  // This is checked in checkMAIHazards90A()
  if (IsMFMAFn(*MI))
    return 0;

  int WaitStatesNeeded = 0;

  bool IsMemOrExport = SIInstrInfo::isVMEM(*MI) ||
                       SIInstrInfo::isFLAT(*MI) ||
                       SIInstrInfo::isDS(*MI) ||
                       SIInstrInfo::isEXP(*MI);
  bool IsVALU = SIInstrInfo::isVALU(*MI);

  // The predicates below communicate through Reg (set before each search) and
  // report the matching producer through MFMA / DOT as a side channel.
  const MachineInstr *MFMA = nullptr;
  unsigned Reg;
  auto IsDGEMMorXDLWriteFn = [&Reg, &IsMFMAFn, &MFMA,
                              this](const MachineInstr &MI) {
    if (!IsMFMAFn(MI) || !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
      return false;
    if (!isDGEMM(MI.getOpcode()) && !isXDL(ST, MI))
      return false;
    MFMA = &MI;
    return true;
  };

  const MachineInstr *DOT = nullptr;
  auto IsDotWriteFn = [&Reg, &DOT, this](const MachineInstr &MI) {
    if (!SIInstrInfo::isDOT(MI) ||
        !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
      return false;
    DOT = &MI;
    return true;
  };

  int SrcCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  if (IsMemOrExport || IsVALU) {
    // RAW side: MI reads a register recently written by an MFMA or DOT.
    // Constants encode required wait states per producer latency class.
    const int SMFMA4x4WriteVgprVALUMemExpReadWaitStates = 5;
    const int SMFMA16x16WriteVgprVALUMemExpReadWaitStates = 11;
    const int SMFMA32x32WriteVgprVALUMemExpReadWaitStates = 19;
    const int DMFMA4x4WriteVgprMemExpReadWaitStates = 9;
    const int DMFMA16x16WriteVgprMemExpReadWaitStates = 18;
    const int DMFMA4x4WriteVgprVALUReadWaitStates = 6;
    const int DMFMA16x16WriteVgprVALUReadWaitStates = 11;
    const int DotWriteSameDotReadSrcAB = 3;
    const int DotWriteDifferentVALURead = 3;
    const int MaxWaitStates = 19;

    for (const MachineOperand &Use : MI->explicit_uses()) {
      if (!Use.isReg())
        continue;
      Reg = Use.getReg();

      // First look for a DOT producer of this register.
      DOT = nullptr;
      int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
                                                     MaxWaitStates);
      if (DOT) {
        int NeedWaitStates = 0;
        if (DOT->getOpcode() == MI->getOpcode()) {
          // Same dot opcode: only src A/B reads hazard; src2 (accumulator)
          // chains for free.
          if (&Use - &MI->getOperand(0) != SrcCIdx)
            NeedWaitStates = DotWriteSameDotReadSrcAB;
        } else {
          NeedWaitStates = DotWriteDifferentVALURead;
        }

        int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
      }

      // Then look for a DGEMM or XDL MFMA producer of this register.
      MFMA = nullptr;
      WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDGEMMorXDLWriteFn,
                                                 MaxWaitStates);
      if (!MFMA)
        continue;

      unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
      int NeedWaitStates = MaxWaitStates;
      // Latency class selects the wait-state table entry (2/4/8/16 cycles).
      switch (HazardDefLatency) {
      case 2:
        NeedWaitStates = SMFMA4x4WriteVgprVALUMemExpReadWaitStates;
        break;
      case 4:
        assert(isDGEMM(MFMA->getOpcode()));
        NeedWaitStates =
            IsMemOrExport ? DMFMA4x4WriteVgprMemExpReadWaitStates
                          : DMFMA4x4WriteVgprVALUReadWaitStates;
        break;
      case 8:
        NeedWaitStates = SMFMA16x16WriteVgprVALUMemExpReadWaitStates;
        break;
      case 16: LLVM_FALLTHROUGH;
      default:
        NeedWaitStates =
          isDGEMM(MFMA->getOpcode())
            ? IsMemOrExport ? DMFMA16x16WriteVgprMemExpReadWaitStates
                            : DMFMA16x16WriteVgprVALUReadWaitStates
            : SMFMA32x32WriteVgprVALUMemExpReadWaitStates;
        break;
      }

      int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

      // Already at the worst case; no point scanning more uses.
      if (WaitStatesNeeded == MaxWaitStates)
        break;
    }
  }

  // DGEMM results feeding a double-precision FMA need a fixed gap.
  unsigned Opc = MI->getOpcode();
  const int DMFMAToFMA64WaitStates = 2;
  if ((Opc == AMDGPU::V_FMA_F64_e64 ||
       Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64 ||
       Opc == AMDGPU::V_FMAC_F64_dpp) &&
      WaitStatesNeeded < DMFMAToFMA64WaitStates) {
    int WaitStatesNeededForUse = DMFMAToFMA64WaitStates -
      getWaitStatesSince(IsDGEMMFn, DMFMAToFMA64WaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  if (!IsVALU && !IsMemOrExport)
    return WaitStatesNeeded;

  // WAW / WAR side: MI writes a register recently written (or read as srcC)
  // by an MFMA or DOT.
  for (const MachineOperand &Def : MI->defs()) {
    const int SMFMA4x4WriteVgprVALUWawWaitStates = 5;
    const int SMFMA16x16WriteVgprVALUWawWaitStates = 11;
    const int SMFMA32x32WriteVgprVALUWawWaitStates = 19;
    const int SMFMA4x4ReadVgprVALUWarWaitStates = 1;
    const int SMFMA16x16ReadVgprVALUWarWaitStates = 7;
    const int SMFMA32x32ReadVgprVALUWarWaitStates = 15;
    const int DMFMA4x4WriteVgprVALUWriteWaitStates = 6;
    const int DMFMA16x16WriteVgprVALUWriteWaitStates = 11;
    const int DotWriteDifferentVALUWrite = 3;
    const int MaxWaitStates = 19;
    const int MaxWarWaitStates = 15;

    Reg = Def.getReg();

    // WAW against a DOT write of a different opcode.
    DOT = nullptr;
    int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
                                                   MaxWaitStates);
    if (DOT && DOT->getOpcode() != MI->getOpcode())
      WaitStatesNeeded = std::max(WaitStatesNeeded, DotWriteDifferentVALUWrite -
                                                    WaitStatesSinceDef);

    // WAW against a DGEMM/XDL MFMA write.
    MFMA = nullptr;
    WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDGEMMorXDLWriteFn,
                                               MaxWaitStates);
    if (MFMA) {
      int NeedWaitStates = MaxWaitStates;
      switch (TSchedModel.computeInstrLatency(MFMA)) {
      case 2:
        NeedWaitStates = SMFMA4x4WriteVgprVALUWawWaitStates;
        break;
      case 4:
        assert(isDGEMM(MFMA->getOpcode()));
        NeedWaitStates = DMFMA4x4WriteVgprVALUWriteWaitStates;
        break;
      case 8:
        NeedWaitStates = SMFMA16x16WriteVgprVALUWawWaitStates;
        break;
      case 16: LLVM_FALLTHROUGH;
      default:
        NeedWaitStates = isDGEMM(MFMA->getOpcode())
                   ? DMFMA16x16WriteVgprVALUWriteWaitStates
                   : SMFMA32x32WriteVgprVALUWawWaitStates;
        break;
      }

      int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

      // Already at the worst case; stop scanning defs.
      if (WaitStatesNeeded == MaxWaitStates)
        break;
    }

    // WAR: a non-DGEMM MFMA reading this register as its srcC accumulator.
    auto IsSMFMAReadAsCFn = [&Reg, &IsMFMAFn, &MFMA,
                             this](const MachineInstr &MI) {
      if (!IsMFMAFn(MI) || isDGEMM(MI.getOpcode()) ||
          !MI.readsRegister(Reg, &TRI))
        return false;

      const MachineOperand *SrcC =
          TII.getNamedOperand(MI, AMDGPU::OpName::src2);
      assert(SrcC);
      if (!SrcC->isReg() || !TRI.regsOverlap(SrcC->getReg(), Reg))
        return false;

      MFMA = &MI;
      return true;
    };

    MFMA = nullptr;
    int WaitStatesSinceUse = getWaitStatesSince(IsSMFMAReadAsCFn,
                                                MaxWarWaitStates);
    if (!MFMA)
      continue;

    unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
    int NeedWaitStates = MaxWaitStates;
    switch (HazardDefLatency) {
    case 2:  NeedWaitStates = SMFMA4x4ReadVgprVALUWarWaitStates;
             break;
    case 8:  NeedWaitStates = SMFMA16x16ReadVgprVALUWarWaitStates;
             break;
    case 16: LLVM_FALLTHROUGH;
    default: NeedWaitStates = SMFMA32x32ReadVgprVALUWarWaitStates;
             break;
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceUse;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}
// Advise the scheduler to prefer another candidate when SU is an MFMA that
// would issue while a previous MFMA is still in flight (within its latency).
bool GCNHazardRecognizer::ShouldPreferAnother(SUnit *SU) {
  if (!SU->isInstr())
    return false;

  // Records the most recent matching MFMA seen by the backwards search;
  // plain accvgpr read/write moves are not considered MFMAs here.
  const MachineInstr *LastMAI = nullptr;
  auto IsMFMAFn = [&LastMAI](const MachineInstr &MI) {
    LastMAI = nullptr;
    bool Matches = SIInstrInfo::isMAI(MI) &&
                   MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
                   MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64;
    if (Matches)
      LastMAI = &MI;
    return Matches;
  };

  MachineInstr *MI = SU->getInstr();
  if (!IsMFMAFn(*MI))
    return false;

  int Dist = getWaitStatesSince(IsMFMAFn, 16);
  if (LastMAI)
    return Dist < (int)TSchedModel.computeInstrLatency(LastMAI);

  return false;
}
|
#include "WiFiUdp.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Default-construct an empty UDP mock: no read or write buffer attached.
// Fix: removed the stray ';' that followed the constructor body (an empty
// declaration at namespace scope).
WiFiUDP::WiFiUDP() {
    this->readBuffer = NULL;
    this->writeBuffer = NULL;
    this->readBufferLength = 0;
    this->writeBufferLength = 0;
}
// Construct a UDP mock whose read buffer is a private copy of `buffer`
// (`length` bytes).  Delegates to the default constructor to avoid
// duplicating the member zero-initialization.
WiFiUDP::WiFiUDP(unsigned char *buffer, int length) : WiFiUDP() {
    this->setReadBuffer(buffer, length);
}
// Destructor.
// NOTE(review): the frees below were deliberately commented out, presumably
// because shallow copies of WiFiUDP (no copy ctor is defined) would lead to
// double-frees -- confirm.  As written, readBuffer and writeBuffer leak.
// Long term, replace the raw malloc'd buffers with std::vector<unsigned char>
// so the Rule of Zero applies.
WiFiUDP::~WiFiUDP() {
    //if(this->readBuffer != NULL) {
    //free(this->readBuffer);
    //this->readBuffer = NULL;
    //}
    //if(this->writeBuffer != NULL) {
    //free(this->writeBuffer);
    //this->writeBuffer = NULL;
    //}
}
// Build a WiFiUDP mock whose read buffer holds the entire contents of the
// file at `path`.  On any I/O failure an error is printed to stderr and the
// (empty) WiFiUDP object is still returned, matching the original contract.
// The caller owns the returned object.
//
// Fixes: `source` was leaked on every path (setReadBuffer copies it, and the
// error paths returned without freeing); malloc was unchecked; two error
// messages were wrong ("see to the beginning", "seek to open file").
WiFiUDP* WiFiUDP::loadFromFile(const char *path) {
    WiFiUDP* udp = new WiFiUDP;

    FILE *fp = fopen(path, "rb");
    if(fp == NULL) {
        fprintf(stderr, "ERROR: Unable to open file: %s\n", path);
        return udp;
    }

    if(fseek(fp, 0L, SEEK_END) != 0) {
        fprintf(stderr, "ERROR: Unable to seek to the end of the file: %s\n", path);
        fclose(fp);
        return udp;
    }

    long bufsize = ftell(fp);
    if(bufsize == -1) {
        fprintf(stderr, "ERROR: Unable to get file position\n");
        fclose(fp);
        return udp;
    }

    // +1 spare byte kept from the original (room for a terminator if needed).
    unsigned char *source = (unsigned char *)malloc(sizeof(unsigned char) * (bufsize + 1));
    if(source == NULL) {
        fprintf(stderr, "ERROR: Unable to allocate %ld bytes\n", bufsize + 1);
        fclose(fp);
        return udp;
    }

    if(fseek(fp, 0L, SEEK_SET) != 0) {
        fprintf(stderr, "ERROR: Unable to seek to the beginning of the file: %s\n", path);
        free(source);
        fclose(fp);
        return udp;
    }

    size_t len = fread(source, sizeof(unsigned char), bufsize, fp);
    if(ferror(fp) != 0) {
        fprintf(stderr, "ERROR: Unable to read the file (%i): %s\n", ferror(fp), path);
        free(source);
        fclose(fp);
        return udp;
    }

    // setReadBuffer makes its own copy, so the staging buffer can be freed.
    udp->setReadBuffer(source, len);
    free(source);
    fclose(fp);
    return udp;
}
// Replace the read buffer with a private copy of `buffer` (`length` bytes).
// NOTE(review): the previous readBuffer is not freed (the free below was
// deliberately commented out, presumably to avoid double-frees with
// shallow-copied WiFiUDP objects -- confirm), so repeated calls leak.
// malloc is also unchecked; a failed allocation would crash in memcpy.
void WiFiUDP::setReadBuffer(unsigned char *buffer, int length) {
    //if(this->readBuffer != NULL) {
    //free(this->readBuffer);
    //this->readBuffer = NULL;
    //}
    this->readBuffer = (unsigned char *)malloc(sizeof(unsigned char) * length);
    memcpy(this->readBuffer, buffer, length);
    this->readBufferLength = length;
}
// Stub: multicast listening is not supported by this desktop mock; always 0.
uint8_t WiFiUDP::beginMulticast(IPAddress interfaceAddr, IPAddress multicast, uint16_t port) {
    return 0;
}
// Stub: no real socket is opened in this mock; always reports failure (0).
int WiFiUDP::beginPacket(IPAddress ip, uint16_t port) {
    return 0;
}
// Stub: hostname variant; no DNS or socket in this mock; always 0.
int WiFiUDP::beginPacket(const char *host, uint16_t port) {
    return 0;
}
// Stub: multicast sending is not supported by this mock; always 0.
int WiFiUDP::beginPacketMulticast(IPAddress multicastAddress, uint16_t port, IPAddress interfaceAddress, int ttl) {
    return 0;
}
// Stub: nothing is transmitted in this mock; always 0.
int WiFiUDP::endPacket() {
    return 0;
}
int WiFiUDP::writeLength() {
return this->writeBufferLength;
}
int WiFiUDP::readLength() {
return this->readBufferLength;
}
// Stub: single-byte read is not implemented in this mock; always 0.
int WiFiUDP::read() {
    return 0;
}
// Copy up to `len` bytes of the mock's read buffer into `buffer` and return
// the number of bytes copied.
// Fixes: the original memcpy'd `len` bytes unconditionally, over-reading
// readBuffer when the caller asked for more than readBufferLength, and
// dereferenced a NULL readBuffer when none was set.
int WiFiUDP::read(unsigned char *buffer, size_t len) {
    if(buffer == NULL || this->readBuffer == NULL || this->readBufferLength <= 0)
        return 0;
    size_t avail = (size_t)this->readBufferLength;
    size_t n = (len < avail) ? len : avail;
    memcpy(buffer, this->readBuffer, n);
    return (int)n;
}
// Stub: peeking at the next byte is not implemented in this mock; always 0.
int WiFiUDP::peek() {
    return 0;
}
// No-op in this mock: there is no underlying socket to flush.
void WiFiUDP::flush() {}
// Stub: single-byte write is not implemented; always reports 0 bytes written.
size_t WiFiUDP::write(uint8_t) {
    return 0;
}
// Record `size` bytes from `buffer` as the "sent" datagram by copying them
// into a freshly allocated write buffer.  Returns the byte count.
// NOTE(review): the previous writeBuffer is not freed (free deliberately
// commented out, presumably over double-free concerns -- confirm), so each
// call leaks the prior buffer; malloc is also unchecked.
size_t WiFiUDP::write(const uint8_t *buffer, size_t size) {
    //if(this->writeBuffer != NULL) {
    //free(this->writeBuffer);
    //this->writeBuffer = NULL;
    //}
    this->writeBuffer = (unsigned char *)malloc(sizeof(unsigned char) * size);
    memcpy(this->writeBuffer, buffer, size);
    this->writeBufferLength = size;
    return size;
}
int WiFiUDP::parsePacket() {
return this->readBufferLength;
}
|
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "tests/RecordTestUtils.h"
#include "tests/Test.h"
#include "include/core/SkSurface.h"
#include "include/effects/SkImageFilters.h"
#include "src/core/SkImagePriv.h"
#include "src/core/SkRecord.h"
#include "src/core/SkRecordDraw.h"
#include "src/core/SkRecordOpts.h"
#include "src/core/SkRecorder.h"
#include "src/core/SkRecords.h"
#include "tools/debugger/DebugCanvas.h"
// Canvas dimensions shared by the recorder tests below (1080p).
static const int W = 1920, H = 1080;
// Playback callback that aborts after the first draw call is replayed.
class JustOneDraw : public SkPicture::AbortCallback {
public:
    bool abort() override { return fCalls++ > 0; }

private:
    int fCalls = 0;
};
DEF_TEST(RecordDraw_LazySaves, r) {
    // save() is recorded lazily: it only lands in the SkRecord once a later
    // command (here, scale) actually depends on it.
    SkRecord record;
    SkRecorder recorder(&record, W, H);

    REPORTER_ASSERT(r, 0 == record.count());
    recorder.save();
    REPORTER_ASSERT(r, 0 == record.count());    // the save was not recorded (yet)
    recorder.drawColor(SK_ColorRED);
    REPORTER_ASSERT(r, 1 == record.count());    // a draw alone doesn't force it
    recorder.scale(2, 2);
    REPORTER_ASSERT(r, 3 == record.count());    // now we see the save
    recorder.restore();
    REPORTER_ASSERT(r, 4 == record.count());

    assert_type<SkRecords::DrawPaint>(r, record, 0);
    assert_type<SkRecords::Save>     (r, record, 1);
    assert_type<SkRecords::Scale>    (r, record, 2);
    assert_type<SkRecords::Restore>  (r, record, 3);

    // Save/restore pairs that guard nothing are elided entirely.
    recorder.save();
    recorder.save();
    recorder.restore();
    recorder.restore();
    REPORTER_ASSERT(r, 4 == record.count());
}
DEF_TEST(RecordDraw_Abort, r) {
    // Record two commands.
    SkRecord record;
    SkRecorder recorder(&record, W, H);
    recorder.drawRect(SkRect::MakeWH(200, 300), SkPaint());
    recorder.clipRect(SkRect::MakeWH(100, 200));

    SkRecord rerecord;
    SkRecorder canvas(&rerecord, W, H);

    // JustOneDraw aborts playback after the first call, so only the drawRect
    // should be replayed into rerecord; the clipRect must be cut off.
    JustOneDraw callback;
    SkRecordDraw(record, &canvas, nullptr, nullptr, 0, nullptr/*bbh*/, &callback);

    REPORTER_ASSERT(r, 1 == count_instances_of_type<SkRecords::DrawRect>(rerecord));
    REPORTER_ASSERT(r, 0 == count_instances_of_type<SkRecords::ClipRect>(rerecord));
}
// SkRecordDraw must close any saves left unbalanced by the recorded commands,
// so the replayed record always contains matching Save/Restore counts.
DEF_TEST(RecordDraw_Unbalanced, r) {
    SkRecord record;
    SkRecorder recorder(&record, W, H);
    recorder.save();  // We won't balance this, but SkRecordDraw will for us.
    recorder.scale(2, 2);

    SkRecord rerecord;
    SkRecorder canvas(&rerecord, W, H);
    SkRecordDraw(record, &canvas, nullptr, nullptr, 0, nullptr/*bbh*/, nullptr/*callback*/);

    // Fix: restore_count previously counted SkRecords::Save a second time
    // (copy-paste bug), making the assertion below vacuously true.
    int save_count = count_instances_of_type<SkRecords::Save>(rerecord);
    int restore_count = count_instances_of_type<SkRecords::Restore>(rerecord);
    REPORTER_ASSERT(r, save_count == restore_count);
}
DEF_TEST(RecordDraw_SetMatrixClobber, r) {
    // setMatrix() during playback must concatenate with the destination
    // canvas's existing CTM rather than clobber it.

    // Set up an SkRecord that just scales by 2x,3x.
    SkRecord scaleRecord;
    SkRecorder scaleCanvas(&scaleRecord, W, H);
    SkMatrix scale;
    scale.setScale(2, 3);
    scaleCanvas.setMatrix(scale);

    // Set up an SkRecord with an initial +20, +20 translate.
    SkRecord translateRecord;
    SkRecorder translateCanvas(&translateRecord, W, H);
    SkMatrix translate;
    translate.setTranslate(20, 20);
    translateCanvas.setMatrix(translate);

    SkRecordDraw(scaleRecord, &translateCanvas, nullptr, nullptr, 0, nullptr/*bbh*/, nullptr/*callback*/);
    REPORTER_ASSERT(r, 4 == translateRecord.count());
    assert_type<SkRecords::SetM44>(r, translateRecord, 0);
    assert_type<SkRecords::Save>  (r, translateRecord, 1);
    assert_type<SkRecords::SetM44>(r, translateRecord, 2);
    assert_type<SkRecords::Restore> (r, translateRecord, 3);

    // When we look at translateRecord now, it should have its first +20,+20 translate,
    // then a 2x,3x scale that's been concatted with that +20,+20 translate.
    const SkRecords::SetM44* setMatrix;
    setMatrix = assert_type<SkRecords::SetM44>(r, translateRecord, 0);
    REPORTER_ASSERT(r, setMatrix->matrix == SkM44(translate));

    setMatrix = assert_type<SkRecords::SetM44>(r, translateRecord, 2);
    SkMatrix expected = scale;
    expected.postConcat(translate);
    REPORTER_ASSERT(r, setMatrix->matrix == SkM44(expected));
}
// Approximate rect equality with one pixel of slop in every direction,
// since float math can perturb exact bounds: b must fit inside a grown by 1
// but not inside a shrunk by 1.
static bool sloppy_rect_eq(SkRect a, SkRect b) {
    SkRect grown = a;
    SkRect shrunk = a;
    grown.outset(1, 1);
    shrunk.inset(1, 1);
    return grown.contains(b) && !shrunk.contains(b);
}
// TODO This would be nice, but we can't get it right today.
// (Disabled: kept under #if 0 as documentation of the intended behavior --
// every op's fill bound would equal the clipped, scaled draw bounds.)
#if 0
DEF_TEST(RecordDraw_BasicBounds, r) {
    SkRecord record;
    SkRecorder recorder(&record, W, H);
    recorder.save();
    recorder.clipRect(SkRect::MakeWH(400, 500));
    recorder.scale(2, 2);
    recorder.drawRect(SkRect::MakeWH(320, 240), SkPaint());
    recorder.restore();

    SkAutoTMalloc<SkRect> bounds(record.count());
    SkRecordFillBounds(SkRect::MakeWH(SkIntToScalar(W), SkIntToScalar(H)), record, bounds);

    for (int i = 0; i < record.count(); i++) {
        REPORTER_ASSERT(r, sloppy_rect_eq(SkRect::MakeWH(400, 480), bounds[i]));
    }
}
#endif
// Base test to ensure start/stop range is respected
// Base test to ensure start/stop range is respected
DEF_TEST(RecordDraw_PartialStartStop, r) {
    static const int kWidth = 10, kHeight = 10;

    SkRect r1 = { 0, 0, kWidth,   kHeight };
    SkRect r2 = { 0, 0, kWidth,   kHeight/2 };
    SkRect r3 = { 0, 0, kWidth/2, kHeight };
    SkPaint p;

    SkRecord record;
    SkRecorder recorder(&record, kWidth, kHeight);
    recorder.drawRect(r1, p);
    recorder.drawRect(r2, p);
    recorder.drawRect(r3, p);

    SkRecord rerecord;
    SkRecorder canvas(&rerecord, kWidth, kHeight);
    // Replay only ops [1, 2): exactly the drawRect of r2.
    SkRecordPartialDraw(record, &canvas, nullptr, 0, 1, 2, SkM44()); // replay just drawRect of r2

    REPORTER_ASSERT(r, 1 == count_instances_of_type<SkRecords::DrawRect>(rerecord));
    int index = find_first_instances_of_type<SkRecords::DrawRect>(rerecord);
    const SkRecords::DrawRect* drawRect = assert_type<SkRecords::DrawRect>(r, rerecord, index);
    REPORTER_ASSERT(r, drawRect->rect == r2);
}
// A regression test for crbug.com/415468 and https://bug.skia.org/2957 .
//
// This also now serves as a regression test for crbug.com/418417. We used to adjust the
// bounds for the saveLayer, clip, and restore to be greater than the bounds of the picture.
// (We were applying the saveLayer paint to the bounds after restore, which makes no sense.)
DEF_TEST(RecordDraw_SaveLayerAffectsClipBounds, r) {
    SkRecord record;
    SkRecorder recorder(&record, 50, 50);

    // We draw a rectangle with a long drop shadow. We used to not update the clip
    // bounds based on SaveLayer paints, so the drop shadow could be cut off.
    SkPaint paint;
    paint.setImageFilter(SkImageFilters::DropShadow(20, 0, 0, 0, SK_ColorBLACK, nullptr));

    recorder.saveLayer(nullptr, &paint);
    recorder.clipRect(SkRect::MakeWH(20, 40));
    recorder.drawRect(SkRect::MakeWH(20, 40), SkPaint());
    recorder.restore();

    // Under the original bug, the right edge value of the drawRect would be 20 less than asserted
    // here because we intersected it with a clip that had not been adjusted for the drop shadow.
    //
    // The second bug showed up as adjusting the picture bounds (0,0,50,50) by the drop shadow too.
    // The saveLayer, clipRect, and restore bounds were incorrectly (0,0,70,50).
    SkAutoTMalloc<SkRect> bounds(record.count());
    SkAutoTMalloc<SkBBoxHierarchy::Metadata> meta(record.count());
    SkRecordFillBounds(SkRect::MakeWH(50, 50), record, bounds, meta);
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[0], SkRect::MakeLTRB(0, 0, 50, 50)));
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[1], SkRect::MakeLTRB(0, 0, 50, 50)));
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[2], SkRect::MakeLTRB(0, 0, 40, 40)));
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[3], SkRect::MakeLTRB(0, 0, 50, 50)));
}
// Verifies the per-op isDraw metadata produced by SkRecordFillBounds:
// a restore paired with a saveLayer counts as a draw, one paired with a
// plain save does not.
DEF_TEST(RecordDraw_Metadata, r) {
    SkRecord record;
    SkRecorder recorder(&record, 50, 50);

    // Just doing some mildly interesting drawing, mostly grabbed from the unit test above.
    SkPaint paint;
    paint.setImageFilter(SkImageFilters::DropShadow(20, 0, 0, 0, SK_ColorBLACK, nullptr));

    recorder.saveLayer(nullptr, &paint);
    recorder.clipRect(SkRect::MakeWH(20, 40));
    recorder.save();
    recorder.translate(10, 10);
    recorder.drawRect(SkRect::MakeWH(20, 40), SkPaint());
    recorder.restore();
    recorder.restore();

    SkAutoTMalloc<SkRect> bounds(record.count());
    SkAutoTMalloc<SkBBoxHierarchy::Metadata> meta(record.count());
    SkRecordFillBounds(SkRect::MakeWH(50, 50), record, bounds, meta);

    REPORTER_ASSERT(r, !meta[0].isDraw);  // saveLayer (not a draw, but its restore will be)
    REPORTER_ASSERT(r, !meta[1].isDraw);  //   clip
    REPORTER_ASSERT(r, !meta[2].isDraw);  //   save
    REPORTER_ASSERT(r, !meta[3].isDraw);  //     translate
    REPORTER_ASSERT(r,  meta[4].isDraw);  //     drawRect
    REPORTER_ASSERT(r, !meta[5].isDraw);  //   restore  (paired with save, not a draw)
    REPORTER_ASSERT(r,  meta[6].isDraw);  // restore  (paired with saveLayer, a draw)
}
// TODO This would be nice, but we can't get it right today.
// (Disabled: kept under #if 0 as documentation of the intended behavior.)
#if 0
// When a saveLayer provides an explicit bound and has a complex paint (e.g., one that
// affects transparent black), that bound should serve to shrink the area of the required
// backing store.
DEF_TEST(RecordDraw_SaveLayerBoundsAffectsClipBounds, r) {
    SkRecord record;
    SkRecorder recorder(&record, 50, 50);

    SkPaint p;
    p.setBlendMode(SkBlendMode::kSrc);

    SkRect layerBounds = SkRect::MakeLTRB(10, 10, 40, 40);
    recorder.saveLayer(&layerBounds, &p);
    recorder.drawRect(SkRect::MakeLTRB(20, 20, 30, 30), SkPaint());
    recorder.restore();

    SkAutoTMalloc<SkRect> bounds(record.count());
    SkRecordFillBounds(SkRect::MakeWH(50, 50), record, bounds);
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[0], SkRect::MakeLTRB(10, 10, 40, 40)));
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[1], SkRect::MakeLTRB(20, 20, 30, 30)));
    REPORTER_ASSERT(r, sloppy_rect_eq(bounds[2], SkRect::MakeLTRB(10, 10, 40, 40)));
}
#endif
// Replaying a recorded drawImage/drawImageRect must invoke the corresponding
// canvas virtual (no silent conversion between the two entry points).
DEF_TEST(RecordDraw_drawImage, r){
    // Canvas spy that records which image-drawing virtual was called.
    class SkCanvasMock : public SkCanvas {
    public:
        SkCanvasMock(int width, int height) : SkCanvas(width, height) {
            this->resetTestValues();
        }

        void onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
                         const SkPaint* paint) override {
            fDrawImageCalled = true;
        }

        void onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
                             const SkPaint* paint, SrcRectConstraint) override {
            fDrawImageRectCalled = true;
        }

        void resetTestValues() {
            fDrawImageCalled = fDrawImageRectCalled = false;
        }

        bool fDrawImageCalled;
        bool fDrawImageRectCalled;
    };

    auto surface(SkSurface::MakeRasterN32Premul(10, 10));
    surface->getCanvas()->clear(SK_ColorGREEN);
    sk_sp<SkImage> image(surface->makeImageSnapshot());

    SkCanvasMock canvas(10, 10);

    {
        SkRecord record;
        SkRecorder recorder(&record, 10, 10);
        recorder.drawImage(image, 0, 0);
        SkRecordDraw(record, &canvas, nullptr, nullptr, 0, nullptr, nullptr);
    }
    REPORTER_ASSERT(r, canvas.fDrawImageCalled);
    canvas.resetTestValues();

    {
        SkRecord record;
        SkRecorder recorder(&record, 10, 10);
        recorder.drawImageRect(image, SkRect::MakeWH(10, 10), nullptr);
        SkRecordDraw(record, &canvas, nullptr, nullptr, 0, nullptr, nullptr);
    }
    REPORTER_ASSERT(r, canvas.fDrawImageRectCalled);
}
|
/* ------------------------------------------------------------------
Copyright (c) 2019 Marc Toussaint
email: marc.toussaint@informatik.uni-stuttgart.de
This code is distributed under the MIT License.
Please see <root-path>/LICENSE for details.
-------------------------------------------------------------- */
#include "LGP_node.h"
#include "LGP_tree.h"
#include "bounds.h"
#include <MCTS/solver_PlainMC.h>
#include <KOMO/komo.h>
#include <Kin/switch.h>
#include <Optim/GraphOptim.h>
#include <Gui/opengl.h>
// Debug hooks: compiled out by default; uncomment the bodies to enable.
#define DEBUG(x) //x
#define DEL_INFEASIBLE(x) //x

// Global statistics shared across the whole LGP search (bookkeeping only).
uint COUNT_kin=0;        // kinematic configurations created
uint COUNT_evals=0;      // Newton evaluations spent in KOMO
uint COUNT_node=0;       // LGP nodes ever created; doubles as the next node id
uintA COUNT_opt=consts<uint>(0, BD_max);  // optimizations run per bound type
double COUNT_time=0.;    // accumulated KOMO runtime (seconds)
rai::String OptLGPDataPath;   // directory prefix for per-node KOMO log files
ofstream* filNodes=nullptr;   // optional node-trace stream (nullptr = off)
bool LGP_useHoming = true;
// Reset all per-bound-level statistics of this node (costs, constraint
// violations, computation counts, feasibility flags, cached KOMO problems).
void LGP_Node::resetData() {
  cost = zeros(L);
  constraints = zeros(L);
  count = consts<uint>(0, L);
  count(BD_symbolic) = 1;  // the symbolic level exists by construction
  feasible = consts<byte>(true, L);
  komoProblem.resize(L);
  opt.resize(L);
  computeTime = zeros(L);
  highestBound=0.;
}
// Construct the ROOT node of an LGP search tree: resets the tree's logic
// engine to its start state and snapshots it as this node's folState.
LGP_Node::LGP_Node(LGP_Tree* _tree, uint levels)
  : parent(nullptr), tree(_tree), step(0), time(0.), id(COUNT_node++),
    fol(tree->fol),
    startKinematics(tree->kin),
    L(levels) {
  //this is the root node!
  fol.reset_state();
  folState = fol.createStateCopy();
  resetData();

  // Optional node-trace logging.
  if(filNodes)(*filNodes) <<id <<' ' <<step <<' ' <<time <<' ' <<getTreePathString() <<endl;
}
// Construct a CHILD node by applying logical decision `a` to the parent's
// state.  Registers itself in parent->children and accumulates symbolic cost.
LGP_Node::LGP_Node(LGP_Node* parent, MCTS_Environment::Handle& a)
  : parent(parent), tree(parent->tree), step(parent->step+1), id(COUNT_node++),
    fol(parent->fol),
    startKinematics(parent->startKinematics),
    L(parent->L) {
  parent->children.append(this);
  fol.setState(parent->folState, parent->step);
  CHECK(a, "giving a 'nullptr' shared pointer??");
  ret = fol.transition(a);
  time = parent->time + ret.duration;
  isTerminal = fol.successEnd;
  if(fol.deadEnd) isInfeasible=true;
  folState = fol.createStateCopy();
  folDecision = folState->getNode("decision");
  decision = a;
  resetData();
  // Symbolic reward is folded into cost-so-far with a fixed 0.1 weight.
  cost(BD_symbolic) = parent->cost(BD_symbolic) - 0.1*ret.reward; //cost-so-far
  highestBound = parent->highestBound - 0.1*ret.reward;

  if(filNodes)(*filNodes) <<id <<' ' <<step <<' ' <<time <<' ' <<getTreePathString() <<endl;
}
// Recursively destroy the entire subtree rooted at this node.
LGP_Node::~LGP_Node() {
  for(LGP_Node* ch:children) delete ch;
}
// Expand this node: create one child per feasible logical action in the
// current fol state.  A node with no applicable actions becomes terminal.
void LGP_Node::expand(int verbose) {
  if(isExpanded) return; //{ LOG(-1) <<"MNode '" <<*this <<"' is already expanded"; return; }
  CHECK(!children.N, "");
  if(isTerminal) return;
  fol.setState(folState, step);
  // Temporarily silence/raise the logic engine's verbosity for this query.
  int tmp=fol.verbose;
  fol.verbose=verbose;
  auto actions = fol.get_actions();
  fol.verbose=tmp;
  for(FOL_World::Handle& a:actions) {
    //    cout <<"  EXPAND DECISION: " <<*a <<endl;
    new LGP_Node(this, a);  // child registers itself in `children`
  }
  if(!children.N) isTerminal=true;
  isExpanded=true;
}
// Compute effKinematics: the kinematic configuration after applying all
// kinematic switches implied by this node's skeleton to startKinematics.
// Uses a throwaway KOMO instance only to derive the switches.
void LGP_Node::computeEndKinematics() {
  Skeleton S = getSkeleton();
  effKinematics.copy(startKinematics, true);
  KOMO tmp;
  tmp.setModel(startKinematics, false);
  // The KOMO timeline must cover the last phase referenced by the skeleton.
  double maxPhase=0;
  for(const SkeletonEntry& s:S) {
    if(s.phase0>maxPhase) maxPhase=s.phase0;
    if(s.phase1>maxPhase) maxPhase=s.phase1;
  }
  tmp.setTiming(maxPhase+1., 1, 10., 1);
  tmp.setSkeleton(S);
  //  tmp.reportProblem();
  for(rai::KinematicSwitch* s : tmp.switches) s->apply(effKinematics);
}
// Set up and solve the KOMO optimization for the given bound level (pose,
// sequence, path, ...) of this node, then record cost/constraints/feasibility
// in the per-bound statistics.  Marks the node infeasible when the bound's
// constraint violation exceeds the threshold.
void LGP_Node::optBound(BoundType bound, bool collisions, int verbose) {
  // Re-create the KOMO problem for this bound from scratch.
  if(komoProblem(bound)) komoProblem(bound).reset();
  komoProblem(bound) = std::make_shared<KOMO>();
  KOMO& komo(*komoProblem(bound));

  komo.verbose = rai::MAX(verbose, 0);

  if(komo.verbose>0) {
    cout <<"########## OPTIM lev " <<bound <<endl;
  }

  komo.logFile = new ofstream(OptLGPDataPath + STRING("komo-" <<id <<'-' <<step <<'-' <<bound));

  Skeleton S = getSkeleton();

  if(komo.logFile) writeSkeleton(*komo.logFile, S, getSwitchesFromSkeleton(S));

  if(komo.verbose>1) {
    writeSkeleton(cout, S, getSwitchesFromSkeleton(S));
  }

  //ensure the effective kinematics are computed when BD_pose
  //  if(bound==BD_pose && step>1){
  //    if(!parent->effKinematics.q.N) parent->optBound(BD_pose, collisions);
  //    CHECK(parent->effKinematics.q.N, "I can't compute a pose when no pose was comp. for parent (I need the effKin)");
  //  }
  if(bound==BD_pose && parent) {
    if(!parent->effKinematics.q.N) parent->computeEndKinematics();
  }

  // Sequence-path bounds warm-start from the already-computed BD_seq result.
  arrA waypoints;
  if(bound==BD_seqPath || bound==BD_seqVelPath) {
    CHECK(komoProblem(BD_seq), "BD_seq needs to be computed before");
    waypoints = komoProblem(BD_seq)->getPath_q();
  }

  skeleton2Bound(komo, bound, S,
                 startKinematics, (parent?parent->effKinematics:startKinematics),
                 collisions,
                 waypoints);

  // Append tree-level final-geometry objectives at the last time slice.
  for(Objective* o:tree->finalGeometryObjectives.objectives) {
    cout <<"FINAL objective: " <<*o <<endl;
    Objective* co = komo.addObjective({0.}, o->map, o->type);
    co->setCostSpecs(komo.T-1, komo.T-1, komo.sparseOptimization);
    cout <<"FINAL objective: " <<*co <<endl;
  }

  if(komo.logFile) {
    komo.reportProblem(*komo.logFile);
    (*komo.logFile) <<komo.getProblemGraph(false);
  }

  //  if(level==BD_seq) komo.denseOptimization=true;

  //-- optimize
  DEBUG(FILE("z.fol") <<fol;);
  DEBUG(komo.getReport(false, 1, FILE("z.problem")););
  if(komo.verbose>1) komo.reportProblem();
  if(komo.verbose>5) komo.animateOptimization = komo.verbose-5;

  try {
    if(bound != BD_poseFromSeq) {
      komo.run();
    } else {
      // BD_poseFromSeq re-optimizes only the final slice of the sequence.
      CHECK_EQ(step, komo.T-1, "");
      komo.run_sub({komo.T-2}, {});
    }
  } catch(std::runtime_error& err) {
    cout <<"KOMO CRASHED: " <<err.what() <<endl;
    komoProblem(bound).reset();
    return;
  }
  // Global bookkeeping of optimization effort.
  if(!komo.denseOptimization && !komo.sparseOptimization) COUNT_evals += komo.opt->newton.evals;
  COUNT_kin += rai::Configuration::setJointStateCount;
  COUNT_opt(bound)++;
  COUNT_time += komo.runTime;
  count(bound)++;

  DEBUG(komo.getReport(false, 1, FILE("z.problem")););
  //  cout <<komo.getReport(true) <<endl;
  //  komo.reportProxies(cout, 0.);
  //  komo.checkGradients();

  Graph result = komo.getReport((komo.verbose>0 && bound>=2));
  DEBUG(FILE("z.problem.cost") <<result;);
  double cost_here = result.get<double>({"total", "sos_sumOfSqr"});
  double constraints_here = result.get<double>({"total", "eq_sumOfAbs"});
  constraints_here += result.get<double>({"total", "ineq_sumOfPos"});

  if(bound == BD_poseFromSeq) {
    cost_here = komo.sos;
    constraints_here = komo.ineq + komo.eq;
  }

  // Feasibility threshold on the summed constraint violation.
  bool feas = (constraints_here<1.);

  if(komo.verbose>0) {
    cout <<" RESULTS: cost: " <<cost_here <<" constraints: " <<constraints_here <<" feasible: " <<feas <<endl;
  }

  //-- post process komo problem for level==1
  if(bound==BD_pose) {
    cost_here -= 0.1*ret.reward; //account for the symbolic costs
    if(parent) cost_here += parent->cost(bound); //this is sequentially additive cost

    // Propagate the end configuration (with late switches applied) forward.
    effKinematics.copy(*komo.configurations.last(), true);

    for(rai::KinematicSwitch* sw: komo.switches) {
      //    CHECK_EQ(sw->timeOfApplication, 1, "need to do this before the optimization..");
      if(sw->timeOfApplication>=2) sw->apply(effKinematics);
    }

    effKinematics.reset_q();
    effKinematics.ensure_q();
    DEBUG(effKinematics.checkConsistency();)
  } else {
    cost_here += cost(BD_symbolic); //account for the symbolic costs
  }

  //-- read out and update bound
  //update the bound
  if(feas) {
    if(count(bound)==1/*&& count({2,-1})==0 (also no higher levels)*/ || cost_here<highestBound) highestBound=cost_here;
  }

  // Keep the best result seen so far for this bound level.
  if(count(bound)==1 || cost_here<cost(bound)) {
    cost(bound) = cost_here;
    constraints(bound) = constraints_here;
    feasible(bound) = feas;
    opt(bound) = komo.x;
    computeTime(bound) = komo.runTime;
  }

  if(!feasible(bound))
    labelInfeasible();
}
// Build a KOMO problem for the given constraint sub-graph and return it.
// NOTE(review): the early `return komo;` right after CG2komo() makes
// EVERYTHING below it unreachable -- the optimization is effectively
// disabled and only the constructed (unsolved) problem is returned.
// This looks like a deliberate temporary bypass; confirm intent before
// removing either the return or the dead code.
ptr<KOMO> LGP_Node::optSubCG(const SubCG& scg, bool collisions, int verbose) {
  ptr<KOMO> komo = std::make_shared<KOMO>();
  komo->verbose = rai::MAX(verbose, 0);

  if(komo->verbose>0) {
    cout <<"########## OPTIM SubCG: " <<scg <<endl;
  }

  //  komo->fil = new ofstream(OptLGPDataPath + STRING("komo-" <<id <<'-' <<step <<'-' <<bound));

  CG2komo(*komo, scg, startKinematics, collisions);

  return komo;  // <-- early return: all code below is unreachable (see NOTE)

  if(komo->logFile) {
    komo->reportProblem(*komo->logFile);
    (*komo->logFile) <<komo->getProblemGraph(false);
  }

  //  if(level==BD_seq) komo->denseOptimization=true;

  //-- optimize
  DEBUG(FILE("z.fol") <<fol;);
  DEBUG(komo->getReport(false, 1, FILE("z.problem")););
  if(komo->verbose>1) komo->reportProblem();
  if(komo->verbose>5) komo->animateOptimization = komo->verbose-5;

  try {
    komo->run();
  } catch(std::runtime_error& err) {
    cout <<"KOMO CRASHED: " <<err.what() <<endl;
    komo.reset();
    return komo;
  }
  if(!komo->denseOptimization) COUNT_evals += komo->opt->newton.evals;
  COUNT_kin += rai::Configuration::setJointStateCount;
  COUNT_time += komo->runTime;

  DEBUG(komo->getReport(false, 1, FILE("z.problem")););
  //  cout <<komo->getReport(true) <<endl;
  //  komo->reportProxies(cout, 0.);
  //  komo->checkGradients();

  Graph result = komo->getReport(komo->verbose>0);
  DEBUG(FILE("z.problem.cost") <<result;);
  //double cost_here = result.get<double>({"total","sqrCosts"});
  //double constraints_here = result.get<double>({"total","constraints"});
  //bool feas = (constraints_here<1.);

  return komo;
}
// Convert this node's skeleton into a constraint-graph optimization problem.
ptr<CG> LGP_Node::getCGO(bool collisions, int verbose) {
  Skeleton skeleton = getSkeleton();
  if(verbose>1) writeSkeleton(cout, skeleton, getSwitchesFromSkeleton(skeleton));
  return skeleton2CGO(skeleton, startKinematics, collisions);
}
void LGP_Node::setInfeasible() {
isInfeasible = true;
for(LGP_Node* n:children) n->setInfeasible();
}
// Mark this subtree infeasible AND generalize the failure: add an
// 'INFEASIBLE <decision>' literal at the highest ancestor where the decision
// is blocked, then recompute all logic states below that ancestor so other
// branches also benefit from the learned infeasibility.
void LGP_Node::labelInfeasible() {
  setInfeasible();

  //-- remove children
  //  MNodeL tree;
  //  getAllChildren(tree);
  //  for(MNode *n:tree) if(n!=this) delete n; //TODO: memory leak!
  DEL_INFEASIBLE(children.clear();)

  //-- create a literal that is equal to the decision literal (tuple) plus an 'INFEASIBLE' prepended
  NodeL symbols = folDecision->parents;
  symbols.prepend(fol.KB.getNode({"INFEASIBLE"}));
  CHECK(symbols(0), "INFEASIBLE symbol not define in fol");
  //  cout <<"\n *** LABELLING INFEASIBLE: "; listWrite(symbols); cout <<endl;

  //-- find the right parent-of-generalization
  // Walk up while no 'block' fact matching this decision exists in the state.
  LGP_Node* branchNode = this;
  while(branchNode->parent) {
    bool stop=false;
    for(Node* fact:branchNode->folState->list()) {
      if(fact->keys.N && fact->keys.last()=="block") {
        if(tuplesAreEqual(fact->parents, symbols)) {
          CHECK(fact->isOfType<bool>() && fact->keys.first()=="block", "");
          stop=true;
          break;
        }
      }
    }
    if(stop) break;
    branchNode = branchNode->parent;
  }

  //add the infeasible-literal as an 'ADD' command to the branch node
  if(!branchNode->folAddToState) {
    branchNode->folAddToState = &fol.KB.newSubgraph({"ADD"}, {branchNode->folState->isNodeOfGraph});
  }
  branchNode->folAddToState->newNode<bool>({}, symbols, true);

  //  MNode *root=getRoot();
  branchNode->recomputeAllFolStates();
  //  node->recomputeAllMCStats(false);
  //TODO: resort all queues
}
// Return the chain of nodes from the root down to (and including) this node.
LGP_NodeL LGP_Node::getTreePath() const {
  LGP_NodeL path;
  for(LGP_Node* node=(LGP_Node*)this; node; node=node->parent) {
    path.prepend(node);
  }
  return path;
}
// Render the decisions along the root-to-this path as one string, each
// decision followed by `sep`.  The root (which has no decision) is skipped.
rai::String LGP_Node::getTreePathString(char sep) const {
  LGP_NodeL path = getTreePath();
  rai::String str;
  for(LGP_Node* b : path) {
    if(b->decision) str <<*b->decision <<sep;
    //    else str <<"ROOT" <<sep;
  }
  return str;
}
// Translate the sequence of logic states along the root-to-this path into a
// Skeleton: each predicate that is a SkeletonSymbol becomes one entry whose
// [phase0, phase1] interval spans the states in which the fact persists.
// With finalStateOnly, only this node's state (at time 1) is considered.
Skeleton LGP_Node::getSkeleton(bool finalStateOnly) const {
  rai::Array<Graph*> states;
  arr times;
  if(!finalStateOnly) {
    for(LGP_Node* node:getTreePath()) {
      times.append(node->time);
      states.append(node->folState);
    }
  } else {
    times.append(1.);
    states.append(this->folState);
  }

  //setup a done marker array: which literal in each state is DONE
  uint maxLen=0;
  for(Graph* s:states) if(s->N>maxLen) maxLen = s->N;
  boolA done(states.N, maxLen);
  done = false;

  Skeleton skeleton;

  for(uint k=0; k<states.N; k++) {
    Graph& G = *states(k);
    //    cout <<G <<endl;
    for(uint i=0; i<G.N; i++) {
      if(!done(k, i)) {
        Node* n = G(i);
        if(n->keys.N && n->keys.first()=="decision") continue; //don't pickup decision literals
        StringA symbols;
        for(Node* p:n->parents) symbols.append(p->keys.last());

        //check if there is a predicate
        if(!symbols.N) continue;

        //check if predicate is a SkeletonSymbol
        if(!rai::Enum<SkeletonSymbol>::contains(symbols.first())) continue;

        //trace into the future: mark identical facts in later states as done
        //so the same fact yields a single entry with an extended interval.
        uint k_end=k+1;
        for(; k_end<states.N; k_end++) {
          Node* persists = getEqualFactInList(n, *states(k_end), true);
          if(!persists) break;
          done(k_end, persists->index) = true;
        }
        k_end--;

        rai::Enum<SkeletonSymbol> sym(symbols.first());
        if(k_end==states.N-1) {
          // Fact persists to the end: close the interval at the final time.
          skeleton.append(SkeletonEntry({times(k), times.last(), sym, symbols({1, -1})}));
        } else {
          skeleton.append(SkeletonEntry({times(k), times(k_end), sym, symbols({1, -1})}));
        }
      }
    }
  }

  return skeleton;
}
// Walk up the parent chain to the root of the LGP tree.
LGP_Node* LGP_Node::getRoot() {
  LGP_Node* node=this;
  for(; node->parent; node=node->parent) {}
  return node;
}
// Find the child whose decision tuple matches `folDecision`; returns nullptr
// (after logging) when no such child exists.
LGP_Node* LGP_Node::getChildByAction(Node* folDecision) {
  for(LGP_Node* ch:children) {
    if(tuplesAreEqual(ch->folDecision->parents, folDecision->parents)) return ch;
  }
  LOG(-1) <<"a child with action '" <<*folDecision <<"' does not exist";
  return nullptr;
}
// Append this node and all its descendants (pre-order) to L.
void LGP_Node::getAll(LGP_NodeL& L) {
  L.append(this);
  for(LGP_Node* ch:children) ch->getAll(L);
}
// Randomly descend the tree to an unexpanded, feasible, non-terminal node;
// returns nullptr when the branch ends in an infeasible or terminal node.
LGP_Node* LGP_Node::treePolicy_random() {
  if(isInfeasible || isTerminal) return nullptr;
  if(!children.N) return this;
  return children.rndElem()->treePolicy_random();
}
// Recomputes this node's folState from its parent's state (or from the FOL
// start state at the root), then recurses into all children.
// Returns false when the decision is infeasible or leads to a logic dead end;
// in that case the caller removes this subtree (see DEL_INFEASIBLE usage).
bool LGP_Node::recomputeAllFolStates() {
  if(!parent) { //this is root
    folState->copy(*fol.start_state);
    if(folAddToState) applyEffectLiterals(*folState, *folAddToState, {}, nullptr);
  } else {
    fol.setState(parent->folState, parent->step);
    if(fol.is_feasible_action(decision)) {
      ret = fol.transition(decision);
      time = parent->time + ret.duration;
      isTerminal = fol.successEnd;
      if(fol.deadEnd) {
        if(!feasible(BD_seq) && !feasible(BD_path)) //seq or path have already proven it feasible! Despite the logic...
          isInfeasible=true;
        return false;
      }
      fol.state->index();
      folState->copy(*fol.state);
      if(folAddToState) applyEffectLiterals(*folState, *folAddToState, {}, nullptr);
      folDecision = folState->getNode("decision");
    } else {
      if(!feasible(BD_seq) && !feasible(BD_path)) //seq or path have already proven it feasible! Despite the logic...
        isInfeasible=true;
      return false;
    }
  }
  if(children.N) {
    //iterate backwards so infeasible children can be removed in place
    for(uint i=children.N-1;;) {
      DEL_INFEASIBLE(bool feasible = children(i)->recomputeAllFolStates();)
      DEL_INFEASIBLE(if(!feasible) children.remove(i);)
      if(!i || !children.N) break;
      i--;
    }
  }
  DEBUG(if(!parent) FILE("z.fol") <<fol;)
  return true;
}
// Sanity-checks the tree invariants (state linkage and child/decision
// correspondence); CHECK_EQ failures indicate an internal bug.
void LGP_Node::checkConsistency() {
  //-- check that the state->parent points to the parent's state
  if(parent) {
    CHECK_EQ(parent->folState->isNodeOfGraph, folState->isNodeOfGraph->parents.scalar(), "");
    CHECK_EQ(&folDecision->container, folState, "");
  }

  //-- check that each child exactly matches a decision, in same order
  if(children.N) {
    fol.setState(folState, step);
    auto actions = fol.get_actions();
    CHECK_EQ(children.N, actions.size(), "");
#ifndef RAI_NOCHECK
    uint i=0;
    for(FOL_World::Handle& a:actions) {
      //      cout <<" DECISION: " <<*a <<endl;
      FOL_World::Handle& b = children(i)->decision;
      CHECK_EQ(*a, *b, "children do not match decisions");
      i++;
    }
#endif
  }

  //recurse
  for(auto* ch:children) ch->checkConsistency();
}
// Pretty-prints this node to os: step, time, decision, logic state, optionally
// the decision path from the root, and the per-bound costs.
// When 'recursive' is set, the whole subtree is printed.
void LGP_Node::write(ostream& os, bool recursive, bool path) const {
  os <<"------- NODE -------\ns=" <<step <<" t=" <<time;
  if(decision) os <<" a=" <<*decision <<endl;
  else os <<" a=<ROOT>"<<endl;

  os <<"\t state= " <<*folState->isNodeOfGraph <<endl;
  if(path) {
    os <<"\t decision path:";
    LGP_NodeL _path = getTreePath();
    for(LGP_Node* nn: _path)
      if(nn->decision) os <<*nn->decision <<' '; else os <<" <ROOT> ";
    os <<endl;
  }
  os <<"\t depth=" <<step <<endl;
  os <<"\t poseCost=" <<cost(BD_pose) <<endl;
  os <<"\t seqCost=" <<cost(BD_seq) <<endl;
  os <<"\t pathCost=" <<cost(BD_path) <<endl;
  if(recursive) for(LGP_Node* n:children) n->write(os);
}
// Collects a human-readable summary of this node (decision, state, tree path,
// per-bound costs/constraints/feasibility) into a freshly built Graph.
Graph LGP_Node::getInfo() const {
  Graph G;
  if(decision) G.newNode<rai::String>({"decision"}, {}, STRING(*decision));
  else G.newNode<rai::String>({"decision"}, {}, "<ROOT>");
  G.newNode<rai::String>({"state"}, {}, STRING(*folState->isNodeOfGraph));
  G.newNode<rai::String>({"path"}, {}, getTreePathString());
  G.newNode<arr>({"boundsCost"}, {}, cost);
  G.newNode<arr>({"boundsConstraints"}, {}, constraints);
  G.newNode<boolA>({"boundsFeasible"}, {}, feasible);
  return G;
}
// Recursively adds this subtree to G as a dot-renderable graph.
// n is the graph node of the parent (nullptr for the root call); 'brief'
// suppresses the per-bound statistics appended to each node's keys.
void LGP_Node::getGraph(Graph& G, Node* n, bool brief) {
  if(!n) {
    n = G.newNode<bool>({"a:<ROOT>"}, NodeL(), true);
  } else {
    n = G.newNode<bool>({STRING("a:"<<*decision)}, {n}, true);
  }

  if(!brief) {
    n->keys.append(STRING("s:" <<step <<" t:" <<time <<" bound:" <<highestBound <<" feas:" <<!isInfeasible <<" term:" <<isTerminal <<' ' <<folState->isNodeOfGraph->keys.scalar()));
    //one key line per bound: count, cost|constraints, feasibility, compute time
    for(uint l=0; l<L; l++)
      n->keys.append(STRING(rai::Enum<BoundType>::name(l) <<" #:" <<count(l) <<" c:" <<cost(l) <<"|" <<constraints(l) <<" " <<(feasible(l)?'1':'0') <<" time:" <<computeTime(l)));
    if(folAddToState) n->keys.append(STRING("symAdd:" <<*folAddToState));
    if(note.N) n->keys.append(note);
  }

  //color-code the node by its status (infeasible/terminal/partially computed)
  G.getRenderingInfo(n).dotstyle="shape=box";
  if(isInfeasible) {
    if(isTerminal)  G.getRenderingInfo(n).dotstyle <<" style=filled fillcolor=violet";
    else G.getRenderingInfo(n).dotstyle <<" style=filled fillcolor=red";
  } else if(isTerminal) {
    if(count(BD_seq) || count(BD_path)) G.getRenderingInfo(n).dotstyle <<" style=filled fillcolor=cyan";
    else G.getRenderingInfo(n).dotstyle <<" style=filled fillcolor=blue";
  } else {
    if(sum(count) - count(BD_symbolic)>0) G.getRenderingInfo(n).dotstyle <<" style=filled fillcolor=green";
  }
  //  if(inFringe1) G.getRenderingInfo(n).dotstyle <<" color=green";
  //  if(inFringe1) G.getRenderingInfo(n).dotstyle <<" peripheries=2";
  //  if(inFringe2) G.getRenderingInfo(n).dotstyle <<" peripheries=3";

  //  n->keys.append(STRING("reward:" <<effPoseReward));
  for(LGP_Node* ch:children) ch->getGraph(G, n, brief);
}
// Replays the KOMO trajectory computed for the given bound in the OpenGL
// viewer; logs an error if that bound has not been computed yet.
void LGP_Node::displayBound(ptr<OpenGL>& gl, BoundType bound) {
  if(!komoProblem(bound)) {
    LOG(-1) <<"bound was not computed - cannot display";
  } else {
    CHECK(!komoProblem(bound)->gl, "");
    rai::Enum<BoundType> _bound(bound);
    gl->title.clear() <<"BOUND " <<_bound <<" at step " <<step;
    gl->setTitle();
    komoProblem(bound)->gl = gl;
    //path-like bounds replay with a .1 per-frame delay; others pass -1.
    //(NOTE(review): -1. presumably means "wait for user input" — confirm in KOMO)
    if(bound>=BD_path && bound<=BD_seqVelPath)
      while(komoProblem(bound)->displayTrajectory(.1, true, false));
    else
      while(komoProblem(bound)->displayTrajectory(-1., true, false));
    komoProblem(bound)->gl.reset();
  }
}
// Static-initialization hook: mark LGP_Node lists as memmove-able so rai
// array operations may relocate their elements with raw memory moves.
RUN_ON_INIT_BEGIN(manipulationTree)
LGP_NodeL::memMove = true;
RUN_ON_INIT_END(manipulationTree)
|
//=================================================================================================
/*!
// \file src/mathtest/dmatdmatmin/M3x3bMDb.cpp
// \brief Source file for the M3x3bMDb dense matrix/dense matrix minimum math test
//
// Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <cstdlib>
#include <iostream>
#include <blaze/math/DynamicMatrix.h>
#include <blaze/math/StaticMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/dmatdmatmin/OperationTest.h>
#include <blazetest/system/MathTest.h>
#ifdef BLAZE_USE_HPX_THREADS
# include <hpx/hpx_main.hpp>
#endif
//=================================================================================================
//
// MAIN FUNCTION
//
//=================================================================================================
//*************************************************************************************************
// Entry point: runs the Blaze dense-matrix/dense-matrix minimum operation
// test for the StaticMatrix<TypeB,3UL,3UL> vs. DynamicMatrix<TypeB> pairing.
// Returns EXIT_FAILURE if the test harness throws.
int main()
{
   std::cout << " Running 'M3x3bMDb'..." << std::endl;

   using blazetest::mathtest::TypeB;

   try
   {
      // Matrix type definitions
      using M3x3b = blaze::StaticMatrix<TypeB,3UL,3UL>;
      using MDb = blaze::DynamicMatrix<TypeB>;

      // Creator type definitions
      using CM3x3b = blazetest::Creator<M3x3b>;
      using CMDb = blazetest::Creator<MDb>;

      // Running the tests
      RUN_DMATDMATMIN_OPERATION_TEST( CM3x3b(), CMDb( 3UL, 3UL ) );
   }
   catch( std::exception& ex ) {
      std::cerr << "\n\n ERROR DETECTED during dense matrix/dense matrix minimum:\n"
                << ex.what() << "\n";
      return EXIT_FAILURE;
   }

   return EXIT_SUCCESS;
}
//*************************************************************************************************
|
/****************************************************************************
Copyright (C) 2013 Henry van Merode. All rights reserved.
Copyright (c) 2015-2016 Chukong Technologies Inc.
Copyright (c) 2017-2018 Xiamen Yaji Software Co., Ltd.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "CCPUBaseForceAffector.h"
#include "extensions/Particle3D/PU/CCPUParticleSystem3D.h"
NS_CC_BEGIN
// Constants: by default no force is applied, and forces combine additively.
const Vec3 PUBaseForceAffector::DEFAULT_FORCE_VECTOR(0, 0, 0);
const PUBaseForceAffector::ForceApplication PUBaseForceAffector::DEFAULT_FORCE_APPL = PUBaseForceAffector::FA_ADD;

//-----------------------------------------------------------------------
// Initializes the affector with the default (zero) force vector and the
// default (FA_ADD) application mode.
PUBaseForceAffector::PUBaseForceAffector()
: PUAffector()
, _forceVector(DEFAULT_FORCE_VECTOR)
, _forceApplication(DEFAULT_FORCE_APPL)
{
}

PUBaseForceAffector::~PUBaseForceAffector()
{
}

// Returns the force vector applied to particles.
const Vec3& PUBaseForceAffector::getForceVector() const
{
    return _forceVector;
}

// Sets the force vector applied to particles.
void PUBaseForceAffector::setForceVector( const Vec3& forceVector )
{
    _forceVector = forceVector;
}

// Returns how the force is combined with a particle's motion (e.g. FA_ADD).
PUBaseForceAffector::ForceApplication PUBaseForceAffector::getForceApplication() const
{
    return _forceApplication;
}

// Sets how the force is combined with a particle's motion.
void PUBaseForceAffector::setForceApplication( ForceApplication forceApplication )
{
    _forceApplication = forceApplication;
}

// Copies this affector's configuration onto 'affector'.
// NOTE(review): the static_cast is unchecked — the caller must pass a
// PUBaseForceAffector (this matches the other PU copyAttributesTo overrides).
void PUBaseForceAffector::copyAttributesTo( PUAffector* affector )
{
    PUAffector::copyAttributesTo(affector);

    PUBaseForceAffector* baseForceAffector = static_cast<PUBaseForceAffector*>(affector);
    baseForceAffector->_forceVector = _forceVector;
    baseForceAffector->_forceApplication = _forceApplication;
}
NS_CC_END
|
#pragma once
// This file is generated from the Game's Reflection data
#include <cstdint>
#include <RED4ext/Common.hpp>
#include <RED4ext/REDhash.hpp>
#include <RED4ext/CName.hpp>
#include <RED4ext/Types/SimpleTypes.hpp>
#include <RED4ext/Types/generated/game/AlwaysSpawnedState.hpp>
#include <RED4ext/Types/generated/game/Object.hpp>
namespace RED4ext
{
namespace dbg {
// Debug spawner game object. This file mirrors the game's reflection data;
// the hex comments are the reflected field offsets — do not reorder members.
struct Spawner : game::Object
{
    static constexpr const char* NAME = "dbgSpawner";
    static constexpr const char* ALIAS = NAME;

    CName appearance; // 228
    TweakDBID objectRecordId; // 230
    bool isActive; // 238
    game::AlwaysSpawnedState alwaysSpawned; // 239
    uint8_t unk23A[0x240 - 0x23A]; // 23A — padding up to the reflected size
};
RED4EXT_ASSERT_SIZE(Spawner, 0x240);
} // namespace dbg
} // namespace RED4ext
|
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef CPU_X64_JIT_AVX512_CORE_AMX_CONVOLUTION_HPP
#define CPU_X64_JIT_AVX512_CORE_AMX_CONVOLUTION_HPP
#include "common/c_types_map.hpp"
#include "common/dnnl_thread.hpp"
#include "common/memory_tracking.hpp"
#include "common/primitive.hpp"
#include "common/utils.hpp"
#include "cpu/cpu_convolution_pd.hpp"
#include "cpu/x64/jit_avx512_core_amx_conv_kernel.hpp"
namespace dnnl {
namespace impl {
namespace cpu {
namespace x64 {
// JIT forward convolution primitive using AVX512-core AMX tiles.
// Supported configurations (see pd_t::init): bf16 src/weights with f32/bf16
// dst, or int8 (src_type/dst_type template params with s8 weights and s32
// accumulation).
template <impl::data_type_t src_type, impl::data_type_t wei_type,
        impl::data_type_t dst_type>
struct jit_avx512_core_amx_convolution_fwd_t : public primitive_t {
    struct pd_t : public cpu_convolution_fwd_pd_t {
        pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}

        DECLARE_COMMON_PD_T(JIT_IMPL_NAME_HELPER("jit:", jcp_.isa, ""),
                jit_avx512_core_amx_convolution_fwd_t);

        // Checks data types / attributes, fills jcp_ via the kernel's
        // init_conf, and books the required scratchpad.
        status_t init(engine_t *engine) {
            // bf16 path: bf16 src/weights, f32 or bf16 dst and bias,
            // only post-ops attributes allowed
            bool is_bf16_convolution = true
                    && (src_md_.data_type == data_type::bf16
                            && weights_md_.data_type == data_type::bf16
                            && utils::one_of(dst_md_.data_type, data_type::f32,
                                    data_type::bf16))
                    && IMPLICATION(with_bias(),
                            utils::one_of(bias_md_.data_type, data_type::f32,
                                    data_type::bf16))
                    && attr()->has_default_values(
                            primitive_attr_t::skip_mask_t::post_ops);
            // int8 path: s8 weights, s32 accumulation; output scales and
            // post-ops attributes allowed
            bool is_int8_convolution = true
                    && expect_data_types(src_type, data_type::s8,
                            data_type::undef, dst_type, data_type::s32)
                    && IMPLICATION(with_bias(),
                            utils::one_of(bias_md_.data_type, data_type::f32,
                                    data_type::s32, data_type::s8,
                                    data_type::u8))
                    && attr()->has_default_values(
                            primitive_attr_t::skip_mask_t::oscale
                            | primitive_attr_t::skip_mask_t::post_ops);

            bool ok = true && is_fwd()
                    && set_default_alg_kind(alg_kind::convolution_direct)
                    && (is_bf16_convolution || is_int8_convolution)
                    && !has_zero_dim_memory();
            if (!ok) return status::unimplemented;

            status_t status = jit_avx512_core_amx_fwd_kernel_t::init_conf(jcp_,
                    *desc(), src_md_, weights_md_, dst_md_, bias_md_, *attr(),
                    dnnl_get_max_threads());
            if (status != status::success) return status;

            auto scratchpad = scratchpad_registry().registrar();
            jit_avx512_core_amx_fwd_kernel_t::init_scratchpad(
                    scratchpad, jcp_, *attr());

            return status;
        }

        jit_conv_conf_t jcp_;
    };

    jit_avx512_core_amx_convolution_fwd_t(const pd_t *apd) : primitive_t(apd) {
        kernel_ = new jit_avx512_core_amx_fwd_kernel_t(
                pd()->jcp_, *pd()->attr());
    }
    ~jit_avx512_core_amx_convolution_fwd_t() { delete kernel_; }

    typedef typename prec_traits<src_type>::type src_data_t;
    typedef typename prec_traits<wei_type>::type wei_data_t;
    typedef typename prec_traits<dst_type>::type dst_data_t;

    // Dispatches to the appropriate forward implementation; 5D tensors and
    // depthwise cases are not supported by this primitive.
    status_t execute(const exec_ctx_t &ctx) const override {
        const auto &_pd = pd();
        if (_pd->ndims() > 4)
            return status::unimplemented;
        else if (_pd->jcp_.is_depthwise)
            return status::unimplemented;
        else if (_pd->jcp_.is_relo)
            execute_forward_reduced_lowering(ctx);
        else
            execute_forward(ctx);
        return status::success;
    }

private:
    void execute_forward_reduced_lowering(const exec_ctx_t &ctx) const;
    void execute_forward(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    void prepare_padded_bias(const char *&bias,
            const memory_tracking::grantor_t &scratchpad) const;

    // Raw owning pointer freed in the destructor.
    // NOTE(review): no copy/move suppression here — presumably primitive_t
    // is already non-copyable; verify, otherwise a copy would double-free.
    jit_avx512_core_amx_fwd_kernel_t *kernel_;
};
} // namespace x64
} // namespace cpu
} // namespace impl
} // namespace dnnl
#endif
// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s
|
/*
* Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tencentcloud/iotvideoindustry/v20201201/model/DescribeGroupByPathRequest.h>
#include <tencentcloud/core/utils/rapidjson/document.h>
#include <tencentcloud/core/utils/rapidjson/writer.h>
#include <tencentcloud/core/utils/rapidjson/stringbuffer.h>
using namespace TencentCloud::Iotvideoindustry::V20201201::Model;
using namespace std;
// All optional request fields start out unset; unset fields are omitted from
// the serialized JSON (see ToJsonString).
DescribeGroupByPathRequest::DescribeGroupByPathRequest() :
    m_groupPathHasBeenSet(false)
{
}
// Serializes the request to its JSON wire form; only fields that have been
// explicitly set are emitted.
string DescribeGroupByPathRequest::ToJsonString() const
{
    rapidjson::Document document;
    document.SetObject();
    rapidjson::Document::AllocatorType& allocator = document.GetAllocator();

    if (m_groupPathHasBeenSet)
    {
        rapidjson::Value keyNode(rapidjson::kStringType);
        const string key = "GroupPath";
        keyNode.SetString(key.c_str(), allocator);
        document.AddMember(keyNode, rapidjson::Value(m_groupPath.c_str(), allocator).Move(), allocator);
    }

    rapidjson::StringBuffer buffer;
    rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
    document.Accept(writer);
    return buffer.GetString();
}
// Returns the group path filter (serialized as "GroupPath").
string DescribeGroupByPathRequest::GetGroupPath() const
{
    return m_groupPath;
}
// Sets the group path and marks the field as present for serialization.
void DescribeGroupByPathRequest::SetGroupPath(const string& _groupPath)
{
    m_groupPath = _groupPath;
    m_groupPathHasBeenSet = true;
}
// True once SetGroupPath has been called.
bool DescribeGroupByPathRequest::GroupPathHasBeenSet() const
{
    return m_groupPathHasBeenSet;
}
|
/**
* Copyright (c) 2006-2012 LOVE Development Team
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
**/
#include "wrap_Shape.h"
#include <common/StringMap.h>
namespace love
{
namespace physics
{
namespace box2d
{
	// Retrieves the Shape userdata at the given stack index
	// (luax_checktype errors on a type mismatch).
	Shape * luax_checkshape(lua_State * L, int idx)
	{
		return luax_checktype<Shape>(L, idx, "Shape", PHYSICS_SHAPE_T);
	}

	// shape:getType() -> string name of the shape type ("" if unmapped).
	int w_Shape_getType(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		const char * type = "";
		Shape::getConstant(t->getType(), type);
		lua_pushstring(L, type);
		return 1;
	}

	// shape:getRadius() -> number
	int w_Shape_getRadius(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		float radius = t->getRadius();
		lua_pushnumber(L, radius);
		return 1;
	}

	// shape:getChildCount() -> integer
	int w_Shape_getChildCount(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		int childCount = t->getChildCount();
		lua_pushinteger(L, childCount);
		return 1;
	}

	// shape:testPoint(x, y, r, px, py) -> boolean
	// Tests point (px, py) against the shape transformed by position (x, y)
	// and rotation r.
	int w_Shape_testPoint(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		float x = (float)luaL_checknumber(L, 2);
		float y = (float)luaL_checknumber(L, 3);
		float r = (float)luaL_checknumber(L, 4);
		float px = (float)luaL_checknumber(L, 5);
		float py = (float)luaL_checknumber(L, 6);
		bool result = t->testPoint(x, y, r, px, py);
		lua_pushboolean(L, result);
		return 1;
	}

	// shape:rayCast(...): pops the shape and forwards the remaining stack
	// to Shape::rayCast; ASSERT_GUARD converts love exceptions to Lua errors.
	int w_Shape_rayCast(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		lua_remove(L, 1);
		ASSERT_GUARD(return t->rayCast(L);)
	}

	// shape:computeAABB(...): forwards the remaining stack to Shape::computeAABB.
	int w_Shape_computeAABB(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		lua_remove(L, 1);
		return t->computeAABB(L);
	}

	// shape:computeMass(...): forwards the remaining stack to Shape::computeMass.
	int w_Shape_computeMass(lua_State * L)
	{
		Shape * t = luax_checkshape(L, 1);
		lua_remove(L, 1);
		return t->computeMass(L);
	}

	// Method table registered on the Shape userdata type.
	static const luaL_Reg functions[] = {
		{ "getType", w_Shape_getType },
		{ "getRadius", w_Shape_getRadius },
		{ "getChildCount", w_Shape_getChildCount },
		{ "testPoint", w_Shape_testPoint },
		{ "rayCast", w_Shape_rayCast },
		{ "computeAABB", w_Shape_computeAABB },
		{ "computeMass", w_Shape_computeMass },
		{ 0, 0 }
	};

	// Registers the Shape type and its method table with the Lua state.
	extern "C" int luaopen_shape(lua_State * L)
	{
		return luax_register_type(L, "Shape", functions);
	}

} // box2d
} // physics
} // love
|
//===- CastValueChecker - Model implementation of custom RTTIs --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This defines CastValueChecker which models casts of custom RTTIs.
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/Optional.h"
using namespace clang;
using namespace ento;
namespace {
// Models llvm::cast / dyn_cast / cast_or_null / dyn_cast_or_null calls by
// splitting the analyzer state over the possible parameter/return-value
// null-ness combinations.
class CastValueChecker : public Checker<eval::Call> {
  // Handler signature: (checker, call expression, argument value, context).
  using CastCheck =
      std::function<void(const CastValueChecker *, const CallExpr *,
                         DefinedOrUnknownSVal, CheckerContext &)>;

public:
  // We have three cases to evaluate a cast:
  // 1) The parameter is non-null, the return value is non-null
  // 2) The parameter is non-null, the return value is null
  // 3) The parameter is null, the return value is null
  //
  // cast: 1;  dyn_cast: 1, 2;  cast_or_null: 1, 3;  dyn_cast_or_null: 1, 2, 3.
  bool evalCall(const CallEvent &Call, CheckerContext &C) const;

private:
  // These are known in the LLVM project. Each entry maps a one-argument call
  // in namespace 'llvm' to the handler modeling its case combination.
  const CallDescriptionMap<CastCheck> CDM = {
      {{{"llvm", "cast"}, 1}, &CastValueChecker::evalCast},
      {{{"llvm", "dyn_cast"}, 1}, &CastValueChecker::evalDynCast},
      {{{"llvm", "cast_or_null"}, 1}, &CastValueChecker::evalCastOrNull},
      {{{"llvm", "dyn_cast_or_null"}, 1},
       &CastValueChecker::evalDynCastOrNull}};

  void evalCast(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
                CheckerContext &C) const;
  void evalDynCast(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
                   CheckerContext &C) const;
  void evalCastOrNull(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
                      CheckerContext &C) const;
  void evalDynCastOrNull(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
                         CheckerContext &C) const;
};
} // namespace
// Returns the name of the pointee record type of a cast expression.
// The caller (evalCall) guarantees getPointeeCXXRecordDecl() is non-null.
static std::string getCastName(const Expr *Cast) {
  return Cast->getType()->getPointeeCXXRecordDecl()->getNameAsString();
}
// Case 1: assume the argument is non-null and bind it unchanged as the call's
// return value — i.e. the cast succeeds.
static void evalNonNullParamNonNullReturn(const CallExpr *CE,
                                          DefinedOrUnknownSVal ParamDV,
                                          CheckerContext &C) {
  ProgramStateRef State = C.getState()->assume(ParamDV, true);
  if (!State)
    return; // contradiction: the argument cannot be non-null on this path

  State = State->BindExpr(CE, C.getLocationContext(), ParamDV, false);

  std::string CastFromName = getCastName(CE->getArg(0));
  std::string CastToName = getCastName(CE);

  // Prunable note shown on bug paths that depend on this assumption.
  const NoteTag *CastTag = C.getNoteTag(
      [CastFromName, CastToName](BugReport &) -> std::string {
        SmallString<128> Msg;
        llvm::raw_svector_ostream Out(Msg);
        Out << "Assuming dynamic cast from '" << CastFromName << "' to '"
            << CastToName << "' succeeds";
        return Out.str();
      },
      /*IsPrunable=*/true);

  C.addTransition(State, CastTag);
}
// Case 2: assume the argument is non-null but bind a null return value —
// i.e. the dynamic cast fails.
static void evalNonNullParamNullReturn(const CallExpr *CE,
                                       DefinedOrUnknownSVal ParamDV,
                                       CheckerContext &C) {
  ProgramStateRef State = C.getState()->assume(ParamDV, true);
  if (!State)
    return; // contradiction: the argument cannot be non-null on this path

  State = State->BindExpr(CE, C.getLocationContext(),
                          C.getSValBuilder().makeNull(), false);

  std::string CastFromName = getCastName(CE->getArg(0));
  std::string CastToName = getCastName(CE);

  // Prunable note shown on bug paths that depend on this assumption.
  const NoteTag *CastTag = C.getNoteTag(
      [CastFromName, CastToName](BugReport &) -> std::string {
        SmallString<128> Msg;
        llvm::raw_svector_ostream Out(Msg);
        Out << "Assuming dynamic cast from '" << CastFromName << "' to '"
            << CastToName << "' fails";
        return Out.str();
      },
      /*IsPrunable=*/true);

  C.addTransition(State, CastTag);
}
// Case 3: assume the argument is null; the *_or_null variants then return
// null as well.
static void evalNullParamNullReturn(const CallExpr *CE,
                                    DefinedOrUnknownSVal ParamDV,
                                    CheckerContext &C) {
  ProgramStateRef State = C.getState()->assume(ParamDV, false);
  if (!State)
    return; // contradiction: the argument cannot be null on this path

  State = State->BindExpr(CE, C.getLocationContext(),
                          C.getSValBuilder().makeNull(), false);

  const NoteTag *CastTag =
      C.getNoteTag("Assuming null pointer is passed into cast",
                   /*IsPrunable=*/true);

  C.addTransition(State, CastTag);
}
// cast<T>: non-null in, non-null out (case 1 only).
void CastValueChecker::evalCast(const CallExpr *CE,
                                DefinedOrUnknownSVal ParamDV,
                                CheckerContext &C) const {
  evalNonNullParamNonNullReturn(CE, ParamDV, C);
}
// dyn_cast<T>: non-null in; may succeed or fail (cases 1 and 2).
void CastValueChecker::evalDynCast(const CallExpr *CE,
                                   DefinedOrUnknownSVal ParamDV,
                                   CheckerContext &C) const {
  evalNonNullParamNonNullReturn(CE, ParamDV, C);
  evalNonNullParamNullReturn(CE, ParamDV, C);
}
// cast_or_null<T>: null propagates, non-null always succeeds (cases 1 and 3).
void CastValueChecker::evalCastOrNull(const CallExpr *CE,
                                      DefinedOrUnknownSVal ParamDV,
                                      CheckerContext &C) const {
  evalNonNullParamNonNullReturn(CE, ParamDV, C);
  evalNullParamNullReturn(CE, ParamDV, C);
}
// dyn_cast_or_null<T>: all three cases are possible.
void CastValueChecker::evalDynCastOrNull(const CallExpr *CE,
                                         DefinedOrUnknownSVal ParamDV,
                                         CheckerContext &C) const {
  evalNonNullParamNonNullReturn(CE, ParamDV, C);
  evalNonNullParamNullReturn(CE, ParamDV, C);
  evalNullParamNullReturn(CE, ParamDV, C);
}
bool CastValueChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
const CastCheck *Check = CDM.lookup(Call);
if (!Check)
return false;
const auto *CE = cast<CallExpr>(Call.getOriginExpr());
if (!CE)
return false;
// If we cannot obtain both of the classes we cannot be sure how to model it.
if (!CE->getType()->getPointeeCXXRecordDecl() ||
!CE->getArg(0)->getType()->getPointeeCXXRecordDecl())
return false;
SVal ParamV = Call.getArgSVal(0);
auto ParamDV = ParamV.getAs<DefinedOrUnknownSVal>();
if (!ParamDV)
return false;
(*Check)(this, CE, *ParamDV, C);
return true;
}
// Registration hook called by the static analyzer's checker registry.
void ento::registerCastValueChecker(CheckerManager &Mgr) {
  Mgr.registerChecker<CastValueChecker>();
}
// The checker has no language-option prerequisites; always available.
bool ento::shouldRegisterCastValueChecker(const LangOptions &LO) {
  return true;
}
|
// Problem Statement
// Implement a MyCalendar class to store your events.
// A new event can be added if adding the event will not cause a double booking.
//
// Your class will have the method, book(int start, int end). Formally,
// this represents a booking on the half open interval [start, end),
// the range of real numbers x such that start <= x < end.
//
// A double booking happens when two events have some non-empty
// intersection (ie., there is some time that is common to both events.)
//
// For each call to the method MyCalendar.book, return true if the event can be
// added to the calendar successfully without causing a double booking.
// Otherwise, return false and do not add the event to the calendar.
#include <bits/stdc++.h>
using namespace std;
// Calendar that accepts a booking [start, end) only if it does not overlap
// any previously accepted booking. Bookings are kept in an (unbalanced)
// binary search tree keyed on the interval start, so book() is O(h).
class MyCalendar{
private:
    // Half-open event interval [start, end).
    struct Interval{
        int start,end;
        Interval() : start(0), end(0) {}
        Interval(int s, int e) : start(s), end(e) {}
    };
    class EventTree{
    private:
        struct TreeNode{
            Interval interval;
            TreeNode *left,*right;
            TreeNode(Interval inter) : interval(inter), left(nullptr), right(nullptr) {}
        };
        TreeNode *root;

        // Recursively frees the subtree rooted at node.
        // Bug fix: the original class never released its nodes (memory leak).
        static void destroy(TreeNode *node){
            if(!node) return;
            destroy(node->left);
            destroy(node->right);
            delete node;
        }
    public:
        EventTree() : root(nullptr) {}
        ~EventTree(){ destroy(root); }
        // Non-copyable: nodes are raw-owned by this tree.
        EventTree(const EventTree&) = delete;
        EventTree& operator=(const EventTree&) = delete;

        // Inserts newInterval iff it overlaps no stored interval;
        // returns false (and stores nothing) on a conflict.
        bool insert(Interval newInterval){
            if(!root){
                root = new TreeNode(newInterval);
                return true;
            }
            TreeNode *it = root;
            while(it){
                Interval actual = it->interval;
                if(newInterval.start < actual.start){
                    // overlap: the new event ends after the existing one starts
                    if(newInterval.end > actual.start)
                        return false;
                    if(it->left)
                        it = it->left;
                    else{
                        it->left = new TreeNode(newInterval);
                        break;
                    }
                }else{
                    // overlap: the new event starts before the existing one ends
                    if(newInterval.start < actual.end)
                        return false;
                    if(it->right)
                        it = it->right;
                    else{
                        it->right = new TreeNode(newInterval);
                        break;
                    }
                }
            }
            return true;
        }
    };
    EventTree eventTree;
public:
    MyCalendar(){}
    // Books [start, end); returns true on success, false on double booking.
    bool book(int start, int end){
        Interval interval(start,end);
        return eventTree.insert(interval);
    }
};
int main(){
MyCalendar myCalendar;
vector<int> events = {10,20,15,25,20,30,5,10};
for(int i=0;i<events.size();i+=2){
if(myCalendar.book(events[i],events[i+1])){
cout<<"("<<events[i]<<","<<events[i+1]<<") Booked"<<endl;
}else{
cout<<"Can't book"<<endl;
}
}
return 0;
}
|
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
const int maxY = 128;
const int maxX = 128;
// Returns a pseudo-random double uniformly distributed in [MIN, MAX).
// Bug fix: the original expression ((rand() % (10*MAX)) + 5*MIN) * 0.1
// produced the range [MIN/2, MAX + MIN/2) instead of [MIN, MAX), and
// invoked undefined behavior (modulo by a non-positive value) for MAX <= 0.
double fRand(int MIN, int MAX) {
    return MIN + (MAX - MIN)
            * (static_cast<double>(rand()) / (static_cast<double>(RAND_MAX) + 1.0));
}
// Fills the first 'lines' x 'columns' cells of 'array' with random values in
// the range given by minIJ/maxIJ (via fRand).
// Returns 0 on success, 1 when the requested dimensions are invalid.
int ArrCreator(double array[][maxX], int lines, int columns, int minIJ, int maxIJ) {
    if (lines <= 0 || columns <= 0 || lines > maxY || columns > maxX)
        return 1;
    for (int row = 0; row < lines; row++) {
        for (int col = 0; col < columns; col++) {
            array[row][col] = fRand(minIJ, maxIJ);
        }
    }
    return 0;
}
// Prints the table and each row's sum, and returns (via 'res') the largest
// row sum. Returns 0 on success, 1 when the dimensions are invalid.
int TableProc(const double arr[][maxX], int lines, int columns, double &res) {
    double arrSum[maxY] = {0};  // bug fix: accumulators were uninitialized
    int i, j;
    int result = 1;
    // bug fix: the original guard was
    //   if (lines >= 0 || columns >= 0 || lines < maxY || columns < maxX)
    // which is almost always true and accepted out-of-range dimensions,
    // overrunning arrSum. Validate like ArrCreator does.
    if (lines > 0 && columns > 0 && lines <= maxY && columns <= maxX) {
        for (i = 0; i < lines; i++) {
            for (j = 0; j < columns; j++) {
                printf("%3.2lf ", arr[i][j]); //vs
                arrSum[i] += arr[i][j];
            }
            printf("\n"); //vs
        }
        // find the maximum row sum
        double tmp = arrSum[0];
        printf("\n%3.2lf ", tmp); //vs
        for (i = 1; i < lines; i++) {
            printf("%3.2lf ", arrSum[i]); //vs
            if (tmp < arrSum[i])
                tmp = arrSum[i];
        }
        res = tmp;
        result = 0;
    }
    return result;
}
// Reads matrix dimensions and a value range from stdin, fills the matrix with
// random values, prints it with each row's sum, and finally prints the
// largest row sum (or ERROR when TableProc rejects the dimensions).
// NOTE(review): scanf return values are unchecked — non-numeric input leaves
// the variables unset and the loops may misbehave.
int main() {
    std::srand(time(0));
    double arr[maxY][maxX];
    double res;
    int lines, columns, min, max;
    int resTP;
    // re-prompt until a positive line count is given
    do {
        printf("Enter lines: ");
        scanf("%d", &lines);
    } while (lines < 1);
    // re-prompt until a positive column count is given
    do {
        printf("Enter columns: ");
        scanf("%d", &columns);
    } while (columns < 1);
    printf("Enter min: ");
    scanf("%d", &min);
    printf("Enter max: ");
    scanf("%d", &max);
    ArrCreator(arr, lines, columns, min, max);
    resTP = TableProc(arr, lines, columns, res);
    if (resTP == 0) {
        printf("\n |%3.2lf| ", res);
    } else {
        printf("ERROR");
    }
    return 0;
}
|
/*Header-MicMac-eLiSe-25/06/2007
MicMac : Multi Image Correspondances par Methodes Automatiques de Correlation
eLiSe : ELements of an Image Software Environnement
www.micmac.ign.fr
Copyright : Institut Geographique National
Author : Marc Pierrot Deseilligny
Contributors : Gregoire Maillet, Didier Boldo.
[1] M. Pierrot-Deseilligny, N. Paparoditis.
"A multiresolution and optimization-based image matching approach:
An application to surface reconstruction from SPOT5-HRS stereo imagery."
In IAPRS vol XXXVI-1/W41 in ISPRS Workshop On Topographic Mapping From Space
(With Special Emphasis on Small Satellites), Ankara, Turquie, 02-2006.
[2] M. Pierrot-Deseilligny, "MicMac, un logiciel de mise en correspondance
d'images, adapte au contexte geographique" to appear in
Bulletin d'information de l'Institut Geographique National, 2007.
Francais :
MicMac est un logiciel de mise en correspondance d'image adapte
au contexte de recherche en information geographique. Il s'appuie sur
la bibliotheque de manipulation d'image eLiSe. Il est distibue sous la
licences Cecill-B. Voir en bas de fichier et http://www.cecill.info.
English :
MicMac is an open source software specialized in image matching
for research in geographic information. MicMac is built on the
eLiSe image library. MicMac is governed by the "Cecill-B licence".
See below and http://www.cecill.info.
Header-MicMac-eLiSe-25/06/2007*/
#include "all_etal.h"
/*
Erreurs possibles :
Erreur de syntaxe sur fichier Param,
Fichier Tiff inexistant
Ni Fichiers PointInit existant( ni dans les ref pour les fichier non Primaire)
Memes erreurs sur les fichier references
*/
// Small helper that runs the successive calibration executables, forwarding
// the parameter file given on the command line to each of them.
class cExeComm
{
    public :
        cExeComm(int argc,char ** argv)
        {
            ELISE_ASSERT(argc>=2,"cExeComm Not Enough Arg");
            mNameParam = argv[1];
        }

        // Runs "<MMDir>/bin/<aCom> <paramfile>" via system(), echoing the
        // full command line and its exit code.
        void DoOne(const std::string & aCom)
        {
            std::string aComComp = MMDir() + "bin/" + aCom + " " + mNameParam;
            cout << "\n\n";
            cout << "*****************************************************\n";
            cout << "COM = [" << aComComp << "]\n";
            INT aCode = system(aComComp.c_str());
            cout << "CODE FOR COM = [" << aCode << "]\n";
        }

    private :
        // Name of the parameter file passed to every sub-command.
        std::string mNameParam;
};
int main(int argc,char ** argv)
{
MMD_InitArgcArgv(argc,argv);
cEtalonnage::Verif(false,argc,argv);
cExeComm anEC(argc,argv);
anEC.DoOne("EPExeRechCibleInit");
anEC.DoOne("EPExeCalibInit");
anEC.DoOne("EPExeRechCibleDRad");
anEC.DoOne("EPExeCalibFinale");
return 0;
}
/*Footer-MicMac-eLiSe-25/06/2007
Ce logiciel est un programme informatique servant à la mise en
correspondances d'images pour la reconstruction du relief.
Ce logiciel est régi par la licence CeCILL-B soumise au droit français et
respectant les principes de diffusion des logiciels libres. Vous pouvez
utiliser, modifier et/ou redistribuer ce programme sous les conditions
de la licence CeCILL-B telle que diffusée par le CEA, le CNRS et l'INRIA
sur le site "http://www.cecill.info".
En contrepartie de l'accessibilité au code source et des droits de copie,
de modification et de redistribution accordés par cette licence, il n'est
offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons,
seule une responsabilité restreinte pèse sur l'auteur du programme, le
titulaire des droits patrimoniaux et les concédants successifs.
A cet égard l'attention de l'utilisateur est attirée sur les risques
associés au chargement, à l'utilisation, à la modification et/ou au
développement et à la reproduction du logiciel par l'utilisateur étant
donné sa spécificité de logiciel libre, qui peut le rendre complexe à
manipuler et qui le réserve donc à des développeurs et des professionnels
avertis possédant des connaissances informatiques approfondies. Les
utilisateurs sont donc invités à charger et tester l'adéquation du
logiciel à leurs besoins dans des conditions permettant d'assurer la
sécurité de leurs systèmes et ou de leurs données et, plus généralement,
à l'utiliser et l'exploiter dans les mêmes conditions de sécurité.
Le fait que vous puissiez accéder à cet en-tête signifie que vous avez
pris connaissance de la licence CeCILL-B, et que vous en avez accepté les
termes.
Footer-MicMac-eLiSe-25/06/2007*/
|
#include "test.h"
using namespace std;
using namespace fun;
// Smoke test: each literal kind (int, float, string, nil, booleans) must
// parse as the operand of a `print` statement without error.
PARSE(Print, 0, R"(
print 42
print 3.14
print "foo"
print nil
print false
print true
)")
// A bare `print` with no operand must raise a ParserError.
PARSE_ERR(Print, 1, R"(
print
)", ParserError)
//Parse, Print_2) {
// {
// ParseResult r;
// EXPECT_NO_THROW(r = parseAst(R"(
//print 42
//)"););
//
// auto print = dynamic_cast<Print*>(r.ast->root());
//
// ASSERT_NE(print, nullptr);
//
// ASSERT_EQ(print->loc, location(position(nullptr, 2, 0), position(nullptr, 2, 8)));
// }
// ASSERT_EQ(Statement::counter(), 0);
//}
//
|
///////////////////////////////////////////////////////////////////////////////
// bitfield.hpp: defines the bitfield_base type
//
// Copyright 2005 Frank Laub
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ENUM_BITFIELD_HPP
#define BOOST_ENUM_BITFIELD_HPP
// MS compatible compilers support #pragma once
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include <sstream>
#include <vector>
#include <boost/foreach.hpp>
#include <boost/operators.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
namespace boost {
namespace detail {
// Befriending Templates requires the need for all this mess.
// So that we can allow the templated ostream insertion operator to access private members.
template <typename T>
class bitfield_base;
template <typename T>
std::ostream& operator << (std::ostream& os, const bitfield_base<T>& value);
// Gatekeeper for the private metadata of generated enum/bitfield types:
// T befriends this class, and bitfield_base plus the stream operators
// reach T's static names()/values() tables only through the forwarders
// below, keeping them out of T's public interface.
class bitfield_access
{
# if defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS) \
 || BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x551))
    // Tasteless as this may seem, making all members public allows member templates
    // to work in the absence of member template friends.
public:
# else
    template <typename T>
    friend class bitfield_base;
    template <typename T>
    friend std::ostream& operator << (std::ostream& os, const bitfield_base<T>& value);
#endif
    // Human-readable name of the enumerator at 'index', as recorded by T.
    template <typename T>
    static const char* names(BOOST_DEDUCED_TYPENAME T::domain index)
    {
        return T::names(index);
    }
    // Numeric value of the enumerator at 'index'; empty optional if the
    // generator defined no value for it.
    template <typename T>
    static BOOST_DEDUCED_TYPENAME T::optional_value values(
        BOOST_DEDUCED_TYPENAME T::domain index)
    {
        return T::values(index);
    }
    // Construct a T directly from a raw value, bypassing validation;
    // callers are expected to have validated 'value' already.
    template <typename T>
    static T get_by_value(BOOST_DEDUCED_TYPENAME T::value_type value)
    {
        T ret(value, 0);
        return ret;
    }
private:
    // objects of this class are useless
    bitfield_access(); //undefined
};
// CRTP base giving a generated bitfield enum type T bitset-like behavior:
// bitwise operators, total ordering, element iteration and validated
// construction.  T's metadata (size, names, values, masks) is reached
// through bitfield_access.
template <typename T>
class bitfield_base
    : private boost::bitwise<T>
    , private boost::totally_ordered<T>
{
public:
    typedef bitfield_base<T> this_type;
    typedef size_t index_type;
    typedef size_t value_type;
    typedef enum_iterator<T> const_iterator;
    typedef boost::optional<T> optional;

protected:
    // Trusted constructor from a raw value; the int parameter is only a
    // tag distinguishing it from the public index_type constructor.
    bitfield_base(value_type value, int) : m_value(value) {}

public:
    bitfield_base() : m_value(0) {}
    // Construct from an enumerator index.  An index with no defined value
    // now yields an empty bitfield; previously m_value was left
    // uninitialized in that case.
    bitfield_base(index_type index) : m_value(0)
    {
        optional_value value = bitfield_access::values<T>(enum_cast<T>(index));
        if(value)
            m_value = *value;
    }

    // Iteration over the enumerators defined by T.
    static const_iterator begin()
    {
        return const_iterator(0);
    }
    static const_iterator end()
    {
        return const_iterator(T::size);
    }

    // Validated construction from a raw value: fails (empty optional) if
    // any bit outside T's defined mask is set.
    static optional get_by_value(value_type value)
    {
        // make sure that 'value' is valid
        optional_value not_mask = bitfield_access::values<T>(T::not_mask);
        BOOST_ASSERT(not_mask);
        if(value & *not_mask)
            return optional();
        return bitfield_access::get_by_value<T>(value);
    }
    // Element at 'index', or empty if the index is out of range.
    static optional get_by_index(index_type index)
    {
        if(index >= T::size) return optional();
        return optional(enum_cast<T>(index));
    }

    // Formatted value, identical to what operator<< produces.
    std::string str() const
    {
        std::stringstream ss;
        ss << *this;
        return ss.str();
    }
    value_type value() const
    {
        return m_value;
    }

    // Comparisons; boost::totally_ordered derives the remaining operators.
    bool operator == (const this_type& rhs) const
    {
        return m_value == rhs.m_value;
    }
    bool operator < (const this_type& rhs) const
    {
        return m_value < rhs.m_value;
    }

    // Bitwise compound assignment; boost::bitwise derives |, & and ^.
    T& operator |= (const this_type& rhs)
    {
        m_value |= rhs.m_value;
        return static_cast<T&>(*this);
    }
    T& operator &= (const this_type& rhs)
    {
        m_value &= rhs.m_value;
        return static_cast<T&>(*this);
    }
    T& operator ^= (const this_type& rhs)
    {
        m_value ^= rhs.m_value;
        return static_cast<T&>(*this);
    }

    // True if any bit of the element at 'pos' is set (false for a bad pos).
    bool operator[] (index_type pos) const
    {
        optional element = get_by_index(pos);
        if(!element) return false;
        return operator[](*element);
    }
    bool operator[] (const this_type& rhs) const
    {
        return (m_value & rhs.m_value) != 0;
    }

    // Set (or clear, when bit == false) the given element; returns false
    // when the element or the resulting value is not valid.
    bool set(index_type pos, bool bit = true)
    {
        if(!bit) return reset(pos);
        optional element = get_by_index(pos);
        if(!element) return false;
        return set(*element, bit);
    }
    bool set(const this_type& rhs, bool bit = true)
    {
        if(!bit) return reset(rhs);
        value_type new_value = m_value | rhs.m_value;
        if(!get_by_value(new_value))
            return false;
        m_value = new_value;
        return true;
    }
    bool reset(index_type pos)
    {
        optional element = get_by_index(pos);
        if(!element) return false;
        return reset(*element);
    }
    bool reset(const this_type& rhs)
    {
        value_type new_value = m_value & ~(rhs.m_value);
        if(!get_by_value(new_value)) return false;
        m_value = new_value;
        return true;
    }

    // Number of set bits (was a "TODO: implement me" stub returning 0).
    size_t count() const
    {
        size_t n = 0;
        // Kernighan's trick: each iteration clears the lowest set bit.
        for(value_type v = m_value; v != 0; v &= v - 1)
            ++n;
        return n;
    }
    // True if any bit is set (was a stub returning false).
    bool any() const
    {
        return m_value != 0;
    }
    // True if no bit is set (was a stub returning false).
    bool none() const
    {
        return m_value == 0;
    }

private:
    typedef boost::optional<value_type> optional_value;
    friend class bitfield_access;
    value_type m_value;   // raw bit pattern; invariant: subset of T's all_mask
};
// Streams a symbolic rendering of the bitfield: "all_mask"/"not_mask" for
// the two special masks, otherwise a '|'-separated list of element names;
// any bits with no matching element are printed as 8-digit hex and the
// empty value prints as "<null>" (or a named zero element, if defined).
template <typename T>
std::ostream& operator << (std::ostream& os, const bitfield_base<T>& rhs)
{
    typedef BOOST_DEDUCED_TYPENAME T::value_type value_type;
    typedef BOOST_DEDUCED_TYPENAME T::index_type index_type;
    typedef boost::optional<value_type> optional_value;
    value_type remain = rhs.value();
    // NOTE(review): all_mask/not_mask are dereferenced without a validity
    // check here (unlike get_by_value, which asserts) — presumably the
    // generator always defines both; confirm.
    optional_value all_mask = bitfield_access::values<T>(T::all_mask);
    if(remain == *all_mask)
    {
        os << "all_mask";
        return os;
    }
    optional_value not_mask = bitfield_access::values<T>(T::not_mask);
    if(remain == *not_mask)
    {
        os << "not_mask";
        return os;
    }
    // FIXME: there might be a reason the user wants to define the value 0
    // or perhaps 0 is never legitimate for their usage
    bool isZero = (remain == 0);
    bool isFirst = true;
    for(index_type i = 0; i < T::size; ++i)
    {
        optional_value mask = bitfield_access::values<T>(enum_cast<T>(i));
        // A user-defined zero element names the empty value.
        if(*mask == 0 && isZero)
        {
            const char* name = bitfield_access::names<T>(enum_cast<T>(i));
            BOOST_ASSERT(name);
            os << name;
            return os;
        }
        else if(remain & *mask)
        {
            if(isFirst)
                isFirst = false;
            else
                os << '|';
            const char* name = bitfield_access::names<T>(enum_cast<T>(i));
            BOOST_ASSERT(name);
            os << name;
            // Strip the bits just printed; stop once everything is named.
            remain &= ~(*mask);
            if(remain == 0)
                return os;
        }
    }
    // Leftover bits that matched no element: zero-padded hex.
    if(remain)
    {
        if(!isFirst)
            os << '|';
        os.fill('0');
        os.width(8);
        os << std::hex << remain;
    }
    else if(isZero)
    {
        os << "<null>";
    }
    return os;
}
// Reads one whitespace-delimited token and ORs each '|'- or ','-separated
// element name into rhs; an unknown name sets badbit and stops parsing.
// NOTE(review): rhs is accumulated into, not cleared first — callers
// presumably pass a default-constructed value; confirm.
template <typename T>
std::istream& operator >> (std::istream& is, bitfield_base<T>& rhs)
{
    std::string str;
    is >> str;
    std::vector<std::string> tokens;
    boost::algorithm::split(tokens, str, boost::is_any_of("|,"));
    BOOST_FOREACH(const std::string& token, tokens)
    {
        BOOST_DEDUCED_TYPENAME T::optional ret = T::get_by_name(token.c_str());
        if(ret)
            rhs.set(*ret);
        else
        {
            is.setstate(std::ios::badbit);
            break;
        }
    }
    return is;
}
} // detail
} // boost
#endif
|
/**
* \file dnn/src/arm_common/conv_bias/int8x8x16/algos.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/arm_common/conv_bias/int8x8x16/algos.h"
#include "src/arm_common/conv_bias/int8x8x16/channel_wise_nchw44.h"
#include "src/arm_common/conv_bias/int8x8x16/channel_wise_nchw44_8x8x16.h"
#include "src/arm_common/conv_bias/int8x8x16/conv_direct.h"
#include "src/arm_common/conv_bias/int8x8x16/conv_stride2.h"
#include "midout.h"
MIDOUT_DECL(megdnn_arm_common_conv_bias_int8816_kimpl)
using namespace megdnn;
using namespace arm_common;
namespace {
//! Whether the stride-1 kernel needs a staging buffer for dst.
//! get_rectified_size_str1 keeps OH unchanged and only rounds OW up to a
//! multiple of 8, so a copy is needed iff OW is not already a multiple of
//! 8.  (The original also tested `osz[0] % 1 != 0`, which is always
//! false and has been removed.)
bool need_dst_copy_str1(
        const megdnn::fallback::ConvolutionImpl::NCBKernSizeParam& param) {
    return param.osz[1] % 8 != 0;
}
//! Whether the stride-1 kernel must copy src into a padded staging
//! buffer: either explicit padding is requested, or dst itself is staged.
bool need_src_copy_str1(
        const megdnn::fallback::ConvBiasImpl::NCBKernSizeParam& param) {
    const auto& fm = param.filter_meta;
    return fm.padding[0] != 0 || fm.padding[1] != 0 ||
           need_dst_copy_str1(param);
}
//! Compute the padded/rounded buffer geometry for the stride-1 kernels.
//! The output height is kept as-is while the width is rounded up to a
//! multiple of 8; the input buffer keeps the original overhang
//! (IH - OH, IW - OW) plus the padding on both sides.
void get_rectified_size_str1(size_t IH, size_t IW, size_t OH, size_t OW,
                             size_t PH, size_t PW, size_t& IH2, size_t& IW2,
                             size_t& OH2, size_t& OW2) {
    constexpr size_t align = 8;
    OH2 = OH;
    OW2 = (OW + align - 1) / align * align;
    IH2 = IH + OH2 - OH + 2 * PH;
    IW2 = IW + OW2 - OW + 2 * PW;
}
//! Whether the stride-2 kernel needs a staging buffer for dst: both
//! output dimensions must already be multiples of 8 to write in place.
bool need_dst_copy_str2(
        const megdnn::fallback::ConvBiasImpl::NCBKernSizeParam& param) {
    return (param.osz[0] % 8 != 0) || (param.osz[1] % 8 != 0);
}
//! Whether the stride-2 kernel must copy src into a padded staging
//! buffer: needed for nonzero padding, or whenever dst itself is staged.
bool need_src_copy_str2(
        const megdnn::fallback::ConvBiasImpl::NCBKernSizeParam& param) {
    const auto& fm = param.filter_meta;
    const bool has_padding = fm.padding[0] != 0 || fm.padding[1] != 0;
    return has_padding || need_dst_copy_str2(param);
}
//! Compute the padded/rounded buffer geometry for the stride-2 kernels.
//! Both output dims are rounded up to multiples of 8, the input size is
//! derived from the output via the stride-2 relation IH2 = 2*OH2 + FH - 2
//! and then clamped below by the real input size, because with stride 2
//! the true input can exceed the derived one by a row/column.
void get_rectified_size_str2(size_t IH, size_t IW, size_t OH, size_t OW,
                             size_t FH, size_t FW, size_t PH, size_t PW,
                             size_t& IH2, size_t& IW2, size_t& OH2,
                             size_t& OW2) {
    MEGDNN_MARK_USED_VAR(PH);
    MEGDNN_MARK_USED_VAR(PW);
    constexpr size_t align = 8;
    OH2 = (OH + align - 1) / align * align;
    OW2 = (OW + align - 1) / align * align;
    IH2 = std::max(2 * OH2 + FH - 2, IH);
    IW2 = std::max(2 * OW2 + FW - 2, IW);
}
} // namespace
/* ===================== direct algo ===================== */
//! Stride-1 direct int8x8x16 algo: usable only for plain NCHW conv with
//! no bias, identity nonlinearity, i8 src/filter -> i16 dst, unit
//! stride/dilation and a square 2x2/3x3/5x5 filter.
bool ConvBiasImpl::AlgoI8x8x16Direct::usable(const NCBKernSizeParam& param,
                                             AlgoSelectionStrategy) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Direct::usable"_hash)) {
        auto&& fm = param.filter_meta;
        auto FH = fm.spatial[0];
        return param.bias_mode == BiasMode::NO_BIAS &&
               param.nonlineMode == NonlineMode::IDENTITY &&
               fm.format == param::ConvBias::Format::NCHW && !fm.should_flip &&
               param.src_type.enumv() == DTypeEnum::Int8 &&
               param.filter_type.enumv() == DTypeEnum::Int8 &&
               param.dst_type.enumv() == DTypeEnum::Int16 &&
               fm.spatial_ndim == 2 && fm.dilation[0] == 1 &&
               fm.dilation[1] == 1 && fm.stride[0] == 1 && fm.stride[1] == 1 &&
               FH == fm.spatial[1] && (FH == 2 || FH == 3 || FH == 5);
    }
    MIDOUT_END();
    return false;
}
//! Workspace layout for the stride-1 direct kernel:
//!   part0 - zero-padded src copies (one slot per thread in large-group
//!           mode, otherwise one per (group, batch) pair);
//!   part1 - per-thread staging dst when OW must be rounded up.
WorkspaceBundle ConvBiasImpl::AlgoI8x8x16Direct::get_bundle(
        const NCBKernSizeParam& param) const {
    auto&& fm = param.filter_meta;
    size_t nr_threads = param.nr_threads;
    size_t group = fm.group, batch = param.n;
    auto IC = fm.icpg, IH = param.isz[0], IW = param.isz[1];
    auto OH = param.osz[0], OW = param.osz[1];
    auto PH = fm.padding[0], PW = fm.padding[1];
    size_t OH2, OW2, IH2, IW2;
    bool large_group = group >= param.nr_threads;
    get_rectified_size_str1(IH, IW, OH, OW, PH, PW, IH2, IW2, OH2, OW2);
    size_t part0 = 0u, part1 = 0u;
    if (need_src_copy_str1(param)) {
        // Large-group mode parallelizes over groups, so one slot per
        // thread suffices; otherwise each (group, batch) owns a slot.
        part0 = large_group ? IC * IH2 * IW2 * sizeof(int8_t) * nr_threads
                            : IC * IH2 * IW2 * sizeof(int8_t) * group * batch;
    }
    if (need_dst_copy_str1(param)) {
        // +16 slack bytes — presumably for over-reaching vector stores in
        // the kernel; TODO confirm.
        part1 = OH2 * OW2 * sizeof(int16_t) * nr_threads + 16;
    }
    return {nullptr, {part0, part1}};
}
//! Total workspace in bytes: sum of the parts computed by get_bundle.
size_t ConvBiasImpl::AlgoI8x8x16Direct::get_workspace(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Direct::get_workspace"_hash)) {
        auto bundle = get_bundle(param);
        return bundle.total_size_in_bytes();
    }
    MIDOUT_END();
    return 0;
}
//! Process one input channel copy padding: copies a single input channel
//! into its zero-initialized slot of the padded workspace (bundle part0)
//! so the conv kernel can run without border checks.  No-op when no
//! copy is needed.
void ConvBiasImpl::AlgoI8x8x16Direct::copy_padding_kern(
        const WorkspaceBundle& bundle,
        const ConvBiasImpl::NCBKernParam& kern_param,
        const ConvBiasImpl::NCBKernIndex& ncb_index,
        const CpuNDRange& workspace_ids) {
    size_t IH = kern_param.isz[0];
    size_t IW = kern_param.isz[1];
    size_t IC = kern_param.filter_meta.icpg;
    size_t OH = kern_param.osz[0];
    size_t OW = kern_param.osz[1];
    size_t PH = kern_param.filter_meta.padding[0];
    size_t PW = kern_param.filter_meta.padding[1];
    size_t GROUP = kern_param.filter_meta.group;
    size_t OH2, OW2, IH2, IW2;
    get_rectified_size_str1(IH, IW, OH, OW, PH, PW, IH2, IW2, OH2, OW2);
    bool need_src_copy_var = need_src_copy_str1(kern_param);
    size_t padding_group_size = IH2 * IW2 * IC;
    //! Used for get the workspace offset
    size_t workspace_group_id = workspace_ids[0],
           workspace_batch_id = workspace_ids[1],
           channel_id = workspace_ids[2];
    size_t group_id = ncb_index.ndrange_id[0],
           batch_id = ncb_index.ndrange_id[1];
    const int8_t* sptr = kern_param.src<int8_t>(batch_id, group_id, channel_id);
    if (need_src_copy_var) {
        //! copy to sptr_base to eliminate padding effect
        int8_t* sptr_base = static_cast<int8_t*>(bundle.get(0)) +
                            workspace_group_id * padding_group_size +
                            workspace_batch_id * GROUP * padding_group_size +
                            channel_id * IH2 * IW2;
        // Zero the whole padded slot, then blit each source row into the
        // interior at offset (PH, PW).
        std::memset(sptr_base, 0, sizeof(int8_t) * IH2 * IW2);
        rep(ih, IH) {
            std::memcpy(sptr_base + (ih + PH) * IW2 + PW, sptr + ih * IW,
                        sizeof(int8_t) * IW);
        }
    }
}  // note: stray ';' after the function body removed
//! compute one output channel: the first input channel initializes dst,
//! the remaining input channels accumulate into it; results are staged
//! in the workspace when the output width was rounded up.
void ConvBiasImpl::AlgoI8x8x16Direct::do_conv_kern(
        const WorkspaceBundle& bundle, const NCBKernParam& kern_param,
        const NCBKernIndex& ncb_index, const CpuNDRange& workspace_ids) {
    size_t OH = kern_param.osz[0];
    size_t OW = kern_param.osz[1];
    size_t IH = kern_param.isz[0];
    size_t IW = kern_param.isz[1];
    size_t FH = kern_param.filter_meta.spatial[0];
    size_t FW = kern_param.filter_meta.spatial[1];
    size_t IC = kern_param.filter_meta.icpg;
    size_t PH = kern_param.filter_meta.padding[0];
    size_t PW = kern_param.filter_meta.padding[1];
    size_t GROUP = kern_param.filter_meta.group;
    size_t OH2, OW2, IH2, IW2;
    get_rectified_size_str1(IH, IW, OH, OW, PH, PW, IH2, IW2, OH2, OW2);
    bool need_src_copy_var = need_src_copy_str1(kern_param);
    bool need_dst_copy_var = need_dst_copy_str1(kern_param);
    size_t padding_group_size = IH2 * IW2 * IC;
    //! Choose the compute kernel
    using Func =
            std::function<void(const int8_t*, const int8_t*, int16_t*, size_t,
                               size_t, size_t, size_t, size_t, size_t)>;
    Func fun_not_add_to_dst = nullptr, fun_add_to_dst = nullptr;
    // Pick the specialization for the (square) filter size; <false>
    // overwrites dst, <true> accumulates into it.
    if (FH == 2) {
        fun_not_add_to_dst =
                conv_bias::conv_direct_2x2_sc_int8_int8_int16<false>;
        fun_add_to_dst = conv_bias::conv_direct_2x2_sc_int8_int8_int16<true>;
    } else if (FH == 3) {
        fun_not_add_to_dst =
                conv_bias::conv_direct_3x3_sc_int8_int8_int16<false>;
        fun_add_to_dst = conv_bias::conv_direct_3x3_sc_int8_int8_int16<true>;
    } else if (FH == 5) {
        fun_not_add_to_dst =
                conv_bias::conv_direct_5x5_sc_int8_int8_int16<false>;
        fun_add_to_dst = conv_bias::conv_direct_5x5_sc_int8_int8_int16<true>;
    }
    //! Used for get the workspace offset
    size_t workspace_group_id = workspace_ids[0],
           workspace_batch_id = workspace_ids[1], oc = workspace_ids[2];
    size_t group_id = ncb_index.ndrange_id[0],
           batch_id = ncb_index.ndrange_id[1];
    const int8_t* sptr = kern_param.src<dt_int8>(batch_id, group_id);
    const int8_t* filter =
            kern_param.filter<dt_int8>(group_id) + oc * FH * FW * IC;
    int16_t* dst = kern_param.dst<dt_int16>(batch_id, group_id, oc);
    if (need_src_copy_var) {
        // Read from the padded copy produced by copy_padding_kern.
        sptr = static_cast<int8_t*>(bundle.get(0)) +
               workspace_group_id * padding_group_size +
               workspace_batch_id * GROUP * padding_group_size;
    }
    int16_t* dptr = nullptr;
    if (need_dst_copy_var) {
        // Stage into this thread's slot of bundle part1.
        dptr = static_cast<int16_t*>(bundle.get(1)) +
               ncb_index.thread_id * OH2 * OW2;
    } else {
        dptr = dst;
    }
    // First input channel writes dst, the rest accumulate.
    fun_not_add_to_dst(sptr, filter, dptr, IH2, IW2, OH2, OW2, 0, 0);
    for (size_t ic = 1; ic < IC; ++ic) {
        fun_add_to_dst(sptr + ic * IH2 * IW2, filter + ic * FH * FW, dptr, IH2,
                       IW2, OH2, OW2, 0, 0);
    }
    if (need_dst_copy_var) {
        // Copy the valid OW columns of each row back to the real dst.
        rep(oh, OH) {
            std::memcpy(dst + oh * OW, dptr + oh * OW2, sizeof(int16_t) * OW);
        }
    }
}
//! Build the kernel list.  In "large group" mode (group >= nr_threads)
//! one task pads and convolves a whole (group, batch) pair using a
//! per-thread workspace slot; otherwise padding and convolution are
//! separate kernels parallelized down to channel granularity.
SmallVector<ConvBiasImpl::NCBKern> ConvBiasImpl::AlgoI8x8x16Direct::get_kimpls(
        const NCBKernSizeParam& param) const {
    auto fm = param.filter_meta;
    size_t N = param.n;
    size_t IC = param.filter_meta.icpg;
    size_t OC = param.filter_meta.ocpg;
    size_t group = fm.group;
    bool large_group = group >= param.nr_threads;
    WorkspaceBundle bundle = get_bundle(param);
    SmallVector<NCBKern> ret_kerns;
    if (large_group) {
        // Lambdas capture the bundle by value (mutable) and rebind it to
        // the runtime workspace pointer on every invocation.
        auto exec_one_group = [bundle](const NCBKernParam& kern_param,
                                       const NCBKernIndex& ncb_index) mutable {
            auto fm = kern_param.filter_meta;
            size_t IC = fm.icpg;
            size_t OC = fm.ocpg;
            bundle.set(kern_param.workspace_ptr);
            for (size_t ic = 0; ic < IC; ic++) {
                copy_padding_kern(bundle, kern_param, ncb_index,
                                  {ncb_index.thread_id, 0, ic});
            }
            for (size_t oc = 0; oc < OC; oc++) {
                do_conv_kern(bundle, kern_param, ncb_index,
                             {ncb_index.thread_id, 0, oc});
            }
        };
        ret_kerns.push_back({exec_one_group, {group, N, 1_z}});
    } else {
        auto copy_padding = [bundle](const NCBKernParam& kern_param,
                                     const NCBKernIndex& ncb_index) mutable {
            bundle.set(kern_param.workspace_ptr);
            copy_padding_kern(bundle, kern_param, ncb_index,
                              ncb_index.ndrange_id);
        };
        ret_kerns.push_back({copy_padding, {group, N, IC}});
        auto do_conv = [bundle](const NCBKernParam& kern_param,
                                const NCBKernIndex& ncb_index) mutable {
            bundle.set(kern_param.workspace_ptr);
            do_conv_kern(bundle, kern_param, ncb_index, ncb_index.ndrange_id);
        };
        ret_kerns.push_back({do_conv, {group, N, OC}});
    }
    return ret_kerns;
}
//! Public dispatch entry: forwards to get_kimpls under midout profiling.
SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoI8x8x16Direct::dispatch_kerns(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Direct::dispatch_kerns"_hash)) {
        return get_kimpls(param);
    }
    MIDOUT_END();
    return {};
}
/* ===================== stride-2 algo ===================== */
//! Stride-2 int8x8x16 algo: same constraints as the stride-1 variant but
//! for stride 2.  NOTE(review): unlike the stride-1 check this one does
//! not test fm.spatial_ndim == 2 — confirm whether that is intentional.
bool ConvBiasImpl::AlgoI8x8x16Stride2::usable(const NCBKernSizeParam& param,
                                              AlgoSelectionStrategy) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Stride2::usable"_hash)) {
        auto&& fm = param.filter_meta;
        auto FH = fm.spatial[0];
        return param.bias_mode == BiasMode::NO_BIAS &&
               param.nonlineMode == NonlineMode::IDENTITY &&
               fm.format == param::ConvBias::Format::NCHW && !fm.should_flip &&
               param.src_type.enumv() == DTypeEnum::Int8 &&
               param.filter_type.enumv() == DTypeEnum::Int8 &&
               param.dst_type.enumv() == DTypeEnum::Int16 &&
               fm.dilation[0] == 1 && fm.dilation[1] == 1 &&
               fm.stride[0] == 2 && fm.stride[1] == 2 && FH == fm.spatial[1] &&
               (FH == 2 || FH == 3 || FH == 5);
    }
    MIDOUT_END();
    return false;
}
//! Workspace layout for the stride-2 kernel, mirroring the stride-1
//! variant: part0 padded src copies, part1 per-thread staging dst.
WorkspaceBundle ConvBiasImpl::AlgoI8x8x16Stride2::get_bundle(
        const NCBKernSizeParam& param) const {
    auto&& fm = param.filter_meta;
    size_t nr_threads = param.nr_threads;
    size_t group = fm.group, batch = param.n;
    auto IC = fm.icpg, IH = param.isz[0], IW = param.isz[1];
    auto OH = param.osz[0], OW = param.osz[1];
    auto PH = fm.padding[0], PW = fm.padding[1];
    auto FH = fm.spatial[0], FW = fm.spatial[1];
    size_t OH2, OW2, IH2, IW2;
    get_rectified_size_str2(IH, IW, OH, OW, FH, FW, PH, PW, IH2, IW2, OH2, OW2);
    size_t part0 = 0u, part1 = 0u;
    bool large_group = group >= param.nr_threads;
    if (need_src_copy_str2(param)) {
        // Per-thread slots in large-group mode, per-(group, batch) otherwise.
        part0 = large_group ? IC * IH2 * IW2 * sizeof(int8_t) * nr_threads
                            : IC * IH2 * IW2 * sizeof(int8_t) * group * batch;
    }
    if (need_dst_copy_str2(param)) {
        // +16 slack bytes — presumably for over-reaching vector stores;
        // TODO confirm.
        part1 = OH2 * OW2 * sizeof(int16_t) * nr_threads + 16;
    }
    return {nullptr, {part0, part1}};
}
//! Total workspace in bytes: sum of the parts computed by get_bundle.
size_t ConvBiasImpl::AlgoI8x8x16Stride2::get_workspace(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Stride2::get_workspace"_hash)) {
        auto bundle = get_bundle(param);
        return bundle.total_size_in_bytes();
    }
    MIDOUT_END();
    return 0;
}
//! Process one input channel copy padding: copies a single input channel
//! into its zero-initialized slot of the padded workspace (bundle part0).
//! No-op when no copy is needed.
void ConvBiasImpl::AlgoI8x8x16Stride2::copy_padding_kern(
        const WorkspaceBundle& bundle,
        const ConvBiasImpl::NCBKernParam& kern_param,
        const ConvBiasImpl::NCBKernIndex& ncb_index,
        const CpuNDRange& workspace_ids) {
    size_t IH = kern_param.isz[0];
    size_t IW = kern_param.isz[1];
    size_t IC = kern_param.filter_meta.icpg;
    size_t OH = kern_param.osz[0];
    size_t OW = kern_param.osz[1];
    size_t PH = kern_param.filter_meta.padding[0];
    size_t PW = kern_param.filter_meta.padding[1];
    auto FH = kern_param.filter_meta.spatial[0],
         FW = kern_param.filter_meta.spatial[1];
    size_t GROUP = kern_param.filter_meta.group;
    size_t IH2, IW2, OH2, OW2;
    get_rectified_size_str2(IH, IW, OH, OW, FH, FW, PH, PW, IH2, IW2, OH2, OW2);
    bool need_src_copy_var = need_src_copy_str2(kern_param);
    size_t padding_group_size = IH2 * IW2 * IC;
    //! Used for get the workspace offset
    size_t workspace_group_id = workspace_ids[0],
           workspace_batch_id = workspace_ids[1],
           channel_id = workspace_ids[2];
    size_t group_id = ncb_index.ndrange_id[0],
           batch_id = ncb_index.ndrange_id[1];
    const int8_t* sptr = kern_param.src<int8_t>(batch_id, group_id, channel_id);
    if (need_src_copy_var) {
        //! copy to sptr_base to eliminate padding effect
        int8_t* sptr_base = static_cast<int8_t*>(bundle.get(0)) +
                            workspace_group_id * padding_group_size +
                            workspace_batch_id * GROUP * padding_group_size +
                            channel_id * IH2 * IW2;
        // Zero the whole padded slot, then blit each source row into the
        // interior at offset (PH, PW).
        std::memset(sptr_base, 0, sizeof(int8_t) * IH2 * IW2);
        rep(ih, IH) {
            std::memcpy(sptr_base + (ih + PH) * IW2 + PW, sptr + ih * IW,
                        sizeof(int8_t) * IW);
        }
    }
}  // note: stray ';' after the function body removed
//! compute one output channel (stride-2): the first input channel
//! initializes dst, the remaining input channels accumulate into it;
//! results are staged in the workspace when the output was rounded up.
void ConvBiasImpl::AlgoI8x8x16Stride2::do_conv_kern(
        const WorkspaceBundle& bundle, const NCBKernParam& kern_param,
        const NCBKernIndex& ncb_index, const CpuNDRange& workspace_ids) {
    size_t OH = kern_param.osz[0];
    size_t OW = kern_param.osz[1];
    size_t IH = kern_param.isz[0];
    size_t IW = kern_param.isz[1];
    size_t FH = kern_param.filter_meta.spatial[0];
    size_t FW = kern_param.filter_meta.spatial[1];
    size_t IC = kern_param.filter_meta.icpg;
    size_t PH = kern_param.filter_meta.padding[0];
    size_t PW = kern_param.filter_meta.padding[1];
    size_t GROUP = kern_param.filter_meta.group;
    size_t IH2, IW2, OH2, OW2;
    get_rectified_size_str2(IH, IW, OH, OW, FH, FW, PH, PW, IH2, IW2, OH2, OW2);
    bool need_src_copy_var = need_src_copy_str2(kern_param);
    bool need_dst_copy_var = need_dst_copy_str2(kern_param);
    size_t padding_group_size = IH2 * IW2 * IC;
    //! Choose the compute kernel
    using Func =
            std::function<void(const int8_t*, const int8_t*, int16_t*, size_t,
                               size_t, size_t, size_t, size_t, size_t)>;
    Func fun_not_add_to_dst = nullptr, fun_add_to_dst = nullptr;
    // Pick the specialization for the (square) filter size; <false>
    // overwrites dst, <true> accumulates into it.
    if (FH == 2) {
        fun_not_add_to_dst =
                conv_bias::conv_stride2_2x2_sc_int8_int8_int16<false>;
        fun_add_to_dst = conv_bias::conv_stride2_2x2_sc_int8_int8_int16<true>;
    } else if (FH == 3) {
        fun_not_add_to_dst =
                conv_bias::conv_stride2_3x3_sc_int8_int8_int16<false>;
        fun_add_to_dst = conv_bias::conv_stride2_3x3_sc_int8_int8_int16<true>;
    } else if (FH == 5) {
        fun_not_add_to_dst =
                conv_bias::conv_stride2_5x5_sc_int8_int8_int16<false>;
        fun_add_to_dst = conv_bias::conv_stride2_5x5_sc_int8_int8_int16<true>;
    }
    //! Used for get the workspace offset
    size_t workspace_group_id = workspace_ids[0],
           workspace_batch_id = workspace_ids[1], oc = workspace_ids[2];
    size_t group_id = ncb_index.ndrange_id[0],
           batch_id = ncb_index.ndrange_id[1];
    const int8_t* sptr = kern_param.src<dt_int8>(batch_id, group_id);
    const int8_t* filter =
            kern_param.filter<dt_int8>(group_id) + oc * FH * FW * IC;
    int16_t* dst = kern_param.dst<dt_int16>(batch_id, group_id, oc);
    if (need_src_copy_var) {
        // Read from the padded copy produced by copy_padding_kern.
        sptr = static_cast<int8_t*>(bundle.get(0)) +
               workspace_group_id * padding_group_size +
               workspace_batch_id * GROUP * padding_group_size;
    }
    int16_t* dptr = nullptr;
    if (need_dst_copy_var) {
        // Stage into this thread's slot of bundle part1.
        dptr = static_cast<int16_t*>(bundle.get(1)) +
               ncb_index.thread_id * OH2 * OW2;
    } else {
        dptr = dst;
    }
    // First input channel writes dst, the rest accumulate.
    fun_not_add_to_dst(sptr, filter, dptr, IH2, IW2, OH2, OW2, 0, 0);
    for (size_t ic = 1; ic < IC; ++ic) {
        fun_add_to_dst(sptr + ic * IH2 * IW2, filter + ic * FH * FW, dptr, IH2,
                       IW2, OH2, OW2, 0, 0);
    }
    if (need_dst_copy_var) {
        // Copy the valid OW columns of each row back to the real dst.
        rep(oh, OH) {
            std::memcpy(dst + oh * OW, dptr + oh * OW2, sizeof(int16_t) * OW);
        }
    }
}
//! Build the kernel list, mirroring the stride-1 variant: one fused task
//! per (group, batch) in large-group mode, otherwise separate padding and
//! convolution kernels parallelized down to channel granularity.
SmallVector<ConvBiasImpl::NCBKern> ConvBiasImpl::AlgoI8x8x16Stride2::get_kimpls(
        const NCBKernSizeParam& param) const {
    auto fm = param.filter_meta;
    size_t N = param.n;
    size_t IC = param.filter_meta.icpg;
    size_t OC = param.filter_meta.ocpg;
    size_t group = fm.group;
    bool large_group = group >= param.nr_threads;
    WorkspaceBundle bundle = get_bundle(param);
    SmallVector<NCBKern> ret_kerns;
    if (large_group) {
        // Lambdas capture the bundle by value (mutable) and rebind it to
        // the runtime workspace pointer on every invocation.
        auto exec_one_group = [bundle](const NCBKernParam& kern_param,
                                       const NCBKernIndex& ncb_index) mutable {
            auto fm = kern_param.filter_meta;
            size_t IC = fm.icpg;
            size_t OC = fm.ocpg;
            bundle.set(kern_param.workspace_ptr);
            for (size_t ic = 0; ic < IC; ic++) {
                copy_padding_kern(bundle, kern_param, ncb_index,
                                  {ncb_index.thread_id, 0, ic});
            }
            for (size_t oc = 0; oc < OC; oc++) {
                do_conv_kern(bundle, kern_param, ncb_index,
                             {ncb_index.thread_id, 0, oc});
            }
        };
        ret_kerns.push_back({exec_one_group, {group, N, 1_z}});
    } else {
        auto copy_padding = [bundle](const NCBKernParam& kern_param,
                                     const NCBKernIndex& ncb_index) mutable {
            bundle.set(kern_param.workspace_ptr);
            copy_padding_kern(bundle, kern_param, ncb_index,
                              ncb_index.ndrange_id);
        };
        ret_kerns.push_back({copy_padding, {group, N, IC}});
        auto do_conv = [bundle](const NCBKernParam& kern_param,
                                const NCBKernIndex& ncb_index) mutable {
            bundle.set(kern_param.workspace_ptr);
            do_conv_kern(bundle, kern_param, ncb_index, ncb_index.ndrange_id);
        };
        ret_kerns.push_back({do_conv, {group, N, OC}});
    }
    return ret_kerns;
}
//! Public dispatch entry: forwards to get_kimpls under midout profiling.
SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoI8x8x16Stride2::dispatch_kerns(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Stride2::dispatch_kerns"_hash)) {
        return get_kimpls(param);
    }
    MIDOUT_END();
    return {};
}
//! Specialized stride-2 filter-2 kernel: single-threaded only, no bias,
//! identity nonlinearity; shape applicability is delegated to
//! can_conv_int8x8x16_stride2_flt2.
bool ConvBiasImpl::AlgoI8x8x16Stride2Filter2::usable(
        const NCBKernSizeParam& param,
        AlgoSelectionStrategy /*algo_selection_strategy*/) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Stride2Filter2::usable"_hash)) {
        return param.bias_mode == BiasMode::NO_BIAS &&
               param.nonlineMode == NonlineMode::IDENTITY &&
               param.nr_threads == 1_z &&
               conv_bias::can_conv_int8x8x16_stride2_flt2(param);
    }
    MIDOUT_END();
    return false;
}
//! Workspace size is delegated to the specialized conv implementation.
size_t ConvBiasImpl::AlgoI8x8x16Stride2Filter2::get_workspace(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                 midout_iv("AlgoI8x8x16Stride2Filter2::get_workspace"_hash)) {
        return conv_bias::get_workspace_in_bytes_conv_int8x8x16_stride2_flt2(
                param);
    }
    MIDOUT_END();
    return 0;
}
//! Single kernel parallelized over groups only (the algo requires
//! nr_threads == 1).  The lambda rebinds per-group src/dst/filter/bias
//! pointers before invoking the specialized conv routine.
SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoI8x8x16Stride2Filter2::dispatch_kerns(
        const NCBKernSizeParam& param) const {
    // return {conv_bias::conv_int8x8x16_stride2_flt2,true};
    auto kern = [](const NCBKernParam& param, const NCBKernIndex& ncb_index) {
        MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8816_kimpl,
                     midout_iv("AlgoI8x8x16Stride2Filter2::dispatch_kerns"_hash)) {
            // Batch index is fixed at 0 — presumably the whole batch is
            // handled inside conv_int8x8x16_stride2_flt2; TODO confirm.
            auto ncb_param = param;
            ncb_param.src_ptr = param.src<void>(0, ncb_index.ndrange_id[0]);
            ncb_param.dst_ptr = param.dst<void>(0, ncb_index.ndrange_id[0]);
            ncb_param.filter_ptr = param.filter<void>(ncb_index.ndrange_id[0]);
            ncb_param.bias_ptr = param.bias<void>(0, ncb_index.ndrange_id[0]);
            conv_bias::conv_int8x8x16_stride2_flt2(ncb_param);
        }
        MIDOUT_END();
    };
    size_t group = param.filter_meta.group;
    return {{kern, {group, 1_z, 1_z}}};
}
/* =====================8int8x8x16 channel_wise_nchw44 stride1 stride2 algo ===================== */
//! Channel-wise NCHW44 int8x8x16 algo: i8 src/filter -> i16 dst,
//! depthwise layout (icpg == ocpg == 1, group % 4 == 0), stride 1 or 2,
//! square 2x2/3x3/5x5 filter, no dilation/flip, no full bias, identity
//! nonlinearity.
bool ConvBiasImpl::AlgoS8x8x16ChanWiseStride1Stride2NCHW44::usable(
        const NCBKernSizeParam& param, AlgoSelectionStrategy) const {
    auto&& fm = param.filter_meta;
    const auto FH = fm.spatial[0];
    //! src and filter are int8, dst is int16
    const bool type_ok = param.src_type.enumv() == DTypeEnum::Int8 &&
                         param.filter_type.enumv() == DTypeEnum::Int8 &&
                         param.dst_type.enumv() == DTypeEnum::Int16;
    const bool layout_ok = fm.format == param::Convolution::Format::NCHW44 &&
                           fm.icpg == 1 && fm.ocpg == 1 && fm.group % 4 == 0;
    const bool conv_ok = param.bias_mode != megdnn::BiasMode::BIAS &&
                         param.nonlineMode == megdnn::NonlineMode::IDENTITY &&
                         !fm.should_flip && fm.spatial_ndim == 2 &&
                         fm.dilation[0] == 1 && fm.dilation[1] == 1 &&
                         (fm.stride[0] == fm.stride[1] &&
                          (fm.stride[0] == 1 || fm.stride[0] == 2)) &&
                         FH == fm.spatial[1] &&
                         (FH == 2 || FH == 3 || FH == 5);
    return type_ok && layout_ok && conv_ok;
}
//! Workspace size is delegated to the stride-specific channel-wise
//! implementation; equal strides are asserted and only 1/2 are handled.
size_t ConvBiasImpl::AlgoS8x8x16ChanWiseStride1Stride2NCHW44::get_workspace(
        const NCBKernSizeParam& param) const {
    MIDOUT_BEGIN(
            megdnn_arm_common_conv_bias_int8816_kimpl,
            midout_iv(
                    "AlgoS8x8x16ChanWiseStride1Stride2NCHW44::get_workspace"_hash)) {
        size_t stride_h = param.filter_meta.stride[0];
        size_t stride_w = param.filter_meta.stride[1];
        megdnn_assert(stride_h == stride_w);
        if (stride_h == 1) {
            return channel_wise_nchw44_8x8x16::stride1::get_bundle(param)
                    .total_size_in_bytes();
        } else if (stride_h == 2) {
            return channel_wise_nchw44_8x8x16::stride2::get_bundle(param)
                    .total_size_in_bytes();
        } else {
            // Unreachable when usable() passed; defensively report zero.
            return 0;
        }
    }
    MIDOUT_END();
    return 0;
}
//! Dispatch to the stride-1 or stride-2 channel-wise kernel list; any
//! other stride combination yields no kernels.
SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoS8x8x16ChanWiseStride1Stride2NCHW44::dispatch_kerns(
        const NCBKernSizeParam& param) const {
    size_t stride_h = param.filter_meta.stride[0];
    size_t stride_w = param.filter_meta.stride[1];
    if (stride_h == stride_w && stride_h == 1) {
        MIDOUT_BEGIN(
                megdnn_arm_common_conv_bias_int8816_kimpl,
                midout_iv(
                        "AlgoS8x8x16ChanWiseStride1Stride2NCHW44_dispatch_kerns"_hash)) {
            return channel_wise_nchw44_8x8x16::stride1::get_kimpls(param);
        }
        MIDOUT_END();
        return {};
    } else if (stride_h == stride_w && stride_h == 2) {
        MIDOUT_BEGIN(
                megdnn_arm_common_conv_bias_int8816_kimpl,
                midout_iv(
                        "AlgoS8x8x16ChanWiseStride2NCHW44_dispatch_kerns"_hash)) {
            return channel_wise_nchw44_8x8x16::stride2::get_kimpls(param);
        }
        MIDOUT_END();
        return {};
    } else {
        return {};
    }
}
// vim: syntax=cpp.doxygen
|
// ArduinoJson - arduinojson.org
// Copyright Benoit Blanchon 2014-2020
// MIT License
#pragma once
#include <ArduinoJson/src/ArduinoJson/Strings/ConstRamStringAdapter.hpp>
#include <ArduinoJson/src/ArduinoJson/Strings/RamStringAdapter.hpp>
#include <ArduinoJson/src/ArduinoJson/Strings/SizedRamStringAdapter.hpp>
#if ARDUINOJSON_ENABLE_STD_STRING
#include <ArduinoJson/src/ArduinoJson/Strings/StdStringAdapter.hpp>
#endif
#if ARDUINOJSON_ENABLE_ARDUINO_STRING
#include <ArduinoJson/src/ArduinoJson/Strings/ArduinoStringAdapter.hpp>
#endif
#if ARDUINOJSON_ENABLE_PROGMEM
#include <ArduinoJson/src/ArduinoJson/Strings/FlashStringAdapter.hpp>
#include <ArduinoJson/src/ArduinoJson/Strings/SizedFlashStringAdapter.hpp>
#endif
|
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: itkWin32OutputWindow.cxx
Language: C++
Date: $Date$
Version: $Revision$
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
Portions of this code are covered under the VTK copyright.
See VTKCopyright.txt or http://www.kitware.com/VTKCopyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
#include "itkWin32OutputWindow.h"
#include "itkObjectFactory.h"

#include <cstring>
#include <string>
namespace itk
{
/** Handle to the shared EDIT window; NULL until Initialize() creates it. */
HWND Win32OutputWindow::m_OutputWindow = 0;

/** Destructor: tear down the shared output window if one was created. */
Win32OutputWindow
::~Win32OutputWindow()
{
  HWND& win = Win32OutputWindow::m_OutputWindow;
  if (win)
    {
    DestroyWindow(win);
    win = NULL;
    }
}
/** Window procedure for the top-level output window.  Keeps the child
 *  EDIT control sized to the client area, and clears the shared handle
 *  (and turns off further warning display) when the window goes away.
 *  All other messages fall through to DefWindowProc. */
LRESULT APIENTRY
Win32OutputWindow
::WndProc(HWND hWnd, UINT message,
          WPARAM wParam,
          LPARAM lParam)
{
  switch (message)
    {
    case WM_SIZE:
      {
      /** width of client area */
      int w = LOWORD(lParam);
      /** height of client area */
      int h = HIWORD(lParam);
      /** Resize the EDIT child to fill the whole client area. */
      MoveWindow(Win32OutputWindow::m_OutputWindow,
                 0, 0, w, h, true);
      }
      break;
    case WM_DESTROY:
      /** The window is gone; drop the handle and stop displaying warnings. */
      Win32OutputWindow::m_OutputWindow = NULL;
      Object::GlobalWarningDisplayOff();
      break;
    case WM_CLOSE:
      if (Win32OutputWindow::m_OutputWindow)
        {
        DestroyWindow(Win32OutputWindow::m_OutputWindow);
        Win32OutputWindow::m_OutputWindow = NULL;
        }
      break;
    case WM_CREATE:
      break;
    }
  return DefWindowProc(hWnd, message, wParam, lParam);
}
/** Display text in the window, translating each '\n' into "\r\n" so the
 *  EDIT control renders line breaks correctly.  When prompt mode is on,
 *  the text is routed to a message box via PromptText() instead.
 *  A NULL pointer is silently ignored. */
void
Win32OutputWindow
::DisplayText(const char* text)
{
  if ( !text )
    {
    return;
    }
  if ( this->GetPromptUser() )
    {
    this->PromptText(text);
    return;
    }
  /* Walk the string newline by newline.  std::string owns the scratch
   * copy of each segment, so there is no explicit new/delete and no
   * leak on an early exit. */
  const char* cursor = text;
  const char* newLinePos;
  while ( (newLinePos = strchr(cursor, '\n')) != 0 )
    {
    /* Emit the segment before the newline, then a CR/LF pair. */
    std::string segment(cursor, newLinePos - cursor);
    Win32OutputWindow::AddText(segment.c_str());
    Win32OutputWindow::AddText("\r\n");
    cursor = newLinePos + 1;
    }
  /* Trailing segment with no newline; AddText ignores an empty string
   * (but, as before, still lazily creates the window). */
  Win32OutputWindow::AddText(cursor);
}
/** Append text to the end of the EDIT control, lazily creating the
 *  window on first use.  Empty strings are ignored (after the window
 *  has been created). */
void
Win32OutputWindow
::AddText(const char* text)
{
  /* Initialize() must run first: it creates the window on demand. */
  if ( !Initialize() || text[0] == '\0' )
    {
    return;
    }
  /* Collapse the selection to the very end of the text... */
  SendMessage( Win32OutputWindow::m_OutputWindow, EM_SETSEL,
               (WPARAM)-1, (LPARAM)-1 );
  /* ...then replace that (empty) selection with the new text. */
  SendMessage( Win32OutputWindow::m_OutputWindow, EM_REPLACESEL,
               0, (LPARAM)text );
}
/** Initialize the output window: create (once) a top-level container
 *  window holding a read-only, scrollable EDIT control that displays
 *  the output text.  Returns 1 when the window exists — whether it was
 *  already created or was just created by this call. */
int
Win32OutputWindow
::Initialize()
{
  /** check to see if it is already initialized */
  if(Win32OutputWindow::m_OutputWindow)
    {
    return 1;
    }
  /** Initialize the output window */
  WNDCLASS wndClass;
  /** has the class been registered ? */
  if (!GetClassInfo(GetModuleHandle(NULL),"OutputWindow",&wndClass))
    {
    wndClass.style = CS_HREDRAW | CS_VREDRAW;
    /** All messages route to the static WndProc above. */
    wndClass.lpfnWndProc = Win32OutputWindow::WndProc;
    wndClass.cbClsExtra = 0;
    wndClass.hInstance = GetModuleHandle(NULL);
    wndClass.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wndClass.hCursor = LoadCursor(NULL, IDC_ARROW);
    wndClass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
    wndClass.lpszMenuName = NULL;
    wndClass.lpszClassName = "OutputWindow";
    /** doesn't use these extra 4 bytes, but app writers may want them,
     * so we provide them. */
    wndClass.cbWndExtra = 4;
    RegisterClass(&wndClass);
    }
  /** create parent container window */
  HWND win = CreateWindow(
    "OutputWindow", "OutputWindow",
    WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN,
    0, 0, 512, 512,
    NULL, NULL, GetModuleHandle(NULL), NULL);
  /** Now create child window with text display box */
  CREATESTRUCT lpParam;
  lpParam.hInstance = GetModuleHandle(NULL);
  lpParam.hMenu = NULL;
  lpParam.hwndParent = win;
  lpParam.cx = 512;
  lpParam.cy = 512;
  lpParam.x = 0;
  lpParam.y = 0;
  /** Read-only multiline control that scrolls in both directions. */
  lpParam.style = ES_MULTILINE | ES_READONLY | WS_CHILD
    | ES_AUTOVSCROLL | ES_AUTOHSCROLL | WS_VISIBLE | WS_MAXIMIZE
    | WS_VSCROLL | WS_HSCROLL;
  lpParam.lpszName = "Output Control";
  lpParam.lpszClass = "EDIT"; // use the RICHEDIT control widget
  lpParam.dwExStyle = 0;
  /** Create the EDIT window as a child of win */
  Win32OutputWindow::m_OutputWindow = CreateWindow(
    lpParam.lpszClass, // pointer to registered class name
    "", // pointer to window name
    lpParam.style, // window style
    lpParam.x, // horizontal position of window
    lpParam.y, // vertical position of window
    lpParam.cx, // window width
    lpParam.cy, // window height
    lpParam.hwndParent, // handle to parent or owner window
    NULL, // handle to menu or child-window identifier
    lpParam.hInstance, // handle to application instance
    &lpParam // pointer to window-creation data
    );
  /** Allow up to maxsize (5 MB) characters of text in the control
   *  instead of the default EDIT-control limit. */
  const int maxsize = 5242880;
  SendMessage(Win32OutputWindow::m_OutputWindow,
              EM_LIMITTEXT, maxsize, 0L);
  /** show the top level container window */
  ShowWindow(win, SW_SHOW);
  return 1;
}
/** Prompt some text */
void
Win32OutputWindow
::PromptText(const char* text)
{
OStringStream msg;
msg << text << "\nPress Cancel to supress any further messages.";
if (MessageBox(NULL, msg.str().c_str(), "Error",
MB_ICONERROR | MB_OKCANCEL) == IDCANCEL)
{
Object::GlobalWarningDisplayOff();
}
}
} // end namespace itk
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.