// (table-extraction artifact removed: "text / stringlengths 5 / 1.04M / | / |---|")
// *****************************************************************************
// *****************************************************************************
// Copyright 2012 - 2013, Cadence Design Systems
//
// This file is part of the Cadence LEF/DEF Open Source
// Distribution, Product Version 5.8.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// For updates, support, or to become part of the LEF/DEF Community,
// check www.openeda.org for details.
//
// $Author: dell $
// $Revision: #7 $
// $Date: 2015/01/27 $
// $State: $
// *****************************************************************************
// *****************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lex.h"
#include "lefiCrossTalk.hpp"
#include "lefiDebug.hpp"
BEGIN_LEFDEF_PARSER_NAMESPACE
// *****************************************************************************
// lefiNoiseVictim
// *****************************************************************************
// A crosstalk-noise victim: a wire length plus the list of noise values
// observed for that length.  Storage is a manually grown double array
// using lefMalloc/lefFree, matching the rest of the LEF parser.
lefiNoiseVictim::lefiNoiseVictim(double d)
: length_(0.0),
numNoises_(0),
noisesAllocated_(0),
noises_(NULL)
{
Init(d);
}
// Record the victim length and allocate the initial 2-entry noise array.
void
lefiNoiseVictim::Init(double d)
{
length_ = d;
numNoises_ = 0;
noisesAllocated_ = 2;
noises_ = (double*) lefMalloc(sizeof(double) * 2);
}
// Drop the stored noise values; the allocated capacity is kept.
void
lefiNoiseVictim::clear()
{
numNoises_ = 0;
}
// Release the noise array.  NOTE(review): noises_ is not reset to NULL,
// so Destroy() must not be called twice without an intervening Init().
void
lefiNoiseVictim::Destroy()
{
clear();
lefFree((char*) (noises_));
}
lefiNoiseVictim::~lefiNoiseVictim()
{
Destroy();
}
// Append one noise value, doubling the array when it is full.
void
lefiNoiseVictim::addVictimNoise(double d)
{
if (numNoises_ == noisesAllocated_) {
int max;
double *ne;
int i;
if (noisesAllocated_ == 0) {
max = noisesAllocated_ = 2;
numNoises_ = 0;
} else
max = noisesAllocated_ = numNoises_ * 2;
ne = (double*) lefMalloc(sizeof(double) * max);
// max/2 == numNoises_ here, so every existing entry is copied.
max /= 2;
for (i = 0; i < max; i++)
ne[i] = noises_[i];
lefFree((char*) (noises_));
noises_ = ne;
}
noises_[numNoises_] = d;
numNoises_ += 1;
}
int
lefiNoiseVictim::numNoises() const
{
return numNoises_;
}
// Return the index-th noise value; no bounds checking is performed.
double
lefiNoiseVictim::noise(int index) const
{
return noises_[index];
}
double
lefiNoiseVictim::length() const
{
return length_;
}
// *****************************************************************************
// lefiNoiseResistance
// *****************************************************************************
// A resistance entry: a list of resistance numbers plus the victims
// measured at that resistance.
lefiNoiseResistance::lefiNoiseResistance()
: numNums_(0),
numsAllocated_(0),
nums_(NULL),
numVictims_(0),
victimsAllocated_(0),
victims_(NULL)
{
Init();
}
// Allocate initial storage: one resistance number, two victim slots.
void
lefiNoiseResistance::Init()
{
numNums_ = 0;
numsAllocated_ = 1;
nums_ = (double*) lefMalloc(sizeof(double) * 1);
numVictims_ = 0;
victimsAllocated_ = 2;
victims_ = (lefiNoiseVictim**) lefMalloc(sizeof(
lefiNoiseVictim*) * 2);
}
// Destroy and free every victim, then reset both counts (capacity kept).
void
lefiNoiseResistance::clear()
{
int i;
lefiNoiseVictim *r;
int max = numVictims_;
for (i = 0; i < max; i++) {
r = victims_[i];
r->Destroy();
// Victims are raw lefMalloc'd blocks (see addVictimLength), so they
// are freed directly rather than deleted.
lefFree((char*) r);
}
numVictims_ = 0;
numNums_ = 0;
}
void
lefiNoiseResistance::Destroy()
{
clear();
lefFree((char*) (nums_));
lefFree((char*) (victims_));
}
lefiNoiseResistance::~lefiNoiseResistance()
{
Destroy();
}
// Append one resistance number, doubling nums_ when full.
void
lefiNoiseResistance::addResistanceNumber(double d)
{
if (numNums_ == numsAllocated_) {
int max;
double *ne;
int i;
if (numsAllocated_ == 0) {
max = numsAllocated_ = 2;
numNums_ = 0;
} else
max = numsAllocated_ = numNums_ * 2;
ne = (double*) lefMalloc(sizeof(double) * max);
// max/2 == numNums_ here, so every existing entry is preserved.
max /= 2;
for (i = 0; i < max; i++)
ne[i] = nums_[i];
lefFree((char*) (nums_));
nums_ = ne;
}
nums_[numNums_] = d;
numNums_ += 1;
}
// Forward a noise value to the most recently added victim.
// Precondition: addVictimLength() has been called at least once.
void
lefiNoiseResistance::addVictimNoise(double d)
{
lefiNoiseVictim *r = victims_[numVictims_ - 1];
r->addVictimNoise(d);
}
// Start a new victim with the given length, doubling victims_ if full.
void
lefiNoiseResistance::addVictimLength(double d)
{
lefiNoiseVictim *r;
if (numVictims_ == victimsAllocated_) {
int max;
lefiNoiseVictim **ne;
int i;
if (victimsAllocated_ == 0) {
max = victimsAllocated_ = 2;
numVictims_ = 0;
} else
max = victimsAllocated_ = numVictims_ * 2;
ne = (lefiNoiseVictim**) lefMalloc(sizeof(lefiNoiseVictim*) * max);
max /= 2;
for (i = 0; i < max; i++)
ne[i] = victims_[i];
lefFree((char*) (victims_));
victims_ = ne;
}
// Raw-allocated and Init'd without running the constructor; this
// matches the allocation style used throughout this file.
r = (lefiNoiseVictim*) lefMalloc(sizeof(lefiNoiseVictim));
r->Init(d);
victims_[numVictims_] = r;
numVictims_ += 1;
}
int
lefiNoiseResistance::numVictims() const
{
return numVictims_;
}
// Unchecked accessors: callers must keep index within range.
lefiNoiseVictim *
lefiNoiseResistance::victim(int index) const
{
return victims_[index];
}
int
lefiNoiseResistance::numNums() const
{
return numNums_;
}
double
lefiNoiseResistance::num(int index) const
{
return nums_[index];
}
// *****************************************************************************
// lefiNoiseEdge
// *****************************************************************************
// An edge-rate entry: the edge value plus its table of resistances.
lefiNoiseEdge::lefiNoiseEdge()
{
Init();
}
// Allocate the initial 2-slot resistance array and zero the edge value.
void
lefiNoiseEdge::Init()
{
edge_ = 0;
numResistances_ = 0;
resistancesAllocated_ = 2;
resistances_ = (lefiNoiseResistance**) lefMalloc(sizeof(
lefiNoiseResistance*) * 2);
}
// Destroy and free each resistance entry, then reset state (capacity kept).
void
lefiNoiseEdge::clear()
{
int i;
lefiNoiseResistance *r;
int maxr = numResistances_;
for (i = 0; i < maxr; i++) {
r = resistances_[i];
r->Destroy();
lefFree((char*) r);
}
edge_ = 0;
numResistances_ = 0;
}
void
lefiNoiseEdge::Destroy()
{
clear();
lefFree((char*) (resistances_));
}
lefiNoiseEdge::~lefiNoiseEdge()
{
Destroy();
}
void
lefiNoiseEdge::addEdge(double d)
{
edge_ = d;
}
// Forward to the most recently added resistance entry.
// Precondition: addResistance() has been called at least once.
void
lefiNoiseEdge::addResistanceNumber(double d)
{
lefiNoiseResistance *r = resistances_[numResistances_ - 1];
r->addResistanceNumber(d);
}
// Start a new resistance entry, doubling the array when full.
void
lefiNoiseEdge::addResistance()
{
lefiNoiseResistance *r;
if (numResistances_ == resistancesAllocated_) {
int max;
lefiNoiseResistance **ne;
int i;
if (resistancesAllocated_ == 0) {
max = resistancesAllocated_ = 2;
numResistances_ = 0;
} else
max = resistancesAllocated_ = numResistances_ * 2;
ne = (lefiNoiseResistance**) lefMalloc(sizeof(lefiNoiseResistance*) * max);
max /= 2;
for (i = 0; i < max; i++)
ne[i] = resistances_[i];
lefFree((char*) (resistances_));
resistances_ = ne;
}
// Raw-allocated and Init'd without running the constructor, matching
// the allocation style used throughout this file.
r = (lefiNoiseResistance*) lefMalloc(sizeof(lefiNoiseResistance));
r->Init();
resistances_[numResistances_] = r;
numResistances_ += 1;
}
void
lefiNoiseEdge::addVictimNoise(double d)
{
lefiNoiseResistance *r = resistances_[numResistances_ - 1];
r->addVictimNoise(d);
}
void
lefiNoiseEdge::addVictimLength(double d)
{
lefiNoiseResistance *r = resistances_[numResistances_ - 1];
r->addVictimLength(d);
}
int
lefiNoiseEdge::numResistances()
{
return numResistances_;
}
// Unchecked accessor: index must be < numResistances().
lefiNoiseResistance *
lefiNoiseEdge::resistance(int index)
{
return resistances_[index];
}
double
lefiNoiseEdge::edge()
{
return edge_;
}
// *****************************************************************************
// lefiNoiseTable
// *****************************************************************************
// Top-level NOISETABLE: a table number plus a list of edge-rate entries.
lefiNoiseTable::lefiNoiseTable()
{
Init();
}
void
lefiNoiseTable::Init()
{
numEdges_ = 0;
edgesAllocated_ = 2;
edges_ = (lefiNoiseEdge**) lefMalloc(sizeof(lefiNoiseEdge*) * 2);
}
// Destroy and free every edge, then reset the count (capacity kept).
void
lefiNoiseTable::clear()
{
int i;
lefiNoiseEdge *r;
int max = numEdges_;
for (i = 0; i < max; i++) {
r = edges_[i];
r->Destroy();
lefFree((char*) r);
}
numEdges_ = 0;
}
void
lefiNoiseTable::Destroy()
{
clear();
lefFree((char*) (edges_));
}
lefiNoiseTable::~lefiNoiseTable()
{
Destroy();
}
// Begin a new table: record its number and discard previously parsed edges.
void
lefiNoiseTable::setup(int i)
{
num_ = i;
clear();
}
// Start a new edge entry, doubling the array when full.
void
lefiNoiseTable::newEdge()
{
lefiNoiseEdge *r;
if (numEdges_ == edgesAllocated_) {
int max;
lefiNoiseEdge **ne;
int i;
if (edgesAllocated_ == 0) {
max = edgesAllocated_ = 2;
numEdges_ = 0;
} else
max = edgesAllocated_ = numEdges_ * 2;
ne = (lefiNoiseEdge**) lefMalloc(sizeof(lefiNoiseEdge*) * max);
max /= 2;
for (i = 0; i < max; i++)
ne[i] = edges_[i];
lefFree((char*) (edges_));
edges_ = ne;
}
// Raw-allocated and Init'd without running the constructor.
r = (lefiNoiseEdge*) lefMalloc(sizeof(lefiNoiseEdge));
r->Init();
edges_[numEdges_] = r;
numEdges_ += 1;
}
// The add* methods below forward to the most recently created edge.
// Precondition: newEdge() has been called at least once.
void
lefiNoiseTable::addEdge(double d)
{
lefiNoiseEdge *r = edges_[numEdges_ - 1];
r->addEdge(d);
}
void
lefiNoiseTable::addResistance()
{
lefiNoiseEdge *r = edges_[numEdges_ - 1];
r->addResistance();
}
void
lefiNoiseTable::addResistanceNumber(double d)
{
lefiNoiseEdge *r = edges_[numEdges_ - 1];
r->addResistanceNumber(d);
}
void
lefiNoiseTable::addVictimLength(double d)
{
lefiNoiseEdge *r = edges_[numEdges_ - 1];
r->addVictimLength(d);
}
void
lefiNoiseTable::addVictimNoise(double d)
{
lefiNoiseEdge *r = edges_[numEdges_ - 1];
r->addVictimNoise(d);
}
int
lefiNoiseTable::num()
{
return num_;
}
int
lefiNoiseTable::numEdges()
{
return numEdges_;
}
// Unchecked accessor: index must be < numEdges().
lefiNoiseEdge *
lefiNoiseTable::edge(int index)
{
return edges_[index];
}
// *****************************************************************************
// lefiCorrectionVictim
// *****************************************************************************
// Construct a correction victim for a wire of the given length.
// Members are value-initialized in the init list for consistency with
// lefiNoiseVictim's constructor (and so the object is in a defined
// state even before Init allocates real storage).
lefiCorrectionVictim::lefiCorrectionVictim(double d)
: length_(0.0),
  numCorrections_(0),
  correctionsAllocated_(0),
  corrections_(NULL)
{
    Init(d);
}
// Record the victim length and allocate the initial 2-entry array of
// correction values.
void
lefiCorrectionVictim::Init(double d)
{
length_ = d;
numCorrections_ = 0;
correctionsAllocated_ = 2;
corrections_ = (double*) lefMalloc(sizeof(double) * 2);
}
// Drop the stored corrections; the allocated capacity is kept.
void
lefiCorrectionVictim::clear()
{
numCorrections_ = 0;
}
// Release the correction array.  NOTE(review): corrections_ is not
// reset to NULL, so Destroy() must not be called twice without Init().
void
lefiCorrectionVictim::Destroy()
{
clear();
lefFree((char*) (corrections_));
}
lefiCorrectionVictim::~lefiCorrectionVictim()
{
Destroy();
}
// Append one correction value, doubling the array when it is full.
void
lefiCorrectionVictim::addVictimCorrection(double d)
{
if (numCorrections_ == correctionsAllocated_) {
int max;
double *ne;
int i;
if (correctionsAllocated_ == 0) {
max = correctionsAllocated_ = 2;
numCorrections_ = 0;
} else
max = correctionsAllocated_ = numCorrections_ * 2;
ne = (double*) lefMalloc(sizeof(double) * max);
// max/2 == numCorrections_ here, so all existing entries are copied.
max /= 2;
for (i = 0; i < max; i++)
ne[i] = corrections_[i];
lefFree((char*) (corrections_));
corrections_ = ne;
}
corrections_[numCorrections_] = d;
numCorrections_ += 1;
}
int
lefiCorrectionVictim::numCorrections()
{
return numCorrections_;
}
// Return the index-th correction value; no bounds checking is performed.
double
lefiCorrectionVictim::correction(int index)
{
return corrections_[index];
}
double
lefiCorrectionVictim::length()
{
return length_;
}
// *****************************************************************************
// lefiCorrectionResistance
// *****************************************************************************
// Construct a correction-resistance entry.  Members are value-initialized
// in the init list for consistency with lefiNoiseResistance's constructor
// (and so the object is in a defined state before Init allocates storage).
lefiCorrectionResistance::lefiCorrectionResistance()
: numNums_(0),
  numsAllocated_(0),
  nums_(NULL),
  numVictims_(0),
  victimsAllocated_(0),
  victims_(NULL)
{
    Init();
}
// Allocate initial storage: one resistance number, two victim slots.
void
lefiCorrectionResistance::Init()
{
numNums_ = 0;
numsAllocated_ = 1;
nums_ = (double*) lefMalloc(sizeof(double) * 1);
numVictims_ = 0;
victimsAllocated_ = 2;
victims_ = (lefiCorrectionVictim**) lefMalloc(sizeof(
lefiCorrectionVictim*) * 2);
}
// Destroy and free every victim, then reset both counts (capacity kept).
void
lefiCorrectionResistance::clear()
{
int i;
lefiCorrectionVictim *r;
int max = numVictims_;
for (i = 0; i < max; i++) {
r = victims_[i];
r->Destroy();
// Victims are raw lefMalloc'd blocks (see addVictimLength), so they
// are freed directly rather than deleted.
lefFree((char*) r);
}
numVictims_ = 0;
numNums_ = 0;
}
void
lefiCorrectionResistance::Destroy()
{
clear();
lefFree((char*) (nums_));
lefFree((char*) (victims_));
}
lefiCorrectionResistance::~lefiCorrectionResistance()
{
Destroy();
}
// Append one resistance number, growing nums_ as needed.
//
// BUG FIX: the growth test previously read "if (numsAllocated_)", the
// inverse of every sibling grow routine in this file (e.g.
// lefiNoiseResistance::addResistanceNumber uses "numsAllocated_ == 0").
// Since Init() sets numsAllocated_ to 1, the first growth (when
// numNums_ == numsAllocated_ == 1) took the "empty" branch, reset the
// capacity to 2 and numNums_ to 0, silently discarding the value
// already stored.  The condition must fire only when no storage has
// been allocated yet.
void
lefiCorrectionResistance::addResistanceNumber(double d)
{
    if (numNums_ == numsAllocated_) {
        int max;
        double *ne;
        int i;
        if (numsAllocated_ == 0) {
            max = numsAllocated_ = 2;
            numNums_ = 0;
        } else
            max = numsAllocated_ = numNums_ * 2;
        ne = (double*) lefMalloc(sizeof(double) * max);
        // Copy the existing entries into the enlarged array.  Bounding
        // the loop by numNums_ (== max/2 in the doubling branch, 0 in
        // the fresh-allocation branch) also avoids reading from a
        // never-allocated nums_ when starting from zero capacity.
        for (i = 0; i < numNums_; i++)
            ne[i] = nums_[i];
        lefFree((char*) (nums_));
        nums_ = ne;
    }
    nums_[numNums_] = d;
    numNums_ += 1;
}
// Forward a correction value to the most recently added victim.
// Precondition: addVictimLength() has been called at least once.
void
lefiCorrectionResistance::addVictimCorrection(double d)
{
lefiCorrectionVictim *r = victims_[numVictims_ - 1];
r->addVictimCorrection(d);
}
// Start a new victim with the given length, doubling victims_ if full.
void
lefiCorrectionResistance::addVictimLength(double d)
{
lefiCorrectionVictim *r;
if (numVictims_ == victimsAllocated_) {
int max;
lefiCorrectionVictim **ne;
int i;
if (victimsAllocated_ == 0) {
max = victimsAllocated_ = 2;
numVictims_ = 0;
} else
max = victimsAllocated_ = numVictims_ * 2;
ne = (lefiCorrectionVictim**) lefMalloc(sizeof(lefiCorrectionVictim*) * max);
max /= 2;
for (i = 0; i < max; i++)
ne[i] = victims_[i];
lefFree((char*) (victims_));
victims_ = ne;
}
// Raw-allocated and Init'd without running the constructor, matching
// the allocation style used throughout this file.
r = (lefiCorrectionVictim*) lefMalloc(sizeof(lefiCorrectionVictim));
r->Init(d);
victims_[numVictims_] = r;
numVictims_ += 1;
}
int
lefiCorrectionResistance::numVictims()
{
return numVictims_;
}
// Unchecked accessors: callers must keep index within range.
lefiCorrectionVictim *
lefiCorrectionResistance::victim(int index)
{
return victims_[index];
}
int
lefiCorrectionResistance::numNums()
{
return numNums_;
}
double
lefiCorrectionResistance::num(int index)
{
return nums_[index];
}
// *****************************************************************************
// lefiCorrectionEdge
// *****************************************************************************
// An edge-rate entry of the correction table: the edge value plus its
// table of correction resistances.
lefiCorrectionEdge::lefiCorrectionEdge()
{
Init();
}
void
lefiCorrectionEdge::Init()
{
edge_ = 0;
numResistances_ = 0;
resistancesAllocated_ = 2;
resistances_ = (lefiCorrectionResistance**) lefMalloc(sizeof(
lefiCorrectionResistance*) * 2);
}
// Destroy and free each resistance entry, then reset state (capacity kept).
void
lefiCorrectionEdge::clear()
{
int i;
lefiCorrectionResistance *r;
int maxr = numResistances_;
for (i = 0; i < maxr; i++) {
r = resistances_[i];
r->Destroy();
lefFree((char*) r);
}
edge_ = 0;
numResistances_ = 0;
}
void
lefiCorrectionEdge::Destroy()
{
clear();
lefFree((char*) (resistances_));
}
lefiCorrectionEdge::~lefiCorrectionEdge()
{
Destroy();
}
void
lefiCorrectionEdge::addEdge(double d)
{
edge_ = d;
}
// Forward to the most recently added resistance entry.
// Precondition: addResistance() has been called at least once.
void
lefiCorrectionEdge::addResistanceNumber(double d)
{
lefiCorrectionResistance *r = resistances_[numResistances_ - 1];
r->addResistanceNumber(d);
}
// Start a new resistance entry, doubling the array when full.
void
lefiCorrectionEdge::addResistance()
{
lefiCorrectionResistance *r;
if (numResistances_ == resistancesAllocated_) {
int max;
lefiCorrectionResistance **ne;
int i;
if (resistancesAllocated_ == 0) {
max = resistancesAllocated_ = 2;
numResistances_ = 0;
} else
max = resistancesAllocated_ = numResistances_ * 2;
ne = (lefiCorrectionResistance**) lefMalloc
(sizeof(lefiCorrectionResistance*) * max);
max /= 2;
for (i = 0; i < max; i++)
ne[i] = resistances_[i];
lefFree((char*) (resistances_));
resistances_ = ne;
}
// Raw-allocated and Init'd without running the constructor.
r = (lefiCorrectionResistance*) lefMalloc(sizeof(lefiCorrectionResistance));
r->Init();
resistances_[numResistances_] = r;
numResistances_ += 1;
}
void
lefiCorrectionEdge::addVictimCorrection(double d)
{
lefiCorrectionResistance *r = resistances_[numResistances_ - 1];
r->addVictimCorrection(d);
}
void
lefiCorrectionEdge::addVictimLength(double d)
{
lefiCorrectionResistance *r = resistances_[numResistances_ - 1];
r->addVictimLength(d);
}
int
lefiCorrectionEdge::numResistances()
{
return numResistances_;
}
// Unchecked accessor: index must be < numResistances().
lefiCorrectionResistance *
lefiCorrectionEdge::resistance(int index)
{
return resistances_[index];
}
double
lefiCorrectionEdge::edge()
{
return edge_;
}
// *****************************************************************************
// lefiCorrectionTable
// *****************************************************************************
// Top-level CORRECTIONTABLE: a table number plus a list of edge entries.
lefiCorrectionTable::lefiCorrectionTable()
{
Init();
}
void
lefiCorrectionTable::Init()
{
numEdges_ = 0;
edgesAllocated_ = 2;
edges_ = (lefiCorrectionEdge**) lefMalloc(sizeof(lefiCorrectionEdge*) * 2);
}
// Destroy and free every edge, then reset the count (capacity kept).
void
lefiCorrectionTable::clear()
{
int i;
lefiCorrectionEdge *r;
int max = numEdges_;
for (i = 0; i < max; i++) {
r = edges_[i];
r->Destroy();
lefFree((char*) r);
}
numEdges_ = 0;
}
void
lefiCorrectionTable::Destroy()
{
clear();
lefFree((char*) (edges_));
}
lefiCorrectionTable::~lefiCorrectionTable()
{
Destroy();
}
// Begin a new table: record its number and discard previously parsed edges.
void
lefiCorrectionTable::setup(int i)
{
num_ = i;
clear();
}
// Start a new edge entry, doubling the array when full.
void
lefiCorrectionTable::newEdge()
{
lefiCorrectionEdge *r;
if (numEdges_ == edgesAllocated_) {
int max;
lefiCorrectionEdge **ne;
int i;
if (edgesAllocated_ == 0) {
max = edgesAllocated_ = 2;
numEdges_ = 0;
} else
max = edgesAllocated_ = numEdges_ * 2;
ne = (lefiCorrectionEdge**) lefMalloc(sizeof(lefiCorrectionEdge*) * max);
max /= 2;
for (i = 0; i < max; i++)
ne[i] = edges_[i];
lefFree((char*) (edges_));
edges_ = ne;
}
// Raw-allocated and Init'd without running the constructor.
r = (lefiCorrectionEdge*) lefMalloc(sizeof(lefiCorrectionEdge));
r->Init();
edges_[numEdges_] = r;
numEdges_ += 1;
}
// The add* methods below forward to the most recently created edge.
// Precondition: newEdge() has been called at least once.
void
lefiCorrectionTable::addEdge(double d)
{
lefiCorrectionEdge *r = edges_[numEdges_ - 1];
r->addEdge(d);
}
void
lefiCorrectionTable::addResistanceNumber(double d)
{
lefiCorrectionEdge *r = edges_[numEdges_ - 1];
r->addResistanceNumber(d);
}
void
lefiCorrectionTable::addResistance()
{
lefiCorrectionEdge *r = edges_[numEdges_ - 1];
r->addResistance();
}
void
lefiCorrectionTable::addVictimLength(double d)
{
lefiCorrectionEdge *r = edges_[numEdges_ - 1];
r->addVictimLength(d);
}
void
lefiCorrectionTable::addVictimCorrection(double d)
{
lefiCorrectionEdge *r = edges_[numEdges_ - 1];
r->addVictimCorrection(d);
}
int
lefiCorrectionTable::num()
{
return num_;
}
int
lefiCorrectionTable::numEdges()
{
return numEdges_;
}
// Unchecked accessor: index must be < numEdges().
lefiCorrectionEdge *
lefiCorrectionTable::edge(int index)
{
return edges_[index];
}
END_LEFDEF_PARSER_NAMESPACE
// (table-extraction artifact removed; a second source file follows)
//===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling and other late optimizations. This
// pass should be run after register allocation but before the post-regalloc
// scheduling pass.
//
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>
using namespace llvm;
#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
namespace {
// Machine-function pass that expands AArch64 pseudo instructions
// (immediate moves, compare-and-swap) into real target instructions.
class AArch64ExpandPseudo : public MachineFunctionPass {
public:
const AArch64InstrInfo *TII;
static char ID;
AArch64ExpandPseudo() : MachineFunctionPass(ID) {
initializeAArch64ExpandPseudoPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &Fn) override;
StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }
private:
// Expand every pseudo in one basic block; returns true if changed.
bool expandMBB(MachineBasicBlock &MBB);
// Expand a single instruction; NextMBBI is updated when expansion
// splits the block.
bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI);
// Expand MOVi32imm/MOVi64imm into ORR/MOVZ/MOVN/MOVK sequences.
bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned BitSize);
// Fallback expansion: one MOVZ/MOVN plus up to three MOVKs.
bool expandMOVImmSimple(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned BitSize,
unsigned OneChunks,
unsigned ZeroChunks);
bool expandCMP_SWAP(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
unsigned ExtendImm, unsigned ZeroReg,
MachineBasicBlock::iterator &NextMBBI);
bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI);
};
} // end anonymous namespace
char AArch64ExpandPseudo::ID = 0;
INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
AARCH64_EXPAND_PSEUDO_NAME, false, false)
/// Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion.
/// Copy the implicit operands trailing the pseudo instruction \p OldMI
/// onto the instructions created from its expansion: implicit uses are
/// attached to \p UseMI, implicit defs to \p DefMI.
static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
                           MachineInstrBuilder &DefMI) {
  const unsigned FirstImplicit = OldMI.getDesc().getNumOperands();
  const unsigned NumOps = OldMI.getNumOperands();
  for (unsigned Idx = FirstImplicit; Idx < NumOps; ++Idx) {
    const MachineOperand &Op = OldMI.getOperand(Idx);
    assert(Op.isReg() && Op.getReg());
    if (Op.isUse())
      UseMI.add(Op);
    else
      DefMI.add(Op);
  }
}
/// Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
/// Extract the 16-bit chunk at index \p ChunkIdx (0 = least significant)
/// from the 64-bit value \p Imm.
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");
  const unsigned Shift = ChunkIdx * 16;
  return (Imm >> Shift) & 0xFFFFULL;
}
/// Check whether the given 16-bit chunk replicated to full 64-bit width
/// can be materialized with an ORR instruction.
/// Return true if the 16-bit chunk \p Chunk, replicated across all four
/// 16-bit lanes of a 64-bit value, is encodable as an ORR logical
/// immediate; on success \p Encoding receives the encoding.
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
  uint64_t Replicated = Chunk;
  Replicated |= Replicated << 16;
  Replicated |= Replicated << 32;
  return AArch64_AM::processLogicalImmediate(Replicated, 64, Encoding);
}
/// Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// 16-bit chunks will be materialized with MOVK instructions.
///
/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
/// an ORR instruction.
/// Try to materialize \p UImm as one ORR of a replicated chunk plus one
/// or two MOVKs.  Returns true (and erases MI) on success, false when
/// no chunk repeats often enough.
static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
const AArch64InstrInfo *TII) {
using CountMap = DenseMap<uint64_t, unsigned>;
CountMap Counts;
// Scan the constant and count how often every chunk occurs.
for (unsigned Idx = 0; Idx < 4; ++Idx)
++Counts[getChunk(UImm, Idx)];
// Traverse the chunks to find one which occurs more than once.
for (CountMap::const_iterator Chunk = Counts.begin(), End = Counts.end();
Chunk != End; ++Chunk) {
const uint64_t ChunkVal = Chunk->first;
const unsigned Count = Chunk->second;
uint64_t Encoding = 0;
// We are looking for chunks which have two or three instances and can be
// materialized with an ORR instruction.
if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
continue;
const bool CountThree = Count == 3;
// Create the ORR-immediate instruction.
MachineInstrBuilder MIB =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
.add(MI.getOperand(0))
.addReg(AArch64::XZR)
.addImm(Encoding);
const unsigned DstReg = MI.getOperand(0).getReg();
const bool DstIsDead = MI.getOperand(0).isDead();
unsigned ShiftAmt = 0;
uint64_t Imm16 = 0;
// Find the first chunk not materialized with the ORR instruction.
for (; ShiftAmt < 64; ShiftAmt += 16) {
Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
if (Imm16 != ChunkVal)
break;
}
// Create the first MOVK instruction.  The dead flag is only set here
// when no second MOVK will follow (the three-instance case).
MachineInstrBuilder MIB1 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
.addReg(DstReg,
RegState::Define | getDeadRegState(DstIsDead && CountThree))
.addReg(DstReg)
.addImm(Imm16)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
// In case we have three instances the whole constant is now materialized
// and we can exit.
if (CountThree) {
transferImpOps(MI, MIB, MIB1);
MI.eraseFromParent();
return true;
}
// Find the remaining chunk which needs to be materialized.
for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
if (Imm16 != ChunkVal)
break;
}
// Create the second MOVK instruction.
MachineInstrBuilder MIB2 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
.addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
.addReg(DstReg)
.addImm(Imm16)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
transferImpOps(MI, MIB, MIB2);
MI.eraseFromParent();
return true;
}
return false;
}
/// Check whether this chunk matches the pattern '1...0...'. This pattern
/// starts a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
/// Check whether this chunk matches the pattern '1...0...', i.e. it
/// starts a contiguous sequence of ones when scanning from the LSB
/// towards the MSB.  All-zero and all-one chunks do not qualify.
static bool isStartChunk(uint64_t Chunk) {
  const bool AllZeros = (Chunk == 0);
  const bool AllOnes = (Chunk == std::numeric_limits<uint64_t>::max());
  return !AllZeros && !AllOnes && isMask_64(~Chunk);
}
/// Check whether this chunk matches the pattern '0...1...' This pattern
/// ends a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
/// Check whether this chunk matches the pattern '0...1...', i.e. it
/// ends a contiguous sequence of ones when scanning from the LSB
/// towards the MSB.  All-zero and all-one chunks do not qualify.
static bool isEndChunk(uint64_t Chunk) {
  const bool AllZeros = (Chunk == 0);
  const bool AllOnes = (Chunk == std::numeric_limits<uint64_t>::max());
  return !AllZeros && !AllOnes && isMask_64(Chunk);
}
/// Clear or set all bits in the chunk at the given index.
/// Return \p Imm with the 16-bit chunk at index \p Idx either cleared
/// (\p Clear == true) or set to all ones (\p Clear == false).
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t ChunkMask = 0xFFFFULL << (Idx * 16);
  return Clear ? (Imm & ~ChunkMask) : (Imm | ChunkMask);
}
/// Check whether the constant contains a sequence of contiguous ones,
/// which might be interrupted by one or two chunks. If so, materialize the
/// sequence of contiguous ones with an ORR instruction.
/// Materialize the chunks which are either interrupting the sequence or outside
/// of the sequence with a MOVK instruction.
///
/// Assuming S is a chunk which starts the sequence (1...0...), E is a chunk
/// which ends the sequence (0...1...). Then we are looking for constants which
/// contain at least one S and E chunk.
/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
///
/// We are also looking for constants like |S|A|B|E| where the contiguous
/// sequence of ones wraps around the MSB into the LSB.
/// Try to materialize \p UImm as an ORR of a contiguous-ones pattern
/// (possibly wrapping MSB->LSB) plus one or two MOVKs for the chunks
/// that break or lie outside the run.  Returns true (and erases MI) on
/// success.
static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
const AArch64InstrInfo *TII) {
const int NotSet = -1;
const uint64_t Mask = 0xFFFF;
int StartIdx = NotSet;
int EndIdx = NotSet;
// Try to find the chunks which start/end a contiguous sequence of ones.
for (int Idx = 0; Idx < 4; ++Idx) {
int64_t Chunk = getChunk(UImm, Idx);
// Sign extend the 16-bit chunk to 64-bit.
Chunk = (Chunk << 48) >> 48;
if (isStartChunk(Chunk))
StartIdx = Idx;
else if (isEndChunk(Chunk))
EndIdx = Idx;
}
// Early exit in case we can't find a start/end chunk.
if (StartIdx == NotSet || EndIdx == NotSet)
return false;
// Outside of the contiguous sequence of ones everything needs to be zero.
uint64_t Outside = 0;
// Chunks between the start and end chunk need to have all their bits set.
uint64_t Inside = Mask;
// If our contiguous sequence of ones wraps around from the MSB into the LSB,
// just swap indices and pretend we are materializing a contiguous sequence
// of zeros surrounded by a contiguous sequence of ones.
if (StartIdx > EndIdx) {
std::swap(StartIdx, EndIdx);
std::swap(Outside, Inside);
}
uint64_t OrrImm = UImm;
int FirstMovkIdx = NotSet;
int SecondMovkIdx = NotSet;
// Find out which chunks we need to patch up to obtain a contiguous sequence
// of ones.
for (int Idx = 0; Idx < 4; ++Idx) {
const uint64_t Chunk = getChunk(UImm, Idx);
// Check whether we are looking at a chunk which is not part of the
// contiguous sequence of ones.
if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
OrrImm = updateImm(OrrImm, Idx, Outside == 0);
// Remember the index we need to patch.
if (FirstMovkIdx == NotSet)
FirstMovkIdx = Idx;
else
SecondMovkIdx = Idx;
// Check whether we are looking a chunk which is part of the contiguous
// sequence of ones.
} else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
OrrImm = updateImm(OrrImm, Idx, Inside != Mask);
// Remember the index we need to patch.
if (FirstMovkIdx == NotSet)
FirstMovkIdx = Idx;
else
SecondMovkIdx = Idx;
}
}
assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");
// Create the ORR-immediate instruction.  The patched OrrImm is a
// contiguous-ones pattern, so the encoding is expected to succeed.
uint64_t Encoding = 0;
AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
MachineInstrBuilder MIB =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
.add(MI.getOperand(0))
.addReg(AArch64::XZR)
.addImm(Encoding);
const unsigned DstReg = MI.getOperand(0).getReg();
const bool DstIsDead = MI.getOperand(0).isDead();
const bool SingleMovk = SecondMovkIdx == NotSet;
// Create the first MOVK instruction.  The dead flag is only set here
// when no second MOVK follows.
MachineInstrBuilder MIB1 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
.addReg(DstReg,
RegState::Define | getDeadRegState(DstIsDead && SingleMovk))
.addReg(DstReg)
.addImm(getChunk(UImm, FirstMovkIdx))
.addImm(
AArch64_AM::getShifterImm(AArch64_AM::LSL, FirstMovkIdx * 16));
// Early exit in case we only need to emit a single MOVK instruction.
if (SingleMovk) {
transferImpOps(MI, MIB, MIB1);
MI.eraseFromParent();
return true;
}
// Create the second MOVK instruction.
MachineInstrBuilder MIB2 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
.addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
.addReg(DstReg)
.addImm(getChunk(UImm, SecondMovkIdx))
.addImm(
AArch64_AM::getShifterImm(AArch64_AM::LSL, SecondMovkIdx * 16));
transferImpOps(MI, MIB, MIB2);
MI.eraseFromParent();
return true;
}
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
/// \p BitSize is 32 or 64.  Returns true when the pseudo was replaced
/// (the pseudo is always replaced by one of the strategies below).
bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned BitSize) {
  MachineInstr &MI = *MBBI;
  unsigned DstReg = MI.getOperand(0).getReg();
  uint64_t Imm = MI.getOperand(1).getImm();
  const unsigned Mask = 0xFFFF;

  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
    // Useless def, and we don't want to risk creating an invalid ORR (which
    // would really write to sp).
    MI.eraseFromParent();
    return true;
  }

  // Scan the immediate and count the number of 16-bit chunks which are either
  // all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }

  // FIXME: Prefer MOVZ/MOVN over ORR because of the rules for the "mov"
  // alias.

  // Try a single ORR.  UImm is Imm truncated to BitSize bits.
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
            .add(MI.getOperand(0))
            .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
            .addImm(Encoding);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }

  // Two instruction sequences.
  //
  // Prefer MOVZ/MOVN followed by MOVK; it's more readable, and possibly the
  // fastest sequence with fast literal generation.
  if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2)
    return expandMOVImmSimple(MBB, MBBI, BitSize, OneChunks, ZeroChunks);

  // BUG FIX: the two adjacent string literals previously concatenated to
  // "...expanded with aMOVZ/MOVK pair" (missing separating space).
  assert(BitSize == 64 && "All 32-bit immediates can be expanded with a "
                          "MOVZ/MOVK pair");

  // Try other two-instruction sequences.

  // 64-bit ORR followed by MOVK.
  // We try to construct the ORR immediate in three different ways: either we
  // zero out the chunk which will be replaced, we fill the chunk which will
  // be replaced with ones, or we take the bit pattern from the other half of
  // the 64-bit immediate. This is comprehensive because of the way ORR
  // immediates are constructed.
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    uint64_t ShiftedMask = (0xFFFFULL << Shift);
    uint64_t ZeroChunk = UImm & ~ShiftedMask;
    uint64_t OneChunk = UImm | ShiftedMask;
    uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
    uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
    if (AArch64_AM::processLogicalImmediate(ZeroChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(OneChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(ReplicateChunk,
                                            BitSize, Encoding)) {
      // Create the ORR-immediate instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
              .add(MI.getOperand(0))
              .addReg(AArch64::XZR)
              .addImm(Encoding);

      // Create the MOVK instruction that patches the replaced chunk.
      const unsigned Imm16 = getChunk(UImm, Shift / 16);
      const unsigned DstReg = MI.getOperand(0).getReg();
      const bool DstIsDead = MI.getOperand(0).isDead();
      MachineInstrBuilder MIB1 =
          BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
              .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
              .addReg(DstReg)
              .addImm(Imm16)
              .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
      transferImpOps(MI, MIB, MIB1);
      MI.eraseFromParent();
      return true;
    }
  }

  // FIXME: Add more two-instruction sequences.

  // Three instruction sequences.
  //
  // Prefer MOVZ/MOVN followed by two MOVK; it's more readable, and possibly
  // the fastest sequence with fast literal generation. (If neither MOVK is
  // part of a fast literal generation pair, it could be slower than the
  // four-instruction sequence, but we won't worry about that for now.)
  if (OneChunks || ZeroChunks)
    return expandMOVImmSimple(MBB, MBBI, BitSize, OneChunks, ZeroChunks);

  // Check for identical 16-bit chunks within the constant and if so
  // materialize them with a single ORR instruction. The remaining one or two
  // 16-bit chunks will be materialized with MOVK instructions.
  if (BitSize == 64 && tryToreplicateChunks(UImm, MI, MBB, MBBI, TII))
    return true;

  // Check whether the constant contains a sequence of contiguous ones, which
  // might be interrupted by one or two chunks. If so, materialize the sequence
  // of contiguous ones with an ORR instruction. Materialize the chunks which
  // are either interrupting the sequence or outside of the sequence with a
  // MOVK instruction.
  if (BitSize == 64 && trySequenceOfOnes(UImm, MI, MBB, MBBI, TII))
    return true;

  // We found no possible two or three instruction sequence; use the general
  // four-instruction sequence.
  return expandMOVImmSimple(MBB, MBBI, BitSize, OneChunks, ZeroChunks);
}
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to a
/// MOVZ or MOVN of width BitSize followed by up to 3 MOVK instructions.
///
/// \param BitSize    width of the destination register: 32 or 64.
/// \param OneChunks  number of 16-bit chunks of the immediate that are all-ones.
/// \param ZeroChunks number of 16-bit chunks of the immediate that are all-zero.
/// \returns true; the pseudo is always expanded and erased.
bool AArch64ExpandPseudo::expandMOVImmSimple(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MBBI,
                                             unsigned BitSize,
                                             unsigned OneChunks,
                                             unsigned ZeroChunks) {
  MachineInstr &MI = *MBBI;
  unsigned DstReg = MI.getOperand(0).getReg();
  uint64_t Imm = MI.getOperand(1).getImm();
  const unsigned Mask = 0xFFFF;

  // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
  // more MOVK instructions to insert additional 16-bit portions into the
  // lower bits.
  bool isNeg = false;

  // Use MOVN to materialize the high bits if we have more all one chunks
  // than all zero chunks: working on the inverted value needs fewer MOVKs.
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    // Keep only the low 32 bits so the chunk scan below sees a clean value.
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for high bits with MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for last MOVK
  if (Imm != 0) {
    // Only chunks between the lowest and highest set bits need instructions.
    unsigned LZ = countLeadingZeros(Imm);
    unsigned TZ = countTrailingZeros(Imm);
    Shift = (TZ / 16) * 16;
    LastShift = ((63 - LZ) / 16) * 16;
  }
  unsigned Imm16 = (Imm >> Shift) & Mask;
  bool DstIsDead = MI.getOperand(0).isDead();
  // The dead flag is only attached to the final instruction of the expansion,
  // i.e. here only when no MOVK will follow (Shift == LastShift).
  MachineInstrBuilder MIB1 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(FirstOpc))
          .addReg(DstReg, RegState::Define |
                  getDeadRegState(DstIsDead && Shift == LastShift))
          .addImm(Imm16)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  // Single-instruction case: MOVZ/MOVN alone materialized the value.
  if (Shift == LastShift) {
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }

  MachineInstrBuilder MIB2;
  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift < LastShift) {
    Shift += 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit portion is already set correctly.
    MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
               .addReg(DstReg,
                       RegState::Define |
                       getDeadRegState(DstIsDead && Shift == LastShift))
               .addReg(DstReg)
               .addImm(Imm16)
               .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
  }

  transferImpOps(MI, MIB1, MIB2);
  MI.eraseFromParent();
  return true;
}
/// Expand a CMP_SWAP pseudo into an LL/SC loop across three new basic blocks.
///
/// \param LdarOp    load-acquire-exclusive opcode for the element width.
/// \param StlrOp    store-release-exclusive opcode for the element width.
/// \param CmpOp     subtract opcode used for the comparison (sets NZCV).
/// \param ExtendImm extend/shift immediate operand for CmpOp.
/// \param ZeroReg   WZR or XZR, used as the discarded result of the compare.
/// \param NextMBBI  out-parameter; updated because this expansion splits MBB.
/// \returns true; the pseudo is always expanded and erased.
bool AArch64ExpandPseudo::expandCMP_SWAP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
    unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Dest = MI.getOperand(0);
  unsigned StatusReg = MI.getOperand(1).getReg();
  bool StatusDead = MI.getOperand(1).isDead();
  // Duplicating undef operands into 2 instructions does not guarantee the same
  // value on both; however, undef should be replaced by xzr anyway.
  assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
  unsigned AddrReg = MI.getOperand(2).getReg();
  unsigned DesiredReg = MI.getOperand(3).getReg();
  unsigned NewReg = MI.getOperand(4).getReg();

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     mov wStatus, 0
  //     ldaxr xDest, [xAddr]
  //     cmp xDest, xDesired
  //     b.ne .Ldone
  // Pre-clearing wStatus is skipped when the status result is dead.
  if (!StatusDead)
    BuildMI(LoadCmpBB, DL, TII->get(AArch64::MOVZWi), StatusReg)
      .addImm(0).addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
      .addReg(AddrReg);
  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
      .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
      .addReg(DesiredReg)
      .addImm(ExtendImm);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
      .addImm(AArch64CC::NE)
      .addMBB(DoneBB)
      .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxr wStatus, xNew, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  // A non-zero status means the exclusive store failed; retry the loop.
  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
      .addReg(NewReg)
      .addReg(AddrReg);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, getKillRegState(StatusDead))
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  // Everything after the pseudo moves into DoneBB, which inherits MBB's
  // original successors.
  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  // Recompute livein lists.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *DoneBB);
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
  // Do an extra pass around the loop to get loop carried registers right.
  StoreBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  LoadCmpBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);

  return true;
}
/// Expand a 128-bit CMP_SWAP pseudo into an LDAXP/STLXP loop across three new
/// basic blocks. The 128-bit value is handled as a lo/hi register pair, and
/// the two halves are compared with SUBS + CSINC so a single status register
/// accumulates "all halves equal".
///
/// \param NextMBBI out-parameter; updated because this expansion splits MBB.
/// \returns true; the pseudo is always expanded and erased.
bool AArch64ExpandPseudo::expandCMP_SWAP_128(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineOperand &DestLo = MI.getOperand(0);
  MachineOperand &DestHi = MI.getOperand(1);
  unsigned StatusReg = MI.getOperand(2).getReg();
  bool StatusDead = MI.getOperand(2).isDead();
  // Duplicating undef operands into 2 instructions does not guarantee the same
  // value on both; however, undef should be replaced by xzr anyway.
  assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
  unsigned AddrReg = MI.getOperand(3).getReg();
  unsigned DesiredLoReg = MI.getOperand(4).getReg();
  unsigned DesiredHiReg = MI.getOperand(5).getReg();
  unsigned NewLoReg = MI.getOperand(6).getReg();
  unsigned NewHiReg = MI.getOperand(7).getReg();

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     ldaxp xDestLo, xDestHi, [xAddr]
  //     cmp xDestLo, xDesiredLo
  //     sbcs xDestHi, xDesiredHi
  //     b.ne .Ldone
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
      .addReg(DestLo.getReg(), RegState::Define)
      .addReg(DestHi.getReg(), RegState::Define)
      .addReg(AddrReg);
  // Compare low halves; CSINC sets StatusReg = 0 if equal, 1 otherwise.
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
      .addReg(DesiredLoReg)
      .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
    .addUse(AArch64::WZR)
    .addUse(AArch64::WZR)
    .addImm(AArch64CC::EQ);
  // Compare high halves; CSINC leaves StatusReg unchanged if equal,
  // otherwise makes it non-zero, so StatusReg != 0 iff any half mismatched.
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
      .addReg(DesiredHiReg)
      .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
    .addUse(StatusReg, RegState::Kill)
    .addUse(StatusReg, RegState::Kill)
    .addImm(AArch64CC::EQ);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
      .addUse(StatusReg, getKillRegState(StatusDead))
      .addMBB(DoneBB);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxp wStatus, xNewLo, xNewHi, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  // A non-zero status means the exclusive store failed; retry the loop.
  BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
      .addReg(NewLoReg)
      .addReg(NewHiReg)
      .addReg(AddrReg);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, getKillRegState(StatusDead))
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  // Everything after the pseudo moves into DoneBB, which inherits MBB's
  // original successors.
  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  // Recompute liveness bottom up.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *DoneBB);
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
  // Do an extra pass in the loop to get the loop carried dependencies right.
  StoreBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  LoadCmpBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);

  return true;
}
/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
///
/// \param NextMBBI updated by expansions that split the block (CMP_SWAP*).
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;

  // Register-register arithmetic/logical pseudos: rewritten as the
  // corresponding shifted-register form with a zero LSL shift.
  case AArch64::ADDWrr:
  case AArch64::SUBWrr:
  case AArch64::ADDXrr:
  case AArch64::SUBXrr:
  case AArch64::ADDSWrr:
  case AArch64::SUBSWrr:
  case AArch64::ADDSXrr:
  case AArch64::SUBSXrr:
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::ANDSWrr:
  case AArch64::ANDSXrr:
  case AArch64::BICSWrr:
  case AArch64::BICSXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr: {
    // Note: this inner Opcode (the replacement opcode) shadows the outer one.
    unsigned Opcode;
    switch (MI.getOpcode()) {
    default:
      return false;
    case AArch64::ADDWrr:      Opcode = AArch64::ADDWrs; break;
    case AArch64::SUBWrr:      Opcode = AArch64::SUBWrs; break;
    case AArch64::ADDXrr:      Opcode = AArch64::ADDXrs; break;
    case AArch64::SUBXrr:      Opcode = AArch64::SUBXrs; break;
    case AArch64::ADDSWrr:     Opcode = AArch64::ADDSWrs; break;
    case AArch64::SUBSWrr:     Opcode = AArch64::SUBSWrs; break;
    case AArch64::ADDSXrr:     Opcode = AArch64::ADDSXrs; break;
    case AArch64::SUBSXrr:     Opcode = AArch64::SUBSXrs; break;
    case AArch64::ANDWrr:      Opcode = AArch64::ANDWrs; break;
    case AArch64::ANDXrr:      Opcode = AArch64::ANDXrs; break;
    case AArch64::BICWrr:      Opcode = AArch64::BICWrs; break;
    case AArch64::BICXrr:      Opcode = AArch64::BICXrs; break;
    case AArch64::ANDSWrr:     Opcode = AArch64::ANDSWrs; break;
    case AArch64::ANDSXrr:     Opcode = AArch64::ANDSXrs; break;
    case AArch64::BICSWrr:     Opcode = AArch64::BICSWrs; break;
    case AArch64::BICSXrr:     Opcode = AArch64::BICSXrs; break;
    case AArch64::EONWrr:      Opcode = AArch64::EONWrs; break;
    case AArch64::EONXrr:      Opcode = AArch64::EONXrs; break;
    case AArch64::EORWrr:      Opcode = AArch64::EORWrs; break;
    case AArch64::EORXrr:      Opcode = AArch64::EORXrs; break;
    case AArch64::ORNWrr:      Opcode = AArch64::ORNWrs; break;
    case AArch64::ORNXrr:      Opcode = AArch64::ORNXrs; break;
    case AArch64::ORRWrr:      Opcode = AArch64::ORRWrs; break;
    case AArch64::ORRXrr:      Opcode = AArch64::ORRXrs; break;
    }
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
                MI.getOperand(0).getReg())
            .add(MI.getOperand(1))
            .add(MI.getOperand(2))
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::LOADgot: {
    MachineFunction *MF = MBB.getParent();
    unsigned DstReg = MI.getOperand(0).getReg();
    const MachineOperand &MO1 = MI.getOperand(1);
    unsigned Flags = MO1.getTargetFlags();

    if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
      // Tiny codemodel expand to LDR
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
                                        TII->get(AArch64::LDRXl), DstReg);

      // The operand may be a global, an external symbol, or a constant pool
      // entry; attach the matching literal reference.
      if (MO1.isGlobal()) {
        MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
      } else if (MO1.isSymbol()) {
        MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
      } else {
        assert(MO1.isCPI() &&
               "Only expect globals, externalsymbols, or constant pools");
        MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
      }
    } else {
      // Small codemodel expand into ADRP + LDR.
      MachineInstrBuilder MIB1 =
          BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
      MachineInstrBuilder MIB2 =
          BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
              .add(MI.getOperand(0))
              .addReg(DstReg);

      // ADRP takes the 4KB page of the symbol, LDR the offset within the page.
      if (MO1.isGlobal()) {
        MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
        MIB2.addGlobalAddress(MO1.getGlobal(), 0,
                              Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
      } else if (MO1.isSymbol()) {
        MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
        MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
                                                    AArch64II::MO_PAGEOFF |
                                                    AArch64II::MO_NC);
      } else {
        assert(MO1.isCPI() &&
               "Only expect globals, externalsymbols, or constant pools");
        MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                  Flags | AArch64II::MO_PAGE);
        MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                  Flags | AArch64II::MO_PAGEOFF |
                                  AArch64II::MO_NC);
      }

      transferImpOps(MI, MIB1, MIB2);
    }
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVaddr:
  case AArch64::MOVaddrJT:
  case AArch64::MOVaddrCP:
  case AArch64::MOVaddrBA:
  case AArch64::MOVaddrTLS:
  case AArch64::MOVaddrEXT: {
    // Expand into ADRP + ADD.
    unsigned DstReg = MI.getOperand(0).getReg();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
            .add(MI.getOperand(1));

    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
            .add(MI.getOperand(0))
            .addReg(DstReg)
            .add(MI.getOperand(2))
            .addImm(0);

    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::ADDlowTLS:
    // Produce a plain ADD
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .addImm(0);
    MI.eraseFromParent();
    return true;

  case AArch64::MOVbaseTLS: {
    // Read the thread pointer from the appropriate system register.
    unsigned DstReg = MI.getOperand(0).getReg();
    auto SysReg = AArch64SysReg::TPIDR_EL0;
    MachineFunction *MF = MBB.getParent();
    // Fuchsia kernel code uses the EL1 thread pointer register.
    if (MF->getTarget().getTargetTriple().isOSFuchsia() &&
        MF->getTarget().getCodeModel() == CodeModel::Kernel)
      SysReg = AArch64SysReg::TPIDR_EL1;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MRS), DstReg)
        .addImm(SysReg);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVi32imm:
    return expandMOVImm(MBB, MBBI, 32);
  case AArch64::MOVi64imm:
    return expandMOVImm(MBB, MBBI, 64);
  case AArch64::RET_ReallyLR: {
    // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
    // function and missing live-ins. We are fine in practice because callee
    // saved register handling ensures the register value is restored before
    // RET, but we need the undef flag here to appease the MachineVerifier
    // liveness checks.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
          .addReg(AArch64::LR, RegState::Undef);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }

  // Atomic compare-and-swap pseudos: expand to an explicit LL/SC loop,
  // parameterized by the load/store/compare opcodes for each width.
  case AArch64::CMP_SWAP_8:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_16:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_32:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
                          AArch64::SUBSWrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_64:
    return expandCMP_SWAP(MBB, MBBI,
                          AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::XZR, NextMBBI);
  case AArch64::CMP_SWAP_128:
    return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);

  case AArch64::AESMCrrTied:
  case AArch64::AESIMCrrTied: {
    // Drop the tied-operand constraint by re-emitting the plain AES opcode.
    MachineInstrBuilder MIB =
    BuildMI(MBB, MBBI, MI.getDebugLoc(),
            TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
                                                      AArch64::AESIMCrr))
      .add(MI.getOperand(0))
      .add(MI.getOperand(1));
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }
  }
  return false;
}
/// Iterate over the instructions in basic block MBB and expand any
/// pseudo instructions. Return true if anything was modified.
bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB.begin(), End = MBB.end();
       I != End;) {
    // expandMI may erase *I and may also splice instructions into new blocks;
    // it reports the next instruction to visit through Next.
    MachineBasicBlock::iterator Next = std::next(I);
    if (expandMI(MBB, I, Next))
      Changed = true;
    I = Next;
  }
  return Changed;
}
/// Expand the pseudo instructions of every basic block in MF.
/// \returns true if any instruction was rewritten.
bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  bool Changed = false;
  for (MachineBasicBlock &Block : MF) {
    if (expandMBB(Block))
      Changed = true;
  }
  return Changed;
}
/// Returns an instance of the pseudo instruction expansion pass.
/// The caller (pass manager) takes ownership of the returned object.
FunctionPass *llvm::createAArch64ExpandPseudoPass() {
  return new AArch64ExpandPseudo();
}
|
#include "../../include/AllocatorManager.hpp"
#include "ChunkList.hpp"
#include "UnitTest.hpp"
#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>
#include <cassert>
using namespace memwa;
typedef std::unique_lock< std::mutex > MyLockGuard;
typedef std::chrono::duration< unsigned int, std::milli > MillisecondDuration;
typedef std::chrono::milliseconds millis;
static bool showProximityCounts_ = true;
// ----------------------------------------------------------------------------
void SleepForRandomTime()
{
const unsigned int sleepTime = rand() % 6;
const MillisecondDuration d( sleepTime );
const millis milliseconds( d );
std::this_thread::sleep_for( milliseconds );
}
// ----------------------------------------------------------------------------
/// Worker body run by many threads at once: allocates loopCount chunks from
/// the shared allocator, verifies they are all unique, then releases them.
/// @param u          unit test that records pass/fail results.
/// @param allocator  allocator under test; shared by all worker threads.
/// @param objectSize size in bytes requested from the allocator.
/// @param mutex      serializes calls into the unit-test framework; each
///                   UNIT_TEST call is made while holding it.
void SimpleMultithreadedTest( ut::UnitTest * u, Allocator * allocator, unsigned int objectSize, std::mutex & mutex )
{
	const unsigned int reserveCount = 1000;
	const unsigned int loopCount = 1000;
	void * place = nullptr;
	bool released = false;
	ChunkList chunks( reserveCount );
	try
	{
		// Allocation phase: each successful allocation is recorded locally.
		for ( unsigned int ii = 0; ii < loopCount; ++ii )
		{
			place = allocator->Allocate( objectSize );
			// Lock only around the UNIT_TEST call, then unlock before the
			// slow ChunkList/sleep work so other threads can report too.
			MyLockGuard lock( mutex );
			UNIT_TEST( u, ( place != nullptr ) );
			lock.unlock();
			chunks.AddChunk( place );
			SleepForRandomTime();
		}
		{
			// No two allocations handed to this thread may alias each other.
			const bool allUnique = chunks.AreUnique();
			MyLockGuard lock( mutex );
			UNIT_TEST( u, allUnique );
			lock.unlock();
		}
		// Release phase: pop chunks in LIFO order and return them.
		for ( unsigned int ii = 0; ii < loopCount; ++ii )
		{
			place = chunks.GetTopChunk();
			released = allocator->Release( place, objectSize );
			MyLockGuard lock( mutex );
			UNIT_TEST( u, released );
			lock.unlock();
			chunks.RemoveTopChunk();
			SleepForRandomTime();
		}
		UNIT_TEST( u, chunks.GetCount() == 0 );
	}
	catch ( const std::exception & ex )
	{
		// Any exception is a test failure; report it with its message.
		MyLockGuard lock( mutex );
		UNIT_TEST_WITH_MSG( u, false, ex.what() );
	}
	catch ( ... )
	{
		MyLockGuard lock( mutex );
		UNIT_TEST_WITH_MSG( u, false, "Caught unknown exception." );
	}
}
// ----------------------------------------------------------------------------
/// Worker body for stack/linear style allocators: only allocates (never
/// releases), since those allocators are expected to hand out memory
/// sequentially. Verifies every returned chunk is non-null and unique.
/// @param u             unit test that records pass/fail results.
/// @param allocator     allocator under test; shared by all worker threads.
/// @param allocatorInfo parameters used to size each allocation request.
/// @param mutex         serializes calls into the unit-test framework.
void SimpleMultithreadedStackTest( ut::UnitTest * u, Allocator * allocator,
	const AllocatorManager::AllocatorParameters & allocatorInfo, std::mutex & mutex )
{
	const unsigned int loopCount = 1000;
	void * place = nullptr;
	ChunkList chunks( loopCount );
	try
	{
		// One randomized request size per thread, bounded by
		// objectSize + alignment - 1 at most.
		const std::size_t bytes = rand() % allocatorInfo.objectSize + allocatorInfo.alignment;
		for ( unsigned int ii = 0; ii < loopCount; ++ii )
		{
			place = allocator->Allocate( bytes );
			// Lock only around the UNIT_TEST call; the ChunkList bookkeeping
			// and sleep are thread-local and need no protection.
			MyLockGuard lock( mutex );
			UNIT_TEST( u, ( place != nullptr ) );
			lock.unlock();
			chunks.AddChunk( place );
			SleepForRandomTime();
		}
	}
	catch ( const std::exception & ex )
	{
		MyLockGuard lock( mutex );
		UNIT_TEST_WITH_MSG( u, false, ex.what() );
	}
	catch ( ... )
	{
		MyLockGuard lock( mutex );
		UNIT_TEST_WITH_MSG( u, false, "Caught unknown exception." );
	}
	// Final checks are made while holding the lock; it is released when the
	// guard goes out of scope at function exit.
	const bool allUnique = chunks.AreUnique();
	MyLockGuard lock( mutex );
	UNIT_TEST( u, allUnique );
	UNIT_TEST( u, chunks.GetCount() == loopCount );
}
// ----------------------------------------------------------------------------
/// Launches threadCount worker threads that hammer @p allocator concurrently
/// via SimpleMultithreadedTest, then waits for all of them to finish.
/// @param u             unit test used for recording results (shared by all threads).
/// @param allocator     allocator under test; must be thread-safe.
/// @param allocatorInfo parameters the allocator was created with; only
///                      objectSize is used here.
void RunSimpleThreadTest( ut::UnitTest * u, Allocator * allocator, const AllocatorManager::AllocatorParameters allocatorInfo )
{
	std::mutex unitTestMutex_;
	const unsigned int threadCount = 16;
	// Hold the workers by value: std::thread is movable, so the original
	// new/delete per thread is unnecessary and could leak if a join threw.
	std::thread threads[ threadCount ];
	for ( unsigned int ii = 0; ii < threadCount; ++ii )
	{
		threads[ ii ] = std::thread( SimpleMultithreadedTest, u, allocator, allocatorInfo.objectSize, std::ref( unitTestMutex_ ) );
	}
	for ( unsigned int ii = 0; ii < threadCount; ++ii )
	{
		threads[ ii ].join();
	}
}
// ----------------------------------------------------------------------------
/// Thread-safety functional test for the Pool allocator: creates the manager
/// and a Pool allocator, runs 16 allocate/release worker threads against it,
/// then destroys the allocator and the manager.
/// @param showProximityCounts copied into the file-scope flag showProximityCounts_.
void DoSimplePoolThreadTest( bool showProximityCounts )
{
	std::cout << "Simple Pool Allocator Thread-Safety Functionality Test" << std::endl;

	ut::UnitTestSet & uts = ut::UnitTestSet::GetIt();
	ut::UnitTest * u = uts.AddUnitTest( "Pool Thread Test" );

	// Each Do* test owns the manager for its whole lifetime.
	UNIT_TEST_WITH_MSG( u, AllocatorManager::CreateManager( true, 4096 ), "Creation should pass since AllocatorManager does not exist yet." );
	AllocatorManager::AllocatorParameters allocatorInfo;
	allocatorInfo.type = AllocatorManager::AllocatorType::Pool;
	allocatorInfo.objectSize = 16;
	allocatorInfo.alignment = 8;
	allocatorInfo.blockSize = 2048;
	allocatorInfo.initialBlocks = 1;
	Allocator * allocator = nullptr;
	UNIT_TEST_WITH_MSG( u, ( allocator = AllocatorManager::CreateAllocator( allocatorInfo ) ) != nullptr, "allocator should not be nullptr." );
	showProximityCounts_ = showProximityCounts;
	RunSimpleThreadTest( u, allocator, allocatorInfo );
	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyAllocator( allocator, true ), "DestroyAllocator should pass since parameter is valid." );
	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyManager( true ), "Destruction should pass since AllocatorManager exists." );
}
// ----------------------------------------------------------------------------
/// Thread-safety functional test for the Tiny allocator: creates the manager
/// and a Tiny allocator, runs 16 allocate/release worker threads against it,
/// then destroys the allocator and the manager.
/// @param showProximityCounts copied into the file-scope flag showProximityCounts_.
void DoSimpleTinyThreadTest( bool showProximityCounts )
{
	std::cout << "Simple Tiny Allocator Thread-Safety Functionality Test" << std::endl;

	ut::UnitTestSet & uts = ut::UnitTestSet::GetIt();
	ut::UnitTest * u = uts.AddUnitTest( "Tiny Thread Test" );

	// Each Do* test owns the manager for its whole lifetime.
	UNIT_TEST_WITH_MSG( u, AllocatorManager::CreateManager( true, 4096 ), "Creation should pass since AllocatorManager does not exist yet." );
	AllocatorManager::AllocatorParameters allocatorInfo;
	allocatorInfo.type = AllocatorManager::AllocatorType::Tiny;
	allocatorInfo.objectSize = 16;
	allocatorInfo.alignment = 8;
	allocatorInfo.blockSize = 16 * 256;
	allocatorInfo.initialBlocks = 1;
	Allocator * allocator = nullptr;
	UNIT_TEST_WITH_MSG( u, ( allocator = AllocatorManager::CreateAllocator( allocatorInfo ) ) != nullptr, "allocator should not be nullptr." );
	showProximityCounts_ = showProximityCounts;
	RunSimpleThreadTest( u, allocator, allocatorInfo );
	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyAllocator( allocator, true ), "DestroyAllocator should pass since parameter is valid." );
	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyManager( true ), "Destruction should pass since AllocatorManager exists." );
}
// ----------------------------------------------------------------------------
/// Thread-safety functional test for the Stack allocator: creates the manager
/// and a Stack allocator, runs 16 allocate-only worker threads against it,
/// then destroys the allocator and the manager.
/// @param showProximityCounts copied into the file-scope flag showProximityCounts_.
void DoSimpleStackThreadTest( bool showProximityCounts )
{
	std::cout << "Simple Stack Allocator Thread-Safety Functionality Test" << std::endl;

	ut::UnitTestSet & uts = ut::UnitTestSet::GetIt();
	ut::UnitTest * u = uts.AddUnitTest( "Stack Thread Test" );

	UNIT_TEST_WITH_MSG( u, AllocatorManager::CreateManager( true, 4096 ), "Creation should pass since AllocatorManager does not exist yet." );
	AllocatorManager::AllocatorParameters allocatorInfo;
	allocatorInfo.type = AllocatorManager::AllocatorType::Stack;
	allocatorInfo.objectSize = 16;
	allocatorInfo.alignment = 8;
	allocatorInfo.blockSize = 4096;
	allocatorInfo.initialBlocks = 1;
	Allocator * allocator = nullptr;
	UNIT_TEST_WITH_MSG( u, ( allocator = AllocatorManager::CreateAllocator( allocatorInfo ) ) != nullptr, "allocator should not be nullptr." );
	showProximityCounts_ = showProximityCounts;

	std::mutex unitTestMutex_;
	const unsigned int threadCount = 16;
	// Hold the workers by value: std::thread is movable, so the original
	// new/delete per thread is unnecessary and could leak if a join threw.
	std::thread threads[ threadCount ];
	for ( unsigned int ii = 0; ii < threadCount; ++ii )
	{
		threads[ ii ] = std::thread( SimpleMultithreadedStackTest, u, allocator, std::ref( allocatorInfo ), std::ref( unitTestMutex_ ) );
	}
	for ( unsigned int ii = 0; ii < threadCount; ++ii )
	{
		threads[ ii ].join();
	}

	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyAllocator( allocator, true ), "DestroyAllocator should pass since parameter is valid." );
	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyManager( true ), "Destruction should pass since AllocatorManager exists." );
}
// ----------------------------------------------------------------------------
/// Thread-safety functional test for the Linear allocator: creates the manager
/// and a Linear allocator, runs 16 allocate-only worker threads against it,
/// then destroys the allocator and the manager.
/// @param showProximityCounts copied into the file-scope flag showProximityCounts_.
void DoSimpleLinearThreadTest( bool showProximityCounts )
{
	std::cout << "Simple Linear Allocator Thread-Safety Functionality Test" << std::endl;

	ut::UnitTestSet & uts = ut::UnitTestSet::GetIt();
	ut::UnitTest * u = uts.AddUnitTest( "Linear Thread Test" );

	UNIT_TEST_WITH_MSG( u, AllocatorManager::CreateManager( true, 4096 ), "Creation should pass since AllocatorManager does not exist yet." );
	AllocatorManager::AllocatorParameters allocatorInfo;
	allocatorInfo.type = AllocatorManager::AllocatorType::Linear;
	allocatorInfo.objectSize = 16;
	allocatorInfo.alignment = 8;
	allocatorInfo.blockSize = 4096;
	allocatorInfo.initialBlocks = 1;
	Allocator * allocator = nullptr;
	UNIT_TEST_WITH_MSG( u, ( allocator = AllocatorManager::CreateAllocator( allocatorInfo ) ) != nullptr, "allocator should not be nullptr." );
	showProximityCounts_ = showProximityCounts;

	std::mutex unitTestMutex_;
	const unsigned int threadCount = 16;
	// Hold the workers by value: std::thread is movable, so the original
	// new/delete per thread is unnecessary and could leak if a join threw.
	std::thread threads[ threadCount ];
	for ( unsigned int ii = 0; ii < threadCount; ++ii )
	{
		threads[ ii ] = std::thread( SimpleMultithreadedStackTest, u, allocator, std::ref( allocatorInfo ), std::ref( unitTestMutex_ ) );
	}
	for ( unsigned int ii = 0; ii < threadCount; ++ii )
	{
		threads[ ii ].join();
	}

	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyAllocator( allocator, true ), "DestroyAllocator should pass since parameter is valid." );
	UNIT_TEST_WITH_MSG( u, AllocatorManager::DestroyManager( true ), "Destruction should pass since AllocatorManager exists." );
}
// ----------------------------------------------------------------------------
|
#include <algorithm>
#include <iostream>
#include <span>
#include <stdexcept>
#include <string>
#include <vector>
using uint64 = uint64_t;
using uint32 = uint32_t;
using int64 = int64_t;
using int32 = int32_t;
template <typename T> using Vec = std::vector<T>;
using IntVec = Vec<int>;
using Adjacent = Vec<Vec<bool>>;
using Graph = Vec<Vec<int>>;
/// Half-open integer range usable in range-based for loops.
/// Range(a, b) yields a, a+1, ..., b-1 when b >= a, and counts downward
/// (a, a-1, ..., b+1) when b < a. Range(b) is shorthand for Range(0, b).
class Range {
  const int from;
  const int to;

public:
  /// Single-pass iterator yielding the current integer by const reference.
  class iterator {
  public:
    using iterator_category = std::input_iterator_tag;
    using value_type = const int;
    using difference_type = std::ptrdiff_t;
    using pointer = const value_type *;
    using reference = const value_type &;

  private:
    const int from;
    const int to;
    int i;

  public:
    iterator(const int from, const int to, const int i)
        : from(from), to(to), i(i) {}
    // Steps toward `to`: increments for ascending ranges, decrements for
    // descending ones.
    iterator &operator++() {
      if (to >= from) {
        ++i;
      } else {
        --i;
      }
      return *this;
    }
    iterator operator++(int) {
      const iterator snapshot = *this;
      ++(*this);
      return snapshot;
    }
    bool operator==(const iterator &other) const { return i == other.i; }
    bool operator!=(const iterator &other) const { return !(*this == other); }
    reference operator*() const { return i; }
  };

  Range(const int to) : Range(0, to) {}
  Range(const int from, const int to) : from(from), to(to) {}
  iterator begin() { return iterator(from, to, from); }
  iterator end() { return iterator(from, to, to); }
};
/// Thin convenience wrapper around std::cin/std::cout; the constructor unties
/// the streams and disables stdio synchronization for fast buffered I/O.
class IO {
  std::istream &input;
  std::ostream &output;

public:
  IO();
  // Returns a nullary lambda that reads and returns one T per invocation.
  template <typename T> auto ReadT() const;
  // Reads and returns a single value of type T.
  template <typename T> T read() const;
  // Reads a single value into v.
  template <typename T> void read(T &v) const;
  // Returns a unary lambda that writes its argument.
  template <typename T> auto WriteT() const;
  // Writes a single value.
  template <typename T> void write(const T &v) const;
  // Writes a newline (std::endl, so the stream is flushed).
  void nl() const;
  // Writes a single space.
  void sp() const;
};
// Bind to std::cin/std::cout and configure them for speed: untie the streams
// and drop synchronization with C stdio.
IO::IO() : input(std::cin), output(std::cout) {
  input.tie(nullptr);
  output.tie(nullptr);
  std::ios_base::sync_with_stdio(false);
}

// Reader factory: each call of the returned lambda extracts one T.
template <typename T> auto IO::ReadT() const {
  return [this]() {
    T v;
    input >> v;
    return v;
  };
}

// Read and return a single T.
template <typename T> T IO::read() const {
  T v;
  input >> v;
  return v;
}

// Read a single value into v.
template <typename T> void IO::read(T &v) const { input >> v; }

// Writer factory: the returned lambda streams its argument to output.
template <typename T> auto IO::WriteT() const {
  return [this](const T &v) { output << v; };
}

// Stream a single value to output.
template <typename T> void IO::write(const T &v) const { output << v; }

// Newline via std::endl (also flushes).
void IO::nl() const { output << std::endl; }

// Single space separator.
void IO::sp() const { output << ' '; }
/// Reads a string s from stdin and prints its Z-array values z[1..n-1]
/// (z[i] = length of the longest common prefix of s and s.substr(i)),
/// space-separated on one line.
int main() {
  const auto io = IO();
  const auto s = io.read<std::string>();
  const int n = s.size();
  auto z = std::vector<int>(n, 0);
  // Guard n == 0: the original code would run the loop with i == 1 on an
  // empty vector and form &z[1] for the span, both undefined behavior.
  if (n > 0) {
    // [l, r) is the rightmost Z-box (segment matching a prefix) found so far.
    auto l = 0;
    auto r = 0;
    for (const auto i : Range(1, n)) {
      // Reuse the value already known inside the current Z-box, clipped to
      // the part of the box that lies to the right of i.
      z[i] = std::max(0, std::min(r - i, z[i - l]));
      // Extend the match by direct character comparison.
      while (z[i] + i < n && s[z[i]] == s[i + z[i]]) {
        z[i]++;
      }
      if (z[i] + i > r) {
        l = i;
        r = i + z[i];
      }
    }
    // subspan(1) skips z[0] safely even when n == 1 (empty result), unlike
    // the previous std::span(&z[1], z.size() - 1), which formed z[1].
    for (const auto &x : std::span(z).subspan(1)) {
      io.write(x);
      io.sp();
    }
  }
  io.nl();
}
|
#ifndef PYTHONIC_INCLUDE_BUILTIN_SYSTEMEXIT_HPP
#define PYTHONIC_INCLUDE_BUILTIN_SYSTEMEXIT_HPP

#include "pythonic/include/types/exceptions.hpp"

PYTHONIC_NS_BEGIN

namespace builtins
{
  // Declares the C++ counterpart of Python's builtin SystemExit exception;
  // the class body is generated by the PYTHONIC_EXCEPTION_DECL macro.
  PYTHONIC_EXCEPTION_DECL(SystemExit)
}
PYTHONIC_NS_END

#endif
|
/*
* intrusive_ptr.hpp
*
* Created on: Apr 25, 2019
* Author: bryan.flynt
*/
#ifndef INCLUDE_XSTD_DETAIL_MEMORY_INTRUSIVE_INTRUSIVE_PTR_HPP_
#define INCLUDE_XSTD_DETAIL_MEMORY_INTRUSIVE_INTRUSIVE_PTR_HPP_
#include "xstd/assert.hpp"
#include <cstddef> // std::nullptr_t
#include <functional> // std::less, std::hash
#include <type_traits> // std::remove_extent<T>
namespace xstd {
/// Smart pointer that retains shared ownership through a pointer
/**
* xstd::intrusive_ptr is a smart pointer that retains shared ownership
 * of an object through a pointer. Several intrusive_ptr objects may own
* the same object. The object is destroyed and its memory deallocated
* when either of the following happens:
*
* - the last remaining intrusive_ptr owning the object is destroyed
* - the last remaining intrusive_ptr owning the object is assigned
* another pointer via operator= or reset().
*
 * The difference to std::shared_ptr is that the intrusive_ptr version
 * attaches the reference count to the object itself, ensuring the count
 * is stored close to the object's memory so a slight performance
* There are two ways to attach the reference counting using the
* supporting class.
*
* The simplest is to inherit from the xstd::intrusive_base class:
* \code
* class Animal : public xstd::intrusive_base<Animal>{
* ...
* };
*
* class Dog : public Animal {
* ...
* };
*
* class Cat : public Animal {
* ...
* };
*
* xstd::intrusive_ptr<Animal> vec[2];
* vec[0] = new Dog();
* vec[1] = new Cat();
* \endcode
*
* The main drawback to this simple method is it requires
* reference counting to be added to the base class. This may not
* be desirable and therefore a second class, xstd::reference_count
* can be used to only add reference counting to the derived classes.
*
 * \code
* class Animal {
* ...
* };
*
* class Dog : public reference_count<Animal> {
* ...
* };
* class Cat : public reference_count<Animal> {
* ...
* };
*
* using reference_counted_base = reference_count<Animal>;
*
* xstd::intrusive_ptr<reference_counted_base> vec[2];
* vec[0] = new Dog();
* vec[1] = new Cat();
* \endcode
*/
template<typename T>
struct intrusive_ptr {
    using element_type = typename std::remove_extent<T>::type;

    /// Constructs an empty pointer that owns nothing.
    constexpr intrusive_ptr() noexcept : data_ptr_(nullptr){}

    /// Takes shared ownership of p. When add_ref is false the caller's
    /// existing reference is adopted instead of adding a new one.
    intrusive_ptr( T* p, bool add_ref = true ): data_ptr_(p) {
        if( (data_ptr_ != nullptr) && add_ref ){
            intrusive_ptr_add_ref(data_ptr_);
        }
    }

    /// Converting copy constructor (requires U* convertible to T*).
    template<typename U>
    intrusive_ptr(intrusive_ptr<U> const& other): data_ptr_(other.get()){
        if(data_ptr_ != nullptr){
            intrusive_ptr_add_ref(data_ptr_);
        }
    }

    /// Copy constructor: shares ownership and bumps the reference count.
    intrusive_ptr(intrusive_ptr const& rhs): data_ptr_( rhs.data_ptr_ ){
        if(data_ptr_ != nullptr) intrusive_ptr_add_ref( data_ptr_ );
    }

    /// Drops one reference; the pointee deletes itself when the count
    /// reaches zero (see intrusive_ptr_release).
    ~intrusive_ptr(){
        if(data_ptr_ != nullptr){
            intrusive_ptr_release(data_ptr_);
        }
    }

    /// Converting copy assignment (copy-and-swap).
    template<class U>
    intrusive_ptr& operator=(intrusive_ptr<U> const& rhs){
        this_type(rhs).swap(*this);
        return *this;
    }

    /// Move constructor: steals rhs's reference; no count adjustment.
    intrusive_ptr(intrusive_ptr&& rhs) noexcept : data_ptr_(rhs.data_ptr_){
        rhs.data_ptr_ = nullptr;
    }
    intrusive_ptr& operator=(intrusive_ptr&& rhs) noexcept{
        this_type(static_cast<intrusive_ptr&&>(rhs)).swap(*this);
        return *this;
    }

    // Grant converting members access to data_ptr_ of other instantiations.
    template<class U> friend class intrusive_ptr;

    /// Converting move constructor.
    template<class U>
    intrusive_ptr(intrusive_ptr<U>&& rhs) noexcept : data_ptr_(rhs.data_ptr_){
        rhs.data_ptr_ = nullptr;
    }
    template<class U>
    intrusive_ptr& operator=(intrusive_ptr<U>&& rhs) noexcept{
        this_type(static_cast<intrusive_ptr<U>&&>(rhs)).swap(*this);
        return *this;
    }
    intrusive_ptr& operator=(intrusive_ptr const& rhs){
        this_type(rhs).swap(*this);
        return *this;
    }

    /// Assignment from a raw pointer adds a reference (add_ref defaults true).
    intrusive_ptr& operator=(T* rhs){
        this_type(rhs).swap(*this);
        return *this;
    }

    /// Releases the current reference, leaving this pointer empty.
    void reset(){
        this_type().swap(*this);
    }
    void reset(T* rhs){
        this_type(rhs).swap(*this);
    }
    void reset(T* rhs, bool add_ref){
        this_type(rhs,add_ref).swap(*this);
    }

    /// Returns the stored pointer without affecting the reference count.
    T* get() const noexcept{
        return data_ptr_;
    }

    /// Relinquishes ownership: returns the raw pointer and empties this
    /// WITHOUT decrementing the count; the caller takes over the reference.
    T* detach() noexcept{
        T* ret = data_ptr_;
        data_ptr_ = nullptr;
        return ret;
    }

    // Dereference operators assert (debug builds) on a null pointer.
    T& operator*() const noexcept{
        ASSERT( data_ptr_ != nullptr );
        return *data_ptr_;
    }
    T* operator->() const noexcept{
        ASSERT( data_ptr_ != nullptr );
        return data_ptr_;
    }

    /// Exchanges stored pointers; reference counts are unchanged.
    void swap(intrusive_ptr& rhs) noexcept{
        T* tmp = data_ptr_;
        data_ptr_ = rhs.data_ptr_;
        rhs.data_ptr_ = tmp;
    }

    /// Implicit conversion to bool
    operator bool() const{
        return (data_ptr_ != nullptr);
    }

    // Current reference count, or 0 when empty. Presumably the pointee
    // exposes use_count() via intrusive_base/reference_count -- confirm
    // against those support classes.
    std::size_t use_count() const noexcept {
        return (data_ptr_) ? data_ptr_->use_count() : 0;
    }
private:
    using this_type = intrusive_ptr;
    T* data_ptr_;
};
/// Two intrusive_ptrs compare equal when they hold the same raw pointer.
template<class T, class U> inline bool operator==(intrusive_ptr<T> const& a, intrusive_ptr<U> const& b) noexcept{
    return b.get() == a.get();
}

/// Inequality defined as the negation of pointer equality.
template<class T, class U> inline bool operator!=(intrusive_ptr<T> const& a, intrusive_ptr<U> const& b) noexcept{
    return !(a.get() == b.get());
}

/// Mixed comparison: smart pointer against a raw pointer.
template<class T, class U> inline bool operator==(intrusive_ptr<T> const& a, U* b) noexcept{
    T* held = a.get();
    return held == b;
}
template<class T, class U> inline bool operator!=(intrusive_ptr<T> const& a, U* b) noexcept{
    T* held = a.get();
    return held != b;
}

/// Mixed comparison: raw pointer against a smart pointer.
template<class T, class U> inline bool operator==(T* a, intrusive_ptr<U> const& b) noexcept{
    U* held = b.get();
    return held == a;
}
template<class T, class U> inline bool operator!=(T* a, intrusive_ptr<U> const& b) noexcept{
    U* held = b.get();
    return held != a;
}
/// An intrusive_ptr compares equal to nullptr exactly when it is empty.
template<class T> inline bool operator==(intrusive_ptr<T> const& p, std::nullptr_t) noexcept{
    return !p.get();
}
template<class T> inline bool operator==(std::nullptr_t, intrusive_ptr<T> const& p) noexcept{
    return !p.get();
}

/// Non-empty pointers compare unequal to nullptr.
template<class T> inline bool operator!=(intrusive_ptr<T> const& p, std::nullptr_t) noexcept{
    return static_cast<bool>(p.get());
}
template<class T> inline bool operator!=(std::nullptr_t, intrusive_ptr<T> const& p) noexcept{
    return static_cast<bool>(p.get());
}
/// Strict weak ordering through std::less so the result is well defined
/// even for unrelated pointers (usable as an ordered-container comparator).
template<class T> inline bool operator<(intrusive_ptr<T> const& a, intrusive_ptr<T> const& b) noexcept{
    std::less<T*> ordering;
    return ordering(a.get(), b.get());
}

/// Non-member swap so ADL and standard algorithms can find it.
template<class T> void swap(intrusive_ptr<T>& lhs, intrusive_ptr<T>& rhs) noexcept{
    rhs.swap(lhs);
}

/// Boost-style accessor returning the raw pointer held by p.
template<class T> T* get_pointer(intrusive_ptr<T> const& p) noexcept{
    T* raw = p.get();
    return raw;
}
/// static_cast analogue of std::static_pointer_cast for intrusive_ptr.
template<class T, class U> intrusive_ptr<T> static_pointer_cast(intrusive_ptr<U> const& p){
    return intrusive_ptr<T>(static_cast<T*>(p.get()));
}

/// const_cast analogue; adds or removes const on the pointee type.
template<class T, class U> intrusive_ptr<T> const_pointer_cast(intrusive_ptr<U> const& p){
    return intrusive_ptr<T>(const_cast<T*>(p.get()));
}

/// dynamic_cast analogue; yields an empty pointer when the cast fails.
template<class T, class U> intrusive_ptr<T> dynamic_pointer_cast(intrusive_ptr<U> const& p){
    return intrusive_ptr<T>(dynamic_cast<T*>(p.get()));
}
/// Streams the address held by p (the raw pointer value).
/// Requires <ostream>; the header now includes it explicitly instead of
/// relying on a transitive include.
template<class Y> std::ostream & operator<<(std::ostream & os, intrusive_ptr<Y> const& p){
    return os << p.get();
}
} /* namespace xstd */
namespace std{
template<typename T>
struct hash<::xstd::intrusive_ptr<T>>{
using argument_type = ::xstd::intrusive_ptr<T>;
using result_type = std::size_t;
using key_type = typename argument_type::element_type*;
result_type operator()(argument_type const& ptr) const noexcept{
return std::hash<key_type>(ptr->get());
}
};
}
#endif /* INCLUDE_XSTD_DETAIL_MEMORY_INTRUSIVE_INTRUSIVE_PTR_HPP_ */
|
/* $Id: types.hpp 4742 2014-02-12 05:18:31Z bennylp $ */
/*
* Copyright (C) 2013 Teluu Inc. (http://www.teluu.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __PJSUA2_TYPES_HPP__
#define __PJSUA2_TYPES_HPP__
#ifdef _MSC_VER
# pragma warning( disable : 4290 ) // exception spec ignored
# pragma warning( disable : 4512 ) // can't generate assignment op
#endif
/**
* @file pjsua2/types.hpp
* @brief PJSUA2 Base Types
*/
#include <pjsua2/config.hpp>
#include <string>
#include <vector>
/** PJSUA2 API is inside pj namespace */
namespace pj
{
/**
* @defgroup PJSUA2_TYPES General Data Structure
* @ingroup PJSUA2_DS
* @{
*/
using std::string;
using std::vector;
/** Array of strings */
typedef std::vector<std::string> StringVector;
/** Array of integers */
typedef std::vector<int> IntVector;
/**
* Type of token, i.e. arbitrary application user data
*/
typedef void *Token;
/**
* Socket address, encoded as string. The socket address contains host
* and port number in "host[:port]" format. The host part may contain
* hostname, domain name, IPv4 or IPv6 address. For IPv6 address, the
* address will be enclosed with square brackets, e.g. "[::1]:5060".
*/
typedef string SocketAddress;
/**
* Transport ID is an integer.
*/
typedef int TransportId;
/**
* Transport handle, corresponds to pjsip_transport instance.
*/
typedef void *TransportHandle;
/**
* Timer entry, corresponds to pj_timer_entry
*/
typedef void *TimerEntry;
/**
* Generic data
*/
typedef void *GenericData;
/*
* Forward declaration of Account and Call to be used
* by Endpoint.
*/
class Account;
class Call;
/**
* Constants
*/
// Kept unscoped so the values convert implicitly to int, interchangeable
// with the underlying PJSUA C constants they mirror.
enum
{
    /** Invalid ID, equal to PJSUA_INVALID_ID */
    INVALID_ID = -1,

    /** Success, equal to PJ_SUCCESS */
    SUCCESS = 0
};
//////////////////////////////////////////////////////////////////////////////
/**
* This structure contains information about an error that is thrown
* as an exception.
*/
struct Error
{
    /** The error code. */
    pj_status_t status;

    /** The PJSUA API operation that throws the error. */
    string title;

    /** The error message */
    string reason;

    /** The PJSUA source file that throws the error */
    string srcFile;

    /** The line number of PJSUA source file that throws the error */
    int srcLine;

    /** Build error string.
     *  @param multi_line  Presumably selects a multi-line layout instead of
     *                     a single-line summary -- confirm against the
     *                     implementation in the .cpp.
     */
    string info(bool multi_line=false) const;

    /** Default constructor */
    Error();

    /**
     * Construct an Error instance from the specified parameters. If
     * \a prm_reason is empty, it will be filled with the error description
     * for the status code.
     */
    Error(pj_status_t prm_status,
          const string &prm_title,
          const string &prm_reason,
          const string &prm_src_file,
          int prm_src_line);
};
/*
* Error utilities.
*/
#if PJSUA2_ERROR_HAS_EXTRA_INFO
# define PJSUA2_RAISE_ERROR(status) \
PJSUA2_RAISE_ERROR2(status, __FUNCTION__)
# define PJSUA2_RAISE_ERROR2(status,op) \
PJSUA2_RAISE_ERROR3(status, op, string())
# define PJSUA2_RAISE_ERROR3(status,op,txt) \
do { \
Error err_ = Error(status, op, txt, __FILE__, __LINE__); \
PJ_LOG(1,(THIS_FILE, "%s", err_.info().c_str())); \
throw err_; \
} while (0)
#else
/** Raise Error exception */
# define PJSUA2_RAISE_ERROR(status) \
PJSUA2_RAISE_ERROR2(status, string())
/** Raise Error exception */
# define PJSUA2_RAISE_ERROR2(status,op) \
PJSUA2_RAISE_ERROR3(status, op, string())
/** Raise Error exception */
# define PJSUA2_RAISE_ERROR3(status,op,txt) \
do { \
Error err_ = Error(status, op, txt, string(), 0); \
PJ_LOG(1,(THIS_FILE, "%s", err_.info().c_str())); \
throw err_; \
} while (0)
#endif
/** Raise Error exception if the expression fails */
#define PJSUA2_CHECK_RAISE_ERROR2(status, op) \
do { \
if (status != PJ_SUCCESS) { \
PJSUA2_RAISE_ERROR2(status, op); \
} \
} while (0)
/** Raise Error exception if the status fails */
#define PJSUA2_CHECK_RAISE_ERROR(status) \
PJSUA2_CHECK_RAISE_ERROR2(status, "")
/** Raise Error exception if the expression fails */
#define PJSUA2_CHECK_EXPR(expr) \
do { \
pj_status_t the_status = expr; \
PJSUA2_CHECK_RAISE_ERROR2(the_status, #expr); \
} while (0)
//////////////////////////////////////////////////////////////////////////////
/**
* Version information.
*/
// Plain data holder for the library version; presumably populated from
// the native PJLIB version constants -- confirm in the implementation.
struct Version
{
    /** Major number */
    int major;

    /** Minor number */
    int minor;

    /** Additional revision number */
    int rev;

    /** Version suffix (e.g. "-svn") */
    string suffix;

    /** The full version info (e.g. "2.1.0-svn") */
    string full;

    /**
     * PJLIB version number as three bytes with the following format:
     * 0xMMIIRR00, where MM: major number, II: minor number, RR: revision
     * number, 00: always zero for now.
     */
    unsigned numeric;
};
//////////////////////////////////////////////////////////////////////////////
/**
* Representation of time value.
*/
struct TimeVal
{
    /**
     * The seconds part of the time.
     */
    long sec;

    /**
     * The milliseconds fraction of the time.
     */
    long msec;

public:
    /**
     * Convert from the pjsip native representation, copying the fields
     * of \a prm into this object.
     */
    void fromPj(const pj_time_val &prm);
};
/**
* @} PJSUA2
*/
} // namespace pj
#endif /* __PJSUA2_TYPES_HPP__ */
|
#include <Display.h>
#include <Utils.h>
#include <SDL.h>
#include <Surface.h>
#include <ExternalInterface.h>
#include <KeyCodes.h>
#include <map>
#ifdef BLACKBERRY
#include <SDL_syswm.h>
#include <bps/sensor.h>
#include <bps/virtualkeyboard.h>
#endif
#ifdef WEBOS
#include "PDL.h"
#include <syslog.h>
#endif
#ifdef RASPBERRYPI
#include <SDL_syswm.h>
#include "../opengl/Egl.h"
#endif
#ifdef EMSCRIPTEN
#include <emscripten.h>
#endif
#ifdef NME_MIXER
#include <SDL_mixer.h>
#endif
#ifdef SDL_IMAGE
#include <SDL_image.h>
#endif
#ifdef HX_WINDOWS
#include <windows.h>
#include <SDL_syswm.h>
HICON icon;
struct zWMcursor { void* curs; };
HWND hwnd;
SDL_SysWMinfo wminfo;
// One-time Win32 setup for the SDL 1.2 window: installs the standard
// arrow cursor, attaches the application icon to the window class, and
// asks SDL to center the window on screen.
void init_win32()
{
    SDL_Cursor *cursor = SDL_GetCursor();
    HINSTANCE handle = GetModuleHandle(NULL);
    //((struct zWMcursor *)cursor->wm_cursor)->curs = (void *)LoadCursorA(NULL, IDC_ARROW);
    // Reach into SDL's private cursor struct to swap in the system arrow.
    ((struct zWMcursor *)cursor->wm_cursor)->curs = (void *)LoadCursor(NULL, IDC_ARROW);
    SDL_SetCursor(cursor);
    // 101 is presumably the icon resource id baked into the executable --
    // confirm against the resource script.
    icon = LoadIcon(handle, (char *)101);
    SDL_GetWMInfo(&wminfo);
    hwnd = wminfo.window;
    SetClassLong(hwnd, GCL_HICON, (LONG)icon);
    // Environment hints read by SDL 1.2 when the video mode is created.
    SDL_putenv("SDL_VIDEO_WINDOW_POS=center");
    SDL_putenv("SDL_VIDEO_CENTERED=center");
}
// Win32 teardown: releases the icon handle loaded in init_win32().
void done_win32()
{
    DestroyIcon(icon);
}
#endif
namespace nme
{
static int sgDesktopWidth = 0;
static int sgDesktopHeight = 0;
static bool sgInitCalled = false;
static bool sgJoystickEnabled = false;
static int sgShaderFlags = 0;
static bool sgIsOGL2 = false;
enum { NO_TOUCH = -1 };
//To guard against multiple calls
// Initialise SDL (video, audio, timer) exactly once; subsequent calls are
// no-ops returning 0. Joystick support is initialised separately and may
// fail without failing the whole init. Returns SDL_Init's result.
int initSDL () {
    if (sgInitCalled)
        return 0;

    sgInitCalled = true;

#ifdef WEBOS
    // The Palm PDK entry point only exists from PDK version 100 onwards.
    if (PDL_GetPDKVersion () >= 100)
        PDL_Init(0);
#endif

    int err = SDL_Init (SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);

#if HX_WINDOWS
    init_win32();
#endif

    if (err == 0 && SDL_InitSubSystem (SDL_INIT_JOYSTICK) == 0) {
        sgJoystickEnabled = true;
    }

#ifdef BLACKBERRY
    if (err == 0) {
        // BlackBerry delivers platform notifications as system WM events.
        SDL_EventState(SDL_SYSWMEVENT, SDL_ENABLE);
    }
#endif

    return err;
}
// Software Surface backed by a plain SDL_Surface. When inDelete is true
// the wrapper owns the SDL surface and frees it in the destructor.
class SDLSurf : public Surface
{
public:
    SDLSurf(SDL_Surface *inSurf,bool inDelete) : mSurf(inSurf)
    {
        mDelete = inDelete;
        mLockedForHitTest = false;
    }
    ~SDLSurf()
    {
        if (mDelete)
            SDL_FreeSurface(mSurf);
    }

    int Width() const { return mSurf->w; }
    int Height() const { return mSurf->h; }

    // Derive nme's pixel format from the SDL surface: Bshift==0 means the
    // channels are already in ARGB order, and SDL_SRCALPHA marks the
    // alpha channel as meaningful.
    PixelFormat Format() const
    {
#ifdef EMSCRIPTEN
        uint8 swap = 0;
#else
        uint8 swap = mSurf->format->Bshift; // is 0 on argb
#endif
        if (mSurf->flags & SDL_SRCALPHA)
            return swap ? pfARGBSwap : pfARGB;
        return swap ? pfXRGBSwap : pfXRGB;
    }
    const uint8 *GetBase() const { return (const uint8 *)mSurf->pixels; }
    int GetStride() const { return mSurf->pitch; }

    // Fill inRect (or the whole surface when inRect is null) with
    // inColour, which is packed 0xAARRGGBB.
    void Clear(uint32 inColour,const Rect *inRect)
    {
        SDL_Rect r;
        SDL_Rect *rect_ptr = 0;
        if (inRect)
        {
            rect_ptr = &r;
            r.x = inRect->x;
            r.y = inRect->y;
            r.w = inRect->w;
            r.h = inRect->h;
        }

        SDL_FillRect(mSurf,rect_ptr,SDL_MapRGBA(mSurf->format,
                inColour>>16, inColour>>8, inColour, inColour>>24 ) );
    }

    // Lock the pixels for software rendering. Hit-testing only reads
    // pixels, so the (potentially expensive) lock is skipped for it.
    RenderTarget BeginRender(const Rect &inRect,bool inForHitTest)
    {
        mLockedForHitTest = inForHitTest;
        if (SDL_MUSTLOCK(mSurf) && !mLockedForHitTest)
            SDL_LockSurface(mSurf);
        return RenderTarget(Rect(Width(),Height()), Format(),
                (uint8 *)mSurf->pixels, mSurf->pitch);
    }
    void EndRender()
    {
        if (SDL_MUSTLOCK(mSurf) && !mLockedForHitTest)
            SDL_UnlockSurface(mSurf);
    }

    // Blit operations are intentionally no-ops in this backend.
    void BlitTo(const RenderTarget &outTarget,
                const Rect &inSrcRect,int inPosX, int inPosY,
                BlendMode inBlend, const BitmapCache *inMask,
                uint32 inTint=0xffffff ) const
    {
    }
    void BlitChannel(const RenderTarget &outTarget, const Rect &inSrcRect,
                     int inPosX, int inPosY,
                     int inSrcChannel, int inDestChannel ) const
    {
    }
    void StretchTo(const RenderTarget &outTarget,
                   const Rect &inSrcRect, const DRect &inDestRect) const
    {
    }

    SDL_Surface *mSurf;
    bool mDelete;            // do we own mSurf?
    bool mLockedForHitTest;  // was BeginRender a read-only hit test?
};
// Wrap an nme Surface's pixel buffer in a non-owning 32-bit SDL_Surface.
// When the Surface's red/blue channel order disagrees with the detected
// screen order (gC0IsRed), the red and blue masks are XOR-swapped so SDL
// interprets the channels correctly.
SDL_Surface *SurfaceToSDL(Surface *inSurface)
{
    int swap = (gC0IsRed!=(bool)(inSurface->Format()&pfSwapRB)) ? 0xff00ff : 0;
    return SDL_CreateRGBSurfaceFrom((void *)inSurface->Row(0),
             inSurface->Width(), inSurface->Height(),
             32, inSurface->Width()*4,
             0x00ff0000^swap, 0x0000ff00,
             0x000000ff^swap, 0xff000000 );
}
#ifdef SDL_IMAGE
// Load an image file via SDL_image. Returns a new SDLSurf holding one
// reference for the caller, or null on failure. On Windows the wide-char
// filename is converted to the locale's multi-byte encoding for IMG_Load.
Surface *Surface::Load(const OSChar *inFilename)
{
#ifdef HX_WINDOWS
   // Query the converted length first so the buffer is exactly sized and
   // NUL-terminated (the previous code neither terminated the string nor
   // freed the buffer).
   size_t needed = wcstombs(NULL, inFilename, 0);
   if (needed == (size_t)-1)
      return 0;   // filename not representable in the current locale
   char *filename = new char[needed + 1];
   wcstombs(filename, inFilename, needed + 1);
   filename[needed] = '\0';
   SDL_Surface *img = IMG_Load(filename);
   delete [] filename;
#else
   SDL_Surface *img = IMG_Load(inFilename);
#endif
   if (img != NULL)
   {
      Surface *result = new SDLSurf(img, true);
      result->IncRef();
      return result;
   }
   return 0;
}
// Decoding an image from an in-memory buffer is not implemented in this
// backend; always returns null. (SDL_image's IMG_Load_RW could back this
// if byte-array loading is ever needed here.)
Surface *Surface::LoadFromBytes(const uint8 *inBytes,int inLen)
{
    return 0;
}
// Image encoding (PNG/JPEG) is not implemented in this backend; outBytes
// is left untouched and failure is always reported.
bool Surface::Encode( ByteArray *outBytes,bool inPNG,double inQuality)
{
    // Use a proper bool literal rather than the int 0 for a bool return.
    return false;
}
#endif
// Build a 32x32 monochrome SDL cursor from an XPM-style character map.
// Per SDL_CreateCursor's data/mask contract: 'X' = black pixel,
// '.' = white pixel, ' ' = transparent. Bits are packed MSB-first,
// 4 bytes per row. (inHotX,inHotY) is the cursor hotspot.
SDL_Cursor *CreateCursor(const char *image[],int inHotX,int inHotY)
{
    int i, row, col;
    Uint8 data[4*32];
    Uint8 mask[4*32];

    i = -1;
    for ( row=0; row<32; ++row ) {
        for ( col=0; col<32; ++col ) {
            if ( col % 8 ) {
                // Continue packing into the current byte.
                data[i] <<= 1;
                mask[i] <<= 1;
            } else {
                // Every 8th column starts a fresh byte (col 0 bumps i to 0).
                ++i;
                data[i] = mask[i] = 0;
            }
            switch (image[row][col]) {
                case 'X':
                    data[i] |= 0x01;
                    mask[i] |= 0x01;
                    break;
                case '.':
                    mask[i] |= 0x01;
                    break;
                case ' ':
                    break;
            }
        }
    }
    return SDL_CreateCursor(data, mask, 32, 32, inHotX, inHotY);
}
SDL_Cursor *sDefaultCursor = 0;
SDL_Cursor *sTextCursor = 0;
SDL_Cursor *sHandCursor = 0;
// Stage implementation on top of a single SDL 1.2 video surface.
// Owns either a hardware (OpenGL) context or a software SDLSurf as the
// primary render surface, and manages cursor, fullscreen switching and
// (on touch platforms) multi-touch bookkeeping.
class SDLStage : public Stage
{
public:
    SDLStage(SDL_Surface *inSurface,uint32 inFlags,bool inIsOpenGL,
             int inWidth, int inHeight)
    {
        mWidth = inWidth;
        mHeight = inHeight;
        mIsOpenGL = inIsOpenGL;
        mSDLSurface = inSurface;
        mFlags = inFlags;
        mShowCursor = true;
        mLockCursor = false;
        mCurrentCursor = curPointer;

        mIsFullscreen = (mFlags & SDL_FULLSCREEN);
        if (mIsFullscreen)
            displayState = sdsFullscreenInteractive;

        if (mIsOpenGL)
        {
            mOpenGLContext = HardwareContext::CreateOpenGL(0, 0, sgIsOGL2);
            mOpenGLContext->IncRef();
            mOpenGLContext->SetWindowSize(inSurface->w, inSurface->h);
            mPrimarySurface = new HardwareSurface(mOpenGLContext);
        }
        else
        {
            mOpenGLContext = 0;
            // inIsOpenGL is false here, so the SDLSurf does not own the
            // surface -- this stage frees it in its own destructor.
            mPrimarySurface = new SDLSurf(inSurface,inIsOpenGL);
        }
        mPrimarySurface->IncRef();

#if defined(WEBOS) || defined(BLACKBERRY)
        mMultiTouch = true;
#else
        mMultiTouch = false;
#endif
        mSingleTouchID = NO_TOUCH;
        mDX = 0;
        mDY = 0;

        // Click detection
        mDownX = 0;
        mDownY = 0;
    }

    ~SDLStage()
    {
        // The SDL surface is only owned in the software path; the GL path
        // owns a hardware context instead.
        if (!mIsOpenGL)
            SDL_FreeSurface(mSDLSurface);
        else
            mOpenGLContext->DecRef();
        mPrimarySurface->DecRef();
    }

    // React to a window resize: rebuild the video surface (and, for GL,
    // the context) at the new size.
    void Resize(int inWidth,int inHeight)
    {
#ifdef HX_WINDOWS
        if (mIsOpenGL)
        {
            // Little hack to help windows
            mSDLSurface->w = inWidth;
            mSDLSurface->h = inHeight;
            mOpenGLContext->SetWindowSize(inWidth,inHeight);
        }
        else
#endif
        {
            // Calling this recreates the gl context and we loose all our textures and
            // display lists. So Work around it.
            gTextureContextVersion++;

            mSDLSurface = SDL_SetVideoMode(inWidth, inHeight, 32, mFlags);

            if (mIsOpenGL)
            {
#ifdef RASPBERRYPI
                SDL_SysWMinfo sysInfo;
                SDL_VERSION(&sysInfo.version);
                if(SDL_GetWMInfo(&sysInfo)>0)
                {
                    void *window = (void *)(size_t)sysInfo.info.x11.window;
                    nmeEGLResize(window, inWidth, inHeight);
                }
#endif
                //nme_resize_id ++;
                mOpenGLContext->DecRef();
                mOpenGLContext = HardwareContext::CreateOpenGL(0, 0, sgIsOGL2);
                mOpenGLContext->SetWindowSize(inWidth, inHeight);
                mOpenGLContext->IncRef();

                mPrimarySurface->DecRef();
                mPrimarySurface = new HardwareSurface(mOpenGLContext);
            }
            else
            {
                mPrimarySurface->DecRef();
                mPrimarySurface = new SDLSurf(mSDLSurface,mIsOpenGL);
            }
            mPrimarySurface->IncRef();
        }
    }

    // Toggle windowed/fullscreen, recreating the video surface at the
    // desktop resolution (fullscreen) or the remembered window size.
    void SetFullscreen(bool inFullscreen)
    {
#if RASPBERRYPI
        // GL on the Pi renders through EGL; mode switching is skipped.
        if (mIsOpenGL)
            return;
#endif
        if (inFullscreen != mIsFullscreen)
        {
            mIsFullscreen = inFullscreen;
            //printf("SetFullscreen %d\n",inFullscreen);

            // Calling this recreates the gl context and we loose all our textures and
            // display lists. So Work around it.
            gTextureContextVersion++;

            int w = mIsFullscreen ? sgDesktopWidth : mWidth;
            int h = mIsFullscreen ? sgDesktopHeight : mHeight;
            if (mIsFullscreen)
                mFlags |= SDL_FULLSCREEN;
            else
                mFlags &= ~SDL_FULLSCREEN;
            //printf("Set %dx%d %d\n", w,h,mFlags & SDL_FULLSCREEN);

            mSDLSurface = SDL_SetVideoMode(w, h, 32, mFlags);
            if (!mSDLSurface && (mFlags & SDL_FULLSCREEN) )
            {
                // Fall back to windowed mode if the fullscreen switch failed.
                // printf("Failed to set fullscreen, returning to windowed....\n");
                mSDLSurface = SDL_SetVideoMode(w, h, 32, (mFlags & ~SDL_FULLSCREEN) );
            }

            // Use the size SDL actually gave us, not the size we asked for.
            w = mSDLSurface->w;
            h = mSDLSurface->h;

            if (mIsOpenGL)
            {
                //nme_resize_id ++;
                mOpenGLContext->DecRef();
                mOpenGLContext = HardwareContext::CreateOpenGL(0, 0, sgIsOGL2);
                mOpenGLContext->SetWindowSize(w, h);
                mOpenGLContext->IncRef();

                mPrimarySurface->DecRef();
                mPrimarySurface = new HardwareSurface(mOpenGLContext);
            }
            else
            {
                mPrimarySurface->DecRef();
                mPrimarySurface = new SDLSurf(mSDLSurface,mIsOpenGL);
            }
            mPrimarySurface->IncRef();

            // Tell the application about the new size.
            Event resize(etResize,w,h);
            ProcessEvent(resize);
        }
    }

    bool isOpenGL() const { return mOpenGLContext; }

    // Apply platform-specific fix-ups to an event (quit shortcuts,
    // touch translation) before delivering it via HandleEvent.
    void ProcessEvent(Event &inEvent)
    {
#ifdef HX_MACOS
        // Standard Mac shortcuts: Cmd-Q / Cmd-W quit, Cmd-M minimises.
        if (inEvent.type == etKeyUp && (inEvent.flags & efCommandDown))
        {
            switch (inEvent.code)
            {
                case SDLK_q:
                case SDLK_w:
                    inEvent.type = etQuit;
                    break;
                case SDLK_m:
                    SDL_WM_IconifyWindow();
                    return;
            }
        }
#endif
#if defined(HX_WINDOWS) || defined(HX_LINUX)
        // Alt-F4 quits.
        if (inEvent.type == etKeyUp && (inEvent.flags & efAltDown) && inEvent.value == keyF4)
        {
            inEvent.type = etQuit;
        }
#endif

#if defined(WEBOS) || defined(BLACKBERRY)
        // Touch platforms: translate mouse events into touch events and
        // track which finger counts as the primary one.
        if (inEvent.type == etMouseMove || inEvent.type == etMouseDown || inEvent.type == etMouseUp) {
            if (mSingleTouchID == NO_TOUCH || inEvent.value == mSingleTouchID || !mMultiTouch)
                inEvent.flags |= efPrimaryTouch;

            if (mMultiTouch) {
                switch(inEvent.type)
                {
                    case etMouseDown: inEvent.type = etTouchBegin; break;
                    case etMouseUp: inEvent.type = etTouchEnd; break;
                    case etMouseMove: inEvent.type = etTouchMove; break;
                }

                if (inEvent.type == etTouchBegin) {
                    mDownX = inEvent.x;
                    mDownY = inEvent.y;
                }

                if (inEvent.type == etTouchEnd) {
                    if (mSingleTouchID==inEvent.value)
                        mSingleTouchID = NO_TOUCH;
                }
            }
        }
#endif

        HandleEvent(inEvent);
    }

    // Present the current frame: GL buffer swap for hardware rendering,
    // SDL_Flip for the software surface.
    void Flip()
    {
        if (mIsOpenGL)
        {
#ifdef RASPBERRYPI
            nmeEGLSwapBuffers();
#else
            SDL_GL_SwapBuffers();
#endif
        }
        else
        {
            SDL_Flip( mSDLSurface );
        }
    }
    void GetMouse()
    {
    }

    // Select the mouse cursor; touch/handheld targets never show one.
    void SetCursor(Cursor inCursor)
    {
#if defined(WEBOS) || defined(BLACKBERRY) || defined(EMSCRIPTEN)
        SDL_ShowCursor(false);
        return;
#endif
        // Remember the system cursor the first time through so curPointer
        // can restore it later.
        if (sDefaultCursor==0)
            sDefaultCursor = SDL_GetCursor();

        mCurrentCursor = inCursor;
        if (inCursor==curNone || !mShowCursor)
            SDL_ShowCursor(false);
        else
        {
            SDL_ShowCursor(true);

            if (inCursor==curPointer)
                SDL_SetCursor(sDefaultCursor);
            else if (inCursor==curHand)
            {
                // Cursors are built lazily and cached for the process lifetime.
                if (!sHandCursor)
                    sHandCursor = CreateCursor(sHandCursorData,13,1);
                SDL_SetCursor(sHandCursor);
            }
            else
            {
                // TODO: Rotated
                if (sTextCursor==0)
                    sTextCursor = CreateCursor(sTextCursorData,2,13);
                SDL_SetCursor(sTextCursor);
            }
        }
    }

    void ShowCursor(bool inShow)
    {
        if (inShow!=mShowCursor)
        {
            mShowCursor = inShow;
            SetCursor( mCurrentCursor );
        }
    }

    // Grab/release input so the cursor cannot leave the window.
    void ConstrainCursorToWindowFrame(bool inLock) {
        if (inLock != mLockCursor) {
            mLockCursor = inLock;
            SDL_WM_GrabInput( inLock ? SDL_GRAB_ON : SDL_GRAB_OFF );
        }
    }

    //Note that this fires a mouse event, see the SDL_WarpMouse docs
    void SetCursorPositionInWindow(int inX, int inY) {
        SDL_WarpMouse( inX, inY );
    }

    // Show/hide the on-screen keyboard on platforms that have one.
    void EnablePopupKeyboard (bool enabled) {
#ifdef WEBOS
        // The virtual keyboard API needs PDK 300 or newer.
        if (PDL_GetPDKVersion () >= 300) {
            if (enabled) {
                PDL_SetKeyboardState (PDL_TRUE);
            } else {
                PDL_SetKeyboardState (PDL_FALSE);
            }
        }
#endif
#ifdef BLACKBERRY
        if (enabled) {
            virtualkeyboard_show();
        } else {
            virtualkeyboard_hide();
        }
#endif
    }

    bool getMultitouchSupported() {
#if defined(WEBOS) || defined(BLACKBERRY)
        return true;
#else
        return false;
#endif
    }
    void setMultitouchActive(bool inActive) { mMultiTouch = inActive; }
    bool getMultitouchActive() {
#if defined(WEBOS) || defined(BLACKBERRY)
        return mMultiTouch;
#else
        return false;
#endif
    }

    // Touch bookkeeping (only meaningful on multi-touch targets).
    bool mMultiTouch;
    int mSingleTouchID;   // finger id treated as primary, or NO_TOUCH
    double mDX;
    double mDY;
    double mDownX;        // position of the last touch-begin (click detection)
    double mDownY;

    Surface *GetPrimarySurface()
    {
        return mPrimarySurface;
    }

    HardwareContext *mOpenGLContext;  // null in the software path
    SDL_Surface *mSDLSurface;
    Surface *mPrimarySurface;
    double mFrameRate;                // NOTE(review): never written in this file
    bool mIsOpenGL;
    Cursor mCurrentCursor;
    bool mShowCursor;
    bool mLockCursor;
    bool mIsFullscreen;
    unsigned int mFlags;
    int mWidth;                       // last windowed size, restored on
    int mHeight;                      // leaving fullscreen
};
// Thin Frame wrapper around the single SDLStage. SDL 1.2 supports only
// one window, so events and resizes are simply forwarded to the stage.
class SDLFrame : public Frame
{
public:
    SDLFrame(SDL_Surface *inSurface, uint32 inFlags, bool inIsOpenGL,int inW,int inH)
    {
        mFlags = inFlags;
        mIsOpenGL = inIsOpenGL;
        mStage = new SDLStage(inSurface,mFlags,inIsOpenGL,inW,inH);
        mStage->IncRef();
        // SetTimer(mHandle,timerFrame, 10,0);
    }
    ~SDLFrame()
    {
        mStage->DecRef();
    }

    // Forward an nme event to the stage.
    void ProcessEvent(Event &inEvent)
    {
        mStage->ProcessEvent(inEvent);
    }
    void Resize(int inWidth,int inHeight)
    {
        mStage->Resize(inWidth,inHeight);
    }

    // --- Frame Interface ----------------------------------------------------
    // Title and icon are fixed at window-creation time in this backend,
    // so these are intentionally no-ops.
    void SetTitle()
    {
    }
    void SetIcon()
    {
    }
    Stage *GetStage()
    {
        return mStage;
    }

    SDLStage *mStage;
    bool mIsOpenGL;
    uint32 mFlags;

    // Presumably the last accelerometer reading; written elsewhere --
    // confirm against the platform sensor code.
    double mAccX;
    double mAccY;
    double mAccZ;
};
// --- When using the simple window class -----------------------------------------------
extern "C" void MacBoot( /*void (*)()*/ );
SDLFrame *sgSDLFrame = 0;
#ifndef EMSCRIPTEN
SDL_Joystick *sgJoystick = 0;
#endif
// Fold the keyboard-modifier and mouse-button state into the event flag
// word. Pass inState to use an explicit SDL modifier mask; with the
// default (-1) the live modifier state is queried from SDL.
void AddModStates(int &ioFlags,int inState = -1)
{
    int mods = inState;
    if (mods == -1)
        mods = SDL_GetModState();

    if (mods & KMOD_SHIFT)
        ioFlags |= efShiftDown;
    if (mods & KMOD_CTRL)
        ioFlags |= efCtrlDown;
    if (mods & KMOD_ALT)
        ioFlags |= efAltDown;
    if (mods & KMOD_META)
        ioFlags |= efCommandDown;

    int buttons = SDL_GetMouseState(0,0);
    if (buttons & SDL_BUTTON(1))
        ioFlags |= efLeftDown;
    if (buttons & SDL_BUTTON(2))
        ioFlags |= efMiddleDown;
    if (buttons & SDL_BUTTON(3))
        ioFlags |= efRightDown;

    // Desktop events always count as the primary "touch" and never
    // synthesize a native click.
    ioFlags |= efPrimaryTouch;
    ioFlags |= efNoNativeClick;
}
// Shorthand: map an SDLK_ name onto the identically-named nme key constant.
#define SDL_TRANS(x) case SDLK_##x: return key##x;

// Translate an SDL 1.2 keysym into the Flash-style key code used by nme
// events. outRight is set when the key is the right-hand variant of a
// modifier so the caller can add efLocationRight. Keys with no mapping
// fall through and are returned unchanged.
int SDLKeyToFlash(int inKey,bool &outRight)
{
    outRight = (inKey==SDLK_RSHIFT || inKey==SDLK_RCTRL ||
                inKey==SDLK_RALT || inKey==SDLK_RMETA || inKey==SDLK_RSUPER);

    // Letters already coincide with the target key codes.
    if (inKey>=keyA && inKey<=keyZ)
        return inKey;
    // Contiguous ranges: digits, keypad digits and function keys.
    if (inKey>=SDLK_0 && inKey<=SDLK_9)
        return inKey - SDLK_0 + keyNUMBER_0;
    if (inKey>=SDLK_KP0 && inKey<=SDLK_KP9)
        return inKey - SDLK_KP0 + keyNUMPAD_0;
    if (inKey>=SDLK_F1 && inKey<=SDLK_F15)
        return inKey - SDLK_F1 + keyF1;

    switch(inKey)
    {
        // Left/right modifier variants collapse to one logical key; the
        // side is reported separately through outRight.
        case SDLK_RALT:
        case SDLK_LALT:
            return keyALTERNATE;
        case SDLK_RSHIFT:
        case SDLK_LSHIFT:
            return keySHIFT;
        case SDLK_RCTRL:
        case SDLK_LCTRL:
            return keyCONTROL;
        case SDLK_LMETA:
        case SDLK_RMETA:
            return keyCOMMAND;

        case SDLK_CAPSLOCK: return keyCAPS_LOCK;
        case SDLK_PAGEDOWN: return keyPAGE_DOWN;
        case SDLK_PAGEUP: return keyPAGE_UP;
        case SDLK_EQUALS: return keyEQUAL;
        case SDLK_RETURN:
        case SDLK_KP_ENTER:
            return keyENTER;

        // Keys whose SDLK_ and key names match exactly.
        SDL_TRANS(BACKQUOTE)
        SDL_TRANS(BACKSLASH)
        SDL_TRANS(BACKSPACE)
        SDL_TRANS(COMMA)
        SDL_TRANS(DELETE)
        SDL_TRANS(DOWN)
        SDL_TRANS(END)
        SDL_TRANS(ESCAPE)
        SDL_TRANS(HOME)
        SDL_TRANS(INSERT)
        SDL_TRANS(LEFT)
        SDL_TRANS(LEFTBRACKET)
        SDL_TRANS(MINUS)
        SDL_TRANS(PERIOD)
        SDL_TRANS(QUOTE)
        SDL_TRANS(RIGHT)
        SDL_TRANS(RIGHTBRACKET)
        SDL_TRANS(SEMICOLON)
        SDL_TRANS(SLASH)
        SDL_TRANS(SPACE)
        SDL_TRANS(TAB)
        SDL_TRANS(UP)
    }

    return inKey;
}
// Unicode text for a key is only reported on key-down; remember it per
// scancode so the matching key-up can carry the same character.
std::map<int,wchar_t> sLastUnicode;

// Translate a raw SDL 1.2 event into an nme Event and dispatch it to the
// single application frame (sgSDLFrame).
void ProcessEvent(SDL_Event &inEvent)
{
    switch(inEvent.type)
    {
        case SDL_QUIT:
        {
            Event close(etQuit);
            sgSDLFrame->ProcessEvent(close);
            break;
        }
        case SDL_ACTIVEEVENT:
        {
            // One SDL event can carry both focus and app-active changes;
            // report each aspect separately.
            if (inEvent.active.state & SDL_APPINPUTFOCUS)
            {
                Event activate( inEvent.active.gain ? etGotInputFocus : etLostInputFocus );
                sgSDLFrame->ProcessEvent(activate);
            }

            if (inEvent.active.state & SDL_APPACTIVE)
            {
                Event activate( inEvent.active.gain ? etActivate : etDeactivate );
                sgSDLFrame->ProcessEvent(activate);
            }
            break;
        }
        case SDL_MOUSEMOTION:
        {
            Event mouse(etMouseMove,inEvent.motion.x,inEvent.motion.y);
#if defined(WEBOS) || defined(BLACKBERRY)
            // On touch devices 'which' identifies the finger.
            mouse.value = inEvent.motion.which;
            mouse.flags |= efLeftDown;
#else
            AddModStates(mouse.flags);
#endif
            sgSDLFrame->ProcessEvent(mouse);
            break;
        }
        case SDL_MOUSEBUTTONDOWN:
        {
            Event mouse(etMouseDown,inEvent.button.x,inEvent.button.y,inEvent.button.button-1);
#if defined(WEBOS) || defined(BLACKBERRY)
            mouse.value = inEvent.motion.which;
            mouse.flags |= efLeftDown;
#else
            AddModStates(mouse.flags);
#endif
            sgSDLFrame->ProcessEvent(mouse);
            break;
        }
        case SDL_MOUSEBUTTONUP:
        {
            Event mouse(etMouseUp,inEvent.button.x,inEvent.button.y,inEvent.button.button-1);
#if defined(WEBOS) || defined(BLACKBERRY)
            mouse.value = inEvent.motion.which;
#else
            AddModStates(mouse.flags);
#endif
            sgSDLFrame->ProcessEvent(mouse);
            break;
        }
        case SDL_KEYDOWN:
        case SDL_KEYUP:
        {
            Event key(inEvent.type==SDL_KEYDOWN ? etKeyDown : etKeyUp );
            bool right;
            key.value = SDLKeyToFlash(inEvent.key.keysym.sym,right);
            if (inEvent.type==SDL_KEYDOWN)
            {
                // Backspace carries no printable unicode; keep the key code.
                key.code = key.value==keyBACKSPACE ? keyBACKSPACE : inEvent.key.keysym.unicode;
                sLastUnicode[inEvent.key.keysym.scancode] = key.code;
            }
            else
                // SDL does not provide unicode on key up, so remember it,
                // keyed by scancode
                key.code = sLastUnicode[inEvent.key.keysym.scancode];

            AddModStates(key.flags,inEvent.key.keysym.mod);
            if (right)
                key.flags |= efLocationRight;
            sgSDLFrame->ProcessEvent(key);
            break;
        }
        case SDL_VIDEOEXPOSE:
        {
            // Window needs repainting -- a poll event triggers a render.
            Event poll(etPoll);
            sgSDLFrame->ProcessEvent(poll);
            break;
        }
        case SDL_VIDEORESIZE:
        {
            // Resize the stage's surfaces first, then notify the app.
            Event resize(etResize,inEvent.resize.w,inEvent.resize.h);
            sgSDLFrame->Resize(inEvent.resize.w,inEvent.resize.h);
            sgSDLFrame->ProcessEvent(resize);
            break;
        }
        case SDL_JOYAXISMOTION:
        {
            Event joystick(etJoyAxisMove);
            joystick.id = inEvent.jaxis.which;
            joystick.code = inEvent.jaxis.axis;
            joystick.value = inEvent.jaxis.value;
            sgSDLFrame->ProcessEvent(joystick);
            break;
        }
        case SDL_JOYBALLMOTION:
        {
            Event joystick(etJoyBallMove, inEvent.jball.xrel, inEvent.jball.yrel);
            joystick.id = inEvent.jball.which;
            joystick.code = inEvent.jball.ball;
            sgSDLFrame->ProcessEvent(joystick);
            break;
        }
        case SDL_JOYBUTTONDOWN:
        {
            Event joystick(etJoyButtonDown);
            joystick.id = inEvent.jbutton.which;
            joystick.code = inEvent.jbutton.button;
            sgSDLFrame->ProcessEvent(joystick);
            break;
        }
        case SDL_JOYBUTTONUP:
        {
            Event joystick(etJoyButtonUp);
            joystick.id = inEvent.jbutton.which;
            joystick.code = inEvent.jbutton.button;
            sgSDLFrame->ProcessEvent(joystick);
            break;
        }
        case SDL_JOYHATMOTION:
        {
            Event joystick(etJoyHatMove);
            joystick.id = inEvent.jhat.which;
            joystick.code = inEvent.jhat.hat;
            joystick.value = inEvent.jhat.value;
            sgSDLFrame->ProcessEvent(joystick);
            break;
        }
#ifdef BLACKBERRY
        case SDL_SYSWMEVENT:
        {
            // Platform (BPS) notifications are tunnelled through SDL's
            // system window-manager event.
            Event syswm(etSysWM);
            syswm.value = (int)inEvent.syswm.msg->event;
            sgSDLFrame->ProcessEvent(syswm);
        }
#endif
    }
}
#ifdef EMSCRIPTEN
// Emscripten main-loop callback: drain all pending SDL events, then
// deliver a poll event so the application can run a frame.
void loop () {
    SDL_Event event;
    while (SDL_PollEvent(&event)) {
        ProcessEvent (event);
        // if (sgDead) break;
        event.type = -1;
    }
    Event poll(etPoll);
    sgSDLFrame->ProcessEvent(poll);
}
#endif
void CreateMainFrame(FrameCreationCallback inOnFrame,int inWidth,int inHeight,
unsigned int inFlags, const char *inTitle, Surface *inIcon )
{
#ifdef HX_MACOS
MacBoot();
#endif
#ifdef WEBOS
openlog (gPackage.c_str(), 0, LOG_USER);
#endif
#ifdef HX_WINDOWS
//ShowWindow (GetConsoleWindow (), SW_MINIMIZE);
#endif
unsigned int sdl_flags = 0;
bool fullscreen = (inFlags & wfFullScreen) != 0;
bool opengl = (inFlags & wfHardware) != 0;
bool resizable = (inFlags & wfResizable) != 0;
bool borderless = (inFlags & wfBorderless) != 0;
sgShaderFlags = (inFlags & (wfAllowShaders|wfRequireShaders) );
Rect r(100,100,inWidth,inHeight);
int err = initSDL ();// SDL_Init( init_flags );
if ( err == -1 )
{
fprintf(stderr,"Could not initialize SDL : %s\n", SDL_GetError());
inOnFrame(0);
// SDL_GetError()
return;
}
#ifdef BLACKBERRY
virtualkeyboard_request_events(0);
#endif
SDL_EnableUNICODE(1);
SDL_EnableKeyRepeat(500,30);
gSDLIsInit = true;
#ifdef NME_MIXER
#ifdef WEBOS
int chunksize = 256;
if (PDL_GetPDKVersion () == 100 || PDL_GetHardwareID () < 300)
{
// use a larger chunksize for older devices
chunksize = 1024;
}
#elif BLACKBERRY
int chunksize = 512;
#elif HX_WINDOWS
int chunksize = 2048;
#else
int chunksize = 4096;
#endif
int frequency = 44100;
//int frequency = MIX_DEFAULT_FREQUENCY //22050
// The default frequency would have less latency, but is incompatible with the average MP3 file
if ( Mix_OpenAudio(frequency, MIX_DEFAULT_FORMAT, MIX_DEFAULT_CHANNELS, chunksize)!= 0 )
{
fprintf(stderr,"Could not open sound: %s\n", Mix_GetError());
gSDLIsInit = false;
}
#endif
const SDL_VideoInfo *info = SDL_GetVideoInfo();
sgDesktopWidth = info->current_w;
sgDesktopHeight = info->current_h;
#ifdef RASPBERRYPI
sdl_flags = SDL_SWSURFACE;
if (opengl)
fullscreen = true;
#else
sdl_flags = SDL_HWSURFACE;
#endif
if ( resizable )
sdl_flags |= SDL_RESIZABLE;
if ( borderless )
sdl_flags |= SDL_NOFRAME;
if ( fullscreen )
{
sdl_flags |= SDL_FULLSCREEN;
}
int use_w = fullscreen ? 0 : inWidth;
int use_h = fullscreen ? 0 : inHeight;
#if defined(IPHONE) || defined(BLACKBERRY) || defined(EMSCRIPTEN)
sdl_flags |= SDL_NOFRAME;
#else
if (inIcon)
{
SDL_Surface *sdl = SurfaceToSDL(inIcon);
SDL_WM_SetIcon( sdl, NULL );
}
#endif
#if defined (HX_WINDOWS) || defined (HX_LINUX)
SDL_WM_SetCaption( inTitle, 0 );
#endif
SDL_Surface* screen = 0;
bool is_opengl = false;
sgIsOGL2 = false;
#ifdef RASPBERRYPI
bool nmeEgl = true;
#else
bool nmeEgl = false;
#endif
if (opengl && !nmeEgl)
{
int aa_tries = (inFlags & wfHW_AA) ? ( (inFlags & wfHW_AA_HIRES) ? 2 : 1 ) : 0;
//int bpp = info->vfmt->BitsPerPixel;
int startingPass = 0;
// Try for 24:8 depth:stencil
if (inFlags & wfStencilBuffer)
startingPass = 1;
#if defined (WEBOS) || defined (BLACKBERRY) || defined(EMSCRIPTEN)
// Start at 16 bits...
startingPass = 2;
#endif
// No need to loop over depth
if (!(inFlags & wfDepthBuffer))
startingPass = 2;
int oglLevelPasses = 1;
#if !defined(NME_FORCE_GLES1) && (defined(WEBOS) || defined(BLACKBERRY) || defined(EMSCRIPTEN))
// Try 2 then 1 ?
if ( (inFlags & wfAllowShaders) && !(inFlags & wfRequireShaders) )
oglLevelPasses = 2;
#endif
// Find config...
for(int oglPass = 0; oglPass< oglLevelPasses && !is_opengl; oglPass++)
{
#ifdef NME_FORCE_GLES1
int level = 1;
#else
int level = (inFlags & wfRequireShaders) ? 2 : (inFlags & wfAllowShaders) ? 2-oglPass : 1;
#endif
for(int depthPass=startingPass;depthPass<3 && !is_opengl;depthPass++)
{
/* Initialize the display */
for(int aa_pass = aa_tries; aa_pass>=0 && !is_opengl; --aa_pass)
{
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8 );
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE,8 );
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8 );
#if defined(WEBOS) || defined(BLACKBERRY) || defined(EMSCRIPTEN)
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, level);
#endif
// try 32 24 or 16 bit depth...
if (inFlags & wfDepthBuffer)
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 32 - depthPass*8 );
if (inFlags & wfStencilBuffer)
SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8 );
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
if (aa_tries > 0)
{
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, aa_pass>0);
SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 1<<aa_pass );
}
#ifndef EMSCRIPTEN
if ( inFlags & wfVSync )
{
SDL_GL_SetAttribute(SDL_GL_SWAP_CONTROL, 1);
}
#endif
sdl_flags |= SDL_OPENGL;
if (!(screen = SDL_SetVideoMode( use_w, use_h, 32, sdl_flags)))
{
if (depthPass==2 && aa_pass==0 && oglPass==oglLevelPasses-1)
{
sdl_flags &= ~SDL_OPENGL;
fprintf(stderr, "Couldn't set OpenGL mode32: %s\n", SDL_GetError());
}
}
else
{
is_opengl = true;
#if defined(WEBOS) || defined(BLACKBERRY) || defined(EMSCRIPTEN)
sgIsOGL2 = level==2;
#else
// TODO: check extensions support
sgIsOGL2 = (inFlags & (wfAllowShaders | wfRequireShaders) );
#endif
break;
}
}
}
}
}
if (!screen)
{
if (!opengl || !nmeEgl)
sdl_flags |= SDL_DOUBLEBUF;
screen = SDL_SetVideoMode( use_w, use_h, 32, sdl_flags );
if (!screen)
{
fprintf(stderr, "Couldn't set video mode: %s\n", SDL_GetError());
inOnFrame(0);
gSDLIsInit = false;
return;
}
}
#ifdef RASPBERRYPI
if (opengl)
{
sgIsOGL2 = (inFlags & (wfAllowShaders | wfRequireShaders) );
use_w = screen->w;
use_h = screen->h;
bool ok = nmeEGLCreate( 0, use_w, use_h,
sgIsOGL2,
(inFlags & wfDepthBuffer) ? 16 : 0,
(inFlags & wfStencilBuffer) ? 8 : 0,
0 );
if (ok)
is_opengl = true;
}
#endif
HintColourOrder( is_opengl || screen->format->Rmask==0xff );
#ifdef WEBOS
PDL_ScreenTimeoutEnable(PDL_TRUE);
#endif
#ifndef EMSCRIPTEN
int numJoysticks = SDL_NumJoysticks();
if (sgJoystickEnabled && numJoysticks > 0) {
for (int i = 0; i < numJoysticks; i++) {
sgJoystick = SDL_JoystickOpen(i);
}
#ifndef WEBOS
SDL_JoystickEventState(SDL_TRUE);
#endif
}
#endif
sgSDLFrame = new SDLFrame( screen, sdl_flags, is_opengl, inWidth, inHeight );
inOnFrame(sgSDLFrame);
#ifdef EMSCRIPTEN
emscripten_set_main_loop (loop, 0, true);
#else
StartAnimation();
#endif
}
bool sgDead = false;  // set by StopAnimation(); ends StartAnimation's event loop
void SetIcon( const char *path ) {
#ifndef EMSCRIPTEN
initSDL();
SDL_Surface *surf = SDL_LoadBMP(path);
if ( surf != NULL )
SDL_WM_SetIcon( surf, NULL);
#endif
}
// Return a heap-allocated flat list of supported fullscreen resolutions,
// stored as width,height pairs.  The caller owns the returned vector.
QuickVec<int>* CapabilitiesGetScreenResolutions() {
   initSDL ();
   QuickVec<int> *resolutions = new QuickVec<int>();
   // Query available fullscreen/hardware modes.
   SDL_Rect** modes = SDL_ListModes(NULL, SDL_FULLSCREEN|SDL_HWSURFACE);
   // SDL returns 0 when no mode is available and -1 when any resolution is
   // allowed; in both cases there is no discrete list to report, so the
   // vector is returned empty.
   if (modes != (SDL_Rect**)0 && modes != (SDL_Rect**)-1)
   {
      for (int i = 0; modes[i]; ++i)
      {
         resolutions->push_back( modes[i]->w );
         resolutions->push_back( modes[i]->h );
      }
   }
   return resolutions;
}
#ifndef BLACKBERRY
// Desktop width in pixels, cached from SDL_GetVideoInfo() during init
// (the live SDL_ListModes query below was retired in favour of the cache).
double CapabilitiesGetScreenResolutionX() {
   initSDL ();
   return sgDesktopWidth;
   /*SDL_Rect** modes = SDL_ListModes(NULL, SDL_FULLSCREEN);
   if (modes == (SDL_Rect**)0 || modes == (SDL_Rect**)-1) {
      const SDL_VideoInfo* videoInfo = SDL_GetVideoInfo();
      return videoInfo->current_w;
   }
   return modes[0]->w;*/
}
// Desktop height in pixels, cached from SDL_GetVideoInfo() during init
// (the live SDL_ListModes query below was retired in favour of the cache).
double CapabilitiesGetScreenResolutionY() {
   initSDL ();
   return sgDesktopHeight;
   /*SDL_Rect** modes = SDL_ListModes(NULL, SDL_FULLSCREEN);
   if (modes == (SDL_Rect**)0 || modes == (SDL_Rect**)-1) {
      const SDL_VideoInfo* videoInfo = SDL_GetVideoInfo();
      return videoInfo->current_h;
   }
   return modes[0]->h;*/
}
#endif
// Pause/resume are intentionally no-ops on this backend.
void PauseAnimation() {}
void ResumeAnimation() {}
// Shut down platform services (audio mixer, webOS logging/PDL) and set
// sgDead so StartAnimation's event loop exits on its next iteration.
void StopAnimation()
{
#ifdef NME_MIXER
   Mix_CloseAudio();
#endif
#ifdef WEBOS
   closelog();
   PDL_Quit();
#endif
   sgDead = true;
}
// One-shot wake-up timer scheduled by StartAnimation; OnTimer and the
// event loop clear both fields when the timer fires or is cancelled.
static SDL_TimerID sgTimerID = 0;
bool sgTimerActive = false;
// SDL timer callback: wake the (possibly blocked) event loop by pushing a
// user event, so the per-frame check in StartAnimation runs.
Uint32 OnTimer(Uint32 interval, void *)
{
   SDL_UserEvent wake;
   wake.type = SDL_USEREVENT;
   wake.code = 0;
   wake.data1 = NULL;
   wake.data2 = NULL;
   SDL_Event event;
   event.type = SDL_USEREVENT;
   event.user = wake;
   // This one-shot timer has fired; clear the bookkeeping before queueing
   // the wake-up so the loop does not try to cancel a dead timer.
   sgTimerActive = false;
   sgTimerID = 0;
   SDL_PushEvent(&event);
   // Returning 0 tells SDL not to reschedule the timer.
   return 0;
}
#ifdef WEBOS
// webOS exposes the accelerometer as joystick axes 0..2.  Each axis is a
// signed 16-bit sample; scaling by 1/32768 (an exact power of two, so
// numerically identical to dividing by 32768.0) maps it into [-1, 1).
bool GetAcceleration(double &outX, double &outY, double &outZ)
{
   const double kAxisScale = 1.0 / 32768.0;
   outX = SDL_JoystickGetAxis(sgJoystick, 0) * kAxisScale;
   outY = SDL_JoystickGetAxis(sgJoystick, 1) * kAxisScale;
   outZ = SDL_JoystickGetAxis(sgJoystick, 2) * kAxisScale;
   return true;
}
#endif
// Fallback when the SDL headers do not provide SDL_NOEVENT.  The original
// definition ("-1;") carried a trailing semicolon, which injected a stray
// empty statement at every use and made the macro unusable inside any
// expression context; the value is parenthesised per macro hygiene.
#ifndef SDL_NOEVENT
#define SDL_NOEVENT (-1)
#endif
// Runs the main event loop until StopAnimation() sets sgDead.
// NOTE(review): the outer while-loops open inside `#ifndef EMSCRIPTEN` and
// their closing braces sit inside a *second* `#ifndef EMSCRIPTEN` region
// below - the braces only balance after preprocessing.  On Emscripten the
// body reduces to a single poll pass, because the browser drives iteration
// via emscripten_set_main_loop (see the caller above).
void StartAnimation()
{
   SDL_Event event;
#ifndef EMSCRIPTEN
   bool firstTime = true;
   while(!sgDead)
   {
      event.type=SDL_NOEVENT;
      // Block until an event arrives; the first pass skips the wait so any
      // pending state is processed immediately.
      while (!sgDead && (firstTime || SDL_WaitEvent(&event)))
      {
         firstTime = false;
         // An event woke us up - cancel any outstanding one-shot timer.
         if (sgTimerActive && sgTimerID)
         {
            SDL_RemoveTimer(sgTimerID);
            sgTimerActive = false;
            sgTimerID = 0;
         }
         ProcessEvent(event);
         if (sgDead) break;
         event.type = SDL_NOEVENT;
#endif
         // Drain everything else currently queued without blocking.
         while (SDL_PollEvent(&event)) {
            ProcessEvent (event);
            if (sgDead) break;
            event.type = -1;
         }
         // Give the stage its per-frame poll tick.
         Event poll(etPoll);
         sgSDLFrame->ProcessEvent(poll);
#ifndef EMSCRIPTEN
         if (sgDead) break;
         // Sleep until the stage next wants to wake: schedule a one-shot
         // timer for longer waits, otherwise ping the queue immediately.
         double next = sgSDLFrame->GetStage()->GetNextWake() - GetTimeStamp();
         if (next > 0.001) {
            int snooze = next*1000.0;
            sgTimerActive = true;
            sgTimerID = SDL_AddTimer(snooze, OnTimer, 0);
         } else {
            OnTimer(0, 0);
         }
      }
   }
   // Loop ended: notify the stage of deactivation and destruction.
   Event deactivate( etDeactivate );
   sgSDLFrame->ProcessEvent(deactivate);
   Event kill(etDestroyHandler);
   sgSDLFrame->ProcessEvent(kill);
   SDL_Quit();
#endif
#if HX_WINDOWS
   done_win32();
#endif
}
/*
Frame *CreateTopLevelWindow(int inWidth,int inHeight,unsigned int inFlags, wchar_t *inTitle, wchar_t *inIcon )
{
return 0;
}
*/
} // end namespace nme
#if 0
if (event.type == SDL_JOYAXISMOTION)
{
alloc_field( evt, val_id( "type" ), alloc_int( et_jaxis ) );
alloc_field( evt, val_id( "axis" ), alloc_int( event.jaxis.axis ) );
alloc_field( evt, val_id( "value" ), alloc_int( event.jaxis.value ) );
alloc_field( evt, val_id( "which" ), alloc_int( event.jaxis.which ) );
return evt;
}
if (event.type == SDL_JOYBUTTONDOWN || event.type == SDL_JOYBUTTONUP)
{
alloc_field( evt, val_id( "type" ), alloc_int( et_jbutton ) );
alloc_field( evt, val_id( "button" ), alloc_int( event.jbutton.button ) );
alloc_field( evt, val_id( "state" ), alloc_int( event.jbutton.state ) );
alloc_field( evt, val_id( "which" ), alloc_int( event.jbutton.which ) );
return evt;
}
if (event.type == SDL_JOYHATMOTION)
{
alloc_field( evt, val_id( "type" ), alloc_int( et_jhat ) );
alloc_field( evt, val_id( "button" ), alloc_int( event.jhat.hat ) );
alloc_field( evt, val_id( "value" ), alloc_int( event.jhat.value ) );
alloc_field( evt, val_id( "which" ), alloc_int( event.jhat.which ) );
return evt;
}
if (event.type == SDL_JOYBALLMOTION)
{
alloc_field( evt, val_id( "type" ), alloc_int( et_jball ) );
alloc_field( evt, val_id( "ball" ), alloc_int( event.jball.ball ) );
alloc_field( evt, val_id( "xrel" ), alloc_int( event.jball.xrel ) );
alloc_field( evt, val_id( "yrel" ), alloc_int( event.jball.yrel ) );
alloc_field( evt, val_id( "which" ), alloc_int( event.jball.which ) );
return evt;
}
if (event.type==SDL_VIDEORESIZE)
{
alloc_field( evt, val_id( "type" ), alloc_int( et_resize ) );
alloc_field( evt, val_id( "width" ), alloc_int( event.resize.w ) );
alloc_field( evt, val_id( "height" ), alloc_int( event.resize.h ) );
return evt;
}
#endif
#if 0
/*
*/
value nme_get_mouse_position()
{
int x,y;
#ifdef SDL13
SDL_GetMouseState(0,&x,&y);
#else
SDL_GetMouseState(&x,&y);
#endif
value pos = alloc_empty_object();
alloc_field( pos, val_id( "x" ), alloc_int( x ) );
alloc_field( pos, val_id( "y" ), alloc_int( y ) );
return pos;
}
#endif
|
/******************************************************************************\
Copyright (c) 2005-2018, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This sample was distributed or derived from the Intel's Media Samples package.
The original version of this sample may be obtained from https://software.intel.com/en-us/intel-media-server-studio
or https://software.intel.com/en-us/media-client-solutions-support.
\**********************************************************************************/
#include "sysmem_allocator.h"
#define MSDK_ALIGN32(X) (((mfxU32)((X)+31)) & (~ (mfxU32)31))
#define ID_BUFFER MFX_MAKEFOURCC('B','U','F','F')
#define ID_FRAME MFX_MAKEFOURCC('F','R','M','E')
#pragma warning(disable : 4100)
// Start with no buffer allocator attached; Init() decides whether to adopt
// one supplied by the application or to create (and own) a default one.
SysMemFrameAllocator::SysMemFrameAllocator()
: m_pBufferAllocator(0), m_bOwnBufferAllocator(false)
{
}
// Close() releases the buffer allocator if this object owns it.
SysMemFrameAllocator::~SysMemFrameAllocator()
{
    Close();
}
// Attach a buffer allocator.  If the application passes parameters they
// must be SysMemAllocatorParams (the allocator inside is borrowed, not
// owned); otherwise a default SysMemBufferAllocator is created and owned.
mfxStatus SysMemFrameAllocator::Init(mfxAllocatorParams *pParams)
{
    if (pParams)
    {
        SysMemAllocatorParams *sysParams =
            dynamic_cast<SysMemAllocatorParams *>(pParams);
        if (!sysParams)
            return MFX_ERR_NOT_INITIALIZED;
        m_pBufferAllocator = sysParams->pBufferAllocator;
        m_bOwnBufferAllocator = false;
    }
    // No allocator supplied (or the params carried none): create our own,
    // which Close() will delete.
    if (!m_pBufferAllocator)
    {
        m_pBufferAllocator = new SysMemBufferAllocator;
        if (!m_pBufferAllocator)
            return MFX_ERR_MEMORY_ALLOC;
        m_bOwnBufferAllocator = true;
    }
    return MFX_ERR_NONE;
}
// Release base-class state, then delete the buffer allocator when owned.
// The base-class status is propagated unchanged.
mfxStatus SysMemFrameAllocator::Close()
{
    const mfxStatus sts = BaseFrameAllocator::Close();
    if (m_bOwnBufferAllocator)
    {
        delete m_pBufferAllocator;
        m_pBufferAllocator = 0;
    }
    return sts;
}
// Translate a frame mid into CPU-visible plane pointers.  The pixel data
// lives immediately after the 32-byte-aligned sFrame header that
// AllocImpl wrote at the start of the underlying buffer; plane offsets
// use the same 32-aligned dimensions as the allocation sizing.
mfxStatus SysMemFrameAllocator::LockFrame(mfxMemId mid, mfxFrameData *ptr)
{
    if (!m_pBufferAllocator)
        return MFX_ERR_NOT_INITIALIZED;
    if (!ptr)
        return MFX_ERR_NULL_PTR;
    // If allocator uses pointers instead of mids, no further action is required
    if (!mid && ptr->Y)
        return MFX_ERR_NONE;
    sFrame *fs = 0;
    mfxStatus sts = m_pBufferAllocator->Lock(m_pBufferAllocator->pthis, mid,(mfxU8 **)&fs);
    if (MFX_ERR_NONE != sts)
        return sts;
    // The header must carry the frame magic; otherwise this mid names a
    // plain buffer, so undo the lock and reject the handle.
    if (ID_FRAME != fs->id)
    {
        m_pBufferAllocator->Unlock(m_pBufferAllocator->pthis, mid);
        return MFX_ERR_INVALID_HANDLE;
    }
    mfxU16 Width2 = (mfxU16)MSDK_ALIGN32(fs->info.Width);
    mfxU16 Height2 = (mfxU16)MSDK_ALIGN32(fs->info.Height);
    // First plane starts right after the aligned header.
    ptr->B = ptr->Y = (mfxU8 *)fs + MSDK_ALIGN32(sizeof(sFrame));
    switch (fs->info.FourCC)
    {
    case MFX_FOURCC_NV12:
        // 4:2:0, interleaved UV plane after the luma plane.
        ptr->U = ptr->Y + Width2 * Height2;
        ptr->V = ptr->U + 1;
        ptr->Pitch = Width2;
        break;
    case MFX_FOURCC_NV16:
        // 4:2:2, interleaved UV plane after the luma plane.
        ptr->U = ptr->Y + Width2 * Height2;
        ptr->V = ptr->U + 1;
        ptr->Pitch = Width2;
        break;
    case MFX_FOURCC_YV12:
        // 4:2:0 planar with V before U.
        ptr->V = ptr->Y + Width2 * Height2;
        ptr->U = ptr->V + (Width2 >> 1) * (Height2 >> 1);
        ptr->Pitch = Width2;
        break;
    case MFX_FOURCC_UYVY:
        // Packed 4:2:2, byte order U Y V Y.
        ptr->U = ptr->Y;
        ptr->Y = ptr->U + 1;
        ptr->V = ptr->U + 2;
        ptr->Pitch = 2 * Width2;
        break;
    case MFX_FOURCC_YUY2:
        // Packed 4:2:2, byte order Y U Y V.
        ptr->U = ptr->Y + 1;
        ptr->V = ptr->Y + 3;
        ptr->Pitch = 2 * Width2;
        break;
#if (MFX_VERSION >= MFX_VERSION_NEXT)
    case MFX_FOURCC_RGB565:
        // Packed 16-bit RGB; all channel pointers share the pixel base.
        ptr->G = ptr->B;
        ptr->R = ptr->B;
        ptr->Pitch = 2 * Width2;
        break;
#endif
    case MFX_FOURCC_RGB3:
        // Packed 24-bit BGR.
        ptr->G = ptr->B + 1;
        ptr->R = ptr->B + 2;
        ptr->Pitch = 3 * Width2;
        break;
    case MFX_FOURCC_RGB4:
    case MFX_FOURCC_A2RGB10:
        // Packed 32-bit BGRA (A2RGB10 shares the 4-byte layout).
        ptr->G = ptr->B + 1;
        ptr->R = ptr->B + 2;
        ptr->A = ptr->B + 3;
        ptr->Pitch = 4 * Width2;
        break;
    case MFX_FOURCC_R16:
        // Single 16-bit plane.
        ptr->Y16 = (mfxU16 *)ptr->B;
        ptr->Pitch = 2 * Width2;
        break;
    case MFX_FOURCC_P010:
        // NV12 layout with 16-bit samples.
        ptr->U = ptr->Y + Width2 * Height2 * 2;
        ptr->V = ptr->U + 2;
        ptr->Pitch = Width2 * 2;
        break;
    case MFX_FOURCC_P210:
        // NV16 layout with 16-bit samples.
        ptr->U = ptr->Y + Width2 * Height2 * 2;
        ptr->V = ptr->U + 2;
        ptr->Pitch = Width2 * 2;
        break;
    case MFX_FOURCC_AYUV:
        // Packed 32-bit, byte order V U Y A.
        ptr->V = ptr->B;
        ptr->U = ptr->V + 1;
        ptr->Y = ptr->V + 2;
        ptr->A = ptr->V + 3;
        ptr->Pitch = 4 * Width2;
        break;
    default:
        return MFX_ERR_UNSUPPORTED;
    }
    return MFX_ERR_NONE;
}
// Release a frame lock and clear the caller's plane pointers.
// Fix: the original dereferenced `ptr->Y` before the `NULL != ptr` check
// further down - undefined behaviour when both `mid` and `ptr` are null.
// The guard now tests `ptr` first; all other behaviour is unchanged.
mfxStatus SysMemFrameAllocator::UnlockFrame(mfxMemId mid, mfxFrameData *ptr)
{
    if (!m_pBufferAllocator)
        return MFX_ERR_NOT_INITIALIZED;
    // If allocator uses pointers instead of mids, no further action is required
    if (!mid && ptr && ptr->Y)
        return MFX_ERR_NONE;
    mfxStatus sts = m_pBufferAllocator->Unlock(m_pBufferAllocator->pthis, mid);
    if (MFX_ERR_NONE != sts)
        return sts;
    // Invalidate the caller's view of the frame.
    if (NULL != ptr)
    {
        ptr->Pitch = 0;
        ptr->Y = 0;
        ptr->U = 0;
        ptr->V = 0;
        ptr->A = 0;
    }
    return MFX_ERR_NONE;
}
// System-memory frames have no hardware handle to export.
mfxStatus SysMemFrameAllocator::GetFrameHDL(mfxMemId mid, mfxHDL *handle)
{
    return MFX_ERR_UNSUPPORTED;
}
// Accept only requests the base class approves of AND that target system
// memory; anything else is unsupported by this allocator.
mfxStatus SysMemFrameAllocator::CheckRequestType(mfxFrameAllocRequest *request)
{
    const mfxStatus sts = BaseFrameAllocator::CheckRequestType(request);
    if (sts != MFX_ERR_NONE)
        return sts;
    return (request->Type & MFX_MEMTYPE_SYSTEM_MEMORY) != 0
               ? MFX_ERR_NONE
               : MFX_ERR_UNSUPPORTED;
}
// Allocate request->NumFrameSuggested frames, each as one system-memory
// buffer holding a 32-byte-aligned sFrame header followed by pixel data.
// Dimensions are rounded up to multiples of 32 so LockFrame's plane
// arithmetic matches the allocation size.
// Fix: on partial failure the original returned without freeing the
// buffers it had already allocated (and, when Lock failed, the buffer
// allocated in that same iteration) - a memory leak.  The failure path
// now releases everything it created before reporting the error.
mfxStatus SysMemFrameAllocator::AllocImpl(mfxFrameAllocRequest *request, mfxFrameAllocResponse *response)
{
    if (!m_pBufferAllocator)
        return MFX_ERR_NOT_INITIALIZED;
    mfxU32 numAllocated = 0;
    mfxU32 Width2 = MSDK_ALIGN32(request->Info.Width);
    mfxU32 Height2 = MSDK_ALIGN32(request->Info.Height);
    mfxU32 nbytes;
    // Per-FourCC frame payload size (aligned dimensions).
    switch (request->Info.FourCC)
    {
    case MFX_FOURCC_YV12:
    case MFX_FOURCC_NV12:
        // 4:2:0 - full-size luma plus two quarter-size chroma planes.
        nbytes = Width2*Height2 + (Width2>>1)*(Height2>>1) + (Width2>>1)*(Height2>>1);
        break;
    case MFX_FOURCC_NV16:
        // 4:2:2 - chroma planes are half width, full height.
        nbytes = Width2*Height2 + (Width2>>1)*(Height2) + (Width2>>1)*(Height2);
        break;
#if (MFX_VERSION >= MFX_VERSION_NEXT)
    case MFX_FOURCC_RGB565:
        nbytes = 2*Width2*Height2;
        break;
#endif
    case MFX_FOURCC_RGB3:
        nbytes = Width2*Height2 + Width2*Height2 + Width2*Height2;
        break;
    case MFX_FOURCC_RGB4:
    case MFX_FOURCC_AYUV:
        nbytes = Width2*Height2 + Width2*Height2 + Width2*Height2 + Width2*Height2;
        break;
    case MFX_FOURCC_UYVY:
    case MFX_FOURCC_YUY2:
        nbytes = Width2*Height2 + (Width2>>1)*(Height2) + (Width2>>1)*(Height2);
        break;
    case MFX_FOURCC_R16:
        nbytes = 2*Width2*Height2;
        break;
    case MFX_FOURCC_P010:
        nbytes = Width2*Height2 + (Width2>>1)*(Height2>>1) + (Width2>>1)*(Height2>>1);
        nbytes *= 2; // 10-bit content stored in 16-bit samples
        break;
    case MFX_FOURCC_A2RGB10:
        nbytes = Width2*Height2*4; // 4 bytes per pixel
        break;
    case MFX_FOURCC_P210:
        nbytes = Width2*Height2 + (Width2>>1)*(Height2) + (Width2>>1)*(Height2);
        nbytes *= 2; // 16bits
        break;
    default:
        return MFX_ERR_UNSUPPORTED;
    }
    safe_array<mfxMemId> mids(new mfxMemId[request->NumFrameSuggested]);
    if (!mids.get())
        return MFX_ERR_MEMORY_ALLOC;
    // allocate frames
    for (numAllocated = 0; numAllocated < request->NumFrameSuggested; numAllocated ++)
    {
        mfxStatus sts = m_pBufferAllocator->Alloc(m_pBufferAllocator->pthis,
            nbytes + MSDK_ALIGN32(sizeof(sFrame)), request->Type, &(mids.get()[numAllocated]));
        if (MFX_ERR_NONE != sts)
            break;
        // Stamp the frame header so LockFrame can validate the mid.
        sFrame *fs;
        sts = m_pBufferAllocator->Lock(m_pBufferAllocator->pthis, mids.get()[numAllocated], (mfxU8 **)&fs);
        if (MFX_ERR_NONE != sts)
        {
            // Allocated but not initializable: release this buffer now so
            // the cleanup below only handles fully set-up frames.
            m_pBufferAllocator->Free(m_pBufferAllocator->pthis, mids.get()[numAllocated]);
            break;
        }
        fs->id = ID_FRAME;
        fs->info = request->Info;
        m_pBufferAllocator->Unlock(m_pBufferAllocator->pthis, mids.get()[numAllocated]);
    }
    // check the number of allocated frames
    if (numAllocated < request->NumFrameSuggested)
    {
        // Partial failure: free the frames that did succeed before
        // reporting the error (mids itself is freed by safe_array).
        for (mfxU32 i = 0; i < numAllocated; i++)
            m_pBufferAllocator->Free(m_pBufferAllocator->pthis, mids.get()[i]);
        return MFX_ERR_MEMORY_ALLOC;
    }
    response->NumFrameActual = (mfxU16) numAllocated;
    response->mids = mids.release();
    return MFX_ERR_NONE;
}
// Free every buffer referenced by the response and the mid array itself.
// Fix: the original returned on the first failed Free(), leaking both the
// remaining buffers and the mids array.  We now keep freeing, always
// delete the array, and report the first error encountered.
mfxStatus SysMemFrameAllocator::ReleaseResponse(mfxFrameAllocResponse *response)
{
    if (!response)
        return MFX_ERR_NULL_PTR;
    if (!m_pBufferAllocator)
        return MFX_ERR_NOT_INITIALIZED;
    mfxStatus sts = MFX_ERR_NONE;
    if (response->mids)
    {
        for (mfxU32 i = 0; i < response->NumFrameActual; i++)
        {
            if (response->mids[i])
            {
                mfxStatus freeSts = m_pBufferAllocator->Free(m_pBufferAllocator->pthis, response->mids[i]);
                if (MFX_ERR_NONE == sts)
                    sts = freeSts; // remember the first failure, keep freeing
            }
        }
    }
    delete [] response->mids;
    response->mids = 0;
    return sts;
}
// The buffer allocator is stateless; each buffer is a standalone
// calloc'd block identified by its sBuffer header.
SysMemBufferAllocator::SysMemBufferAllocator()
{
}
SysMemBufferAllocator::~SysMemBufferAllocator()
{
}
// Allocate a zero-filled system-memory buffer of `nbytes` bytes preceded
// by an sBuffer header.  The extra 32 bytes let LockBuffer hand out a
// 32-byte-aligned payload pointer.  The returned mid is the header itself.
mfxStatus SysMemBufferAllocator::AllocBuffer(mfxU32 nbytes, mfxU16 type, mfxMemId *mid)
{
    if (!mid)
        return MFX_ERR_NULL_PTR;
    if (!(type & MFX_MEMTYPE_SYSTEM_MEMORY))
        return MFX_ERR_UNSUPPORTED;
    const mfxU32 header_size = MSDK_ALIGN32(sizeof(sBuffer));
    mfxU8 *raw = (mfxU8 *)calloc(header_size + nbytes + 32, 1);
    if (!raw)
        return MFX_ERR_MEMORY_ALLOC;
    sBuffer *header = (sBuffer *)raw;
    header->id = ID_BUFFER;
    header->type = type;
    header->nbytes = nbytes;
    *mid = (mfxHDL)header;
    return MFX_ERR_NONE;
}
// Return a pointer to the buffer payload, which starts after the aligned
// sBuffer header, rounded up to the next 32-byte boundary (AllocBuffer
// over-allocates 32 bytes to make this safe).
mfxStatus SysMemBufferAllocator::LockBuffer(mfxMemId mid, mfxU8 **ptr)
{
    if (!ptr)
        return MFX_ERR_NULL_PTR;
    sBuffer *header = (sBuffer *)mid;
    if (!header || header->id != ID_BUFFER)
        return MFX_ERR_INVALID_HANDLE;
    const size_t payload =
        (size_t)((mfxU8 *)header + MSDK_ALIGN32(sizeof(sBuffer))) + 31;
    *ptr = (mfxU8 *)(payload & ~(size_t)31);
    return MFX_ERR_NONE;
}
// Locks are not counted; unlocking only validates the handle.
mfxStatus SysMemBufferAllocator::UnlockBuffer(mfxMemId mid)
{
    const sBuffer *header = (const sBuffer *)mid;
    return (header && ID_BUFFER == header->id) ? MFX_ERR_NONE
                                               : MFX_ERR_INVALID_HANDLE;
}
// Validate the handle and release the buffer (header and payload are one
// allocation, so a single free() covers both).
mfxStatus SysMemBufferAllocator::FreeBuffer(mfxMemId mid)
{
    sBuffer *header = (sBuffer *)mid;
    if (!header || ID_BUFFER != header->id)
        return MFX_ERR_INVALID_HANDLE;
    free(header);
    return MFX_ERR_NONE;
}
|
#include "lak/algorithm.hpp"
#include "lak/compiler.hpp"
#include "lak/memmanip.hpp"
#include "lak/span.hpp"
/* --- fixed size --- */
// Move-construct by swapping members with `other`; the moved-from array
// is left holding this object's default-constructed contents.
template<typename T, size_t SIZE>
constexpr lak::packed_array<T, SIZE>::packed_array(
  packed_array<T, SIZE> &&other)
{
	lak::swap(_value, other._value);
	lak::swap(_next, other._next);
}
// Swap-based move assignment: exchanges contents with `other` (also safe
// for self-assignment, since swapping with oneself is a no-op).
template<typename T, size_t SIZE>
constexpr lak::packed_array<T, SIZE> &lak::packed_array<T, SIZE>::operator=(
  packed_array<T, SIZE> &&other)
{
	lak::swap(_value, other._value);
	lak::swap(_next, other._next);
	return *this;
}
// Construct from an initializer list whose length must equal SIZE
// (ASSERT_EQUAL presumably aborts on mismatch - TODO confirm); the loop
// bound additionally guards against overrunning either side.
template<typename T, size_t SIZE>
lak::packed_array<T, SIZE>::packed_array(std::initializer_list<T> list)
{
	ASSERT_EQUAL(list.size(), SIZE);
	for (size_t i = 0; i < list.size() && i < SIZE; ++i)
		operator[](i) = list.begin()[i];
}
// Single-element specialization: move-construct by swapping the value.
template<typename T>
constexpr lak::packed_array<T, 1>::packed_array(packed_array<T, 1> &&other)
{
	lak::swap(_value, other._value);
}
// Single-element specialization: swap-based move assignment.
template<typename T>
constexpr lak::packed_array<T, 1> &lak::packed_array<T, 1>::operator=(
  packed_array<T, 1> &&other)
{
	lak::swap(_value, other._value);
	return *this;
}
// Single-element specialization: the list must contain exactly one item.
template<typename T>
lak::packed_array<T, 1>::packed_array(std::initializer_list<T> list)
{
	ASSERT_EQUAL(list.size(), 1);
	_value = *list.begin();
}
|
// Distributed under the MIT License.
// See LICENSE.txt for details.
#include "Framework/TestingFramework.hpp"
#include <cstddef>
#include <iterator>
#include <numeric>
#include "DataStructures/Tensor/Expressions/Evaluate.hpp"
#include "DataStructures/Tensor/Expressions/Product.hpp"
#include "DataStructures/Tensor/Expressions/TensorExpression.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "Utilities/MakeWithValue.hpp"
namespace {
// Fill a double-valued tensor's independent components with 0.0, 1.0,
// 2.0, ... so every component holds a unique, predictable value.
template <typename... Ts>
void assign_unique_values_to_tensor(
    gsl::not_null<Tensor<double, Ts...>*> tensor) noexcept {
  std::iota(tensor->begin(), tensor->end(), 0.0);
}
// Fill a DataVector-valued tensor with 0.0, 1.0, 2.0, ... across every
// element of every independent component, so all stored values are unique.
template <typename... Ts>
void assign_unique_values_to_tensor(
    gsl::not_null<Tensor<DataVector, Ts...>*> tensor) noexcept {
  double next_value = 0.0;
  for (auto& component : *tensor) {
    for (auto& element : component) {
      element = next_value;
      next_value += 1.0;
    }
  }
}
// \brief Test the outer product and quotient of a tensor and `double` is
// correctly evaluated
//
// \details
// The outer product cases tested are:
// - \f$L_{ij} = R * S_{ij}\f$
// - \f$L_{ij} = S_{ij} * R\f$
// - \f$L_{ij} = R * S_{ij} * T\f$
//
// The division cases tested are:
// - \f$L_{ij} = S_{ij} / R\f$
// - \f$L_{ij} = R * S_{ij} / T\f$
//
// where \f$R\f$ and \f$T\f$ are `double`s and \f$S\f$ and \f$L\f$ are Tensors
// with data type `double` or DataVector.
//
// \tparam DataType the type of data being stored in the tensor operand of the
// products
template <typename DataType>
void test_outer_product_quotient_double(
    const DataType& used_for_size) noexcept {
  constexpr size_t dim = 3;
  // Rank-2 symmetric spatial tensor used as the operand S in every case.
  using tensor_type =
      Tensor<DataType, Symmetry<1, 1>,
             index_list<SpatialIndex<dim, UpLo::Lo, Frame::Inertial>,
                        SpatialIndex<dim, UpLo::Lo, Frame::Inertial>>>;
  tensor_type S(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&S));
  // \f$L_{ij} = R * S_{ij}\f$
  // Use explicit type (vs auto) for LHS Tensor so the compiler checks the
  // return type of `evaluate`
  const tensor_type Lij_from_R_Sij =
      TensorExpressions::evaluate<ti_i, ti_j>(5.6 * S(ti_i, ti_j));
  // \f$L_{ij} = S_{ij} * R\f$
  const tensor_type Lij_from_Sij_R =
      TensorExpressions::evaluate<ti_i, ti_j>(S(ti_i, ti_j) * -8.1);
  // \f$L_{ij} = R * S_{ij} * T\f$
  const tensor_type Lij_from_R_Sij_T =
      TensorExpressions::evaluate<ti_i, ti_j>(-1.7 * S(ti_i, ti_j) * 0.6);
  // \f$L_{ij} = S_{ij} / R\f$
  const tensor_type Lij_from_Sij_over_R =
      TensorExpressions::evaluate<ti_i, ti_j>(S(ti_i, ti_j) / 4.3);
  // \f$L_{ij} = R * S_{ij} / T\f$
  const tensor_type Lij_from_R_Sij_over_T =
      TensorExpressions::evaluate<ti_i, ti_j>(-5.2 * S(ti_i, ti_j) / 1.6);
  // Exact (==) comparison relies on the expected expressions performing
  // the same floating-point operations as the expressions under test.
  for (size_t i = 0; i < dim; i++) {
    for (size_t j = 0; j < dim; j++) {
      CHECK(Lij_from_R_Sij.get(i, j) == 5.6 * S.get(i, j));
      CHECK(Lij_from_Sij_R.get(i, j) == S.get(i, j) * -8.1);
      CHECK(Lij_from_R_Sij_T.get(i, j) == -1.7 * S.get(i, j) * 0.6);
      CHECK(Lij_from_Sij_over_R.get(i, j) == S.get(i, j) / 4.3);
      CHECK(Lij_from_R_Sij_over_T.get(i, j) == -5.2 * S.get(i, j) / 1.6);
    }
  }
}
// \brief Test the outer product of a rank 0 tensor with another tensor is
// correctly evaluated
//
// \details
// The outer product cases tested are:
// - \f$L = R * R\f$
// - \f$L = R * R * R\f$
// - \f$L^{a} = R * S^{a}\f$
// - \f$L_{ai} = R * T_{ai}\f$
//
// For the last two cases, both operand orderings are tested. For the last case,
// both LHS index orderings are tested.
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_outer_product_rank_0_operand(const DataType& used_for_size) noexcept {
  // Rank-0 (scalar) tensor operand.
  Tensor<DataType> R{{{used_for_size}}};
  if constexpr (std::is_same_v<DataType, double>) {
    // Replace tensor's value from `used_for_size` to a proper test value
    R.get() = -3.7;
  } else {
    assign_unique_values_to_tensor(make_not_null(&R));
  }
  // \f$L = R * R\f$
  CHECK(TensorExpressions::evaluate(R() * R()).get() == R.get() * R.get());
  // \f$L = R * R * R\f$
  CHECK(TensorExpressions::evaluate(R() * R() * R()).get() ==
        R.get() * R.get() * R.get());
  Tensor<DataType, Symmetry<1>,
         index_list<SpacetimeIndex<3, UpLo::Up, Frame::Inertial>>>
      Su(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Su));
  // \f$L^{a} = R * S^{a}\f$
  // Use explicit type (vs auto) for LHS Tensor so the compiler checks the
  // return type of `evaluate`
  const decltype(Su) LA_from_R_SA =
      TensorExpressions::evaluate<ti_A>(R() * Su(ti_A));
  // \f$L^{a} = S^{a} * R\f$
  const decltype(Su) LA_from_SA_R =
      TensorExpressions::evaluate<ti_A>(Su(ti_A) * R());
  // Spacetime index a runs over 4 values for a dimension-3 index.
  for (size_t a = 0; a < 4; a++) {
    CHECK(LA_from_R_SA.get(a) == R.get() * Su.get(a));
    CHECK(LA_from_SA_R.get(a) == Su.get(a) * R.get());
  }
  Tensor<DataType, Symmetry<2, 1>,
         index_list<SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>,
                    SpatialIndex<4, UpLo::Lo, Frame::Inertial>>>
      Tll(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Tll));
  // \f$L_{ai} = R * T_{ai}\f$
  const Tensor<DataType, Symmetry<2, 1>,
               index_list<SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>,
                          SpatialIndex<4, UpLo::Lo, Frame::Inertial>>>
      Lai_from_R_Tai =
          TensorExpressions::evaluate<ti_a, ti_i>(R() * Tll(ti_a, ti_i));
  // \f$L_{ia} = R * T_{ai}\f$ (LHS index order swapped)
  const Tensor<DataType, Symmetry<2, 1>,
               index_list<SpatialIndex<4, UpLo::Lo, Frame::Inertial>,
                          SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>>>
      Lia_from_R_Tai =
          TensorExpressions::evaluate<ti_i, ti_a>(R() * Tll(ti_a, ti_i));
  // \f$L_{ai} = T_{ai} * R\f$ (operand order swapped)
  const Tensor<DataType, Symmetry<2, 1>,
               index_list<SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>,
                          SpatialIndex<4, UpLo::Lo, Frame::Inertial>>>
      Lai_from_Tai_R =
          TensorExpressions::evaluate<ti_a, ti_i>(Tll(ti_a, ti_i) * R());
  // \f$L_{ia} = T_{ai} * R\f$
  const Tensor<DataType, Symmetry<2, 1>,
               index_list<SpatialIndex<4, UpLo::Lo, Frame::Inertial>,
                          SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>>>
      Lia_from_Tai_R =
          TensorExpressions::evaluate<ti_i, ti_a>(Tll(ti_a, ti_i) * R());
  // Both indices run over 4 values: a is a dim-3 spacetime index, i is a
  // dim-4 spatial index.
  for (size_t a = 0; a < 4; a++) {
    for (size_t i = 0; i < 4; i++) {
      const DataType expected_R_Tai_product = R.get() * Tll.get(a, i);
      CHECK(Lai_from_R_Tai.get(a, i) == expected_R_Tai_product);
      CHECK(Lia_from_R_Tai.get(i, a) == expected_R_Tai_product);
      const DataType expected_Tai_R_product = Tll.get(a, i) * R.get();
      CHECK(Lai_from_Tai_R.get(a, i) == expected_Tai_R_product);
      CHECK(Lia_from_Tai_R.get(i, a) == expected_Tai_R_product);
    }
  }
}
// \brief Test the outer product of rank 1 tensors with another tensor is
// correctly evaluated
//
// \details
// The outer product cases tested are:
// - \f$L^{a}{}_{i} = R_{i} * S^{a}\f$
// - \f$L^{ja}{}_{i} = R_{i} * S^{a} * T^{j}\f$
// - \f$L_{k}{}^{c}{}_{d} = S^{c} * G_{dk}\f$
// - \f$L^{c}{}_{dk} = G_{dk} * S^{c}\f$
//
// For each case, the LHS index order is different from the order in the
// operands.
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_outer_product_rank_1_operand(const DataType& used_for_size) noexcept {
  Tensor<DataType, Symmetry<1>,
         index_list<SpatialIndex<3, UpLo::Lo, Frame::Grid>>>
      Rl(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Rl));
  Tensor<DataType, Symmetry<1>,
         index_list<SpacetimeIndex<3, UpLo::Up, Frame::Grid>>>
      Su(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Su));
  // \f$L^{a}{}_{i} = R_{i} * S^{a}\f$
  // Use explicit type (vs auto) for LHS Tensor so the compiler checks the
  // return type of `evaluate`
  const Tensor<DataType, Symmetry<2, 1>,
               index_list<SpacetimeIndex<3, UpLo::Up, Frame::Grid>,
                          SpatialIndex<3, UpLo::Lo, Frame::Grid>>>
      LAi_from_Ri_SA =
          TensorExpressions::evaluate<ti_A, ti_i>(Rl(ti_i) * Su(ti_A));
  // i is a dim-3 spatial index (3 values); a is a dim-3 spacetime index
  // (4 values).
  for (size_t i = 0; i < 3; i++) {
    for (size_t a = 0; a < 4; a++) {
      CHECK(LAi_from_Ri_SA.get(a, i) == Rl.get(i) * Su.get(a));
    }
  }
  Tensor<DataType, Symmetry<1>,
         index_list<SpatialIndex<3, UpLo::Up, Frame::Grid>>>
      Tu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Tu));
  // \f$L^{ja}{}_{i} = R_{i} * S^{a} * T^{j}\f$ - LHS order differs from
  // the operand order in the product.
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<SpatialIndex<3, UpLo::Up, Frame::Grid>,
                          SpacetimeIndex<3, UpLo::Up, Frame::Grid>,
                          SpatialIndex<3, UpLo::Lo, Frame::Grid>>>
      LJAi_from_Ri_SA_TJ = TensorExpressions::evaluate<ti_J, ti_A, ti_i>(
          Rl(ti_i) * Su(ti_A) * Tu(ti_J));
  for (size_t j = 0; j < 3; j++) {
    for (size_t a = 0; a < 4; a++) {
      for (size_t i = 0; i < 3; i++) {
        CHECK(LJAi_from_Ri_SA_TJ.get(j, a, i) ==
              Rl.get(i) * Su.get(a) * Tu.get(j));
      }
    }
  }
  Tensor<DataType, Symmetry<2, 1>,
         index_list<SpacetimeIndex<3, UpLo::Lo, Frame::Grid>,
                    SpatialIndex<4, UpLo::Lo, Frame::Grid>>>
      Gll(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Gll));
  // \f$L_{k}{}^{c}{}_{d} = S^{c} * G_{dk}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<SpatialIndex<4, UpLo::Lo, Frame::Grid>,
                          SpacetimeIndex<3, UpLo::Up, Frame::Grid>,
                          SpacetimeIndex<3, UpLo::Lo, Frame::Grid>>>
      LkCd_from_SC_Gdk = TensorExpressions::evaluate<ti_k, ti_C, ti_d>(
          Su(ti_C) * Gll(ti_d, ti_k));
  // \f$L^{c}{}_{dk} = G_{dk} * S^{c}\f$ - same product, operands swapped.
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<SpacetimeIndex<3, UpLo::Up, Frame::Grid>,
                          SpacetimeIndex<3, UpLo::Lo, Frame::Grid>,
                          SpatialIndex<4, UpLo::Lo, Frame::Grid>>>
      LCdk_from_Gdk_SC = TensorExpressions::evaluate<ti_C, ti_d, ti_k>(
          Gll(ti_d, ti_k) * Su(ti_C));
  for (size_t k = 0; k < 4; k++) {
    for (size_t c = 0; c < 4; c++) {
      for (size_t d = 0; d < 4; d++) {
        CHECK(LkCd_from_SC_Gdk.get(k, c, d) == Su.get(c) * Gll.get(d, k));
        CHECK(LCdk_from_Gdk_SC.get(c, d, k) == Gll.get(d, k) * Su.get(c));
      }
    }
  }
}
// \brief Test the outer product of two rank 2 tensors is correctly evaluated
//
// \details
// All LHS index orders are tested. One such example case:
// \f$L_{abc}{}^{i} = R_{ab} * S^{i}{}_{c}\f$
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_outer_product_rank_2x2_operands(
const DataType& used_for_size) noexcept {
using R_index = SpacetimeIndex<3, UpLo::Lo, Frame::Grid>;
using S_first_index = SpatialIndex<4, UpLo::Up, Frame::Grid>;
using S_second_index = SpacetimeIndex<2, UpLo::Lo, Frame::Grid>;
Tensor<DataType, Symmetry<1, 1>, index_list<R_index, R_index>> Rll(
used_for_size);
assign_unique_values_to_tensor(make_not_null(&Rll));
Tensor<DataType, Symmetry<2, 1>, index_list<S_first_index, S_second_index>>
Sul(used_for_size);
assign_unique_values_to_tensor(make_not_null(&Sul));
// Use explicit type (vs auto) for LHS Tensor so the compiler checks the
// return type of `evaluate`
const Tensor<DataType, Symmetry<3, 3, 2, 1>,
index_list<R_index, R_index, S_first_index, S_second_index>>
L_abIc = TensorExpressions::evaluate<ti_a, ti_b, ti_I, ti_c>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 3, 2, 1>,
index_list<R_index, R_index, S_second_index, S_first_index>>
L_abcI = TensorExpressions::evaluate<ti_a, ti_b, ti_c, ti_I>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 3, 1>,
index_list<R_index, S_first_index, R_index, S_second_index>>
L_aIbc = TensorExpressions::evaluate<ti_a, ti_I, ti_b, ti_c>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 3>,
index_list<R_index, S_first_index, S_second_index, R_index>>
L_aIcb = TensorExpressions::evaluate<ti_a, ti_I, ti_c, ti_b>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 3, 1>,
index_list<R_index, S_second_index, R_index, S_first_index>>
L_acbI = TensorExpressions::evaluate<ti_a, ti_c, ti_b, ti_I>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 3>,
index_list<R_index, S_second_index, S_first_index, R_index>>
L_acIb = TensorExpressions::evaluate<ti_a, ti_c, ti_I, ti_b>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 3, 2, 1>,
index_list<R_index, R_index, S_first_index, S_second_index>>
L_baIc = TensorExpressions::evaluate<ti_b, ti_a, ti_I, ti_c>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 3, 2, 1>,
index_list<R_index, R_index, S_second_index, S_first_index>>
L_bacI = TensorExpressions::evaluate<ti_b, ti_a, ti_c, ti_I>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 3, 1>,
index_list<R_index, S_first_index, R_index, S_second_index>>
L_bIac = TensorExpressions::evaluate<ti_b, ti_I, ti_a, ti_c>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 3>,
index_list<R_index, S_first_index, S_second_index, R_index>>
L_bIca = TensorExpressions::evaluate<ti_b, ti_I, ti_c, ti_a>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 3, 1>,
index_list<R_index, S_second_index, R_index, S_first_index>>
L_bcaI = TensorExpressions::evaluate<ti_b, ti_c, ti_a, ti_I>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 3>,
index_list<R_index, S_second_index, S_first_index, R_index>>
L_bcIa = TensorExpressions::evaluate<ti_b, ti_c, ti_I, ti_a>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 2, 1>,
index_list<S_first_index, R_index, R_index, S_second_index>>
L_Iabc = TensorExpressions::evaluate<ti_I, ti_a, ti_b, ti_c>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 2>,
index_list<S_first_index, R_index, S_second_index, R_index>>
L_Iacb = TensorExpressions::evaluate<ti_I, ti_a, ti_c, ti_b>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 2, 1>,
index_list<S_first_index, R_index, R_index, S_second_index>>
L_Ibac = TensorExpressions::evaluate<ti_I, ti_b, ti_a, ti_c>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 2>,
index_list<S_first_index, R_index, S_second_index, R_index>>
L_Ibca = TensorExpressions::evaluate<ti_I, ti_b, ti_c, ti_a>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 1>,
index_list<S_first_index, S_second_index, R_index, R_index>>
L_Icab = TensorExpressions::evaluate<ti_I, ti_c, ti_a, ti_b>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 1>,
index_list<S_first_index, S_second_index, R_index, R_index>>
L_Icba = TensorExpressions::evaluate<ti_I, ti_c, ti_b, ti_a>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 2, 1>,
index_list<S_second_index, R_index, R_index, S_first_index>>
L_cabI = TensorExpressions::evaluate<ti_c, ti_a, ti_b, ti_I>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 2>,
index_list<S_second_index, R_index, S_first_index, R_index>>
L_caIb = TensorExpressions::evaluate<ti_c, ti_a, ti_I, ti_b>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 2, 1>,
index_list<S_second_index, R_index, R_index, S_first_index>>
L_cbaI = TensorExpressions::evaluate<ti_c, ti_b, ti_a, ti_I>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 2>,
index_list<S_second_index, R_index, S_first_index, R_index>>
L_cbIa = TensorExpressions::evaluate<ti_c, ti_b, ti_I, ti_a>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 1>,
index_list<S_second_index, S_first_index, R_index, R_index>>
L_cIab = TensorExpressions::evaluate<ti_c, ti_I, ti_a, ti_b>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
const Tensor<DataType, Symmetry<3, 2, 1, 1>,
index_list<S_second_index, S_first_index, R_index, R_index>>
L_cIba = TensorExpressions::evaluate<ti_c, ti_I, ti_b, ti_a>(
Rll(ti_a, ti_b) * Sul(ti_I, ti_c));
for (size_t a = 0; a < R_index::dim; a++) {
for (size_t b = 0; b < R_index::dim; b++) {
for (size_t i = 0; i < S_first_index::dim; i++) {
for (size_t c = 0; c < S_second_index::dim; c++) {
CHECK(L_abIc.get(a, b, i, c) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_abcI.get(a, b, c, i) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_aIbc.get(a, i, b, c) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_aIcb.get(a, i, c, b) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_acbI.get(a, c, b, i) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_acIb.get(a, c, i, b) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_baIc.get(b, a, i, c) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_bacI.get(b, a, c, i) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_bIac.get(b, i, a, c) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_bIca.get(b, i, c, a) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_bcaI.get(b, c, a, i) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_bcIa.get(b, c, i, a) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_Iabc.get(i, a, b, c) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_Iacb.get(i, a, c, b) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_Ibac.get(i, b, a, c) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_Ibca.get(i, b, c, a) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_Icab.get(i, c, a, b) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_Icba.get(i, c, b, a) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_cabI.get(c, a, b, i) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_caIb.get(c, a, i, b) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_cbaI.get(c, b, a, i) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_cbIa.get(c, b, i, a) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_cIab.get(c, i, a, b) == Rll.get(a, b) * Sul.get(i, c));
CHECK(L_cIba.get(c, i, b, a) == Rll.get(a, b) * Sul.get(i, c));
}
}
}
}
}
// \brief Test the outer product of a rank 0, rank 1, and rank 2 tensor is
// correctly evaluated
//
// \details
// The outer product cases tested are permutations of the form:
// - \f$L^{a}{}_{ib} = R * S^{a} * T_{bi}\f$
//
// Each case represents an ordering for the operands and the LHS indices.
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_outer_product_rank_0x1x2_operands(
    const DataType& used_for_size) noexcept {
  Tensor<DataType> R{{{used_for_size}}};
  if constexpr (std::is_same_v<DataType, double>) {
    // Replace tensor's value from `used_for_size` to a proper test value
    R.get() = 4.5;
  } else {
    assign_unique_values_to_tensor(make_not_null(&R));
  }
  // Index types for the rank 1 operand (S) and the rank 2 operand (T)
  using S_index = SpacetimeIndex<3, UpLo::Up, Frame::Inertial>;
  using T_first_index = SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>;
  using T_second_index = SpatialIndex<4, UpLo::Lo, Frame::Inertial>;
  Tensor<DataType, Symmetry<1>, index_list<S_index>> Su(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Su));
  Tensor<DataType, Symmetry<2, 1>, index_list<T_first_index, T_second_index>>
      Tll(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Tll));
  // \f$R * S^{a} * T_{bi}\f$
  const auto R_SA_Tbi_expr = R() * Su(ti_A) * Tll(ti_b, ti_i);
  // \f$L^{a}{}_{bi} = R * S^{a} * T_{bi}\f$
  // Use explicit type (vs auto) for LHS Tensor so the compiler checks the
  // return type of `evaluate`
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_first_index, T_second_index>>
      LAbi_from_R_SA_Tbi =
          TensorExpressions::evaluate<ti_A, ti_b, ti_i>(R_SA_Tbi_expr);
  // \f$L^{a}{}_{ib} = R * S^{a} * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_second_index, T_first_index>>
      LAib_from_R_SA_Tbi =
          TensorExpressions::evaluate<ti_A, ti_i, ti_b>(R_SA_Tbi_expr);
  // \f$L_{b}{}^{a}{}_{i} = R * S^{a} * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, S_index, T_second_index>>
      LbAi_from_R_SA_Tbi =
          TensorExpressions::evaluate<ti_b, ti_A, ti_i>(R_SA_Tbi_expr);
  // \f$L_{bi}{}^{a} = R * S^{a} * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, T_second_index, S_index>>
      LbiA_from_R_SA_Tbi =
          TensorExpressions::evaluate<ti_b, ti_i, ti_A>(R_SA_Tbi_expr);
  // \f$L_{i}{}^{a}{}_{b} = R * S^{a} * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, S_index, T_first_index>>
      LiAb_from_R_SA_Tbi =
          TensorExpressions::evaluate<ti_i, ti_A, ti_b>(R_SA_Tbi_expr);
  // \f$L_{ib}{}^{a} = R * S^{a} * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, T_first_index, S_index>>
      LibA_from_R_SA_Tbi =
          TensorExpressions::evaluate<ti_i, ti_b, ti_A>(R_SA_Tbi_expr);
  // \f$R * T_{bi} * S^{a}\f$
  const auto R_Tbi_SA_expr = R() * Tll(ti_b, ti_i) * Su(ti_A);
  // \f$L^{a}{}_{bi} = R * T_{bi} * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_first_index, T_second_index>>
      LAbi_from_R_Tbi_SA =
          TensorExpressions::evaluate<ti_A, ti_b, ti_i>(R_Tbi_SA_expr);
  // \f$L^{a}{}_{ib} = R * T_{bi} * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_second_index, T_first_index>>
      LAib_from_R_Tbi_SA =
          TensorExpressions::evaluate<ti_A, ti_i, ti_b>(R_Tbi_SA_expr);
  // \f$L_{b}{}^{a}{}_{i} = R * T_{bi} * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, S_index, T_second_index>>
      LbAi_from_R_Tbi_SA =
          TensorExpressions::evaluate<ti_b, ti_A, ti_i>(R_Tbi_SA_expr);
  // \f$L_{bi}{}^{a} = R * T_{bi} * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, T_second_index, S_index>>
      LbiA_from_R_Tbi_SA =
          TensorExpressions::evaluate<ti_b, ti_i, ti_A>(R_Tbi_SA_expr);
  // \f$L_{i}{}^{a}{}_{b} = R * T_{bi} * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, S_index, T_first_index>>
      LiAb_from_R_Tbi_SA =
          TensorExpressions::evaluate<ti_i, ti_A, ti_b>(R_Tbi_SA_expr);
  // \f$L_{ib}{}^{a} = R * T_{bi} * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, T_first_index, S_index>>
      LibA_from_R_Tbi_SA =
          TensorExpressions::evaluate<ti_i, ti_b, ti_A>(R_Tbi_SA_expr);
  // \f$S^{a} * R * T_{bi}\f$
  const auto SA_R_Tbi_expr = Su(ti_A) * R() * Tll(ti_b, ti_i);
  // \f$L^{a}{}_{bi} = S^{a} * R * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_first_index, T_second_index>>
      LAbi_from_SA_R_Tbi =
          TensorExpressions::evaluate<ti_A, ti_b, ti_i>(SA_R_Tbi_expr);
  // \f$L^{a}{}_{ib} = S^{a} * R * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_second_index, T_first_index>>
      LAib_from_SA_R_Tbi =
          TensorExpressions::evaluate<ti_A, ti_i, ti_b>(SA_R_Tbi_expr);
  // \f$L_{b}{}^{a}{}_{i} = S^{a} * R * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, S_index, T_second_index>>
      LbAi_from_SA_R_Tbi =
          TensorExpressions::evaluate<ti_b, ti_A, ti_i>(SA_R_Tbi_expr);
  // \f$L_{bi}{}^{a} = S^{a} * R * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, T_second_index, S_index>>
      LbiA_from_SA_R_Tbi =
          TensorExpressions::evaluate<ti_b, ti_i, ti_A>(SA_R_Tbi_expr);
  // \f$L_{i}{}^{a}{}_{b} = S^{a} * R * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, S_index, T_first_index>>
      LiAb_from_SA_R_Tbi =
          TensorExpressions::evaluate<ti_i, ti_A, ti_b>(SA_R_Tbi_expr);
  // \f$L_{ib}{}^{a} = S^{a} * R * T_{bi}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, T_first_index, S_index>>
      LibA_from_SA_R_Tbi =
          TensorExpressions::evaluate<ti_i, ti_b, ti_A>(SA_R_Tbi_expr);
  // \f$S^{a} * T_{bi} * R\f$
  const auto SA_Tbi_R_expr = Su(ti_A) * Tll(ti_b, ti_i) * R();
  // \f$L^{a}{}_{bi} = S^{a} * T_{bi} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_first_index, T_second_index>>
      LAbi_from_SA_Tbi_R =
          TensorExpressions::evaluate<ti_A, ti_b, ti_i>(SA_Tbi_R_expr);
  // \f$L^{a}{}_{ib} = S^{a} * T_{bi} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_second_index, T_first_index>>
      LAib_from_SA_Tbi_R =
          TensorExpressions::evaluate<ti_A, ti_i, ti_b>(SA_Tbi_R_expr);
  // \f$L_{b}{}^{a}{}_{i} = S^{a} * T_{bi} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, S_index, T_second_index>>
      LbAi_from_SA_Tbi_R =
          TensorExpressions::evaluate<ti_b, ti_A, ti_i>(SA_Tbi_R_expr);
  // \f$L_{bi}{}^{a} = S^{a} * T_{bi} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, T_second_index, S_index>>
      LbiA_from_SA_Tbi_R =
          TensorExpressions::evaluate<ti_b, ti_i, ti_A>(SA_Tbi_R_expr);
  // \f$L_{i}{}^{a}{}_{b} = S^{a} * T_{bi} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, S_index, T_first_index>>
      LiAb_from_SA_Tbi_R =
          TensorExpressions::evaluate<ti_i, ti_A, ti_b>(SA_Tbi_R_expr);
  // \f$L_{ib}{}^{a} = S^{a} * T_{bi} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, T_first_index, S_index>>
      LibA_from_SA_Tbi_R =
          TensorExpressions::evaluate<ti_i, ti_b, ti_A>(SA_Tbi_R_expr);
  // \f$T_{bi} * R * S^{a}\f$
  const auto Tbi_R_SA_expr = Tll(ti_b, ti_i) * R() * Su(ti_A);
  // \f$L^{a}{}_{bi} = T_{bi} * R * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_first_index, T_second_index>>
      LAbi_from_Tbi_R_SA =
          TensorExpressions::evaluate<ti_A, ti_b, ti_i>(Tbi_R_SA_expr);
  // \f$L^{a}{}_{ib} = T_{bi} * R * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_second_index, T_first_index>>
      LAib_from_Tbi_R_SA =
          TensorExpressions::evaluate<ti_A, ti_i, ti_b>(Tbi_R_SA_expr);
  // \f$L_{b}{}^{a}{}_{i} = T_{bi} * R * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, S_index, T_second_index>>
      LbAi_from_Tbi_R_SA =
          TensorExpressions::evaluate<ti_b, ti_A, ti_i>(Tbi_R_SA_expr);
  // \f$L_{bi}{}^{a} = T_{bi} * R * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, T_second_index, S_index>>
      LbiA_from_Tbi_R_SA =
          TensorExpressions::evaluate<ti_b, ti_i, ti_A>(Tbi_R_SA_expr);
  // \f$L_{i}{}^{a}{}_{b} = T_{bi} * R * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, S_index, T_first_index>>
      LiAb_from_Tbi_R_SA =
          TensorExpressions::evaluate<ti_i, ti_A, ti_b>(Tbi_R_SA_expr);
  // \f$L_{ib}{}^{a} = T_{bi} * R * S^{a}\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, T_first_index, S_index>>
      LibA_from_Tbi_R_SA =
          TensorExpressions::evaluate<ti_i, ti_b, ti_A>(Tbi_R_SA_expr);
  // \f$T_{bi} * S^{a} * R\f$
  const auto Tbi_SA_R_expr = Tll(ti_b, ti_i) * Su(ti_A) * R();
  // \f$L^{a}{}_{bi} = T_{bi} * S^{a} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_first_index, T_second_index>>
      LAbi_from_Tbi_SA_R =
          TensorExpressions::evaluate<ti_A, ti_b, ti_i>(Tbi_SA_R_expr);
  // \f$L^{a}{}_{ib} = T_{bi} * S^{a} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<S_index, T_second_index, T_first_index>>
      LAib_from_Tbi_SA_R =
          TensorExpressions::evaluate<ti_A, ti_i, ti_b>(Tbi_SA_R_expr);
  // \f$L_{b}{}^{a}{}_{i} = T_{bi} * S^{a} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, S_index, T_second_index>>
      LbAi_from_Tbi_SA_R =
          TensorExpressions::evaluate<ti_b, ti_A, ti_i>(Tbi_SA_R_expr);
  // \f$L_{bi}{}^{a} = T_{bi} * S^{a} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_first_index, T_second_index, S_index>>
      LbiA_from_Tbi_SA_R =
          TensorExpressions::evaluate<ti_b, ti_i, ti_A>(Tbi_SA_R_expr);
  // \f$L_{i}{}^{a}{}_{b} = T_{bi} * S^{a} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, S_index, T_first_index>>
      LiAb_from_Tbi_SA_R =
          TensorExpressions::evaluate<ti_i, ti_A, ti_b>(Tbi_SA_R_expr);
  // \f$L_{ib}{}^{a} = T_{bi} * S^{a} * R\f$
  const Tensor<DataType, Symmetry<3, 2, 1>,
               index_list<T_second_index, T_first_index, S_index>>
      LibA_from_Tbi_SA_R =
          TensorExpressions::evaluate<ti_i, ti_b, ti_A>(Tbi_SA_R_expr);
  // Verify that every component of every LHS tensor equals the product of the
  // corresponding operand components, regardless of operand/index ordering
  for (size_t a = 0; a < S_index::dim; a++) {
    for (size_t b = 0; b < T_first_index::dim; b++) {
      for (size_t i = 0; i < T_second_index::dim; i++) {
        const DataType expected_product = R.get() * Su.get(a) * Tll.get(b, i);
        CHECK(LAbi_from_R_SA_Tbi.get(a, b, i) == expected_product);
        CHECK(LAib_from_R_SA_Tbi.get(a, i, b) == expected_product);
        CHECK(LbAi_from_R_SA_Tbi.get(b, a, i) == expected_product);
        CHECK(LbiA_from_R_SA_Tbi.get(b, i, a) == expected_product);
        CHECK(LiAb_from_R_SA_Tbi.get(i, a, b) == expected_product);
        CHECK(LibA_from_R_SA_Tbi.get(i, b, a) == expected_product);
        CHECK(LAbi_from_R_Tbi_SA.get(a, b, i) == expected_product);
        CHECK(LAib_from_R_Tbi_SA.get(a, i, b) == expected_product);
        CHECK(LbAi_from_R_Tbi_SA.get(b, a, i) == expected_product);
        CHECK(LbiA_from_R_Tbi_SA.get(b, i, a) == expected_product);
        CHECK(LiAb_from_R_Tbi_SA.get(i, a, b) == expected_product);
        CHECK(LibA_from_R_Tbi_SA.get(i, b, a) == expected_product);
        CHECK(LAbi_from_SA_R_Tbi.get(a, b, i) == expected_product);
        CHECK(LAib_from_SA_R_Tbi.get(a, i, b) == expected_product);
        CHECK(LbAi_from_SA_R_Tbi.get(b, a, i) == expected_product);
        CHECK(LbiA_from_SA_R_Tbi.get(b, i, a) == expected_product);
        CHECK(LiAb_from_SA_R_Tbi.get(i, a, b) == expected_product);
        CHECK(LibA_from_SA_R_Tbi.get(i, b, a) == expected_product);
        CHECK(LAbi_from_SA_Tbi_R.get(a, b, i) == expected_product);
        CHECK(LAib_from_SA_Tbi_R.get(a, i, b) == expected_product);
        CHECK(LbAi_from_SA_Tbi_R.get(b, a, i) == expected_product);
        CHECK(LbiA_from_SA_Tbi_R.get(b, i, a) == expected_product);
        CHECK(LiAb_from_SA_Tbi_R.get(i, a, b) == expected_product);
        CHECK(LibA_from_SA_Tbi_R.get(i, b, a) == expected_product);
        CHECK(LAbi_from_Tbi_R_SA.get(a, b, i) == expected_product);
        CHECK(LAib_from_Tbi_R_SA.get(a, i, b) == expected_product);
        CHECK(LbAi_from_Tbi_R_SA.get(b, a, i) == expected_product);
        CHECK(LbiA_from_Tbi_R_SA.get(b, i, a) == expected_product);
        CHECK(LiAb_from_Tbi_R_SA.get(i, a, b) == expected_product);
        CHECK(LibA_from_Tbi_R_SA.get(i, b, a) == expected_product);
        CHECK(LAbi_from_Tbi_SA_R.get(a, b, i) == expected_product);
        CHECK(LAib_from_Tbi_SA_R.get(a, i, b) == expected_product);
        CHECK(LbAi_from_Tbi_SA_R.get(b, a, i) == expected_product);
        CHECK(LbiA_from_Tbi_SA_R.get(b, i, a) == expected_product);
        CHECK(LiAb_from_Tbi_SA_R.get(i, a, b) == expected_product);
        CHECK(LibA_from_Tbi_SA_R.get(i, b, a) == expected_product);
      }
    }
  }
}
// \brief Test the inner product of two rank 1 tensors is correctly evaluated
//
// \details
// The inner product cases tested are:
// - \f$L = R^{a} * S_{a}\f$
// - \f$L = S_{a} * R^{a}\f$
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_inner_product_rank_1x1_operands(
const DataType& used_for_size) noexcept {
Tensor<DataType, Symmetry<1>,
index_list<SpacetimeIndex<3, UpLo::Up, Frame::Grid>>>
Ru(used_for_size);
assign_unique_values_to_tensor(make_not_null(&Ru));
Tensor<DataType, Symmetry<1>,
index_list<SpacetimeIndex<3, UpLo::Lo, Frame::Grid>>>
Sl(used_for_size);
assign_unique_values_to_tensor(make_not_null(&Sl));
// \f$L = R^{a} * S_{a}\f$
const Tensor<DataType> L_from_RA_Sa =
TensorExpressions::evaluate(Ru(ti_A) * Sl(ti_a));
// \f$L = S_{a} * R^{a}\f$
const Tensor<DataType> L_from_Sa_RA =
TensorExpressions::evaluate(Sl(ti_a) * Ru(ti_A));
DataType expected_sum = make_with_value<DataType>(used_for_size, 0.0);
for (size_t a = 0; a < 4; a++) {
expected_sum += (Ru.get(a) * Sl.get(a));
}
CHECK(L_from_RA_Sa.get() == expected_sum);
CHECK(L_from_Sa_RA.get() == expected_sum);
}
// \brief Test the inner product of two rank 2 tensors is correctly evaluated
//
// \details
// All cases in this test contract both pairs of indices of the two rank 2
// tensor operands to a resulting rank 0 tensor. For each case, the two tensor
// operands have one spacetime and one spatial index. Each case is a permutation
// of the positions of contracted pairs and their valences. One such example
// case: \f$L = R_{ai} * S^{ai}\f$
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_inner_product_rank_2x2_operands(
    const DataType& used_for_size) noexcept {
  using lower_spacetime_index = SpacetimeIndex<3, UpLo::Lo, Frame::Inertial>;
  using upper_spacetime_index = SpacetimeIndex<3, UpLo::Up, Frame::Inertial>;
  using lower_spatial_index = SpatialIndex<2, UpLo::Lo, Frame::Inertial>;
  using upper_spatial_index = SpatialIndex<2, UpLo::Up, Frame::Inertial>;
  // All tensor variables starting with 'R' refer to tensors whose first index
  // is a spacetime index and whose second index is a spatial index. Conversely,
  // all tensor variables starting with 'S' refer to tensors whose first index
  // is a spatial index and whose second index is a spacetime index.
  Tensor<DataType, Symmetry<2, 1>,
         index_list<lower_spacetime_index, lower_spatial_index>>
      Rll(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Rll));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<lower_spatial_index, lower_spacetime_index>>
      Sll(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Sll));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<upper_spacetime_index, upper_spatial_index>>
      Ruu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Ruu));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<upper_spatial_index, upper_spacetime_index>>
      Suu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Suu));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<lower_spacetime_index, upper_spatial_index>>
      Rlu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Rlu));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<lower_spatial_index, upper_spacetime_index>>
      Slu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Slu));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<upper_spacetime_index, lower_spatial_index>>
      Rul(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Rul));
  Tensor<DataType, Symmetry<2, 1>,
         index_list<upper_spatial_index, lower_spacetime_index>>
      Sul(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Sul));
  // Each case below fully contracts both index pairs, producing a scalar
  // \f$L = Rll_{ai} * Ruu^{ai}\f$
  const Tensor<DataType> L_aiAI_product =
      TensorExpressions::evaluate(Rll(ti_a, ti_i) * Ruu(ti_A, ti_I));
  // \f$L = Rll_{ai} * Suu^{ia}\f$
  const Tensor<DataType> L_aiIA_product =
      TensorExpressions::evaluate(Rll(ti_a, ti_i) * Suu(ti_I, ti_A));
  // \f$L = Ruu^{ai} * Rll_{ai}\f$
  const Tensor<DataType> L_AIai_product =
      TensorExpressions::evaluate(Ruu(ti_A, ti_I) * Rll(ti_a, ti_i));
  // \f$L = Ruu^{ai} * Sll_{ia}\f$
  const Tensor<DataType> L_AIia_product =
      TensorExpressions::evaluate(Ruu(ti_A, ti_I) * Sll(ti_i, ti_a));
  // \f$L = Rlu_{a}{}^{i} * Rul^{a}{}_{i}\f$
  const Tensor<DataType> L_aIAi_product =
      TensorExpressions::evaluate(Rlu(ti_a, ti_I) * Rul(ti_A, ti_i));
  // \f$L = Rlu_{a}{}^{i} * Slu_{i}{}^{a}\f$
  const Tensor<DataType> L_aIiA_product =
      TensorExpressions::evaluate(Rlu(ti_a, ti_I) * Slu(ti_i, ti_A));
  // \f$L = Rul^{a}{}_{i} * Rlu_{a}{}^{i}\f$
  const Tensor<DataType> L_AiaI_product =
      TensorExpressions::evaluate(Rul(ti_A, ti_i) * Rlu(ti_a, ti_I));
  // \f$L = Rul^{a}{}_{i} * Sul^{i}{}_{a}\f$
  const Tensor<DataType> L_AiIa_product =
      TensorExpressions::evaluate(Rul(ti_A, ti_i) * Sul(ti_I, ti_a));
  DataType L_aiAI_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_aiIA_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_AIai_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_AIia_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_aIAi_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_aIiA_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_AiaI_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  DataType L_AiIa_expected_product =
      make_with_value<DataType>(used_for_size, 0.0);
  // Compute the expected scalars by contracting both index pairs by hand
  for (size_t a = 0; a < lower_spacetime_index::dim; a++) {
    for (size_t i = 0; i < lower_spatial_index::dim; i++) {
      L_aiAI_expected_product += (Rll.get(a, i) * Ruu.get(a, i));
      L_aiIA_expected_product += (Rll.get(a, i) * Suu.get(i, a));
      L_AIai_expected_product += (Ruu.get(a, i) * Rll.get(a, i));
      L_AIia_expected_product += (Ruu.get(a, i) * Sll.get(i, a));
      L_aIAi_expected_product += (Rlu.get(a, i) * Rul.get(a, i));
      L_aIiA_expected_product += (Rlu.get(a, i) * Slu.get(i, a));
      L_AiaI_expected_product += (Rul.get(a, i) * Rlu.get(a, i));
      L_AiIa_expected_product += (Rul.get(a, i) * Sul.get(i, a));
    }
  }
  CHECK(L_aiAI_product.get() == L_aiAI_expected_product);
  CHECK(L_aiIA_product.get() == L_aiIA_expected_product);
  CHECK(L_AIai_product.get() == L_AIai_expected_product);
  CHECK(L_AIia_product.get() == L_AIia_expected_product);
  CHECK(L_aIAi_product.get() == L_aIAi_expected_product);
  CHECK(L_aIiA_product.get() == L_aIiA_expected_product);
  CHECK(L_AiaI_product.get() == L_AiaI_expected_product);
  CHECK(L_AiIa_product.get() == L_AiIa_expected_product);
}
// \brief Test the product of two tensors with one pair of indices to contract
// is correctly evaluated
//
// \details
// The product cases tested are:
// - \f$L_{b} = R_{ab} * T^{a}\f$
// - \f$L_{ac} = R_{ab} * S^{b}_{c}\f$
//
// All cases in this test contract one pair of indices of the two tensor
// operands. Each case is a permutation of the position of the contracted pair
// and the ordering of the LHS indices.
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_two_term_inner_outer_product(const DataType& used_for_size) noexcept {
  using R_index = SpacetimeIndex<3, UpLo::Lo, Frame::Grid>;
  using T_index = SpacetimeIndex<3, UpLo::Up, Frame::Grid>;
  Tensor<DataType, Symmetry<1, 1>, index_list<R_index, R_index>> Rll(
      used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Rll));
  Tensor<DataType, Symmetry<1>, index_list<T_index>> Tu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Tu));
  // \f$L_{b} = R_{ab} * T^{a}\f$
  // Use explicit type (vs auto) for LHS Tensor so the compiler checks the
  // return type of `evaluate`
  using Lb = Tensor<DataType, Symmetry<1>, index_list<R_index>>;
  const Lb Lb_from_Rab_TA =
      TensorExpressions::evaluate<ti_b>(Rll(ti_a, ti_b) * Tu(ti_A));
  // \f$L_{b} = R_{ba} * T^{a}\f$
  const Lb Lb_from_Rba_TA =
      TensorExpressions::evaluate<ti_b>(Rll(ti_b, ti_a) * Tu(ti_A));
  // \f$L_{b} = T^{a} * R_{ab}\f$
  const Lb Lb_from_TA_Rab =
      TensorExpressions::evaluate<ti_b>(Tu(ti_A) * Rll(ti_a, ti_b));
  // \f$L_{b} = T^{a} * R_{ba}\f$
  const Lb Lb_from_TA_Rba =
      TensorExpressions::evaluate<ti_b>(Tu(ti_A) * Rll(ti_b, ti_a));
  // Since Rll is symmetric (Symmetry<1, 1>), all four orderings above must
  // agree with the same hand-computed contraction
  for (size_t b = 0; b < R_index::dim; b++) {
    DataType expected_product = make_with_value<DataType>(used_for_size, 0.0);
    for (size_t a = 0; a < T_index::dim; a++) {
      expected_product += (Rll.get(a, b) * Tu.get(a));
    }
    CHECK(Lb_from_Rab_TA.get(b) == expected_product);
    CHECK(Lb_from_Rba_TA.get(b) == expected_product);
    CHECK(Lb_from_TA_Rab.get(b) == expected_product);
    CHECK(Lb_from_TA_Rba.get(b) == expected_product);
  }
  // Second case: contract one pair and leave one outer index on each operand.
  // Note S's two indices have different dimensions (2 vs 3).
  using S_lower_index = SpacetimeIndex<2, UpLo::Lo, Frame::Grid>;
  using S_upper_index = SpacetimeIndex<3, UpLo::Up, Frame::Grid>;
  Tensor<DataType, Symmetry<2, 1>, index_list<S_upper_index, S_lower_index>>
      Sul(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Sul));
  Tensor<DataType, Symmetry<2, 1>, index_list<S_lower_index, S_upper_index>>
      Slu(used_for_size);
  assign_unique_values_to_tensor(make_not_null(&Slu));
  // \f$L_{ac} = R_{ab} * S^{b}_{c}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<R_index, S_lower_index>>
      L_abBc_to_ac = TensorExpressions::evaluate<ti_a, ti_c>(Rll(ti_a, ti_b) *
                                                             Sul(ti_B, ti_c));
  // \f$L_{ca} = R_{ab} * S^{b}_{c}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<S_lower_index, R_index>>
      L_abBc_to_ca = TensorExpressions::evaluate<ti_c, ti_a>(Rll(ti_a, ti_b) *
                                                             Sul(ti_B, ti_c));
  // \f$L_{ac} = R_{ab} * S_{c}^{b}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<R_index, S_lower_index>>
      L_abcB_to_ac = TensorExpressions::evaluate<ti_a, ti_c>(Rll(ti_a, ti_b) *
                                                             Slu(ti_c, ti_B));
  // \f$L_{ca} = R_{ab} * S_{c}^{b}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<S_lower_index, R_index>>
      L_abcB_to_ca = TensorExpressions::evaluate<ti_c, ti_a>(Rll(ti_a, ti_b) *
                                                             Slu(ti_c, ti_B));
  // \f$L_{ac} = R_{ba} * S^{b}_{c}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<R_index, S_lower_index>>
      L_baBc_to_ac = TensorExpressions::evaluate<ti_a, ti_c>(Rll(ti_b, ti_a) *
                                                             Sul(ti_B, ti_c));
  // \f$L_{ca} = R_{ba} * S^{b}_{c}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<S_lower_index, R_index>>
      L_baBc_to_ca = TensorExpressions::evaluate<ti_c, ti_a>(Rll(ti_b, ti_a) *
                                                             Sul(ti_B, ti_c));
  // \f$L_{ac} = R_{ba} * S_{c}^{b}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<R_index, S_lower_index>>
      L_bacB_to_ac = TensorExpressions::evaluate<ti_a, ti_c>(Rll(ti_b, ti_a) *
                                                             Slu(ti_c, ti_B));
  // \f$L_{ca} = R_{ba} * S_{c}^{b}\f$
  const Tensor<DataType, Symmetry<2, 1>, index_list<S_lower_index, R_index>>
      L_bacB_to_ca = TensorExpressions::evaluate<ti_c, ti_a>(Rll(ti_b, ti_a) *
                                                             Slu(ti_c, ti_B));
  for (size_t a = 0; a < R_index::dim; a++) {
    for (size_t c = 0; c < S_lower_index::dim; c++) {
      DataType L_abBc_expected_product =
          make_with_value<DataType>(used_for_size, 0.0);
      DataType L_abcB_expected_product =
          make_with_value<DataType>(used_for_size, 0.0);
      DataType L_baBc_expected_product =
          make_with_value<DataType>(used_for_size, 0.0);
      DataType L_bacB_expected_product =
          make_with_value<DataType>(used_for_size, 0.0);
      // the contracted index b runs over the 4 spacetime dimensions
      for (size_t b = 0; b < 4; b++) {
        L_abBc_expected_product += (Rll.get(a, b) * Sul.get(b, c));
        L_abcB_expected_product += (Rll.get(a, b) * Slu.get(c, b));
        L_baBc_expected_product += (Rll.get(b, a) * Sul.get(b, c));
        L_bacB_expected_product += (Rll.get(b, a) * Slu.get(c, b));
      }
      CHECK(L_abBc_to_ac.get(a, c) == L_abBc_expected_product);
      CHECK(L_abBc_to_ca.get(c, a) == L_abBc_expected_product);
      CHECK(L_abcB_to_ac.get(a, c) == L_abcB_expected_product);
      CHECK(L_abcB_to_ca.get(c, a) == L_abcB_expected_product);
      CHECK(L_baBc_to_ac.get(a, c) == L_baBc_expected_product);
      CHECK(L_baBc_to_ca.get(c, a) == L_baBc_expected_product);
      CHECK(L_bacB_to_ac.get(a, c) == L_bacB_expected_product);
      CHECK(L_bacB_to_ca.get(c, a) == L_bacB_expected_product);
    }
  }
}
// \brief Test the product of three tensors involving both inner and outer
// products of indices is correctly evaluated
//
// \details
// The product cases tested are:
// - \f$L_{i} = R^{j} * S_{j} * T_{i}\f$
// - \f$L_{i}{}^{k} = S_{j} * T_{i} * G^{jk}\f$
//
// For each case, multiple operand orderings are tested. For the second case,
// both LHS index orderings are also tested.
//
// \tparam DataType the type of data being stored in the product operands
template <typename DataType>
void test_three_term_inner_outer_product(
const DataType& used_for_size) noexcept {
Tensor<DataType, Symmetry<1>,
index_list<SpatialIndex<3, UpLo::Up, Frame::Inertial>>>
Ru(used_for_size);
assign_unique_values_to_tensor(make_not_null(&Ru));
Tensor<DataType, Symmetry<1>,
index_list<SpatialIndex<3, UpLo::Lo, Frame::Inertial>>>
Sl(used_for_size);
assign_unique_values_to_tensor(make_not_null(&Sl));
Tensor<DataType, Symmetry<1>,
index_list<SpatialIndex<3, UpLo::Lo, Frame::Inertial>>>
Tl(used_for_size);
assign_unique_values_to_tensor(make_not_null(&Tl));
// \f$L_{i} = R^{j} * S_{j} * T_{i}\f$
const decltype(Tl) Li_from_Jji =
TensorExpressions::evaluate<ti_i>(Ru(ti_J) * Sl(ti_j) * Tl(ti_i));
// \f$L_{i} = R^{j} * T_{i} * S_{j}\f$
const decltype(Tl) Li_from_Jij =
TensorExpressions::evaluate<ti_i>(Ru(ti_J) * Tl(ti_i) * Sl(ti_j));
// \f$L_{i} = T_{i} * S_{j} * R^{j}\f$
const decltype(Tl) Li_from_ijJ =
TensorExpressions::evaluate<ti_i>(Tl(ti_i) * Sl(ti_j) * Ru(ti_J));
for (size_t i = 0; i < 3; i++) {
DataType expected_product = make_with_value<DataType>(used_for_size, 0.0);
for (size_t j = 0; j < 3; j++) {
expected_product += (Ru.get(j) * Sl.get(j) * Tl.get(i));
}
CHECK(Li_from_Jji.get(i) == expected_product);
CHECK(Li_from_Jij.get(i) == expected_product);
CHECK(Li_from_ijJ.get(i) == expected_product);
}
using T_index = tmpl::front<typename decltype(Tl)::index_list>;
using G_index = SpatialIndex<3, UpLo::Up, Frame::Inertial>;
Tensor<DataType, Symmetry<2, 1>, index_list<G_index, G_index>> Guu(
used_for_size);
assign_unique_values_to_tensor(make_not_null(&Guu));
// \f$L_{i}{}^{k} = S_{j} * T_{i} * G^{jk}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<T_index, G_index>>
LiK_from_Sj_Ti_GJK = TensorExpressions::evaluate<ti_i, ti_K>(
Sl(ti_j) * Tl(ti_i) * Guu(ti_J, ti_K));
// \f$L^{k}{}_{i} = S_{j} * T_{i} * G^{jk}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<G_index, T_index>>
LKi_from_Sj_Ti_GJK = TensorExpressions::evaluate<ti_K, ti_i>(
Sl(ti_j) * Tl(ti_i) * Guu(ti_J, ti_K));
// \f$L_{i}{}^{k} = S_{j} * G^{jk} * T_{i}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<T_index, G_index>>
LiK_from_Sj_GJK_Ti = TensorExpressions::evaluate<ti_i, ti_K>(
Sl(ti_j) * Guu(ti_J, ti_K) * Tl(ti_i));
// \f$L^{k}{}_{i} = S_{j} * G^{jk} * T_{i}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<G_index, T_index>>
LKi_from_Sj_GJK_Ti = TensorExpressions::evaluate<ti_K, ti_i>(
Sl(ti_j) * Guu(ti_J, ti_K) * Tl(ti_i));
// \f$L_{i}{}^{k} = T_{i} * S_{j} * G^{jk}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<T_index, G_index>>
LiK_from_Ti_Sj_GJK = TensorExpressions::evaluate<ti_i, ti_K>(
Tl(ti_i) * Sl(ti_j) * Guu(ti_J, ti_K));
// \f$L^{k}{}_{i} = T_{i} * S_{j} * G^{jk}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<G_index, T_index>>
LKi_from_Ti_Sj_GJK = TensorExpressions::evaluate<ti_K, ti_i>(
Tl(ti_i) * Sl(ti_j) * Guu(ti_J, ti_K));
// \f$L_{i}{}^{k} = T_{i} * G^{jk} * S_{j}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<T_index, G_index>>
LiK_from_Ti_GJK_Sj = TensorExpressions::evaluate<ti_i, ti_K>(
Tl(ti_i) * Guu(ti_J, ti_K) * Sl(ti_j));
// \f$L^{k}{}_{i} = T_{i} * G^{jk} * S_{j}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<G_index, T_index>>
LKi_from_Ti_GJK_Sj = TensorExpressions::evaluate<ti_K, ti_i>(
Tl(ti_i) * Guu(ti_J, ti_K) * Sl(ti_j));
// \f$L_{i}{}^{k} = G^{jk} * S_{j} * T_{i}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<T_index, G_index>>
LiK_from_GJK_Sj_Ti = TensorExpressions::evaluate<ti_i, ti_K>(
Guu(ti_J, ti_K) * Sl(ti_j) * Tl(ti_i));
// \f$L^{k}{}_{i} = G^{jk} * S_{j} * T_{i}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<G_index, T_index>>
LKi_from_GJK_Sj_Ti = TensorExpressions::evaluate<ti_K, ti_i>(
Guu(ti_J, ti_K) * Sl(ti_j) * Tl(ti_i));
// \f$L_{i}{}^{k} = G^{jk} * T_{i} * S_{j}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<T_index, G_index>>
LiK_from_GJK_Ti_Sj = TensorExpressions::evaluate<ti_i, ti_K>(
Guu(ti_J, ti_K) * Tl(ti_i) * Sl(ti_j));
// \f$L^{k}{}_{i} = G^{jk} * T_{i} * S_{j}\f$
const Tensor<DataType, Symmetry<2, 1>, index_list<G_index, T_index>>
LKi_from_GJK_Ti_Sj = TensorExpressions::evaluate<ti_K, ti_i>(
Guu(ti_J, ti_K) * Tl(ti_i) * Sl(ti_j));
for (size_t k = 0; k < G_index::dim; k++) {
for (size_t i = 0; i < T_index::dim; i++) {
DataType expected_product =
make_with_value<DataType>(used_for_size, 0.0);
for (size_t j = 0; j < G_index::dim; j++) {
expected_product += (Sl.get(j) * Tl.get(i) * Guu.get(j, k));
}
CHECK(LiK_from_Sj_Ti_GJK.get(i, k) == expected_product);
CHECK(LKi_from_Sj_Ti_GJK.get(k, i) == expected_product);
CHECK(LiK_from_Sj_GJK_Ti.get(i, k) == expected_product);
CHECK(LKi_from_Sj_GJK_Ti.get(k, i) == expected_product);
CHECK(LiK_from_Ti_Sj_GJK.get(i, k) == expected_product);
CHECK(LKi_from_Ti_Sj_GJK.get(k, i) == expected_product);
CHECK(LiK_from_Ti_GJK_Sj.get(i, k) == expected_product);
CHECK(LKi_from_Ti_GJK_Sj.get(k, i) == expected_product);
CHECK(LiK_from_GJK_Sj_Ti.get(i, k) == expected_product);
CHECK(LKi_from_GJK_Sj_Ti.get(k, i) == expected_product);
CHECK(LiK_from_GJK_Ti_Sj.get(i, k) == expected_product);
CHECK(LKi_from_GJK_Ti_Sj.get(k, i) == expected_product);
}
}
}
// Runs the full suite of tensor-expression product tests for one data type.
//
// \tparam DataType the component type of the tensors under test
//     (e.g. `double` or `DataVector`)
// \param used_for_size value used only to size/allocate the tensors created
//     inside each sub-test
template <typename DataType>
void test_products(const DataType& used_for_size) noexcept {
  // Outer, inner, and mixed inner/outer products of operands of increasing
  // rank; each helper CHECKs against an explicitly computed expected value.
  test_outer_product_quotient_double(used_for_size);
  test_outer_product_rank_0_operand(used_for_size);
  test_outer_product_rank_1_operand(used_for_size);
  test_outer_product_rank_2x2_operands(used_for_size);
  test_outer_product_rank_0x1x2_operands(used_for_size);
  test_inner_product_rank_1x1_operands(used_for_size);
  test_inner_product_rank_2x2_operands(used_for_size);
  test_two_term_inner_outer_product(used_for_size);
  test_three_term_inner_outer_product(used_for_size);
}
} // namespace
// Entry point: exercise all product tests with a plain double and with a
// DataVector of size 5. The signaling_NaN arguments only provide type/size
// information to the helpers (presumably overwritten before any arithmetic).
SPECTRE_TEST_CASE("Unit.DataStructures.Tensor.Expression.Product",
                  "[DataStructures][Unit]") {
  test_products(std::numeric_limits<double>::signaling_NaN());
  test_products(DataVector(5, std::numeric_limits<double>::signaling_NaN()));
}
|
#include<bits/stdc++.h>
#include<conio.h>
using namespace std;
// --- Competitive-programming shorthand macros ---
// Fast iostream: detach from C stdio and untie cin from cout.
#define fastio ios_base::sync_with_stdio(false); cin.tie(NULL);
#define aa auto
#define cn const
#define ll long long
#define ld long double
// Pair/tuple/container shorthands.
#define fr first
#define sc second
#define pll pair<ll,ll>
#define tll tuple<ll,ll,ll>
#define vll vector<ll>
// 64-bit population count.
#define bp(x) __builtin_popcountll(x)
#define all(a) a.begin(),a.end()
#define arr(a) a.rbegin(),a.rend()
// Bit i of a (0 or 1).
#define cb(a,i) (a>>i&1)
#define mp(a,b) make_pair(a,b)
#define pb(a) push_back(a)
#define ve vector
// Debug print: db(x, y) writes "x=<val> ! y=<val>" to stderr, pairing the
// stringified argument names with the values via err() below.
#define db(args...) { string _s = #args; replace(_s.begin(), _s.end(), ',', ' '); stringstream _ss(_s); istream_iterator<string> _it(_ss); err(_it, args); cerr<<"\n";}
// Recursion terminator for the db() debug-print helper.
void err(istream_iterator<string> it) {}
// Prints "<name>=<value> ! " for each argument captured by db(); `it` walks
// the stringified argument names in lockstep with the argument values.
template<typename T, typename... Args>
void err(istream_iterator<string> it, T a, Args... args) {
    cerr << *it << "=" << a << " ! ";
    err(++it, args...);
}
// Common contest constants (N, N1, inf, eps, M are unused by this program).
cn ll N=5e5+7,N1=25,inf=2e18;
cn ld eps=1e-8;
cn ll M=1e9+7;
//cn ll M=998244353;
// The three Hanoi pegs: ma[p] holds the disk sizes on peg p, bottom first.
vll ma[3];
// Renders the current state of the three pegs to stdout, blocking on a
// keypress (conio.h getch, non-portable) before each frame so the animation
// can be stepped manually.
// NOTE(review): the row loop is hard-coded for 4 disks (i = 3..0); adjust it
// if hanoi() is ever called with a different n.
void prin(){
    cout<<"\n";
    ll i,j;
    // Wait for a key before drawing the next frame.
    getch();
    for(i=3; i>=0; i--){
        for(j=0; j<3; j++){
            // '.' for an empty slot, otherwise the disk size at height i.
            if(ma[j].size()<=i) cout<<'.';
            else cout<<ma[j][i];
            cout<<" ";
        }
        cout<<"\n";
    }
    cout<<"\n";
}
// Classic Tower of Hanoi: moves n disks from peg f to peg t using peg a as
// the auxiliary, printing the board after every single-disk move.
void hanoi(ll n=4,ll f=0,ll a=1,ll t=2){
    if(n){
        // Park the n-1 smaller disks on the auxiliary peg.
        hanoi(n-1,f,t,a);
        // Move disk n from source to target and render the board.
        ma[f].pop_back();
        ma[t].push_back(n);
        prin();
        // Bring the smaller disks from the auxiliary onto the target.
        hanoi(n-1,a,f,t);
    }
}
// Seeds peg 0 with disks 4..1 (largest at the bottom), shows the initial
// board, then runs the animation.
int main(){
    ma[0].push_back(4);
    ma[0].push_back(3);
    ma[0].push_back(2);
    ma[0].push_back(1);
    prin();
    hanoi();
    return 0;
}
|
/*
* Copyright 2010 Mario Zechner (contact@badlogicgames.com), Nathan Sweet (admin@esotericsoftware.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
#include "Box2D.h"
#include "RevoluteJoint.h"
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniGetJointAngle
 * Signature: (J)F
 */
JNIEXPORT jfloat JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniGetJointAngle
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->GetJointAngle();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniGetJointSpeed
 * Signature: (J)F
 */
JNIEXPORT jfloat JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniGetJointSpeed
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->GetJointSpeed();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniIsLimitEnabled
 * Signature: (J)Z
 */
JNIEXPORT jboolean JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniIsLimitEnabled
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->IsLimitEnabled();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniEnableLimit
 * Signature: (JZ)V
 */
JNIEXPORT void JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniEnableLimit
  (JNIEnv *, jobject, jlong addr, jboolean flag)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	reinterpret_cast<b2RevoluteJoint*>(addr)->EnableLimit(flag);
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniGetLowerLimit
 * Signature: (J)F
 */
JNIEXPORT jfloat JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniGetLowerLimit
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->GetLowerLimit();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniGetUpperLimit
 * Signature: (J)F
 */
JNIEXPORT jfloat JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniGetUpperLimit
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->GetUpperLimit();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniSetLimits
 * Signature: (JFF)V
 */
JNIEXPORT void JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniSetLimits
  (JNIEnv *, jobject, jlong addr, jfloat lower, jfloat upper)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	reinterpret_cast<b2RevoluteJoint*>(addr)->SetLimits(lower, upper);
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniIsMotorEnabled
 * Signature: (J)Z
 */
JNIEXPORT jboolean JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniIsMotorEnabled
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->IsMotorEnabled();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniEnableMotor
 * Signature: (JZ)V
 */
JNIEXPORT void JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniEnableMotor
  (JNIEnv *, jobject, jlong addr, jboolean flag)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	reinterpret_cast<b2RevoluteJoint*>(addr)->EnableMotor(flag);
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniSetMotorSpeed
 * Signature: (JF)V
 */
JNIEXPORT void JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniSetMotorSpeed
  (JNIEnv *, jobject, jlong addr, jfloat speed)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	reinterpret_cast<b2RevoluteJoint*>(addr)->SetMotorSpeed(speed);
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniGetMotorSpeed
 * Signature: (J)F
 */
JNIEXPORT jfloat JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniGetMotorSpeed
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->GetMotorSpeed();
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniSetMaxMotorTorque
 * Signature: (JF)V
 */
JNIEXPORT void JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniSetMaxMotorTorque
  (JNIEnv *, jobject, jlong addr, jfloat torque)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	reinterpret_cast<b2RevoluteJoint*>(addr)->SetMaxMotorTorque(torque);
}
/*
 * Class:     com_badlogic_gdx_physics_box2d_joints_RevoluteJoint
 * Method:    jniGetMotorTorque
 * Signature: (J)F
 */
JNIEXPORT jfloat JNICALL Java_com_badlogic_gdx_physics_box2d_joints_RevoluteJoint_jniGetMotorTorque
  (JNIEnv *, jobject, jlong addr)
{
	// addr carries the native b2RevoluteJoint pointer handed out to Java.
	return reinterpret_cast<b2RevoluteJoint*>(addr)->GetMotorTorque();
}
|
/*
* Copyright 2019-2021 Diligent Graphics LLC
* Copyright 2015-2019 Egor Yusov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* In no event and under no legal theory, whether in tort (including negligence),
* contract, or otherwise, unless required by applicable law (such as deliberate
* and grossly negligent acts) or agreed to in writing, shall any Contributor be
* liable for any damages, including any direct, indirect, special, incidental,
* or consequential damages of any character arising as a result of this License or
* out of the use or inability to use the software (including but not limited to damages
* for loss of goodwill, work stoppage, computer failure or malfunction, or any and
* all other commercial damages or losses), even if such Contributor has been advised
* of the possibility of such damages.
*/
#pragma once
#include <array>
#include "TestingEnvironment.hpp"
#include "DXCompiler.hpp"
#define VK_NO_PROTOTYPES
#include "vulkan/vulkan.h"
namespace Diligent
{
namespace Testing
{
// Vulkan implementation of the test environment. Holds the VkDevice,
// physical device, command pool and fence used by the tests (see the private
// members) and provides helpers for creating Vulkan resources and for
// allocating/submitting command buffers.
class TestingEnvironmentVk final : public TestingEnvironment
{
public:
    using CreateInfo = TestingEnvironment::CreateInfo;
    TestingEnvironmentVk(const CreateInfo& CI,
                         const SwapChainDesc& SCDesc);
    ~TestingEnvironmentVk();
    // Typed access to the process-wide environment singleton.
    static TestingEnvironmentVk* GetInstance() { return ValidatedCast<TestingEnvironmentVk>(TestingEnvironment::GetInstance()); }
    // Creates a 2D image and binds newly allocated device memory to it;
    // results are returned through vkMemory and vkImage.
    void CreateImage2D(uint32_t Width,
                       uint32_t Height,
                       VkFormat vkFormat,
                       VkImageUsageFlags vkUsage,
                       VkImageLayout vkInitialLayout,
                       VkDeviceMemory& vkMemory,
                       VkImage& vkImage);
    // Creates a buffer and binds newly allocated memory with the requested
    // properties; results are returned through vkMemory and vkBuffer.
    void CreateBuffer(VkDeviceSize Size,
                      VkBufferUsageFlags vkUsage,
                      VkMemoryPropertyFlags MemoryFlags,
                      VkDeviceMemory& vkMemory,
                      VkBuffer& vkBuffer);
    // Finds a memory-type index satisfying both the requirement bit mask and
    // the requested property flags.
    uint32_t GetMemoryTypeIndex(uint32_t memoryTypeBitsRequirement,
                                VkMemoryPropertyFlags requiredProperties) const;
    VkDevice GetVkDevice()
    {
        return m_vkDevice;
    }
    VkPhysicalDevice GetVkPhysicalDevice()
    {
        return m_vkPhysicalDevice;
    }
    // True when the DXC shader compiler library was loaded successfully.
    virtual bool HasDXCompiler() const override final
    {
        return m_pDxCompiler != nullptr && m_pDxCompiler->IsLoaded();
    }
    // Queries the DXC version; outputs are left untouched when DXC is absent.
    virtual void GetDXCompilerVersion(Uint32& MajorVersion, Uint32& MinorVersion) const override final
    {
        if (m_pDxCompiler != nullptr)
            m_pDxCompiler->GetVersion(MajorVersion, MinorVersion);
    }
    virtual bool SupportsRayTracing() const override final;
    // Compiles ShaderSource for the given shader stage into a VkShaderModule.
    VkShaderModule CreateShaderModule(const SHADER_TYPE ShaderType, const std::string& ShaderSource);
    // Fills Attachments/AttachmentReferences/SubpassDesc and returns a
    // VkRenderPassCreateInfo describing a single-subpass render pass with
    // NumRenderTargets color attachments plus an optional depth attachment.
    static VkRenderPassCreateInfo GetRenderPassCreateInfo(
        Uint32 NumRenderTargets,
        const VkFormat RTVFormats[],
        VkFormat DSVFormat,
        Uint32 SampleCount,
        VkAttachmentLoadOp DepthAttachmentLoadOp,
        VkAttachmentLoadOp ColorAttachmentLoadOp,
        std::array<VkAttachmentDescription, MAX_RENDER_TARGETS + 1>& Attachments,
        std::array<VkAttachmentReference, MAX_RENDER_TARGETS + 1>& AttachmentReferences,
        VkSubpassDescription& SubpassDesc);
    // Allocates a command buffer (from the environment's command pool).
    VkCommandBuffer AllocateCommandBuffer();
    // Submits the command buffer; by default blocks until execution finishes.
    void SubmitCommandBuffer(VkCommandBuffer vkCmdBuffer, bool WaitForIdle = true);
    // Records an image layout-transition barrier. CurrentLayout is taken by
    // reference so the implementation can update it to NewLayout.
    static void TransitionImageLayout(VkCommandBuffer CmdBuffer,
                                      VkImage Image,
                                      VkImageLayout& CurrentLayout,
                                      VkImageLayout NewLayout,
                                      const VkImageSubresourceRange& SubresRange,
                                      VkPipelineStageFlags EnabledGraphicsShaderStages,
                                      VkPipelineStageFlags SrcStages = 0,
                                      VkPipelineStageFlags DestStages = 0);
private:
    VkDevice m_vkDevice = VK_NULL_HANDLE;
    VkPhysicalDevice m_vkPhysicalDevice = VK_NULL_HANDLE;
    VkCommandPool m_vkCmdPool = VK_NULL_HANDLE;
    VkFence m_vkFence = VK_NULL_HANDLE;
    std::unique_ptr<IDXCompiler> m_pDxCompiler;
    VkPhysicalDeviceMemoryProperties m_MemoryProperties = {};
public:
    // Device feature/property structs, filled in by the implementation and
    // exposed publicly so tests can inspect them directly.
    VkPhysicalDeviceDescriptorIndexingFeaturesEXT DescriptorIndexing = {};
    VkPhysicalDeviceProperties DeviceProps = {};
};
} // namespace Testing
} // namespace Diligent
|
/***************************************************************************
* This file is part of the CuteReport project *
* Copyright (C) 2012-2015 by Alexander Mikhalov *
* alexander.mikhalov@gmail.com *
* *
** GNU General Public License Usage **
* *
* This library is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
***************************************************************************/
#include "pageeditorcontainer.h"
#include "ui_pageeditorcontainer.h"
#include "pageinterface.h"
#include "reportcore.h"
#include "pageeditor.h"
// Builds the page-editor container widget: loads the Designer-generated UI
// and hides the page tab bar until pages are actually added.
PageEditorContainer::PageEditorContainer(PageEditor * pageEditor, QWidget *parent) :
    QWidget(parent),
    ui(new Ui::PageEditorContainer),
    m_pageEditor(pageEditor)
{
    ui->setupUi(this);
    ui->pageTabs->hide();
}
// Releases the Designer-generated UI object (it owns the child widgets).
PageEditorContainer::~PageEditorContainer()
{
    delete ui;
}
// Wires the toolbar buttons and the tab bar to their handlers. Kept separate
// from the constructor so m_pageEditor is fully set up before signals fire.
void PageEditorContainer::init()
{
    connect(ui->deletePageButton, SIGNAL(clicked()), this, SLOT(slotDeleteClicked()));
    connect(ui->addPageButton, SIGNAL(clicked()), this, SLOT(slotCreateClicked()));
    connect(ui->clonePageButton, SIGNAL(clicked()), this, SLOT(slotCloneClicked()));
    // Page reordering is handled directly by the PageEditor.
    connect(ui->pageMoveFront, SIGNAL(clicked()), m_pageEditor, SLOT(slotPageMoveFront()));
    connect(ui->pageMoveBack, SIGNAL(clicked()), m_pageEditor, SLOT(slotPageMoveBack()));
    connect(ui->pageTabs, SIGNAL(CurrentChanged(int)), this, SLOT(slotCurrentTabChanged(int)));
    connect(ui->pageTabs, SIGNAL(tabDoubleClicked(int)), this, SLOT(slotTabDoubleClicked(int)));
    // Point the tools palette at the report core's image directory.
    ui->tools->setImagesPath(m_pageEditor->core()->reportCore()->imagesPath());
}
// Persists splitter geometry and the tab-bar display mode so they can be
// restored by reloadSettings() on the next run.
void PageEditorContainer::saveSettings()
{
    m_pageEditor->core()->setSettingValue("PageEditor/splitterState", ui->splitter->saveState());
    m_pageEditor->core()->setSettingValue("PageEditor/splitter2State", ui->splitter2->saveState());
    m_pageEditor->core()->setSettingValue("PageEditor/tabMode", ui->pageTabs->mode());
}
// Enables/disables the page-manipulation controls to match the current tab
// count: delete/clone/actions/tools need at least one page, while the
// reordering buttons need at least two.
void PageEditorContainer::updateButtonsStatus()
{
    const int pageCount = ui->pageTabs->tabsCount();
    const bool hasPages = pageCount > 0;
    const bool canReorder = pageCount > 1;
    ui->deletePageButton->setEnabled(hasPages);
    ui->clonePageButton->setEnabled(hasPages);
    ui->pageMoveFront->setEnabled(canReorder);
    ui->pageMoveBack->setEnabled(canReorder);
    ui->actions->setEnabled(hasPages);
    ui->tools->setEnabled(hasPages);
}
// Accessor for the font editor panel hosted by this container.
FontEditor *PageEditorContainer::fontEditor()
{
    return ui->fontEditor;
}
// Accessor for the alignment editor panel hosted by this container.
AlignmentEditor *PageEditorContainer::alignmentEditor()
{
    return ui->alignmentEditor;
}
// Accessor for the frame editor panel hosted by this container.
FrameEditor *PageEditorContainer::frameEditor()
{
    return ui->frameEditor;
}
// Restores splitter geometry and the tab-bar mode from persistent settings,
// falling back to proportional defaults on first run.
void PageEditorContainer::reloadSettings()
{
    QVariant value;
    // No saved state yet: split roughly 80/20 (int truncation of width()*x).
    if ((value = m_pageEditor->core()->getSettingValue("PageEditor/splitterState")).isNull())
        ui->splitter->setSizes( QList<int>() << width()*0.8 << width()*0.2 );
    else
        ui->splitter->restoreState(value.toByteArray());
    // Secondary splitter defaults to a 40/60 split.
    if ((value = m_pageEditor->core()->getSettingValue("PageEditor/splitter2State")).isNull())
        ui->splitter2->setSizes( QList<int>() << width()*0.4 << width()*0.6 );
    else
        ui->splitter2->restoreState(value.toByteArray());
    // Tab-bar mode: stored as an int, defaulting to the large sidebar.
    FancyTabWidget::Mode default_mode = FancyTabWidget::Mode_LargeSidebar;
    ui->pageTabs->SetMode(FancyTabWidget::Mode(m_pageEditor->core()->getSettingValue("PageEditor/tabMode", default_mode).toInt()));
}
// Embeds the property-editor widget into its reserved layout slot.
void PageEditorContainer::addPropertyEditor(QWidget * widget)
{
    widget->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
    ui->PropertyEditorLayout->addWidget(widget);
}
// Embeds the object-inspector widget into its reserved layout slot.
void PageEditorContainer::addObjectInspector(QWidget * widget)
{
    widget->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
    ui->ObjectInspectorLayout->addWidget(widget);
}
// Adds a new page tab hosting the given widget, then refreshes the tab bar
// and the enabled state of the page-manipulation buttons.
void PageEditorContainer::addTab(QWidget * widget, QIcon icon, const QString &name)
{
    widget->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
    ui->pageTabs->AddTab(widget, icon, name);
    ui->pageTabs->updateState();
    updateButtonsStatus();
}
// Removes the page tab identified by its label, then refreshes the tab bar
// and the button states.
void PageEditorContainer::removeTab(const QString & name)
{
    ui->pageTabs->deleteTab(name);
    ui->pageTabs->updateState();
    updateButtonsStatus();
}
// Removes every page tab (e.g. when a report is closed), then refreshes the
// tab bar and the button states.
void PageEditorContainer::removeAllTabs()
{
    ui->pageTabs->deleteAllTabs();
    ui->pageTabs->updateState();
    updateButtonsStatus();
}
// Selects the tab whose label matches pageName. Signals are blocked while
// switching so the change does not re-trigger currentTabChanged handlers.
void PageEditorContainer::setCurrentTab(const QString &pageName)
{
    ui->pageTabs->blockSignals(true);
    const int tabCount = ui->pageTabs->tabsCount();
    for (int index = 0; index < tabCount; ++index) {
        if (ui->pageTabs->tabText(index) != pageName)
            continue;
        ui->pageTabs->SetCurrentIndex(index);
        break;
    }
    ui->pageTabs->blockSignals(false);
}
void PageEditorContainer::setNewPageName(const QString &pageName, const QString &newName)
{
for (int i=0; i<ui->pageTabs->tabsCount(); ++i)
if (ui->pageTabs->tabText(i) == pageName) {
ui->pageTabs->changeText(newName, i );
break;
}
}
// Configures the "add page" button for the available page plugins:
//  - several plugins: attach a drop-down menu, one action per plugin, with
//    the plugin's full module name stored in the action's data;
//  - exactly one plugin: remember its module name so slotCreateClicked()
//    can create it directly (m_pageeModuleName: spelling matches the header);
//  - no plugins: the button is left unconfigured.
void PageEditorContainer::addPagePlugins(QList<CuteReport::ReportPluginInterface*> pages)
{
    if (pages.count() > 1) {
        // Replace any previously installed menu.
        delete ui->addPageButton->menu();
        Menu * menu = new Menu( this );
        foreach(CuteReport::ReportPluginInterface * plugin, pages) {
            CuteReport::PageInterface * page = static_cast<CuteReport::PageInterface *>(plugin);
            QString actionName = QString("%1 (%2)").arg(page->moduleShortName(), page->suitName());
            QAction * newItem = new QAction(page->icon(), actionName, this );
            newItem->setData(page->moduleFullName());
            menu->addAction(newItem);
        }
        ui->addPageButton->setMenu(menu);
    } else if (pages.count() == 1){
        m_pageeModuleName = pages.at(0)->moduleFullName();
    }
}
// Requests deletion of the currently selected page; no-op with no pages.
void PageEditorContainer::slotDeleteClicked()
{
    if (ui->pageTabs->tabsCount() > 0)
        emit requestForDeletePage(ui->pageTabs->current_text());
}
// Requests creation of a new page using the single registered page module
// (when multiple modules exist, creation goes through the button's menu).
void PageEditorContainer::slotCreateClicked()
{
    emit requestForCreatePage(m_pageeModuleName);
}
// Requests cloning of the currently selected page; no-op with no pages.
void PageEditorContainer::slotCloneClicked()
{
    if (ui->pageTabs->tabsCount() > 0)
        emit requestForClonePage(ui->pageTabs->current_text());
}
// Re-emits tab changes with the page name instead of the tab index.
void PageEditorContainer::slotCurrentTabChanged(int)
{
    emit currentTabChanged(ui->pageTabs->current_text());
}
// Double-clicking a tab starts the rename flow for that page.
void PageEditorContainer::slotTabDoubleClicked(int index)
{
    emit requestForRenamePage(ui->pageTabs->tabText(index));
}
//void PageEditorContainer::slotNewPageActions(QList<CuteReport::PageAction*> actions)
//{
// ui->pageActionsToolBar->clear();
// ui->menuPage->clear();
// ui->menuPage->addActions(defaultPageActions);
// foreach (CuteReport::PageAction * pageAction, actions) {
// ui->pageActionsToolBar->addAction(pageAction->action);
// ui->menuPage->addAction(pageAction->action);
// }
//}
// Adds one entry to the tools palette (forwarded to the tools widget).
void PageEditorContainer::addItem(const QIcon &icon, const QString &name, const QString &suiteName, const QString &group)
{
    ui->tools->addItem(icon, name, suiteName, group);
}
// Replaces the page-action area's contents: removes every action currently
// installed, then installs the actions supplied by the active page.
void PageEditorContainer::setPageActions(QList<CuteReport::PageAction*> actions)
{
    const QList<QAction*> installed = ui->actions->actions();
    for (QAction * existing : installed)
        ui->actions->removeAction(existing);
    for (CuteReport::PageAction * pageAction : actions) {
        ui->actions->addAction(pageAction->action);
    }
}
|
// Time: O(n)
// Space: O(1)
class Solution {
public:
    // Computes the score of a balanced parentheses string:
    //   "()" scores 1, AB scores A + B, and (A) scores 2 * A.
    // Key fact: the total equals the sum of 1 << depth over every
    // innermost "()" pair, where depth is that pair's nesting depth.
    //
    // @param S balanced string containing only '(' and ')'
    // @return  the score as defined above (0 for an empty string)
    int scoreOfParentheses(string S) {
        int result = 0;
        int depth = 0;
        char prev = '\0';  // previous character; '\0' before the first one
        // size_t index avoids the signed/unsigned comparison of the old
        // `int i < S.length()` loop; tracking `prev` removes the S[i - 1]
        // read that was invalid at i == 0.
        for (size_t i = 0; i < S.length(); ++i) {
            if (S[i] == '(') {
                ++depth;
            } else {
                --depth;
                if (prev == '(') {       // an innermost "()" unit
                    result += 1 << depth;
                }
            }
            prev = S[i];
        }
        return result;
    }
};
// Time: O(n)
// Space: O(h), where h is the maximum nesting depth
class Solution2 {
public:
    // Stack-based variant: each stack slot accumulates the score of one
    // nesting level; closing a level folds max(1, 2 * inner) into its parent.
    int scoreOfParentheses(string S) {
        vector<int> scores;
        scores.push_back(0);  // sentinel slot for the outermost level
        for (size_t i = 0; i != S.size(); ++i) {
            if (S[i] == '(') {
                scores.push_back(0);  // open a new, empty level
            } else {
                int inner = scores.back();
                scores.pop_back();
                // An empty level is a plain "()" worth 1; otherwise double it.
                scores.back() += (inner > 0) ? 2 * inner : 1;
            }
        }
        return scores.front();
    }
};
|
#pragma once
#include "../../JObject.hpp"
class JFloatArray;
namespace android::os
{
class Handler;
}
namespace android::graphics
{
// QJniObject-backed wrapper for the Java class android.graphics.SurfaceTexture.
// Every member forwards the call across JNI to the underlying Java object;
// this class holds no state beyond the JObject handle.
class SurfaceTexture : public JObject
{
public:
    // Fields
    // QJniObject forward
    // Generic forwarding constructor: passes the JNI class name, method
    // signature and arguments straight through to the JObject base.
    template<typename ...Ts> explicit SurfaceTexture(const char *className, const char *sig, Ts...agv) : JObject(className, sig, std::forward<Ts>(agv)...) {}
    // Wraps an existing Java object reference.
    SurfaceTexture(QJniObject obj);
    // Constructors
    // Mirror the Java constructor overloads. NOTE(review): the jint argument
    // is presumably the OpenGL texture name -- confirm against the Android SDK.
    SurfaceTexture(jboolean arg0);
    SurfaceTexture(jint arg0);
    SurfaceTexture(jint arg0, jboolean arg1);
    // Methods
    // Thin JNI forwards of the corresponding SurfaceTexture Java methods.
    void attachToGLContext(jint arg0) const;
    void detachFromGLContext() const;
    jlong getTimestamp() const;
    void getTransformMatrix(JFloatArray arg0) const;
    jboolean isReleased() const;
    void release() const;
    void releaseTexImage() const;
    void setDefaultBufferSize(jint arg0, jint arg1) const;
    void setOnFrameAvailableListener(JObject arg0) const;
    void setOnFrameAvailableListener(JObject arg0, android::os::Handler arg1) const;
    void updateTexImage() const;
};
} // namespace android::graphics
|
/*
* This file is part of the CitizenFX project - http://citizen.re/
*
* See LICENSE and MENTIONS in the root of the source tree for information
* regarding licensing.
*/
#include "StdInc.h"
#include "fiDevice.h"
#include "CrossLibraryInterfaces.h"
namespace rage
{
// Thunks into the game's own fiDevice implementation at fixed code
// addresses: WRAPPER/EAXJMP jump straight into native code, so the C++
// bodies below never actually execute.
WRAPPER fiDevice* fiDevice::GetDevice(const char* path, bool allowRoot) { EAXJMP(0x5ABC80); }
WRAPPER void fiDevice::Unmount(const char* rootPath) { EAXJMP(0x5AC080); }
// Out-of-line destructor definition; intentionally empty.
rage::fiDevice::~fiDevice() {}
// Exported event other modules hook to perform their initial mounts.
__declspec(dllexport) fwEvent<> fiDevice::OnInitialMount;
}
|
#ifndef _VKDEVICEUTIL_HPP
#define _VKDEVICEUTIL_HPP
/*-------------------------------------------------------------------------
* Vulkan CTS Framework
* --------------------
*
* Copyright (c) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Instance and device initialization utilities.
*//*--------------------------------------------------------------------*/
#include "vkDefs.hpp"
#include "vkRef.hpp"
#include <vector>
#include <string>
namespace tcu
{
class CommandLine;
}
namespace vk
{
// Creates a VkInstance with no extra layers or extensions enabled.
Move<VkInstance> createDefaultInstance (const PlatformInterface& vkPlatform,
                                        deUint32 apiVersion);
// Creates a VkInstance with the given layers and extensions enabled;
// DE_NULL for pAllocator selects the default allocator.
Move<VkInstance> createDefaultInstance (const PlatformInterface& vkPlatform,
                                        deUint32 apiVersion,
                                        const std::vector<std::string>& enabledLayers,
                                        const std::vector<std::string>& enabledExtensions,
                                        const VkAllocationCallbacks* pAllocator = DE_NULL);
// Creates an instance with the listed extensions enabled.
Move<VkInstance> createInstanceWithExtensions (const PlatformInterface& vkp,
                                               const deUint32 version,
                                               const std::vector<std::string> requiredExtensions);
// Convenience overload for a single required extension.
Move<VkInstance> createInstanceWithExtension (const PlatformInterface& vkp,
                                              const deUint32 version,
                                              const std::string requiredExtension);
// Selects which physical device to use, based on the test command line.
deUint32 chooseDeviceIndex (const InstanceInterface& vkInstance,
                            const VkInstance instance,
                            const tcu::CommandLine& cmdLine);
// Returns the physical device chosen according to the test command line.
VkPhysicalDevice chooseDevice (const InstanceInterface& vkInstance,
                               const VkInstance instance,
                               const tcu::CommandLine& cmdLine);
} // vk
#endif // _VKDEVICEUTIL_HPP
|
//-----------------------------------------------------------------------------------------------------------------------------
// Copyright (c) 2019 Juho Lepistö
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
// CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------------------------------------------------------
//! @file UTest_Hal_Clocks.cpp
//! @author Juho Lepistö juho.lepisto(a)gmail.com
//! @date 25 Jul 2019
//!
//! @brief These are unit tests for Hal_Clocks.cpp
//!
//! These are unit tests for Hal_Clocks.cpp utilising Catch2 and FakeIt.
//-----------------------------------------------------------------------------------------------------------------------------
// 1. Include Files
//-----------------------------------------------------------------------------------------------------------------------------
#include <Catch_Utils.hpp>
#include <Hal_Clocks.hpp>
#include <Utils_Bit.hpp>
#include <stm32f429xx_mock.h>
#include <Utils_Assert_Mock.hpp>
#include <Hal_Internal_Mock.hpp>
#include <ASch_Configuration.hpp>
//-----------------------------------------------------------------------------------------------------------------------------
// 2. Test Structs and Variables
//-----------------------------------------------------------------------------------------------------------------------------
namespace
{
}
// Debug helper: decodes the PLLP/PLLN/PLLM/PLLQ divider fields out of
// RCC->PLLCFGR and logs them via Catch2 INFO for failure diagnostics.
#define PRINT_PLL() uint32_t pllp = (RCC->PLLCFGR & RCC_PLLCFGR_PLLP_Msk) >> RCC_PLLCFGR_PLLP_Pos; \
                    uint32_t plln = (RCC->PLLCFGR & RCC_PLLCFGR_PLLN_Msk) >> RCC_PLLCFGR_PLLN_Pos; \
                    uint32_t pllm = (RCC->PLLCFGR & RCC_PLLCFGR_PLLM_Msk) >> RCC_PLLCFGR_PLLM_Pos; \
                    uint32_t pllq = (RCC->PLLCFGR & RCC_PLLCFGR_PLLQ_Msk) >> RCC_PLLCFGR_PLLQ_Pos; \
                    INFO ("PLLCFGR: " << std::hex << RCC->PLLCFGR); \
                    INFO ("PLLP: " << pllp); \
                    INFO ("PLLN: " << plln); \
                    INFO ("PLLM: " << pllm); \
                    INFO ("PLLQ: " << pllq);
//-----------------------------------------------------------------------------------------------------------------------------
// 3. Test Cases
//-----------------------------------------------------------------------------------------------------------------------------
// Verifies Hal::Clocks::Enable() for every oscillator type:
//  - the correct enable bit is set and the matching ready bit is polled,
//  - oscillators that are already running are left untouched,
//  - a startup timeout is propagated as Hal::Error::timeout,
//  - enabling the secondary PLL directly is rejected with an assert.
SCENARIO ("Clocks are enabled", "[hal_system]")
{
    // Fresh register and mock state for every scenario run.
    Hal_Mock::InitRccRegisters();
    HalMock::InitInternal();
    ASchMock::Assert::Init();
    GIVEN ("HSI is disabled and clocks are functioning correctly")
    {
        // HSI is on by default, so disable it first.
        Utils::SetBit(RCC->CR, RCC_CR_HSIRDY_Pos, false);
        // This tells the HAL that the oscillator has started correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, false);
        WHEN ("the HSI clock is enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::highSpeed_internal);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("HSION bit in RCC CR register shall be set and HSIRDY bit shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CR, RCC_CR_HSION_Pos) == true);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToSet, RCC->CR, RCC_CR_HSIRDY_Pos);
                }
            }
        }
    }
    GIVEN ("HSE is disabled and clocks are functioning correctly")
    {
        // This tells the HAL that the oscillator has started correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, false);
        WHEN ("the HSE clock is enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::highSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("HSEON bit in RCC CR register shall be set and HSERDY bit shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CR, RCC_CR_HSEON_Pos) == true);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToSet, RCC->CR, RCC_CR_HSERDY_Pos);
                }
            }
        }
    }
    GIVEN ("LSI is disabled and clocks are functioning correctly")
    {
        // This tells the HAL that the oscillator has started correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, false);
        WHEN ("the LSI clock is enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::lowSpeed_internal);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("LSION bit in RCC CSR register shall be set and LSIRDY bit shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CSR, RCC_CSR_LSION_Pos) == true);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToSet, RCC->CSR, RCC_CSR_LSIRDY_Pos);
                }
            }
        }
    }
    GIVEN ("LSE is disabled and clocks are functioning correctly")
    {
        // This tells the HAL that the oscillator has started correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, false);
        WHEN ("the LSE clock is enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::lowSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                // Consistency fix: was THEN; all sibling sections use AND_THEN.
                AND_THEN ("LSEON bit in RCC BDCR register shall be set and LSERDY bit shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->BDCR, RCC_BDCR_LSEON_Pos) == true);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToSet, RCC->BDCR, RCC_BDCR_LSERDY_Pos);
                }
            }
        }
    }
    GIVEN ("PLL is disabled and clocks are functioning correctly")
    {
        // This tells the HAL that the oscillator has started correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, false);
        WHEN ("the PLL clock is enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::pll);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                // Copy-paste fix: the description previously referred to
                // LSEON/BDCR while the checks verify PLLON/PLLRDY in RCC CR.
                AND_THEN ("PLLON bit in RCC CR register shall be set and PLLRDY bit shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CR, RCC_CR_PLLON_Pos) == true);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToSet, RCC->CR, RCC_CR_PLLRDY_Pos);
                }
            }
        }
    }
    GIVEN ("the oscillators are already running")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        Utils::SetBit(RCC->CSR, RCC_CSR_LSIRDY_Pos, true);
        Utils::SetBit(RCC->BDCR, RCC_BDCR_LSERDY_Pos, true);
        // This tells the HAL that the oscillator has started correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, false);
        WHEN ("the clocks are enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::highSpeed_internal);
            Hal::Error error2 = Hal::Clocks::Enable(Hal::OscillatorType::highSpeed_external);
            Hal::Error error3 = Hal::Clocks::Enable(Hal::OscillatorType::lowSpeed_internal);
            Hal::Error error4 = Hal::Clocks::Enable(Hal::OscillatorType::lowSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                REQUIRE (error2 == Hal::Error::noErrors);
                REQUIRE (error3 == Hal::Error::noErrors);
                REQUIRE (error4 == Hal::Error::noErrors);
                AND_THEN ("no configurations shall occur")
                {
                    REQUIRE_CALLS (0, HalMock::mockHalInternal, WaitForBitToSet);
                }
            }
        }
    }
    GIVEN ("HSE is disabled and clock fails to start")
    {
        // This tells the HAL that the oscillator start timed out.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToSet, true);
        WHEN ("the HSE clock is enabled")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::highSpeed_external);
            THEN ("timeout error shall occur")
            {
                REQUIRE (error == Hal::Error::timeout);
            }
        }
    }
    GIVEN ("the system is in init state")
    {
        WHEN ("secondary PLL is enabled directly")
        {
            Hal::Error error = Hal::Clocks::Enable(Hal::OscillatorType::pllSecondary);
            THEN ("parameter error shall occur")
            {
                REQUIRE (error == Hal::Error::invalidParameter);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
}
// Verifies Hal::Clocks::Disable() for every oscillator type: the ON bit is
// cleared in the matching RCC register (CR for HSI/HSE/PLL, CSR for LSI,
// BDCR for LSE), the corresponding RDY bit is polled through the mocked
// WaitForBitToClear(), already-stopped oscillators are a no-op, a poll
// timeout is propagated, and disabling is rejected for the active system
// clock and for the secondary PLL output.
SCENARIO ("Clocks are disabled", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    HalMock::InitInternal();
    ASchMock::Assert::Init();
    GIVEN ("HSI is enabled and is not used as a system clock")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSION_Pos, true);
        Utils::SetBit(RCC->CR, RCC_CR_HSIRDY_Pos, true);
        // HSE as system clock.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_0);
        // This tells the HAL that the oscillator has stopped correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, false);
        WHEN ("the HSI clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::highSpeed_internal);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("HSION bit in RCC CR register shall be cleared and HSIRDY bit clearing shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CR, RCC_CR_HSION_Pos) == false);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToClear, RCC->CR, RCC_CR_HSIRDY_Pos);
                }
            }
        }
    }
    GIVEN ("HSE is enabled and clocks are functioning correctly")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSEON_Pos, true);
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // This tells the HAL that the oscillator has stopped correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, false);
        WHEN ("the HSE clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::highSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("HSEON bit in RCC CR register shall be cleared and HSERDY bit clearing shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CR, RCC_CR_HSEON_Pos) == false);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToClear, RCC->CR, RCC_CR_HSERDY_Pos);
                }
            }
        }
    }
    GIVEN ("LSI is enabled and clocks are functioning correctly")
    {
        Utils::SetBit(RCC->CSR, RCC_CSR_LSION_Pos, true);
        Utils::SetBit(RCC->CSR, RCC_CSR_LSIRDY_Pos, true);
        // This tells the HAL that the oscillator has stopped correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, false);
        WHEN ("the LSI clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::lowSpeed_internal);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("LSION bit in RCC CSR register shall be cleared and LSIRDY bit clearing shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CSR, RCC_CSR_LSION_Pos) == false);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToClear, RCC->CSR, RCC_CSR_LSIRDY_Pos);
                }
            }
        }
    }
    GIVEN ("LSE is enabled and clocks are functioning correctly")
    {
        Utils::SetBit(RCC->BDCR, RCC_BDCR_LSEON_Pos, true);
        Utils::SetBit(RCC->BDCR, RCC_BDCR_LSERDY_Pos, true);
        // This tells the HAL that the oscillator has stopped correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, false);
        WHEN ("the LSE clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::lowSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("LSEON bit in RCC BDCR register shall be cleared and LSERDY bit clearing shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->BDCR, RCC_BDCR_LSEON_Pos) == false);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToClear, RCC->BDCR, RCC_BDCR_LSERDY_Pos);
                }
            }
        }
    }
    GIVEN ("PLL is enabled and clocks are functioning correctly")
    {
        Utils::SetBit(RCC->CR, RCC_CR_PLLON_Pos, true);
        Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true);
        // This tells the HAL that the oscillator has stopped correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, false);
        WHEN ("the PLL clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::pll);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("PLLON bit in RCC CR register shall be cleared and PLLRDY bit clearing shall be waited")
                {
                    REQUIRE (Utils::GetBit(RCC->CR, RCC_CR_PLLON_Pos) == false);
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitToClear, RCC->CR, RCC_CR_PLLRDY_Pos);
                }
            }
        }
    }
    GIVEN ("the oscillators are already disabled")
    {
        // Disable HSI (the only oscillator running after register init).
        Utils::SetBit(RCC->CR, RCC_CR_HSIRDY_Pos, false);
        // Set SWS to an invalid pattern so HSI is not reported as the system clock.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_Msk);
        // This tells the HAL that the oscillator has stopped correctly.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, false);
        WHEN ("the clocks are disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::highSpeed_internal);
            Hal::Error error2 = Hal::Clocks::Disable(Hal::OscillatorType::highSpeed_external);
            Hal::Error error3 = Hal::Clocks::Disable(Hal::OscillatorType::lowSpeed_internal);
            Hal::Error error4 = Hal::Clocks::Disable(Hal::OscillatorType::lowSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                REQUIRE (error2 == Hal::Error::noErrors);
                REQUIRE (error3 == Hal::Error::noErrors);
                REQUIRE (error4 == Hal::Error::noErrors);
                AND_THEN ("no configurations shall occur")
                {
                    REQUIRE_CALLS (0, HalMock::mockHalInternal, WaitForBitToClear);
                }
            }
        }
    }
    GIVEN ("HSE is enabled and clock fails to stop")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSEON_Pos, true);
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // This tells the HAL that the oscillator failed to stop.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitToClear, true);
        WHEN ("the HSE clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::highSpeed_external);
            THEN ("timeout error shall occur")
            {
                REQUIRE (error == Hal::Error::timeout);
            }
        }
    }
    GIVEN ("HSE is enabled and used as system clock")
    {
        // HSE is enabled
        Utils::SetBit(RCC->CR, RCC_CR_HSEON_Pos, true);
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // Set HSE as system clock
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_0);
        WHEN ("the HSE clock is disabled")
        {
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::highSpeed_external);
            THEN ("reserved resource error shall occur")
            {
                REQUIRE (error == Hal::Error::reservedResource);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
    GIVEN ("PLL is enabled")
    {
        // PLL is enabled
        Utils::SetBit(RCC->CR, RCC_CR_PLLON_Pos, true);
        Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true);
        WHEN ("the secondary PLL clock is disabled")
        {
            // The secondary output cannot be disabled independently of the main PLL.
            Hal::Error error = Hal::Clocks::Disable(Hal::OscillatorType::pllSecondary);
            THEN ("invalid parameter error shall occur")
            {
                REQUIRE (error == Hal::Error::invalidParameter);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
}
// Verifies Hal::Clocks::ConfigurePll(): automatic PLLP/PLLN/PLLM/PLLQ
// calculation from a source oscillator and target frequency, the resulting
// main and secondary (48MHz-domain) output frequencies, PLL source selection
// in PLLCFGR, and rejection of invalid sources, out-of-range targets, and
// unavailable/unconfigured source oscillators.
SCENARIO ("PLL is configured", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    HalMock::InitInternal();
    ASchMock::Assert::Init();
    GIVEN ("system is at init state")
    {
        // Set PLL source to HSE for checking that it gets cleared in case of HSI PLL source.
        Utils::SetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos, true);
        WHEN ("PLL is enabled with source from HSI and target of 168MHz and the frequency is read back")
        {
            Hal::Error error = Hal::Clocks::ConfigurePll(Hal::OscillatorType::highSpeed_internal, Hal::MHz(168UL));
            Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true); // Set PLLRDY bit in order to be able to read the frequency.
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pll);
            uint32_t secondaryFrequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pllSecondary);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("PLLP, PLLN, PLLM, and PLLQ shall have correct values")
                {
                    // 168MHz from the 16MHz HSI shall result in PLLP = 2 (field value 0), PLLN = 63, PLLM = 3, PLLQ = 7.
                    PRINT_PLL();
                    INFO ("Frequency: " << frequency);
                    INFO ("Frequency: " << secondaryFrequency);
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLP_Msk, 0UL));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLN_Msk, (63UL << RCC_PLLCFGR_PLLN_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLM_Msk, (3UL << RCC_PLLCFGR_PLLM_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ_Msk, (7UL << RCC_PLLCFGR_PLLQ_Pos)));
                    AND_THEN ("the PLL frequency shall be set to 168MHz and secondary frequency to 48MHz")
                    {
                        REQUIRE (frequency == Hal::MHz(168UL));
                        REQUIRE (secondaryFrequency == Hal::MHz(48UL));
                        AND_THEN ("the PLL source is set to HSE")
                        {
                            // PLLSRC cleared selects HSI as the PLL source.
                            REQUIRE (Utils::GetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos) == false);
                        }
                    }
                }
            }
        }
    }
    GIVEN ("HSE is enabled and set as 24MHz")
    {
        // Enable HSE
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // Set frequency
        Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_external, Hal::MHz(24UL));
        // Set PLL source to HSI for checking that it gets set in case of HSE PLL source.
        Utils::SetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos, false);
        WHEN ("PLL is enabled with source from HSE and target of 109MHz and the frequency is read back")
        {
            Hal::Error error = Hal::Clocks::ConfigurePll(Hal::OscillatorType::highSpeed_external, Hal::MHz(109UL));
            Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true); // Set PLLRDY bit in order to be able to read the frequency.
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pll);
            uint32_t secondaryFrequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pllSecondary);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("PLLP, PLLN, PLLM, and PLLQ shall have correct values")
                {
                    // 109MHz from the 24MHz HSE shall result in PLLP = 2 (field value 0), PLLN = 100, PLLM = 11, PLLQ = 5.
                    PRINT_PLL();
                    INFO ("Frequency: " << frequency);
                    INFO ("Frequency: " << secondaryFrequency);
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLP_Msk, 0UL));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLN_Msk, (100UL << RCC_PLLCFGR_PLLN_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLM_Msk, (11UL << RCC_PLLCFGR_PLLM_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ_Msk, (5UL << RCC_PLLCFGR_PLLQ_Pos)));
                    AND_THEN ("the PLL frequency shall be set to 109 090 909 Hz and secondary frequency to 43 636 363 Hz")
                    {
                        // 24MHz / 11 * 100 / 2 and 24MHz / 11 * 100 / 5, truncated.
                        REQUIRE (frequency == 109090909UL);
                        REQUIRE (secondaryFrequency == 43636363UL);
                        AND_THEN ("the PLL source is set to HSE")
                        {
                            REQUIRE (Utils::GetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos) == true);
                        }
                    }
                }
            }
        }
    }
    GIVEN ("system is at init state")
    {
        WHEN ("PLL is enabled with source from LSI and target of 120MHz")
        {
            // LSI is not a valid PLL source.
            Hal::Error error = Hal::Clocks::ConfigurePll(Hal::OscillatorType::lowSpeed_internal, Hal::MHz(120UL));
            THEN ("invalid parameter error shall occur")
            {
                REQUIRE (error == Hal::Error::invalidParameter);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
    GIVEN ("system is at init state")
    {
        WHEN ("PLL is enabled with source from HSI and target of 169MHz")
        {
            // 169MHz is above the supported output range.
            Hal::Error error = Hal::Clocks::ConfigurePll(Hal::OscillatorType::highSpeed_internal, Hal::MHz(169UL));
            THEN ("invalid parameter error shall occur")
            {
                REQUIRE (error == Hal::Error::invalidParameter);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
    GIVEN ("HSE is not running")
    {
        WHEN ("PLL is enabled with source from HSE and target of 120MHz")
        {
            Hal::Error error = Hal::Clocks::ConfigurePll(Hal::OscillatorType::highSpeed_external, Hal::MHz(120UL));
            THEN ("unavailable resource error shall occur")
            {
                REQUIRE (error == Hal::Error::unavailableResource);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
    GIVEN ("HSE is running, but frequency is not configured")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_external, 0UL);
        WHEN ("PLL is enabled with source from non-running HSE and target of 120MHz")
        {
            Hal::Error error = Hal::Clocks::ConfigurePll(Hal::OscillatorType::highSpeed_external, Hal::MHz(120UL));
            THEN ("invalid parameter error shall occur")
            {
                REQUIRE (error == Hal::Error::invalidParameter);
                AND_THEN ("an assert failure shall trigger")
                {
                    REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                }
            }
        }
    }
}
// Verifies Hal::Clocks::ConfigurePllManually(): caller-supplied
// PLLP/PLLN/PLLM/PLLQ divider values are written to PLLCFGR as-is, the PLL
// source bit follows the requested oscillator, and the resulting output
// frequencies are derived from the raw register values.
SCENARIO ("PLL registers are manually configured", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    GIVEN ("system is at init state")
    {
        // Set PLL source to HSE for checking that it gets cleared in case of HSI PLL source.
        Utils::SetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos, true);
        WHEN ("PLL is configured with valid register values from HSI")
        {
            // Maximum allowed divider/multiplier values.
            Hal::pllRegisters_t registers =
            {
                .pllp = 8UL,
                .plln = 432UL,
                .pllm = 63UL,
                .pllq = 15UL
            };
            Hal::Error error = Hal::Clocks::ConfigurePllManually(Hal::OscillatorType::highSpeed_internal, registers);
            Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true); // Set PLLRDY bit in order to be able to read the frequency.
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pll);
            uint32_t secondaryFrequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pllSecondary);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("PLLP, PLLN, PLLM, and PLLQ shall have correct values")
                {
                    // PLLP = 8 is encoded as field value 3 in PLLCFGR; the others are stored directly.
                    PRINT_PLL();
                    INFO ("Frequency: " << frequency);
                    INFO ("Frequency: " << secondaryFrequency);
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLP_Msk, (3UL << RCC_PLLCFGR_PLLP_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLN_Msk, (432UL << RCC_PLLCFGR_PLLN_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLM_Msk, (63UL << RCC_PLLCFGR_PLLM_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ_Msk, (15UL << RCC_PLLCFGR_PLLQ_Pos)));
                    AND_THEN ("the PLL frequency shall be set to 13 714 286 Hz and secondary frequency to 7 314 286 Hz")
                    {
                        // 16MHz / 63 * 432 / 8 and 16MHz / 63 * 432 / 15, rounded.
                        REQUIRE (frequency == 13714286UL);
                        REQUIRE (secondaryFrequency == 7314286UL);
                        AND_THEN ("the PLL source is set to HSI")
                        {
                            // PLLSRC cleared selects HSI as the PLL source.
                            REQUIRE (Utils::GetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos) == false);
                        }
                    }
                }
            }
        }
    }
    GIVEN ("HSE is enabled and set as 24MHz")
    {
        // Enable HSE
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // Set frequency
        Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_external, Hal::MHz(24UL));
        // Set PLL source to HSI for checking that it gets set in case of HSE PLL source.
        Utils::SetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos, false);
        WHEN ("PLL is configured with valid register values from HSE")
        {
            Hal::pllRegisters_t registers =
            {
                .pllp = 2UL,
                .plln = 50UL,
                .pllm = 4UL,
                .pllq = 7UL
            };
            Hal::Error error = Hal::Clocks::ConfigurePllManually(Hal::OscillatorType::highSpeed_external, registers);
            Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true); // Set PLLRDY bit in order to be able to read the frequency.
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pll);
            uint32_t secondaryFrequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::pllSecondary);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("PLLP, PLLN, PLLM, and PLLQ shall have correct values")
                {
                    // PLLP = 2 is encoded as field value 0 in PLLCFGR; the others are stored directly.
                    PRINT_PLL();
                    INFO ("Frequency: " << frequency);
                    INFO ("Frequency: " << secondaryFrequency);
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLP_Msk, 0UL));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLN_Msk, (50UL << RCC_PLLCFGR_PLLN_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLM_Msk, (4UL << RCC_PLLCFGR_PLLM_Pos)));
                    REQUIRE (Utils::CompareBits(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ_Msk, (7UL << RCC_PLLCFGR_PLLQ_Pos)));
                    AND_THEN ("the PLL frequency shall be set to 150MHz and secondary frequency to approximately 42 857 143 Hz")
                    {
                        // 24MHz / 4 * 50 / 2 and 24MHz / 4 * 50 / 7.
                        REQUIRE (frequency == Hal::MHz(150UL));
                        REQUIRE (APPROX_EQUAL(secondaryFrequency, 42857143UL, 3UL)); // Add some tolerance due to float inaccuracies.
                        AND_THEN ("the PLL source is set to HSE")
                        {
                            REQUIRE (Utils::GetBit(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC_Pos) == true);
                        }
                    }
                }
            }
        }
    }
}
// Verifies Hal::Clocks::IsRunning() for each oscillator: the result follows
// the corresponding ready flag (HSI/HSE/PLL in RCC CR, LSI in RCC CSR,
// LSE in RCC BDCR) in both the enabled and disabled states.
SCENARIO ("Clock status is read", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    GIVEN ("HSI is enabled")
    {
        // HSI is on by default
        WHEN ("the HSI status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::highSpeed_internal);
            THEN ("the check shall return true")
            {
                REQUIRE (isRunning == true);
            }
        }
    }
    GIVEN ("HSI is disabled")
    {
        // Disable HSI
        Utils::SetBit(RCC->CR, RCC_CR_HSIRDY_Pos, false);
        WHEN ("the HSI status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::highSpeed_internal);
            THEN ("the check shall return false")
            {
                REQUIRE (isRunning == false);
            }
        }
    }
    GIVEN ("HSE is enabled")
    {
        // Enable HSE
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        WHEN ("the HSE status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::highSpeed_external);
            THEN ("the check shall return true")
            {
                REQUIRE (isRunning == true);
            }
        }
    }
    GIVEN ("HSE is disabled")
    {
        // HSE is disabled by default
        WHEN ("the HSE status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::highSpeed_external);
            THEN ("the check shall return false")
            {
                REQUIRE (isRunning == false);
            }
        }
    }
    GIVEN ("LSI is enabled")
    {
        // Enable LSI
        Utils::SetBit(RCC->CSR, RCC_CSR_LSIRDY_Pos, true);
        WHEN ("the LSI status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::lowSpeed_internal);
            THEN ("the check shall return true")
            {
                REQUIRE (isRunning == true);
            }
        }
    }
    GIVEN ("LSI is disabled")
    {
        // LSI is disabled by default
        WHEN ("the LSI status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::lowSpeed_internal);
            THEN ("the check shall return false")
            {
                REQUIRE (isRunning == false);
            }
        }
    }
    GIVEN ("LSE is enabled")
    {
        // Enable LSE
        Utils::SetBit(RCC->BDCR, RCC_BDCR_LSERDY_Pos, true);
        WHEN ("the LSE status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::lowSpeed_external);
            THEN ("the check shall return true")
            {
                REQUIRE (isRunning == true);
            }
        }
    }
    GIVEN ("LSE is disabled")
    {
        // LSE is disabled by default
        WHEN ("the LSE status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::lowSpeed_external);
            THEN ("the check shall return false")
            {
                REQUIRE (isRunning == false);
            }
        }
    }
    GIVEN ("PLL is enabled")
    {
        // Enable PLL
        Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true);
        WHEN ("the PLL status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::pll);
            THEN ("the check shall return true")
            {
                REQUIRE (isRunning == true);
            }
        }
    }
    GIVEN ("PLL is disabled")
    {
        // PLL is disabled by default
        WHEN ("the PLL status is checked")
        {
            bool isRunning = Hal::Clocks::IsRunning(Hal::OscillatorType::pll);
            THEN ("the check shall return false")
            {
                REQUIRE (isRunning == false);
            }
        }
    }
}
// Verifies Hal::Clocks::SetFrequency()/GetFrequency(): only the HSE
// frequency is writable; the fixed-frequency oscillators (HSI 16MHz,
// LSI 32kHz, LSE 32768Hz) and the PLL outputs reject writes with an assert
// failure, a non-running HSE reads back 0, and an unknown type reads 0.
SCENARIO ("Clock frequencies are written and read", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    ASchMock::Assert::Init();
    GIVEN ("HSI is enabled")
    {
        // HSI is on by default
        WHEN ("HSI frequency is read")
        {
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::highSpeed_internal);
            THEN ("frequency shall be 16MHz")
            {
                REQUIRE (frequency == Hal::MHz(16UL));
            }
        }
    }
    GIVEN ("HSI is enabled")
    {
        // HSI is on by default
        WHEN ("the HSI frequency is set to 10MHz")
        {
            // The HSI frequency is fixed by hardware; writing it must fail.
            Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_internal, Hal::MHz(10UL));
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::highSpeed_internal);
            THEN ("an assert failure shall trigger")
            {
                REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                AND_THEN ("the frequency shall still be 16MHz")
                {
                    REQUIRE (frequency == Hal::MHz(16UL));
                }
            }
        }
    }
    GIVEN ("HSE is enabled and its frequency is set to 24MHz")
    {
        // Enable HSE
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_external, Hal::MHz(24UL));
        WHEN ("the HSE frequency is read")
        {
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::highSpeed_external);
            THEN ("frequency shall be 24MHz")
            {
                REQUIRE (frequency == Hal::MHz(24UL));
            }
        }
    }
    GIVEN ("HSE is disabled and its frequency is set to 24MHz")
    {
        // HSE is disabled by default.
        Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_external, Hal::MHz(24UL));
        WHEN ("the HSE frequency is read")
        {
            // A stopped oscillator reports 0 regardless of the configured value.
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::highSpeed_external);
            THEN ("frequency shall be 0MHz")
            {
                REQUIRE (frequency == 0UL);
            }
        }
    }
    GIVEN ("LSI is enabled")
    {
        Utils::SetBit(RCC->CSR, RCC_CSR_LSIRDY_Pos, true);
        WHEN ("the LSI frequency is set to 30kHz")
        {
            // The LSI frequency is fixed by hardware; writing it must fail.
            Hal::Clocks::SetFrequency(Hal::OscillatorType::lowSpeed_internal, Hal::kHz(30UL));
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::lowSpeed_internal);
            THEN ("an assert failure shall trigger")
            {
                REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                AND_THEN ("the frequency shall still be 32kHz")
                {
                    REQUIRE (frequency == Hal::kHz(32UL));
                }
            }
        }
    }
    GIVEN ("LSE is enabled")
    {
        Utils::SetBit(RCC->BDCR, RCC_BDCR_LSERDY_Pos, true);
        WHEN ("the LSE frequency is set to 30kHz")
        {
            // The LSE frequency is fixed by hardware; writing it must fail.
            Hal::Clocks::SetFrequency(Hal::OscillatorType::lowSpeed_external, Hal::kHz(30UL));
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::lowSpeed_external);
            THEN ("an assert failure shall trigger")
            {
                REQUIRE (ASchMock::Assert::GetFails() == 1UL);
                AND_THEN ("the frequency shall still be 32768Hz")
                {
                    REQUIRE (frequency == 32768UL);
                }
            }
        }
    }
    GIVEN ("PLL is enabled")
    {
        Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true);
        WHEN ("the PLL frequency is set to 100MHz")
        {
            // PLL output frequencies are derived from the dividers, not writable.
            Hal::Clocks::SetFrequency(Hal::OscillatorType::pll, Hal::MHz(100UL));
            THEN ("an assert failure shall trigger")
            {
                REQUIRE (ASchMock::Assert::GetFails() == 1UL);
            }
        }
    }
    GIVEN ("PLL is enabled")
    {
        Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true);
        WHEN ("the PLL secondary frequency is set to 48MHz")
        {
            // PLL output frequencies are derived from the dividers, not writable.
            Hal::Clocks::SetFrequency(Hal::OscillatorType::pllSecondary, Hal::MHz(48UL));
            THEN ("an assert failure shall trigger")
            {
                REQUIRE (ASchMock::Assert::GetFails() == 1UL);
            }
        }
    }
    GIVEN ("system is at init state")
    {
        WHEN ("the unknown frequency is read")
        {
            uint32_t frequency = Hal::Clocks::GetFrequency(Hal::OscillatorType::unknown);
            THEN ("frequency shall be 0MHz")
            {
                REQUIRE (frequency == 0UL);
            }
        }
    }
}
// Verifies Hal::Clocks::GetSysClockFrequency(): the returned value follows
// the oscillator currently selected by the SWS bits in RCC CFGR.
SCENARIO ("System clock frequency is read", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    GIVEN ("HSI is system clock")
    {
        // HSI is system clock by default
        WHEN ("system clock frequency is read")
        {
            uint32_t frequency = Hal::Clocks::GetSysClockFrequency();
            THEN ("the frequency shall be 16MHz")
            {
                REQUIRE (frequency == Hal::MHz(16UL));
            }
        }
    }
    GIVEN ("24MHz HSE is system clock")
    {
        // HSE as system clock
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_0);
        Hal::Clocks::SetFrequency(Hal::OscillatorType::highSpeed_external, Hal::MHz(24UL));
        WHEN ("system clock frequency is read")
        {
            uint32_t frequency = Hal::Clocks::GetSysClockFrequency();
            THEN ("the frequency shall be 24MHz")
            {
                REQUIRE (frequency == Hal::MHz(24UL));
            }
        }
    }
}
// Verifies Hal::Clocks::SetSysClockSource(): the SW bits in RCC CFGR are
// written for HSI/HSE/PLL sources and the matching SWS pattern is waited via
// the mocked WaitForBitPatternToSet(), re-selecting the current source is a
// no-op, invalid sources and stopped oscillators are rejected, and a wait
// timeout is propagated.
SCENARIO ("System clock is selected", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    HalMock::InitInternal();
    GIVEN ("HSE is system clock and HSI is enabled")
    {
        // Set HSE as system clock
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_0);
        // HSI is enabled by default. No need to enable it again.
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        // This tells the HAL that the system clock was changed successfully.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitPatternToSet, false);
        WHEN ("the HSI is set as system clock source")
        {
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::highSpeed_internal);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("SWS bits in RCC CFGR register shall be set to 00 and corresponding SW bits shall be waited")
                {
                    REQUIRE (Utils::CompareBits(RCC->CFGR, RCC_CFGR_SW_Msk, 0UL));
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitPatternToSet, RCC->CFGR, (uint32_t)RCC_CFGR_SWS_Msk, (uint32_t)0UL);
                }
            }
        }
    }
    GIVEN ("HSE is enabled")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        // This tells the HAL that the system clock was changed successfully.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitPatternToSet, false);
        WHEN ("the HSE is set as system clock source")
        {
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::highSpeed_external);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("SWS bits in RCC CFGR register shall be set to 01 and corresponding SW bits shall be waited")
                {
                    REQUIRE (Utils::CompareBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_0));
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitPatternToSet, RCC->CFGR, (uint32_t)RCC_CFGR_SWS_Msk, (uint32_t)RCC_CFGR_SWS_0);
                }
            }
        }
    }
    GIVEN ("PLL is enabled")
    {
        Utils::SetBit(RCC->CR, RCC_CR_PLLRDY_Pos, true);
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        // This tells the HAL that the system clock was changed successfully.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitPatternToSet, false);
        WHEN ("the PLL is set as system clock source")
        {
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::pll);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("SWS bits in RCC CFGR register shall be set to 10 and corresponding SW bits shall be waited")
                {
                    REQUIRE (Utils::CompareBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_1));
                    REQUIRE_PARAM_CALLS (1, HalMock::mockHalInternal, WaitForBitPatternToSet, RCC->CFGR, (uint32_t)RCC_CFGR_SWS_Msk, (uint32_t)RCC_CFGR_SWS_1);
                }
            }
        }
    }
    GIVEN ("system is at init state")
    {
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        // This tells the HAL that the system clock was changed successfully.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitPatternToSet, false);
        WHEN ("the HSI is set as system clock source")
        {
            // HSI is already the system clock at init, so this is a no-op.
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::highSpeed_internal);
            THEN ("no errors shall occur")
            {
                REQUIRE (error == Hal::Error::noErrors);
                AND_THEN ("SWS bits in RCC CFGR register shall not be changed")
                {
                    REQUIRE (Utils::CompareBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk));
                    REQUIRE_CALLS (0, HalMock::mockHalInternal, WaitForBitPatternToSet);
                }
            }
        }
    }
    GIVEN ("system is at init state")
    {
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        WHEN ("the LSE is set as system clock source")
        {
            // LSE is not a valid system clock source.
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::lowSpeed_external);
            THEN ("invalid parameter error shall occur")
            {
                REQUIRE (error == Hal::Error::invalidParameter);
                AND_THEN ("SWS bits in RCC CFGR register shall not be changed")
                {
                    REQUIRE (Utils::CompareBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk));
                    REQUIRE_CALLS (0, HalMock::mockHalInternal, WaitForBitPatternToSet);
                }
            }
        }
    }
    GIVEN ("PLL is disabled")
    {
        // PLL is disabled by default.
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        // This tells the HAL that the system clock was changed successfully.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitPatternToSet, false);
        WHEN ("the PLL is set as system clock source")
        {
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::pll);
            THEN ("unavailable resource error shall occur")
            {
                REQUIRE (error == Hal::Error::unavailableResource);
                AND_THEN ("SWS bits in RCC CFGR register shall not be changed")
                {
                    REQUIRE (Utils::CompareBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk));
                    REQUIRE_CALLS (0, HalMock::mockHalInternal, WaitForBitPatternToSet);
                }
            }
        }
    }
    GIVEN ("HSE is enabled")
    {
        Utils::SetBit(RCC->CR, RCC_CR_HSERDY_Pos, true);
        // Set invalid value to SW bits for checking that the bits are set afterwards.
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SW_Msk, RCC_CFGR_SW_Msk);
        // This tells the HAL that the system clock could not be changed.
        SET_RETURN(HalMock::mockHalInternal, WaitForBitPatternToSet, true);
        WHEN ("the HSE is set as system clock source")
        {
            Hal::Error error = Hal::Clocks::SetSysClockSource(Hal::OscillatorType::highSpeed_external);
            THEN ("timeout error shall occur")
            {
                REQUIRE (error == Hal::Error::timeout);
            }
        }
    }
}
// Verifies Hal::Clocks::GetSysClockSource(): the returned oscillator type
// follows the SWS bits in RCC CFGR (00 = HSI, 01 = HSE, 10 = PLL).
SCENARIO ("System clock source is read", "[hal_system]")
{
    Hal_Mock::InitRccRegisters();
    GIVEN ("HSI is system clock")
    {
        // HSI is system clock by default
        WHEN ("system clock source is read")
        {
            Hal::OscillatorType type = Hal::Clocks::GetSysClockSource();
            THEN ("the clock type shall be HSI")
            {
                REQUIRE (type == Hal::OscillatorType::highSpeed_internal);
            }
        }
    }
    GIVEN ("HSE is system clock")
    {
        // Set HSE as system clock
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_0);
        WHEN ("system clock source is read")
        {
            Hal::OscillatorType type = Hal::Clocks::GetSysClockSource();
            THEN ("the clock type shall be HSE")
            {
                REQUIRE (type == Hal::OscillatorType::highSpeed_external);
            }
        }
    }
    GIVEN ("PLL is system clock")
    {
        // Set PLL as system clock
        Utils::SetBits(RCC->CFGR, RCC_CFGR_SWS_Msk, RCC_CFGR_SWS_1);
        WHEN ("system clock source is read")
        {
            Hal::OscillatorType type = Hal::Clocks::GetSysClockSource();
            THEN ("the clock type shall be PLL")
            {
                REQUIRE (type == Hal::OscillatorType::pll);
            }
        }
    }
}
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2020 The PIVX developers
// Copyright (c) 2017-2020 The UNNY developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "base58.h"
#include "clientversion.h"
#include "init.h"
#include "main.h"
#include "masternode-sync.h"
#include "net.h"
#include "netbase.h"
#include "rpc/server.h"
#include "spork.h"
#include "timedata.h"
#include "util.h"
#ifdef ENABLE_WALLET
#include "wallet.h"
#include "walletdb.h"
#endif
#include <stdint.h>
#include <boost/assign/list_of.hpp>
#include <univalue.h>
/**
* @note Do not add or change anything in the information returned by this
* method. `getinfo` exists for backwards-compatibility only. It combines
* information from wildly different sources in the program, which is a mess,
* and is thus planned to be deprecated eventually.
*
* Based on the source of the information, new information should be added to:
* - `getblockchaininfo`,
* - `getnetworkinfo` or
* - `getwalletinfo`
*
* Or alternatively, create a specific query method for the information.
**/
// RPC "getinfo": legacy catch-all status call (see deprecation note above).
// Takes no parameters; returns a JSON object mixing client, chain, network
// and (when compiled with ENABLE_WALLET) wallet state.
UniValue getinfo(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 0)
        throw std::runtime_error(
            "getinfo\n"
            "\nReturns an object containing various state info.\n"
            "\nResult:\n"
            "{\n"
            "  \"version\": xxxxx,           (numeric) the server version\n"
            "  \"protocolversion\": xxxxx,   (numeric) the protocol version\n"
            "  \"walletversion\": xxxxx,     (numeric) the wallet version\n"
            "  \"balance\": xxxxxxx,         (numeric) the total unnycore balance of the wallet\n"
            "  \"blocks\": xxxxxx,           (numeric) the current number of blocks processed in the server\n"
            "  \"timeoffset\": xxxxx,        (numeric) the time offset\n"
            "  \"connections\": xxxxx,       (numeric) the number of connections\n"
            "  \"proxy\": \"host:port\",     (string, optional) the proxy used by the server\n"
            "  \"difficulty\": xxxxxx,       (numeric) the current difficulty\n"
            "  \"testnet\": true|false,      (boolean) if the server is using testnet or not\n"
            "  \"moneysupply\" : \"supply\"       (numeric) The money supply when this block was added to the blockchain\n"
            "  \"keypoololdest\": xxxxxx,    (numeric) the timestamp (seconds since GMT epoch) of the oldest pre-generated key in the key pool\n"
            "  \"keypoolsize\": xxxx,        (numeric) how many new keys are pre-generated\n"
            "  \"unlocked_until\": ttt,      (numeric) the timestamp in seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is unlocked for transfers, or 0 if the wallet is locked\n"
            "  \"paytxfee\": x.xxxx,         (numeric) the transaction fee set in unny/kb\n"
            "  \"relayfee\": x.xxxx,         (numeric) minimum relay fee for non-free transactions in unny/kb\n"
            "  \"staking status\": true|false,  (boolean) if the wallet is staking or not\n"
            "  \"errors\": \"...\"           (string) any error messages\n"
            "}\n"
            "\nExamples:\n" +
            HelpExampleCli("getinfo", "") + HelpExampleRpc("getinfo", ""));
    // Wallet builds must also hold the wallet lock (LOCK2 acquires in a
    // deadlock-safe order); a null wallet pointer degrades to cs_main only.
#ifdef ENABLE_WALLET
    LOCK2(cs_main, pwalletMain ? &pwalletMain->cs_wallet : NULL);
#else
    LOCK(cs_main);
#endif
    proxyType proxy;
    GetProxy(NET_IPV4, proxy);
    UniValue obj(UniValue::VOBJ);
    obj.push_back(Pair("version", CLIENT_VERSION));
    obj.push_back(Pair("protocolversion", PROTOCOL_VERSION));
#ifdef ENABLE_WALLET
    if (pwalletMain) {
        obj.push_back(Pair("walletversion", pwalletMain->GetVersion()));
        obj.push_back(Pair("balance", ValueFromAmount(pwalletMain->GetBalance())));
    }
#endif
    obj.push_back(Pair("blocks", (int)chainActive.Height()));
    obj.push_back(Pair("timeoffset", GetTimeOffset()));
    obj.push_back(Pair("connections", (int)vNodes.size()));
    obj.push_back(Pair("proxy", (proxy.IsValid() ? proxy.proxy.ToStringIPPort() : std::string())));
    obj.push_back(Pair("difficulty", (double)GetDifficulty()));
    obj.push_back(Pair("testnet", Params().TestnetToBeDeprecatedFieldRPC()));
    // During initial block verification chainActive.Tip() might be not yet initialized
    // -> bail out early with a partial object; everything below dereferences the tip.
    if (chainActive.Tip() == NULL) {
        obj.push_back(Pair("status", "Blockchain information not yet available"));
        return obj;
    }
    obj.push_back(Pair("moneysupply",ValueFromAmount(chainActive.Tip()->nMoneySupply)));
#ifdef ENABLE_WALLET
    if (pwalletMain) {
        obj.push_back(Pair("keypoololdest", pwalletMain->GetOldestKeyPoolTime()));
        obj.push_back(Pair("keypoolsize", (int)pwalletMain->GetKeyPoolSize()));
    }
    // "unlocked_until" is only meaningful for encrypted wallets.
    if (pwalletMain && pwalletMain->IsCrypted())
        obj.push_back(Pair("unlocked_until", nWalletUnlockTime));
    obj.push_back(Pair("paytxfee", ValueFromAmount(payTxFee.GetFeePerK())));
#endif
    obj.push_back(Pair("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK())));
    // Staking heuristic: we staked a hash for the current tip height, or for
    // the previous height while a coin-stake search interval is still active.
    bool nStaking = false;
    if (mapHashedBlocks.count(chainActive.Tip()->nHeight))
        nStaking = true;
    else if (mapHashedBlocks.count(chainActive.Tip()->nHeight - 1) && nLastCoinStakeSearchInterval)
        nStaking = true;
    obj.push_back(Pair("staking status", (nStaking ? "Staking Active" : "Staking Not Active")));
    obj.push_back(Pair("errors", GetWarnings("statusbar")));
    return obj;
}
// RPC "mnsync": inspect or drive the masternode synchronization state machine.
// Modes: "status" (dump counters), "reset" (restart sync), "next" (skip to
// the next sync asset).
UniValue mnsync(const UniValue& params, bool fHelp)
{
    std::string strMode;
    if (params.size() == 1)
        strMode = params[0].get_str();
    if (fHelp || params.size() != 1 || (strMode != "status" && strMode != "reset" && strMode != "next")) {
        throw std::runtime_error(
            "mnsync \"status|reset|next\"\n"
            "\nReturns the sync status or resets sync or move to the next asset.\n"
            "\nArguments:\n"
            "1. \"mode\"    (string, required) either 'status' or 'reset' or 'next'\n"
            "\nResult ('status' mode):\n"
            "{\n"
            "  \"IsBlockchainSynced\": true|false,    (boolean) 'true' if blockchain is synced\n"
            "  \"lastMasternodeList\": xxxx,        (numeric) Timestamp of last MN list message\n"
            "  \"lastMasternodeWinner\": xxxx,      (numeric) Timestamp of last MN winner message\n"
            "  \"lastBudgetItem\": xxxx,            (numeric) Timestamp of last MN budget message\n"
            "  \"lastFailure\": xxxx,           (numeric) Timestamp of last failed sync\n"
            "  \"nCountFailures\": n,           (numeric) Number of failed syncs (total)\n"
            "  \"sumMasternodeList\": n,        (numeric) Number of MN list messages (total)\n"
            "  \"sumMasternodeWinner\": n,      (numeric) Number of MN winner messages (total)\n"
            "  \"sumBudgetItemProp\": n,        (numeric) Number of MN budget messages (total)\n"
            "  \"sumBudgetItemFin\": n,         (numeric) Number of MN budget finalization messages (total)\n"
            "  \"countMasternodeList\": n,      (numeric) Number of MN list messages (local)\n"
            "  \"countMasternodeWinner\": n,    (numeric) Number of MN winner messages (local)\n"
            "  \"countBudgetItemProp\": n,      (numeric) Number of MN budget messages (local)\n"
            "  \"countBudgetItemFin\": n,       (numeric) Number of MN budget finalization messages (local)\n"
            "  \"RequestedMasternodeAssets\": n, (numeric) Status code of last sync phase\n"
            "  \"RequestedMasternodeAttempt\": n, (numeric) Status code of last sync attempt\n"
            "}\n"
            "\nResult ('reset' mode):\n"
            "\"status\"     (string) 'success'\n"
            "\nExamples:\n" +
            HelpExampleCli("mnsync", "\"status\"") + HelpExampleRpc("mnsync", "\"status\""));
    }
    if (strMode == "status") {
        // Dump the raw counters of the global masternodeSync state machine.
        UniValue obj(UniValue::VOBJ);
        obj.push_back(Pair("IsBlockchainSynced", masternodeSync.IsBlockchainSynced()));
        obj.push_back(Pair("lastMasternodeList", masternodeSync.lastMasternodeList));
        obj.push_back(Pair("lastMasternodeWinner", masternodeSync.lastMasternodeWinner));
        obj.push_back(Pair("lastBudgetItem", masternodeSync.lastBudgetItem));
        obj.push_back(Pair("lastFailure", masternodeSync.lastFailure));
        obj.push_back(Pair("nCountFailures", masternodeSync.nCountFailures));
        obj.push_back(Pair("sumMasternodeList", masternodeSync.sumMasternodeList));
        obj.push_back(Pair("sumMasternodeWinner", masternodeSync.sumMasternodeWinner));
        obj.push_back(Pair("sumBudgetItemProp", masternodeSync.sumBudgetItemProp));
        obj.push_back(Pair("sumBudgetItemFin", masternodeSync.sumBudgetItemFin));
        obj.push_back(Pair("countMasternodeList", masternodeSync.countMasternodeList));
        obj.push_back(Pair("countMasternodeWinner", masternodeSync.countMasternodeWinner));
        obj.push_back(Pair("countBudgetItemProp", masternodeSync.countBudgetItemProp));
        obj.push_back(Pair("countBudgetItemFin", masternodeSync.countBudgetItemFin));
        obj.push_back(Pair("RequestedMasternodeAssets", masternodeSync.RequestedMasternodeAssets));
        obj.push_back(Pair("RequestedMasternodeAttempt", masternodeSync.RequestedMasternodeAttempt));
        obj.push_back(Pair("SyncStatus", masternodeSync.GetSyncStatus()));
        return obj;
    }
    if (strMode == "reset") {
        masternodeSync.Reset();
        return "success";
    }
    if (strMode == "next") {
        masternodeSync.GetNextAsset();
        return masternodeSync.GetSyncStatus();
    }
    // Unreachable in practice: the parameter check above only admits the
    // three handled modes.
    return "failure";
}
#ifdef ENABLE_WALLET
// boost::variant visitor used by validateaddress to append fields that
// depend on the concrete CTxDestination alternative (none / key / script).
class DescribeAddressVisitor : public boost::static_visitor<UniValue>
{
private:
    isminetype mine;  // ownership classification of the inspected address
public:
    DescribeAddressVisitor(isminetype mineIn) : mine(mineIn) {}
    // No destination (invalid/empty) -> nothing to describe.
    UniValue operator()(const CNoDestination &dest) const { return UniValue(UniValue::VOBJ); }
    // Plain key destination: expose the public key only when the wallet can
    // spend from it (i.e. holds the corresponding private key).
    UniValue operator()(const CKeyID &keyID) const {
        UniValue obj(UniValue::VOBJ);
        CPubKey vchPubKey;
        obj.push_back(Pair("isscript", false));
        if (mine == ISMINE_SPENDABLE) {
            pwalletMain->GetPubKey(keyID, vchPubKey);
            obj.push_back(Pair("pubkey", HexStr(vchPubKey)));
            obj.push_back(Pair("iscompressed", vchPubKey.IsCompressed()));
        }
        return obj;
    }
    // P2SH destination: if the wallet knows the redeem script, decode it and
    // list the embedded addresses.
    UniValue operator()(const CScriptID &scriptID) const {
        UniValue obj(UniValue::VOBJ);
        obj.push_back(Pair("isscript", true));
        if (mine != ISMINE_NO) {
            CScript subscript;
            pwalletMain->GetCScript(scriptID, subscript);
            std::vector<CTxDestination> addresses;
            txnouttype whichType;
            int nRequired;
            ExtractDestinations(subscript, whichType, addresses, nRequired);
            obj.push_back(Pair("script", GetTxnOutputType(whichType)));
            obj.push_back(Pair("hex", HexStr(subscript.begin(), subscript.end())));
            UniValue a(UniValue::VARR);
            for (const CTxDestination& addr : addresses)
                a.push_back(CBitcoinAddress(addr).ToString());
            obj.push_back(Pair("addresses", a));
            // Only multisig scripts carry a meaningful signature threshold.
            if (whichType == TX_MULTISIG)
                obj.push_back(Pair("sigsrequired", nRequired));
        }
        return obj;
    }
};
#endif
/*
Used for updating/reading spork settings on the network
*/
// RPC "spork": query all spork values ("show"), query active flags
// ("active"), or broadcast a new value for a named spork (two-arg form).
// Any other invocation falls through to the help/usage exception.
UniValue spork(const UniValue& params, bool fHelp)
{
    if (params.size() == 1) {
        const std::string strCommand = params[0].get_str();
        if (strCommand == "show") {
            // Dump the raw value of every known spork.
            UniValue ret(UniValue::VOBJ);
            for (int nSporkID = SPORK_START; nSporkID <= SPORK_END; nSporkID++) {
                if (sporkManager.GetSporkNameByID(nSporkID) != "Unknown")
                    ret.push_back(Pair(sporkManager.GetSporkNameByID(nSporkID), GetSporkValue(nSporkID)));
            }
            return ret;
        }
        if (strCommand == "active") {
            // Report, per spork, whether it is currently active.
            UniValue ret(UniValue::VOBJ);
            for (int nSporkID = SPORK_START; nSporkID <= SPORK_END; nSporkID++) {
                if (sporkManager.GetSporkNameByID(nSporkID) != "Unknown")
                    ret.push_back(Pair(sporkManager.GetSporkNameByID(nSporkID), IsSporkActive(nSporkID)));
            }
            return ret;
        }
    } else if (params.size() == 2) {
        // Update form: <name> <value>. Resolve the name first.
        const int nSporkID = sporkManager.GetSporkIDByName(params[0].get_str());
        if (nSporkID == -1) {
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid spork name");
        }
        // SPORK VALUE: broadcast new spork to the network on success.
        const int64_t nValue = params[1].get_int64();
        return sporkManager.UpdateSpork(nSporkID, nValue) ? "success" : "failure";
    }
    throw std::runtime_error(
        "spork <name> [<value>]\n"
        "<name> is the corresponding spork name, or 'show' to show all current spork settings, active to show which sporks are active"
        "<value> is a epoch datetime to enable or disable spork" +
        HelpRequiringPassphrase());
}
// RPC "validateaddress": syntactic validation of a base58 address plus, when
// valid, script/ownership details (wallet fields only in wallet builds).
UniValue validateaddress(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw std::runtime_error(
            "validateaddress \"unnyaddress\"\n"
            "\nReturn information about the given unny address.\n"
            "\nArguments:\n"
            "1. \"unnyaddress\"     (string, required) The unny address to validate\n"
            "\nResult:\n"
            "{\n"
            "  \"isvalid\" : true|false,         (boolean) If the address is valid or not. If not, this is the only property returned.\n"
            "  \"address\" : \"unnyaddress\", (string) The unny address validated\n"
            "  \"scriptPubKey\" : \"hex\",       (string) The hex encoded scriptPubKey generated by the address\n"
            "  \"ismine\" : true|false,          (boolean) If the address is yours or not\n"
            "  \"iswatchonly\" : true|false,     (boolean) If the address is watchonly\n"
            "  \"isscript\" : true|false,        (boolean) If the key is a script\n"
            "  \"hex\" : \"hex\",                (string, optional) The redeemscript for the P2SH address\n"
            "  \"pubkey\" : \"publickeyhex\",    (string) The hex value of the raw public key\n"
            "  \"iscompressed\" : true|false,    (boolean) If the address is compressed\n"
            "  \"account\" : \"account\"         (string) The account associated with the address, \"\" is the default account\n"
            "}\n"
            "\nExamples:\n" +
            HelpExampleCli("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"") + HelpExampleRpc("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\""));
    // Wallet builds need the wallet lock for IsMine/address-book lookups.
#ifdef ENABLE_WALLET
    LOCK2(cs_main, pwalletMain ? &pwalletMain->cs_wallet : NULL);
#else
    LOCK(cs_main);
#endif
    CBitcoinAddress address(params[0].get_str());
    bool isValid = address.IsValid();
    UniValue ret(UniValue::VOBJ);
    ret.push_back(Pair("isvalid", isValid));
    if (isValid) {
        CTxDestination dest = address.Get();
        std::string currentAddress = address.ToString();
        ret.push_back(Pair("address", currentAddress));
        CScript scriptPubKey = GetScriptForDestination(dest);
        ret.push_back(Pair("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end())));
#ifdef ENABLE_WALLET
        isminetype mine = pwalletMain ? IsMine(*pwalletMain, dest) : ISMINE_NO;
        ret.push_back(Pair("ismine", bool(mine & ISMINE_SPENDABLE)));
        ret.push_back(Pair("iswatchonly", bool(mine & ISMINE_WATCH_ONLY)));
        // Destination-specific fields (pubkey / redeem script details).
        UniValue detail = boost::apply_visitor(DescribeAddressVisitor(mine), dest);
        ret.pushKVs(detail);
        if (pwalletMain && pwalletMain->mapAddressBook.count(dest))
            ret.push_back(Pair("account", pwalletMain->mapAddressBook[dest].name));
#endif
    }
    return ret;
}
/**
* Used by addmultisigaddress / createmultisig:
*/
// Builds an n-of-m multisig redeem script from RPC parameters.
// params[0]: required signature count; params[1]: JSON array of keys, each
// either a wallet address (wallet builds resolve it to a full pubkey) or a
// hex-encoded public key. Throws std::runtime_error on any validation
// failure; callers (addmultisigaddress / createmultisig) surface the message.
CScript _createmultisig_redeemScript(const UniValue& params)
{
    int nRequired = params[0].get_int();
    const UniValue& keys = params[1].get_array();
    // Gather public keys
    if (nRequired < 1)
        throw std::runtime_error("a multisignature address must require at least one key to redeem");
    if ((int)keys.size() < nRequired)
        throw std::runtime_error(
            strprintf("not enough keys supplied "
                      "(got %u keys, but need at least %d to redeem)",
                keys.size(), nRequired));
    // OP_CHECKMULTISIG style scripts cap out at 16 participants.
    if (keys.size() > 16)
        throw std::runtime_error("Number of addresses involved in the multisignature address creation > 16\nReduce the number");
    std::vector<CPubKey> pubkeys;
    pubkeys.resize(keys.size());
    for (unsigned int i = 0; i < keys.size(); i++) {
        const std::string& ks = keys[i].get_str();
#ifdef ENABLE_WALLET
        // Case 1: UNNY address and we have full public key:
        CBitcoinAddress address(ks);
        if (pwalletMain && address.IsValid()) {
            CKeyID keyID;
            if (!address.GetKeyID(keyID))
                throw std::runtime_error(
                    strprintf("%s does not refer to a key", ks));
            CPubKey vchPubKey;
            if (!pwalletMain->GetPubKey(keyID, vchPubKey))
                throw std::runtime_error(
                    strprintf("no full public key for address %s", ks));
            if (!vchPubKey.IsFullyValid())
                throw std::runtime_error(" Invalid public key: " + ks);
            pubkeys[i] = vchPubKey;
        }
        // Case 2: hex public key
        else
#endif
            if (IsHex(ks)) {
            CPubKey vchPubKey(ParseHex(ks));
            if (!vchPubKey.IsFullyValid())
                throw std::runtime_error(" Invalid public key: " + ks);
            pubkeys[i] = vchPubKey;
        } else {
            throw std::runtime_error(" Invalid public key: " + ks);
        }
    }
    CScript result = GetScriptForMultisig(nRequired, pubkeys);
    // The redeem script must fit into a single script element to be spendable.
    if (result.size() > MAX_SCRIPT_ELEMENT_SIZE)
        throw std::runtime_error(
            strprintf("redeemScript exceeds size limit: %d > %d", result.size(), MAX_SCRIPT_ELEMENT_SIZE));
    return result;
}
// RPC "createmultisig": build an n-of-m multisig redeem script from the given
// keys/addresses and return the resulting P2SH address plus the hex-encoded
// redeem script. Validation and script construction are delegated to
// _createmultisig_redeemScript, which throws on bad input.
//
// Fix: use std::string / std::runtime_error explicitly, matching the
// qualified usage everywhere else in this file instead of relying on a
// `using namespace std` pulled in indirectly.
UniValue createmultisig(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() < 2 || params.size() > 2) {
        std::string msg = "createmultisig nrequired [\"key\",...]\n"
                          "\nCreates a multi-signature address with n signature of m keys required.\n"
                          "It returns a json object with the address and redeemScript.\n"
                          "\nArguments:\n"
                          "1. nrequired      (numeric, required) The number of required signatures out of the n keys or addresses.\n"
                          "2. \"keys\"       (string, required) A json array of keys which are unny addresses or hex-encoded public keys\n"
                          "     [\n"
                          "       \"key\"    (string) unny address or hex-encoded public key\n"
                          "       ,...\n"
                          "     ]\n"
                          "\nResult:\n"
                          "{\n"
                          "  \"address\":\"multisigaddress\",  (string) The value of the new multisig address.\n"
                          "  \"redeemScript\":\"script\"       (string) The string value of the hex-encoded redemption script.\n"
                          "}\n"
                          "\nExamples:\n"
                          "\nCreate a multisig address from 2 addresses\n" +
                          HelpExampleCli("createmultisig", "2 \"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\",\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"") +
                          "\nAs a json rpc call\n" + HelpExampleRpc("createmultisig", "2, \"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\",\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"");
        throw std::runtime_error(msg);
    }
    // Construct using pay-to-script-hash:
    CScript inner = _createmultisig_redeemScript(params);
    CScriptID innerID(inner);
    CBitcoinAddress address(innerID);
    UniValue result(UniValue::VOBJ);
    result.push_back(Pair("address", address.ToString()));
    result.push_back(Pair("redeemScript", HexStr(inner.begin(), inner.end())));
    return result;
}
// RPC "verifymessage": check a base64 compact signature over a message
// against the key behind the given address. Returns true only when the
// signature recovers a public key whose ID matches the address.
UniValue verifymessage(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 3)
        throw std::runtime_error(
            "verifymessage \"unnyaddress\" \"signature\" \"message\"\n"
            "\nVerify a signed message\n"
            "\nArguments:\n"
            "1. \"unnyaddress\"  (string, required) The unny address to use for the signature.\n"
            "2. \"signature\"       (string, required) The signature provided by the signer in base 64 encoding (see signmessage).\n"
            "3. \"message\"         (string, required) The message that was signed.\n"
            "\nResult:\n"
            "true|false   (boolean) If the signature is verified or not.\n"
            "\nExamples:\n"
            "\nUnlock the wallet for 30 seconds\n" +
            HelpExampleCli("walletpassphrase", "\"mypassphrase\" 30") +
            "\nCreate the signature\n" + HelpExampleCli("signmessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XZ\" \"my message\"") +
            "\nVerify the signature\n" + HelpExampleCli("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XZ\" \"signature\" \"my message\"") +
            "\nAs json rpc\n" + HelpExampleRpc("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XZ\", \"signature\", \"my message\""));
    LOCK(cs_main);
    std::string strAddress = params[0].get_str();
    std::string strSign = params[1].get_str();
    std::string strMessage = params[2].get_str();
    CBitcoinAddress addr(strAddress);
    if (!addr.IsValid())
        throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address");
    // Only plain key addresses can sign messages (not P2SH).
    CKeyID keyID;
    if (!addr.GetKeyID(keyID))
        throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
    bool fInvalid = false;
    std::vector<unsigned char> vchSig = DecodeBase64(strSign.c_str(), &fInvalid);
    if (fInvalid)
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Malformed base64 encoding");
    // Hash the magic prefix plus the message, exactly as signmessage does.
    CHashWriter ss(SER_GETHASH, 0);
    ss << strMessageMagic;
    ss << strMessage;
    // Recover the signing pubkey from the compact signature; failure to
    // recover is reported as "not verified" rather than an RPC error.
    CPubKey pubkey;
    if (!pubkey.RecoverCompact(ss.GetHash(), vchSig))
        return false;
    return (pubkey.GetID() == keyID);
}
// RPC "setmocktime": override the node's notion of current time for
// regression testing. Only permitted on networks that mine blocks on demand
// (-regtest). Note the order of checks is part of the contract: help/arity
// errors are reported before the regtest restriction.
UniValue setmocktime(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw std::runtime_error(
            "setmocktime timestamp\n"
            "\nSet the local time to given timestamp (-regtest only)\n"
            "\nArguments:\n"
            "1. timestamp  (integer, required) Unix seconds-since-epoch timestamp\n"
            "   Pass 0 to go back to using the system time.");
    if (!Params().MineBlocksOnDemand())
        throw std::runtime_error("setmocktime for regression testing (-regtest mode) only");
    LOCK(cs_main);
    // Enforce a numeric argument before applying; 0 restores system time.
    RPCTypeCheck(params, boost::assign::list_of(UniValue::VNUM));
    SetMockTime(params[0].get_int64());
    return NullUniValue;
}
#ifdef ENABLE_WALLET
// RPC "getstakingstatus": report each precondition for proof-of-stake
// minting (time validity, connections, wallet lock state, mintable coins,
// reserve balance, masternode sync) plus the same staking heuristic used by
// getinfo.
//
// Fix: the original dereferenced chainActive.Tip() without a null check,
// crashing when called before the active chain is loaded (getinfo guards
// the same pointer). Now reports RPC_IN_WARMUP instead. The redundant inner
// `if (pwalletMain)` was removed — pwalletMain is already guaranteed
// non-null by the throw above.
UniValue getstakingstatus(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 0)
        throw std::runtime_error(
            "getstakingstatus\n"
            "Returns an object containing various staking information.\n"
            "\nResult:\n"
            "{\n"
            "  \"validtime\": true|false,       (boolean) if the chain tip is within staking phases\n"
            "  \"haveconnections\": true|false, (boolean) if network connections are present\n"
            "  \"walletunlocked\": true|false,  (boolean) if the wallet is unlocked\n"
            "  \"mintablecoins\": true|false,   (boolean) if the wallet has mintable coins\n"
            "  \"enoughcoins\": true|false,     (boolean) if available coins are greater than reserve balance\n"
            "  \"mnsync\": true|false,          (boolean) if masternode data is synced\n"
            "  \"staking status\": true|false,  (boolean) if the wallet is staking or not\n"
            "}\n"
            "\nExamples:\n" +
            HelpExampleCli("getstakingstatus", "") + HelpExampleRpc("getstakingstatus", ""));
    if (!pwalletMain)
        throw JSONRPCError(RPC_IN_WARMUP, "Try again after active chain is loaded");
    UniValue obj(UniValue::VOBJ);
    {
        LOCK2(cs_main, &pwalletMain->cs_wallet);
        // The tip may still be unset during startup; bail out cleanly
        // instead of dereferencing a null pointer below.
        if (chainActive.Tip() == NULL)
            throw JSONRPCError(RPC_IN_WARMUP, "Try again after active chain is loaded");
        obj.push_back(Pair("validtime", chainActive.Tip()->nTime > Params().GenesisBlock().nTime));
        obj.push_back(Pair("haveconnections", !vNodes.empty()));
        obj.push_back(Pair("walletunlocked", !pwalletMain->IsLocked()));
        obj.push_back(Pair("mintablecoins", pwalletMain->MintableCoins()));
        obj.push_back(Pair("enoughcoins", nReserveBalance <= pwalletMain->GetBalance()));
        obj.push_back(Pair("mnsync", masternodeSync.IsSynced()));
        // Same heuristic as getinfo: we hashed a block at the tip height, or
        // at the previous height while a coin-stake search is still active.
        bool nStaking = false;
        if (mapHashedBlocks.count(chainActive.Tip()->nHeight))
            nStaking = true;
        else if (mapHashedBlocks.count(chainActive.Tip()->nHeight - 1) && nLastCoinStakeSearchInterval)
            nStaking = true;
        obj.push_back(Pair("staking status", nStaking));
    }
    return obj;
}
#endif // ENABLE_WALLET
// RPC "makekeypair": generate a fresh (uncompressed) key pair, optionally
// retrying until the hex-encoded public key starts with the given prefix.
// Gives up after 10000 attempts and returns null. The prefix match is
// case-sensitive against lowercase hex output.
//
// Fixes: qualify string/runtime_error with std:: for consistency with the
// rest of the file, and test the prefix with std::string::compare instead of
// allocating a substr temporary (twice — once in the loop condition and once
// after the loop).
UniValue makekeypair(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() > 1) {
        throw std::runtime_error(
            "makekeypair [\"prefix\"]\n"
            "\nCreates a new key pair.\n"
            "It returns a json object with the public and private key.\n"
            "\nArguments:\n"
            "1. prefix    (string, optional) The prefix for the address.\n"
            "\nResult:\n"
            "[\n"
            "  \"PublicKey\":\"public key\",    (string) The public key.\n"
            "  \"PrivateKey\":\"private key\"   (string) The private key.\n"
            "]\n");
    }
    std::string strPrefix = "";
    if (params.size() > 0)
        strPrefix = params[0].get_str();
    CKey key;
    CPubKey pubkey;
    std::string pubkeyhex;
    int nCount = 0;
    bool fMatch = false;
    do {
        key.MakeNewKey(false); // false => uncompressed public key
        nCount++;
        pubkey = key.GetPubKey();
        pubkeyhex = HexStr(pubkey.begin(), pubkey.end());
        // compare(0, n, s) == 0 <=> pubkeyhex starts with strPrefix
        // (trivially true for an empty prefix, so the loop runs once).
        fMatch = (pubkeyhex.compare(0, strPrefix.size(), strPrefix) == 0);
    } while (nCount < 10000 && !fMatch);
    if (!fMatch)
        return NullUniValue;
    UniValue result(UniValue::VOBJ);
    result.push_back(Pair("PublicKey", pubkeyhex));
    result.push_back(Pair("PrivateKey", CBitcoinSecret(key).ToString()));
    return result;
}
|
// Fix: the include guard __GAME_ACT_CHEMICAL_FACTORY_HPP__ used a
// double-underscore identifier, which is reserved for the implementation
// ([lex.name]); renamed to a conforming guard.
#ifndef GAME_ACT_CHEMICAL_FACTORY_HPP
#define GAME_ACT_CHEMICAL_FACTORY_HPP
#include "ObjectsFactory.hpp"
namespace GameAct
{
    /// Concrete ObjectsFactory for "chemical" game-act objects.
    class ChemicalFactory :
        public ObjectsFactory
    {
    public:
        /// Creates the object registered under `name`.
        /// NOTE(review): returns a raw pointer — ownership presumably passes
        /// to the caller; confirm against ObjectsFactory's contract.
        Object * createObject( const std::string & name ) const override;
    };
}
#endif // GAME_ACT_CHEMICAL_FACTORY_HPP
|
// Fixes: the include guard __PIPE_OP__ used a reserved double-underscore
// identifier ([lex.name]) — renamed; `string` is now qualified as
// std::string so the header does not depend on a `using namespace std`
// leaking in from its includes (same type, so out-of-line definitions and
// any base-class override still match).
#ifndef PIPE_OP_HPP
#define PIPE_OP_HPP
#include <unistd.h>
#include <sys/wait.h>
#include <string>
#include "Executable.hpp"
#include "Connector.hpp"
#include "Cmnd.hpp"
/// Connector implementing a shell pipe: the left command's stdout feeds the
/// right command's stdin.
class Pipe_op : public Connector {
public:
    Pipe_op();
    /// Identifies this connector kind.
    std::string getType() { return "PIPE"; }
    /// Executes left and right commands joined by a pipe; returns success.
    bool run_command();
    void set_left(Executable*);
    void set_right(Executable*);
    Executable* get_left();
    Executable* get_right();
    /// Textual representation for debugging/printing.
    std::string show();
};
#endif // PIPE_OP_HPP
|
/**
* IR receiver (Version 0.0.4)
*
*/
#include "ReceiverIR.h"
#define LOCK()
#define UNLOCK()
#define InRange(x,y) ((((y) * 0.7) < (x)) && ((x) < ((y) * 1.3)))
/**
 * Constructor.
 *
 * Configures the receive pin with a pull-up, hooks both signal edges to the
 * decoder ISRs, and starts a 10 ms watchdog ticker that resets a stalled
 * decode (see isr_wdt).
 *
 * @param rxpin Pin for receive IR signal.
 */
ReceiverIR::ReceiverIR(PinName rxpin) : evt(rxpin) {
    init_state();
    evt.fall(this, &ReceiverIR::isr_fall);
    evt.rise(this, &ReceiverIR::isr_rise);
    evt.mode(PullUp);
    // Watchdog period: 10 * 1000 us = 10 ms.
    ticker.attach_us(this, &ReceiverIR::isr_wdt, 10 * 1000);
}
/**
 * Destructor.
 *
 * No explicit teardown: members (ticker, pin event, timers) release their
 * resources via their own destructors.
 */
ReceiverIR::~ReceiverIR() {
}
/**
 * Get state.
 *
 * Takes a snapshot of the decoder state under the (currently no-op) lock so
 * the value returned is consistent even if an ISR fires concurrently.
 *
 * @return Current state.
 */
ReceiverIR::State ReceiverIR::getState() {
    LOCK();
    const State snapshot = work.state;
    UNLOCK();
    return snapshot;
}
/**
 * Get data.
 *
 * Destructive read: on success the internal buffer and decoder state are
 * reset via init_state(), so each received frame can be fetched once.
 *
 * @param format Pointer to format.
 * @param buf Buffer of a data.
 * @param bitlength Bit length of the buffer.
 *
 * @return Data bit length, or -1 if the caller's buffer is too small
 *         (in which case nothing is consumed).
 */
int ReceiverIR::getData(RemoteIR::Format *format, uint8_t *buf, int bitlength) {
    LOCK();
    if (bitlength < data.bitcount) {
        UNLOCK();
        return -1;
    }
    const int nbits = data.bitcount;
    // Round up to whole bytes so a partial trailing byte is copied too.
    const int nbytes = data.bitcount / 8 + (((data.bitcount % 8) != 0) ? 1 : 0);
    *format = data.format;
    for (int i = 0; i < nbytes; i++) {
        buf[i] = data.buffer[i];
    }
    // Consume the frame: clears buffer/bitcount and returns to Idle.
    init_state();
    UNLOCK();
    return nbits;
}
/**
 * Resets the decoder to Idle: clears all captured edge timestamps (c1..c3
 * leader edges, d1/d2 data-bit edges; -1 means "not captured"), the decoded
 * format/bit count, the measurement timer, and the data buffer.
 *
 * Fix: the buffer-clearing loop used a signed `int` index compared against
 * `sizeof(data.buffer)` (an unsigned size_t), a signed/unsigned comparison;
 * the index is now unsigned.
 */
void ReceiverIR::init_state(void) {
    work.c1 = -1;
    work.c2 = -1;
    work.c3 = -1;
    work.d1 = -1;
    work.d2 = -1;
    work.state = Idle;
    data.format = RemoteIR::UNKNOWN;
    data.bitcount = 0;
    timer.stop();
    timer.reset();
    for (unsigned int i = 0; i < sizeof(data.buffer); i++) {
        data.buffer[i] = 0;
    }
}
/**
 * Watchdog tick (runs every 10 ms from the ticker set up in the ctor).
 *
 * If a decode appears to be in progress (state not Idle, or any edge
 * timestamp captured) for more than 50 consecutive ticks (~500 ms), the
 * decoder is assumed stalled and is reset to Idle.
 */
void ReceiverIR::isr_wdt(void) {
    LOCK();
    // Persistent stall counter across ticks.
    static int cnt = 0;
    if ((Idle != work.state) || ((0 <= work.c1) || (0 <= work.c2) || (0 <= work.c3) || (0 <= work.d1) || (0 <= work.d2))) {
        cnt++;
        if (cnt > 50) {
#if 0
            printf("# WDT [c1=%d, c2=%d, c3=%d, d1=%d, d2=%d, state=%d, format=%d, bitcount=%d]\n",
                   work.c1,
                   work.c2,
                   work.c3,
                   work.d1,
                   work.d2,
                   work.state,
                   data.format,
                   data.bitcount);
#endif
            init_state();
            cnt = 0;
        }
    } else {
        // Decoder healthy/idle: restart the stall window.
        cnt = 0;
    }
    UNLOCK();
}
/**
 * Falling-edge ISR of the (pulled-up, hence active-low) IR input.
 *
 * Idle: captures leader-pulse edges. c1 = first falling edge (also starts
 * the timer), c3 = second falling edge; together with c2 (rising edge from
 * isr_rise) the widths a = c2-c1 (leader mark) and b = c3-c2 (leader space)
 * identify the protocol: NEC, NEC repeat, AEHA, or AEHA repeat (repeats
 * complete immediately with bitcount 0). Unrecognized timing resets.
 *
 * Receiving: for NEC/AEHA the falling edge ends a bit; the space width
 * d2-d1 selects bit value (3T = 1, 1T = 0). For SONY the falling edge only
 * records the bit start time (d1); the bit is decided in isr_rise.
 */
void ReceiverIR::isr_fall(void) {
    LOCK();
    switch (work.state) {
        case Idle:
            if (work.c1 < 0) {
                // First edge of a potential leader: start timing from here.
                timer.start();
                work.c1 = timer.read_us();
            } else {
                work.c3 = timer.read_us();
                int a = work.c2 - work.c1;  // leader mark width
                int b = work.c3 - work.c2;  // leader space width
                if (InRange(a, RemoteIR::TUS_NEC * 16) && InRange(b, RemoteIR::TUS_NEC * 8)) {
                    /*
                     * NEC.
                     */
                    data.format = RemoteIR::NEC;
                    work.state = Receiving;
                    data.bitcount = 0;
                } else if (InRange(a, RemoteIR::TUS_NEC * 16) && InRange(b, RemoteIR::TUS_NEC * 4)) {
                    /*
                     * NEC Repeat.
                     */
                    data.format = RemoteIR::NEC_REPEAT;
                    work.state = Received;
                    data.bitcount = 0;
                    work.c1 = -1;
                    work.c2 = -1;
                    work.c3 = -1;
                    work.d1 = -1;
                    work.d2 = -1;
                } else if (InRange(a, RemoteIR::TUS_AEHA * 8) && InRange(b, RemoteIR::TUS_AEHA * 4)) {
                    /*
                     * AEHA.
                     */
                    data.format = RemoteIR::AEHA;
                    work.state = Receiving;
                    data.bitcount = 0;
                } else if (InRange(a, RemoteIR::TUS_AEHA * 8) && InRange(b, RemoteIR::TUS_AEHA * 8)) {
                    /*
                     * AEHA Repeat.
                     */
                    data.format = RemoteIR::AEHA_REPEAT;
                    work.state = Received;
                    data.bitcount = 0;
                    work.c1 = -1;
                    work.c2 = -1;
                    work.c3 = -1;
                    work.d1 = -1;
                    work.d2 = -1;
                } else {
                    // Leader timing matched no known protocol: discard.
                    init_state();
                }
            }
            break;
        case Receiving:
            if (RemoteIR::NEC == data.format) {
                work.d2 = timer.read_us();
                int a = work.d2 - work.d1;  // space width since last rise
                if (InRange(a, RemoteIR::TUS_NEC * 3)) {
                    data.buffer[data.bitcount / 8] |= (1 << (data.bitcount % 8));
                } else if (InRange(a, RemoteIR::TUS_NEC * 1)) {
                    data.buffer[data.bitcount / 8] &= ~(1 << (data.bitcount % 8));
                }
                data.bitcount++;
#if 1
                /*
                 * Length of NEC is always 32 bits.
                 */
                if (32 <= data.bitcount) {
                    work.state = Received;
                    work.c1 = -1;
                    work.c2 = -1;
                    work.c3 = -1;
                    work.d1 = -1;
                    work.d2 = -1;
                }
#else
                /*
                 * Set timeout for tail detection automatically.
                 */
                timeout.detach();
                timeout.attach_us(this, &ReceiverIR::isr_timeout, RemoteIR::TUS_NEC * 5);
#endif
            } else if (RemoteIR::AEHA == data.format) {
                work.d2 = timer.read_us();
                int a = work.d2 - work.d1;  // space width since last rise
                if (InRange(a, RemoteIR::TUS_AEHA * 3)) {
                    data.buffer[data.bitcount / 8] |= (1 << (data.bitcount % 8));
                } else if (InRange(a, RemoteIR::TUS_AEHA * 1)) {
                    data.buffer[data.bitcount / 8] &= ~(1 << (data.bitcount % 8));
                }
                data.bitcount++;
#if 0
                /*
                 * Typical length of AEHA is 48 bits.
                 * Please check a specification of your remote controller if you find a problem.
                 */
                // NOTE(review): this disabled branch writes data.state, but
                // the state member lives in `work` elsewhere — looks like a
                // latent typo; fix to work.state if this path is ever enabled.
                if (48 <= data.bitcount) {
                    data.state = Received;
                    work.c1 = -1;
                    work.c2 = -1;
                    work.c3 = -1;
                    work.d1 = -1;
                    work.d2 = -1;
                }
#else
                /*
                 * Set timeout for tail detection automatically.
                 */
                timeout.detach();
                timeout.attach_us(this, &ReceiverIR::isr_timeout, RemoteIR::TUS_AEHA * 5);
#endif
            } else if (RemoteIR::SONY == data.format) {
                // SONY encodes in mark width: just note the bit start time.
                work.d1 = timer.read_us();
            }
            break;
        case Received:
            // Frame complete; ignore edges until getData() consumes it.
            break;
        default:
            break;
    }
    UNLOCK();
}
/**
 * Rising-edge ISR of the IR input.
 *
 * Idle: captures c2 (end of the leader mark). A ~4T SONY-width mark starts
 * SONY reception directly; an implausibly narrow mark (below
 * MINIMUM_LEADER_WIDTH) resets; otherwise we wait for the next falling edge
 * so isr_fall can classify NEC/AEHA from the leader space.
 *
 * Receiving: for NEC/AEHA the rise only records the space start (d1). For
 * SONY the mark width d2-d1 decides the bit (2T = 1, 1T = 0); reception is
 * terminated at 32 bits (see note below about variable SONY lengths).
 */
void ReceiverIR::isr_rise(void) {
    LOCK();
    switch (work.state) {
        case Idle:
            if (0 <= work.c1) {
                work.c2 = timer.read_us();
                int a = work.c2 - work.c1;  // leader mark width
                if (InRange(a, RemoteIR::TUS_SONY * 4)) {
                    data.format = RemoteIR::SONY;
                    work.state = Receiving;
                    data.bitcount = 0;
                } else {
                    static const int MINIMUM_LEADER_WIDTH = 150;
                    if (a < MINIMUM_LEADER_WIDTH) {
                        // Too narrow to be any protocol's leader: noise.
                        init_state();
                    }
                }
            } else {
                // Rise without a preceding fall: inconsistent, reset.
                init_state();
            }
            break;
        case Receiving:
            if (RemoteIR::NEC == data.format) {
                work.d1 = timer.read_us();
            } else if (RemoteIR::AEHA == data.format) {
                work.d1 = timer.read_us();
            } else if (RemoteIR::SONY == data.format) {
                work.d2 = timer.read_us();
                int a = work.d2 - work.d1;  // mark width of this bit
                if (InRange(a, RemoteIR::TUS_SONY * 2)) {
                    data.buffer[data.bitcount / 8] |= (1 << (data.bitcount % 8));
                } else if (InRange(a, RemoteIR::TUS_SONY * 1)) {
                    data.buffer[data.bitcount / 8] &= ~(1 << (data.bitcount % 8));
                }
                data.bitcount++;
#if 1
                /*
                 * How do I know the correct length? (6bits, 12bits, 15bits, 20bits...)
                 * By a model only?
                 * Please check a specification of your remote controller if you find a problem.
                 */
                if (32 <= data.bitcount) {
                    work.state = Received;
                    work.c1 = -1;
                    work.c2 = -1;
                    work.c3 = -1;
                    work.d1 = -1;
                    work.d2 = -1;
                }
#else
                /*
                 * Set timeout for tail detection automatically.
                 */
                timeout.detach();
                timeout.attach_us(this, &ReceiverIR::isr_timeout, RemoteIR::TUS_SONY * 4);
#endif
            }
            break;
        case Received:
            // Frame complete; ignore edges until getData() consumes it.
            break;
        default:
            break;
    }
    UNLOCK();
}
/**
 * Tail-detection timeout ISR: fired by `timeout` when no further edges
 * arrive while Receiving (armed from isr_fall/isr_rise). Finalizes the
 * frame as Received and clears the captured edge timestamps.
 *
 * Fix: removed a stray `printf("done\r\n")` debug print — all other debug
 * output in this file is compiled out behind `#if 0`, and printing from an
 * interrupt/timeout context is unsafe on this target.
 */
void ReceiverIR::isr_timeout(void) {
    LOCK();
#if 0
    printf("# TIMEOUT [c1=%d, c2=%d, c3=%d, d1=%d, d2=%d, state=%d, format=%d, bitcount=%d]\n",
           work.c1,
           work.c2,
           work.c3,
           work.d1,
           work.d2,
           work.state,
           data.format,
           data.bitcount);
#endif
    if (work.state == Receiving) {
        work.state = Received;
        work.c1 = -1;
        work.c2 = -1;
        work.c3 = -1;
        work.d1 = -1;
        work.d2 = -1;
    }
    UNLOCK();
}
|
/**********************************************************************
This source file is a part of Demi3D
__ ___ __ __ __
| \|_ |\/|| _)| \
|__/|__| || __)|__/
Copyright (c) 2013-2014 Demi team
https://github.com/wangyanxing/Demi3D
Released under the MIT License
https://github.com/wangyanxing/Demi3D/blob/master/License.txt
***********************************************************************/
#include "ArenaPch.h"
#include "ArenaHeroAttribute.h"
#include "ArenaMoveProperty.h"
#include "ArenaGameApp.h"
#include "ArenaEntityManager.h"
#include "ArenaHero.h"
#include "ArenaGame.h"
#include "ArenaConfigsLoader.h"
#include "ArenaLevel.h"
#include "K2Clip.h"
#include "K2Terrain.h"
#include "K2World.h"
#include "K2RenderObjects.h"
#include "CullNode.h"
#include "GfxDriver.h"
#include "SceneManager.h"
#include "RenderWindow.h"
#include "XMLFile.h"
#include "AssetManager.h"
namespace Demi
{
ArHeroEntity::ArHeroEntity()
{
}
ArHeroEntity::~ArHeroEntity()
{
}
void ArHeroEntity::OnKeyInput(const K2KeyEvent& event)
{
}
void ArHeroEntity::OnMouseInput(const K2MouseEvent& event)
{
#ifdef DEMI_KEYMOUSE
if (event.button == OIS::MB_Left && event.type == K2MouseEvent::MOUSE_PRESS)
#elif defined(DEMI_TOUCH)
if (event.type == K2MouseEvent::MOUSE_PRESS)
#endif
{
// click on map
auto terrain = ArGameApp::Get()->GetWorld()->GetTerrain();
DiRay ray = ArInput::GetPickupRay(event);
DiTransUnitPtr result;
if (!DiBase::Driver->GetSceneManager()->GetSceneCuller()->RayQuery(ray, result, QUERY_NPC))
{
DiVec3 clickout;
if (terrain->RayIntersects(ray, clickout))
{
DiK2Pos k2pos;
k2pos.FromWorldPos(clickout);
DiK2Pos source = mRenderObj->GetPosition();
GetMoveProperty()->MoveTo(source, k2pos);
GetAIProperty()->ClearAI();
}
} else {
// attack enemies
DI_ASSERT(result);
GetAIProperty()->CommandAttack(result->GetCustomID());
}
}
}
void ArHeroEntity::InitAttribute()
{
SetAttribute<ArHeroAttr>();
}
void ArHeroEntity::SetupAttribute()
{
auto attr = GetAttribute<ArHeroAttr>();
ArEntityConfigs* entityConfig = attr->GetEntityConfig();
if(entityConfig->model.empty())
{
DI_WARNING("Cannot locate the model name.");
}
else
{
SetModel(entityConfig->path + entityConfig->model[0]);
}
SetupEntityConfig(attr->GetEntityConfig());
}
// Per-frame update: currently just forwards to the base game entity.
void ArHeroEntity::Update(float dt)
{
    ArGameEntity::Update(dt);
}
}
|
/* ------------------------------------------------------------------
* Copyright (C) 1998-2009 PacketVideo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
* -------------------------------------------------------------------
*/
/*
* ------------------------------------------------------------------- *
* MPEG-4 Simple Profile Video Decoder *
* ------------------------------------------------------------------- *
*
* This software module was originally developed by
*
* Paulo Nunes (IST / ACTS-MoMuSyS)
* Robert Danielsen (Telenor / ACTS-MoMuSyS)
*
* in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.
* This software module is an implementation of a part of one or more MPEG-4
* Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC
* 14496-2) standard.
*
* ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free
* license to this software module or modifications thereof for use in hardware
* or software products claiming conformance to the MPEG-4 Video (ISO/IEC
* 14496-2) standard.
*
* Those intending to use this software module in hardware or software products
* are advised that its use may infringe existing patents. The original
* developer of this software module and his/her company, the subsequent
* editors and their companies, and ISO/IEC have no liability for use of this
* software module or modifications thereof in an implementation. Copyright is
* not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming
* products.
*
* ACTS-MoMuSys partners retain full right to use the code for his/her own
* purpose, assign or donate the code to a third party and to inhibit third
* parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard
* conforming products. This copyright notice must be included in all copies or
* derivative works.
*
* Copyright (c) 1996
*
*****************************************************************************/
/***********************************************************HeaderBegin*******
*
* File: vlc_dec.c
*
* Author: Paulo Nunes (IST) - Paulo.Nunes@lx.it.pt
* Created: 1-Mar-96
*
* Description: This file contains the VLC functions needed to decode a
* bitstream.
*
* Notes:
* The functions contained in this file were adapted from
* tmndecode
* Written by Karl Olav Lillevold <kol@nta.no>,
* 1995 Telenor R&D.
* Donated to the Momusys-project as background code by
* Telenor.
*
* based on mpeg2decode, (C) 1994, MPEG Software Simulation Group
* and mpeg2play, (C) 1994 Stefan Eckart
* <stefan@lis.e-technik.tu-muenchen.de>
*
*
* Modified: 9-May-96 Paulo Nunes: Reformatted. New headers.
* 17-Jan-97 Jan De Lameillieure (HHI) : corrected in
* 01.05.97 Luis Ducla-Soares: added RvlcDecTCOEF() to allow decoding
* of Reversible VLCs.
* 09.03.98 Paulo Nunes: Cleaning.
*
***********************************************************HeaderEnd*********/
#include "mp4dec_lib.h"
#include "vlc_dec_tab.h"
#include "vlc_decode.h"
#include "bitstream.h"
#include "max_level.h"
/* ====================================================================== /
Function : DecodeUserData()
Date : 04/10/2000
History :
Modified : 04/16/2001 : removed status checking of PV_BitstreamFlushBits
This is simply a realization of the user_data() function
in the ISO/IEC 14496-2 manual.
/ ====================================================================== */
/* Skip a user_data() syntax element: consume the 32-bit start code, then
   discard data bytes until the next start-code prefix (0x000001) is seen.
   Returns PV_END_OF_VOP if the buffer runs out first, else PV_SUCCESS. */
PV_STATUS DecodeUserData(BitstreamDecVideo *stream)
{
    uint32 next24;

    BitstreamReadBits32HC(stream);               /* user_data_start_code */
    BitstreamShowBits32(stream, 24, &next24);
    while (next24 != 1)                          /* until 0x000001 prefix */
    {
        BitstreamReadBits16(stream, 8);          /* drop one data byte */
        BitstreamShowBits32(stream, 24, &next24);
        if (BitstreamCheckEndBuffer(stream) == PV_END_OF_VOP)
        {
            return PV_END_OF_VOP;
        }
    }
    return PV_SUCCESS;
}
/***********************************************************CommentBegin******
*
* 3/10/00 : initial modification to the
* new PV-Decoder Lib format.
* 3/29/00 : added return code check to some functions and
* optimize the code.
*
***********************************************************CommentEnd********/
/*
 * Decode the motion vector(s) of the current macroblock into
 * video->motX/motY.  The MV arrays hold two slots per 8x8 block column
 * (nMBPerRow << 1 slots per row), so one MB owns a 2x2 group of slots.
 *
 *   video -- decoder state; reads currVop->fcodeForward, mbnum_col/row,
 *            nMBPerRow; writes motX/motY at the current MB position.
 *   mode  -- MB coding mode; MODE_INTER4V (plus MODE_INTER4V_Q with Annex
 *            IJKT support) carries four vectors, any other mode carries one
 *            vector that is replicated into all four slots.
 *
 * Returns PV_SUCCESS, or the failing status of PV_VlcDecMV()/
 * PV_DecodeMBVec() when a codeword cannot be decoded.
 */
PV_STATUS PV_GetMBvectors(VideoDecData *video, uint mode)
{
    PV_STATUS status;
    BitstreamDecVideo *stream = video->bitstream;
    int f_code_f = video->currVop->fcodeForward;
    int vlc_code_mag;
    MOT *mot_x = video->motX;
    MOT *mot_y = video->motY;
    int k, offset;
    int x_pos = video->mbnum_col;
    int y_pos = video->mbnum_row;
    int doubleWidth = video->nMBPerRow << 1;       /* MV slots per row */
    int pos = (x_pos + y_pos * doubleWidth) << 1;  /* first slot of this MB */
    MOT mvx = 0, mvy = 0;

    if (f_code_f == 1)
    {
        /* f_code == 1: no residual bits follow the VLC, so each component
           can be wrapped directly into the 6-bit range [-32, 31]. */
#ifdef PV_ANNEX_IJKT_SUPPORT
        if (mode == MODE_INTER4V || mode == MODE_INTER4V_Q)
#else
        if (mode == MODE_INTER4V)
#endif
        {
            /* four vectors, one per 8x8 block */
            for (k = 0; k < 4; k++)
            {
                offset = (k & 1) + (k >> 1) * doubleWidth;
                mv_prediction(video, k, &mvx, &mvy);

                /* decode component x */
                status = PV_VlcDecMV(stream, &vlc_code_mag);
                if (status != PV_SUCCESS)
                {
                    return status;
                }
                mvx += (MOT)vlc_code_mag;
                mvx = (MOT)(((mvx + 32) & 0x3F) - 32);   /* wrap to [-32,31] */

                /* decode component y */
                status = PV_VlcDecMV(stream, &vlc_code_mag);
                if (status != PV_SUCCESS)
                {
                    return status;
                }
                mvy += (MOT)vlc_code_mag;
                mvy = (MOT)(((mvy + 32) & 0x3F) - 32);

                mot_x[pos+offset] = (MOT) mvx;
                mot_y[pos+offset] = (MOT) mvy;
            }
        }
        else
        {
            mv_prediction(video, 0, &mvx, &mvy);
            /* For PVOPs, field appears only in MODE_INTER & MODE_INTER_Q */
            status = PV_VlcDecMV(stream, &vlc_code_mag);
            if (status != PV_SUCCESS)
            {
                return status;
            }
            mvx += (MOT)vlc_code_mag;
            mvx = (MOT)(((mvx + 32) & 0x3F) - 32);

            status = PV_VlcDecMV(stream, &vlc_code_mag);
            if (status != PV_SUCCESS)
            {
                return status;
            }
            mvy += (MOT)vlc_code_mag;
            mvy = (MOT)(((mvy + 32) & 0x3F) - 32);

            /* replicate the single MB vector into all four 8x8 slots */
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
            pos += doubleWidth;
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
        }
    }
    else
    {
        /* general f_code: PV_DecodeMBVec reads VLC + residual and wraps
           the result into the legal range via PV_DeScaleMVD */
#ifdef PV_ANNEX_IJKT_SUPPORT
        if (mode == MODE_INTER4V || mode == MODE_INTER4V_Q)
#else
        if (mode == MODE_INTER4V)
#endif
        {
            for (k = 0; k < 4; k++)
            {
                offset = (k & 1) + (k >> 1) * doubleWidth;
                mv_prediction(video, k, &mvx, &mvy);
                status = PV_DecodeMBVec(stream, &mvx, &mvy, f_code_f);
                mot_x[pos+offset] = (MOT) mvx;
                mot_y[pos+offset] = (MOT) mvy;
                if (status != PV_SUCCESS)
                {
                    return status;
                }
            }
        }
        else
        {
            mv_prediction(video, 0, &mvx, &mvy);
            /* For PVOPs, field appears only in MODE_INTER & MODE_INTER_Q */
            status = PV_DecodeMBVec(stream, &mvx, &mvy, f_code_f);
            /* replicate the single MB vector into all four 8x8 slots */
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
            pos += doubleWidth;
            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;
            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;
            if (status != PV_SUCCESS)
            {
                return status;
            }
        }
    }
    return PV_SUCCESS;
}
/***********************************************************CommentBegin******
* 3/10/00 : initial modification to the
* new PV-Decoder Lib format.
* 3/29/00 : added return code check to some functions
* 5/10/00 : check whether the decoded vector is legal.
* 4/17/01 : use MOT type
***********************************************************CommentEnd********/
/*
 * Decode one differential motion vector (x then y): each component is a VLC
 * magnitude, followed — when the magnitude is nonzero — by an (f_code_f - 1)
 * bit residual.  PV_DeScaleMVD() combines the parts and wraps *mv_x/*mv_y
 * into the legal f_code range.  Returns PV_FAIL/VLC status on error.
 */
PV_STATUS PV_DecodeMBVec(BitstreamDecVideo *stream, MOT *mv_x, MOT *mv_y, int f_code_f)
{
    PV_STATUS status;
    int mag_x, mag_y;
    int res_x = 0, res_y = 0;

    /* component x: VLC magnitude, then optional residual bits */
    status = PV_VlcDecMV(stream, &mag_x);
    if (status != PV_SUCCESS)
    {
        return status;
    }
    if (mag_x)
    {
        res_x = (int) BitstreamReadBits16_INLINE(stream, (int)(f_code_f - 1));
    }

    /* component y: same layout */
    status = PV_VlcDecMV(stream, &mag_y);
    if (status != PV_SUCCESS)
    {
        return status;
    }
    if (mag_y)
    {
        res_y = (int) BitstreamReadBits16_INLINE(stream, (int)(f_code_f - 1));
    }

    /* short-circuit: y is not descaled when x already failed */
    if (PV_DeScaleMVD(f_code_f, res_x, mag_x, mv_x) != PV_SUCCESS ||
        PV_DeScaleMVD(f_code_f, res_y, mag_y, mv_y) != PV_SUCCESS)
    {
        return PV_FAIL;
    }
    return PV_SUCCESS;
}
/***********************************************************CommentBegin******
* 3/31/2000 : initial modification to the new PV-Decoder Lib format.
* 5/10/2000 : check to see if the decoded vector falls within
* the legal fcode range.
*
***********************************************************CommentEnd********/
/*
 * Combine the VLC magnitude and FLC residual of one motion-vector
 * differential, add it to the prediction in *vector, and wrap the result
 * into the range implied by f_code.
 *
 *   f_code       -- MV range in 1/2 units: 1=32, 2=64, ..., 7=2048
 *   residual     -- FLC-coded part of the differential
 *   vlc_code_mag -- VLC-coded part of the differential
 *   vector       -- in/out: MV component in 1/2 units
 */
PV_STATUS PV_DeScaleMVD(
    int f_code,
    int residual,
    int vlc_code_mag,
    MOT *vector
)
{
    const int half_range = 1 << (f_code + 4);
    const int wrap_mask  = (half_range << 1) - 1;
    int delta;

    if (vlc_code_mag == 0)
    {
        delta = 0;
    }
    else
    {
        delta = ((PV_ABS(vlc_code_mag) - 1) << (f_code - 1)) + residual + 1;
        if (vlc_code_mag < 0)
        {
            delta = -delta;
        }
    }

    *vector += (MOT)(delta);
    /* fold into [-half_range, half_range - 1] */
    *vector = (MOT)((*vector + half_range) & wrap_mask) - half_range;
    return PV_SUCCESS;
}
/*
 * Compute the motion-vector predictor for 8x8 block 'block' (0..3) of the
 * current macroblock and write it to *mvx/*mvy.
 *
 * Candidates are the left (p1), above (p2) and above-right (p3) vectors in
 * video->motX/motY; a candidate outside the frame or in a different slice
 * (video->sliceNo) is invalid and contributes 0.  With two or more valid
 * candidates the component-wise median is used; with exactly one, that
 * candidate alone; with none, zero.
 */
void mv_prediction(
    VideoDecData *video,
    int block,
    MOT *mvx,
    MOT *mvy
)
{
    /*----------------------------------------------------------------------------
    ; Define all local variables
    ----------------------------------------------------------------------------*/
    MOT *motxdata = video->motX;
    MOT *motydata = video->motY;
    int mbnum_col = video->mbnum_col;
    int mbnum_row = video->mbnum_row;
    uint8 *slice_nb = video->sliceNo;
    int nMBPerRow = video->nMBPerRow;
    int nMVPerRow = nMBPerRow << 1;          /* MV slots per array row */
    int mbnum = video->mbnum;
    int p1x = 0, p2x = 0, p3x = 0;
    int p1y = 0, p2y = 0, p3y = 0;
    int rule1 = 0, rule2 = 0, rule3 = 0;     /* candidate-valid flags */
    int indx;

    indx = ((mbnum_col << 1) + (block & 1)) + ((mbnum_row << 1) + (block >> 1)) * nMVPerRow - 1; /* left block */

    if (block & 1)   /* block 1, 3 */
    {
        /* left candidate lies inside this same MB: always valid */
        p1x = motxdata[indx];
        p1y = motydata[indx];
        rule1 = 1;
    }
    else             /* block 0, 2 */
    {
        /* left candidate is in the previous MB: needs same slice */
        if (mbnum_col > 0 && slice_nb[mbnum] == slice_nb[mbnum-1])
        {
            p1x = motxdata[indx];
            p1y = motydata[indx];
            rule1 = 1;
        }
    }

    indx = indx + 1 - nMVPerRow; /* upper_block */
    if (block >> 1)
    {
        /* blocks 2, 3: both upper candidates are inside this MB */
        indx -= (block & 1);
        p2x = motxdata[indx];
        p2y = motydata[indx];
        p3x = motxdata[indx + 1];
        p3y = motydata[indx + 1];
        rule2 = rule3 = 1;
    }
    else
    {   /* block 0,1 */
        if (mbnum_row)
        {
            /* above candidate from the MB one row up, same slice only */
            if (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])
            {
                p2x = motxdata[indx];
                p2y = motydata[indx];
                rule2 = 1;
            }
            /* above-right candidate, same slice only */
            if (mbnum_col < nMBPerRow - 1 && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow+1])
            {
                indx = indx + 2 - (block & 1);
                p3x = motxdata[indx];
                p3y = motydata[indx];
                rule3 = 1;
            }
        }
    }

    if (rule1 + rule2 + rule3 > 1)
    {
        *mvx = (MOT)PV_MEDIAN(p1x, p2x, p3x);
        *mvy = (MOT)PV_MEDIAN(p1y, p2y, p3y);
    }
    else if (rule1 + rule2 + rule3 == 1)
    {
        /* two of three are zero */
        *mvx = (MOT)(p1x + p2x + p3x);
        *mvy = (MOT)(p1y + p2y + p3y);
    }
    else
    {
        /* all MBs are outside the VOP */
        *mvx = *mvy = 0;
    }

    /*----------------------------------------------------------------------------
    ; Return nothing or data or data pointer
    ----------------------------------------------------------------------------*/
    return;
}
/***********************************************************CommentBegin******
*
* 3/30/2000 : initial modification to the new PV-Decoder Lib format.
* 4/16/2001 : removed checking of status for PV_BitstreamFlushBits
***********************************************************CommentEnd********/
/*
 * Decode one motion-vector-difference VLC into *mv.
 * A leading '1' bit means a zero difference; the remaining code space is
 * split across three tables by magnitude.  Returns PV_FAIL (with *mv = -1)
 * on an unassigned codeword.
 */
PV_STATUS PV_VlcDecMV(BitstreamDecVideo *stream, int *mv)
{
    uint code;

    BitstreamShow13Bits(stream, &code);

    if (code >> 12)              /* leading '1': vector difference = 0 */
    {
        PV_BitstreamFlushBits(stream, 1);
        *mv = 0;
    }
    else if (code >= 512)        /* short codes */
    {
        code = (code >> 8) - 2;
        PV_BitstreamFlushBits(stream, PV_TMNMVtab0[code].len + 1);
        *mv = PV_TMNMVtab0[code].val;
    }
    else if (code >= 128)        /* medium codes */
    {
        code = (code >> 2) - 32;
        PV_BitstreamFlushBits(stream, PV_TMNMVtab1[code].len + 1);
        *mv = PV_TMNMVtab1[code].val;
    }
    else if (code >= 4)          /* long codes */
    {
        code -= 4;
        PV_BitstreamFlushBits(stream, PV_TMNMVtab2[code].len + 1);
        *mv = PV_TMNMVtab2[code].val;
    }
    else                         /* unassigned codeword */
    {
        *mv = -1;
        return PV_FAIL;
    }
    return PV_SUCCESS;
}
/***********************************************************CommentBegin******
* 3/30/2000 : initial modification to the new PV-Decoder Lib
* format and the change of error-handling method.
* 4/16/01 : removed status checking of PV_BitstreamFlushBits
***********************************************************CommentEnd********/
/*
 * Decode the MCBPC VLC of an intra macroblock.
 * Returns the table value, or VLC_CODE_ERROR on an invalid codeword
 * (top six of the nine shown bits all zero).
 */
int PV_VlcDecMCBPC_com_intra(BitstreamDecVideo *stream)
{
    uint bits9;

    BitstreamShowBits16(stream, 9, &bits9);
    if (bits9 < 8)
    {
        return VLC_CODE_ERROR;           /* top 6 bits are all zero */
    }

    bits9 >>= 3;                         /* only the top 6 bits are used */
    if (bits9 >= 32)
    {
        /* leading '1': value 3, one bit consumed */
        PV_BitstreamFlushBits(stream, 1);
        return 3;
    }

    PV_BitstreamFlushBits(stream, PV_MCBPCtabintra[bits9].len);
    return PV_MCBPCtabintra[bits9].val;
}
/***********************************************************CommentBegin******
*
* 3/30/2000 : initial modification to the new PV-Decoder Lib
* format and the change of error-handling method.
* 4/16/2001 : removed checking of return status of PV_BitstreamFlushBits
***********************************************************CommentEnd********/
/*
 * Decode the MCBPC VLC of an inter macroblock.
 * Returns the table value, or VLC_CODE_ERROR when the nine shown bits are
 * all zero.
 */
int PV_VlcDecMCBPC_com_inter(BitstreamDecVideo *stream)
{
    uint bits9;

    BitstreamShowBits16(stream, 9, &bits9);

    if (bits9 == 0)
    {
        return VLC_CODE_ERROR;
    }
    if (bits9 >= 256)
    {
        /* leading '1': value 0, one bit consumed */
        PV_BitstreamFlushBits(stream, 1);
        return 0;
    }

    PV_BitstreamFlushBits(stream, PV_MCBPCtab[bits9].len);
    return PV_MCBPCtab[bits9].val;
}
#ifdef PV_ANNEX_IJKT_SUPPORT
/*
 * Decode the MCBPC VLC of an inter macroblock in H.263 (Annex IJKT) mode.
 *
 * Codes >= 16 map through PV_MCBPCtab (top 9 of the 13 shown bits), codes
 * 8..15 through PV_MCBPCtab1, a leading '1' bit means 0, and all-zero is
 * invalid.  Returns the table value or VLC_CODE_ERROR.
 *
 * Bug fix: codes 1..7 previously reached PV_MCBPCtab1[code - 8] with a
 * negative index — an out-of-bounds read on a corrupt stream.  They are now
 * rejected as VLC_CODE_ERROR, matching the upstream libstagefright fix.
 */
int PV_VlcDecMCBPC_com_inter_H263(BitstreamDecVideo *stream)
{
    uint code;

    BitstreamShow13Bits(stream, &code);

    if (code == 0)
    {
        return VLC_CODE_ERROR;
    }
    else if (code >= 4096)
    {
        /* leading '1': value 0, one bit consumed */
        PV_BitstreamFlushBits(stream, 1);
        return 0;
    }

    if (code >= 16)
    {
        PV_BitstreamFlushBits(stream, PV_MCBPCtab[code >> 4].len);
        return PV_MCBPCtab[code >> 4].val;
    }
    else if (code >= 8)
    {
        PV_BitstreamFlushBits(stream, PV_MCBPCtab1[code - 8].len);
        return PV_MCBPCtab1[code - 8].val;
    }
    else
    {
        /* codes 1..7 have no entry in PV_MCBPCtab1 */
        return VLC_CODE_ERROR;
    }
}
#endif
/***********************************************************CommentBegin******
* 3/30/2000 : initial modification to the new PV-Decoder Lib
* format and the change of error-handling method.
* 4/16/2001 : removed status checking for PV_BitstreamFlushBits
***********************************************************CommentEnd********/
/*
 * Decode the CBPY (coded block pattern, luminance) VLC.
 * For inter macroblocks (intra == 0) the decoded pattern is complemented.
 * Returns the 4-bit pattern, or -1 on an invalid codeword.
 */
int PV_VlcDecCBPY(BitstreamDecVideo *stream, int intra)
{
    uint bits6;
    int cbpy;

    BitstreamShowBits16(stream, 6, &bits6);
    if (bits6 < 2)
    {
        return -1;                       /* unassigned codeword */
    }

    if (bits6 >= 48)
    {
        /* leading '11': pattern 1111, two bits consumed */
        PV_BitstreamFlushBits(stream, 2);
        cbpy = 15;
    }
    else
    {
        PV_BitstreamFlushBits(stream, PV_CBPYtab[bits6].len);
        cbpy = PV_CBPYtab[bits6].val;
    }

    if (!intra)
    {
        cbpy = 15 - cbpy;                /* inter blocks code the complement */
    }
    return cbpy & 15;
}
/***********************************************************CommentBegin******
* 3/31/2000 : initial modification to the new PV-Decoder Lib format.
*
* 8/23/2000 : optimize the function by removing unnecessary BitstreamShowBits()
* function calls.
*
* 9/6/2000 : change the API to check for end-of-buffer for proper
* termination of decoding process.
***********************************************************CommentEnd********/
/*
 * Decode the dct_dc_size VLC of an intra block.
 *
 *   stream  -- input bitstream
 *   compnum -- block index; < 4 selects the luminance table, otherwise the
 *              chrominance table
 *   DC_size -- out: number of bits of the DC differential that follows
 *              (0 means no differential is coded)
 *
 * The codewords form a prefix code, so the shown bits are matched from the
 * most significant end, shifting right one bit per candidate length.  On a
 * match the codeword is flushed and PV_SUCCESS returned; an unassigned
 * pattern returns PV_FAIL.
 */
PV_STATUS PV_VlcDecIntraDCPredSize(BitstreamDecVideo *stream, int compnum, uint *DC_size)
{
    PV_STATUS status = PV_FAIL;      /*  07/09/01 */
    uint code;

    *DC_size = 0;
    if (compnum < 4)  /* luminance block */
    {
        BitstreamShowBits16(stream, 11, &code);

        /* longest codeword first: 11 bits, value 1 -> size 12 */
        if (code == 1)
        {
            *DC_size = 12;
            PV_BitstreamFlushBits(stream, 11);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 11;
            PV_BitstreamFlushBits(stream, 10);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 10;
            PV_BitstreamFlushBits(stream, 9);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 9;
            PV_BitstreamFlushBits(stream, 8);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 8;
            PV_BitstreamFlushBits(stream, 7);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 7;
            PV_BitstreamFlushBits(stream, 6);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 6;
            PV_BitstreamFlushBits(stream, 5);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 5;
            PV_BitstreamFlushBits(stream, 4);
            return PV_SUCCESS;
        }

        /* 3-bit codewords: 001 -> 4, 010 -> 3, 011 -> 0 */
        code >>= 1;
        if (code == 1)
        {
            *DC_size = 4;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }
        else if (code == 2)
        {
            *DC_size = 3;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }
        else if (code == 3)
        {
            *DC_size = 0;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }

        /* 2-bit codewords: 10 -> 2, 11 -> 1 */
        code >>= 1;
        if (code == 2)
        {
            *DC_size = 2;
            PV_BitstreamFlushBits(stream, 2);
            return PV_SUCCESS;
        }
        else if (code == 3)
        {
            *DC_size = 1;
            PV_BitstreamFlushBits(stream, 2);
            return PV_SUCCESS;
        }
    }
    else  /* chrominance block */
    {
        BitstreamShow13Bits(stream, &code);
        code >>= 1;   /* only the top 12 of the 13 shown bits are used */
        if (code == 1)
        {
            *DC_size = 12;
            PV_BitstreamFlushBits(stream, 12);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 11;
            PV_BitstreamFlushBits(stream, 11);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 10;
            PV_BitstreamFlushBits(stream, 10);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 9;
            PV_BitstreamFlushBits(stream, 9);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 8;
            PV_BitstreamFlushBits(stream, 8);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 7;
            PV_BitstreamFlushBits(stream, 7);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 6;
            PV_BitstreamFlushBits(stream, 6);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 5;
            PV_BitstreamFlushBits(stream, 5);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 4;
            PV_BitstreamFlushBits(stream, 4);
            return PV_SUCCESS;
        }

        code >>= 1;
        if (code == 1)
        {
            *DC_size = 3;
            PV_BitstreamFlushBits(stream, 3);
            return PV_SUCCESS;
        }

        /* remaining 2-bit codewords: 01 -> 2, 10 -> 1, 11 -> 0 */
        code >>= 1;
        {
            *DC_size = (int)(3 - code);
            PV_BitstreamFlushBits(stream, 2);
            return PV_SUCCESS;
        }
    }

    return status;   /* no luminance codeword matched: PV_FAIL */
}
/***********************************************************CommentBegin******
*
*
* 3/30/2000 : initial modification to the new PV-Decoder Lib
* format and the change of error-handling method.
*
***********************************************************CommentEnd********/
/*
 * Decode one intra TCOEF event (last/run/level/sign) using the MPEG-4
 * intra DCT VLC tables with the three-mode escape mechanism:
 *   mode 1 (ESC, next bit 0):  decode a second codeword, then add the
 *          table's max level for (last, run) back to the level;
 *   mode 2 (ESC, bits "10"):   decode a second codeword, then add the
 *          table's max run for (last, level) + 1 back to the run;
 *   mode 3 (ESC, bits "11"):   fixed-length code: 8 bits holding LAST and
 *          RUN(6) plus one discarded bit, then 13 bits whose top 12 form
 *          the level (values >= 2048 are negative, two's-complement style).
 * Returns PV_FAIL on an unassigned codeword or out-of-range run/level.
 */
PV_STATUS VlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    /*  10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFIntra */
    /* table slice is selected by the magnitude of the shown bits */
    if (code >= 1024)
    {
        tab = &PV_DCT3Dtab3[(code >> 6) - 16];
    }
    else
    {
        if (code >= 256)
        {
            tab = &PV_DCT3Dtab4[(code >> 3) - 32];
        }
        else
        {
            if (code >= 16)
            {
                tab = &PV_DCT3Dtab5[(code>>1) - 8];
            }
            else
            {
                return PV_FAIL;
            }
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);
    /* the bit right after the codeword is the sign */
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint) tab->run;
    pTcoef->level = (int) tab->level;
    pTcoef->last = (uint) tab->last;

    /* the following is modified for 3-mode escape -- boon */
    if (tab->level != 0xFF)   /* 0xFF level marks the ESCAPE entry */
    {
        return PV_SUCCESS;
    }

    if (!pTcoef->sign)
    {
        /* first escape mode. level is offset */
        BitstreamShow13Bits(stream, &code);

        /*  10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFIntra */
        if (code >= 1024)
        {
            tab = &PV_DCT3Dtab3[(code >> 6) - 16];
        }
        else
        {
            if (code >= 256)
            {
                tab = &PV_DCT3Dtab4[(code >> 3) - 32];
            }
            else
            {
                if (code >= 16)
                {
                    tab = &PV_DCT3Dtab5[(code>>1) - 8];
                }
                else
                {
                    return PV_FAIL;
                }
            }
        }

        PV_BitstreamFlushBits(stream, tab->len + 1);
        /* sign bit */
        pTcoef->sign = (code >> (12 - tab->len)) & 1;
        pTcoef->run = (uint)tab->run;
        pTcoef->level = (int)tab->level;
        pTcoef->last = (uint)tab->last;

        /* need to add back the max level */
        if ((pTcoef->last == 0 && pTcoef->run > 14) || (pTcoef->last == 1 && pTcoef->run > 20))
        {
            return PV_FAIL;   /* run outside the intra_max_level table */
        }
        pTcoef->level = pTcoef->level + intra_max_level[pTcoef->last][pTcoef->run];
    }
    else
    {
        uint run_offset;
        run_offset = BitstreamRead1Bits_INLINE(stream);

        if (!run_offset)
        {
            /* second escape mode. run is offset */
            BitstreamShow13Bits(stream, &code);

            /*  10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFIntra */
            if (code >= 1024)
            {
                tab = &PV_DCT3Dtab3[(code >> 6) - 16];
            }
            else
            {
                if (code >= 256)
                {
                    tab = &PV_DCT3Dtab4[(code >> 3) - 32];
                }
                else
                {
                    if (code >= 16)
                    {
                        tab = &PV_DCT3Dtab5[(code>>1) - 8];
                    }
                    else
                    {
                        return PV_FAIL;
                    }
                }
            }

            PV_BitstreamFlushBits(stream, tab->len + 1);
            /* sign bit */
            pTcoef->sign = (code >> (12 - tab->len)) & 1;
            pTcoef->run = (uint)tab->run;
            pTcoef->level = (int)tab->level;
            pTcoef->last = (uint)tab->last;

            /* need to add back the max run */
            if (pTcoef->last)
            {
                if (pTcoef->level > 8)
                {
                    return PV_FAIL;   /* level outside intra_max_run1 */
                }
                pTcoef->run = pTcoef->run + intra_max_run1[pTcoef->level] + 1;
            }
            else
            {
                if (pTcoef->level > 27)
                {
                    return PV_FAIL;   /* level outside intra_max_run0 */
                }
                pTcoef->run = pTcoef->run + intra_max_run0[pTcoef->level] + 1;
            }
        }
        else
        {
            /* third escape mode: fixed-length coding */
            code = BitstreamReadBits16_INLINE(stream, 8);
            pTcoef->last = code >> 7;
            pTcoef->run = (code >> 1) & 0x3F;   /* LSB of the 8 bits discarded */
            /* 13 bits read, LSB discarded: 12-bit level field */
            pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 13) >> 1);

            if (pTcoef->level >= 2048)
            {
                pTcoef->sign = 1;
                pTcoef->level = 4096 - pTcoef->level;
            }
            else
            {
                pTcoef->sign = 0;
            }
        } /* flc */
    }

    return PV_SUCCESS;
} /* VlcDecTCOEFIntra */
/*
 * Decode one inter TCOEF event (last/run/level/sign) using the MPEG-4
 * inter DCT VLC tables with the same three-mode escape mechanism as
 * VlcDecTCOEFIntra:
 *   mode 1 (ESC, next bit 0):  second codeword, level offset by the
 *          inter_max_level table;
 *   mode 2 (ESC, bits "10"):   second codeword, run offset by the
 *          inter_max_run table + 1;
 *   mode 3 (ESC, bits "11"):   fixed-length LAST/RUN(6) + 12-bit level
 *          (values >= 2048 are negative).
 * Returns PV_FAIL on an unassigned codeword or out-of-range run/level.
 */
PV_STATUS VlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code;
    const VLCtab2 *tab;

    BitstreamShow13Bits(stream, &code);

    /*  10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFInter */
    /* table slice is selected by the magnitude of the shown bits */
    if (code >= 1024)
    {
        tab = &PV_DCT3Dtab0[(code >> 6) - 16];
    }
    else
    {
        if (code >= 256)
        {
            tab = &PV_DCT3Dtab1[(code >> 3) - 32];
        }
        else
        {
            if (code >= 16)
            {
                tab = &PV_DCT3Dtab2[(code>>1) - 8];
            }
            else
            {
                return PV_FAIL;
            }
        }
    }

    PV_BitstreamFlushBits(stream, tab->len + 1);
    /* the bit right after the codeword is the sign */
    pTcoef->sign = (code >> (12 - tab->len)) & 1;
    pTcoef->run = (uint)tab->run;
    pTcoef->level = (int)tab->level;
    pTcoef->last = (uint)tab->last;

    /* the following is modified for 3-mode escape -- boon */
    if (tab->run != 0xBF)   /* 0xBF run marks the ESCAPE entry */
    {
        return PV_SUCCESS;
    }

    if (!pTcoef->sign)
    {
        /* first escape mode. level is offset */
        BitstreamShow13Bits(stream, &code);

        /*  10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFInter */
        if (code >= 1024)
        {
            tab = &PV_DCT3Dtab0[(code >> 6) - 16];
        }
        else
        {
            if (code >= 256)
            {
                tab = &PV_DCT3Dtab1[(code >> 3) - 32];
            }
            else
            {
                if (code >= 16)
                {
                    tab = &PV_DCT3Dtab2[(code>>1) - 8];
                }
                else
                {
                    return PV_FAIL;
                }
            }
        }

        PV_BitstreamFlushBits(stream, tab->len + 1);
        pTcoef->sign = (code >> (12 - tab->len)) & 1;
        pTcoef->run = (uint)tab->run;
        pTcoef->level = (int)tab->level;
        pTcoef->last = (uint)tab->last;

        /* need to add back the max level */
        if ((pTcoef->last == 0 && pTcoef->run > 26) || (pTcoef->last == 1 && pTcoef->run > 40))
        {
            return PV_FAIL;   /* run outside the inter_max_level table */
        }
        pTcoef->level = pTcoef->level + inter_max_level[pTcoef->last][pTcoef->run];
    }
    else
    {
        uint run_offset;
        run_offset = BitstreamRead1Bits_INLINE(stream);

        if (!run_offset)
        {
            /* second escape mode. run is offset */
            BitstreamShow13Bits(stream, &code);

            /*  10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFInter */
            if (code >= 1024)
            {
                tab = &PV_DCT3Dtab0[(code >> 6) - 16];
            }
            else
            {
                if (code >= 256)
                {
                    tab = &PV_DCT3Dtab1[(code >> 3) - 32];
                }
                else
                {
                    if (code >= 16)
                    {
                        tab = &PV_DCT3Dtab2[(code>>1) - 8];
                    }
                    else
                    {
                        return PV_FAIL;
                    }
                }
            }

            PV_BitstreamFlushBits(stream, tab->len + 1);
            pTcoef->sign = (code >> (12 - tab->len)) & 1;
            pTcoef->run = (uint)tab->run;
            pTcoef->level = (int)tab->level;
            pTcoef->last = (uint)tab->last;

            /* need to add back the max run */
            if (pTcoef->last)
            {
                if (pTcoef->level > 3)
                {
                    return PV_FAIL;   /* level outside inter_max_run1 */
                }
                pTcoef->run = pTcoef->run + inter_max_run1[pTcoef->level] + 1;
            }
            else
            {
                if (pTcoef->level > 12)
                {
                    return PV_FAIL;   /* level outside inter_max_run0 */
                }
                pTcoef->run = pTcoef->run + inter_max_run0[pTcoef->level] + 1;
            }
        }
        else
        {
            /* third escape mode: fixed-length coding */
            code = BitstreamReadBits16_INLINE(stream, 8);
            pTcoef->last = code >> 7;
            pTcoef->run = (code >> 1) & 0x3F;   /* LSB of the 8 bits discarded */
            /* 13 bits read, LSB discarded: 12-bit level field */
            pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 13) >> 1);

            if (pTcoef->level >= 2048)
            {
                pTcoef->sign = 1;
                pTcoef->level = 4096 - pTcoef->level;
            }
            else
            {
                pTcoef->sign = 0;
            }
        } /* flc */
    }

    return PV_SUCCESS;
} /* VlcDecTCOEFInter */
/*=======================================================
Function: VlcDecTCOEFShortHeader()
Date : 04/27/99
Purpose : New function used in decoding of video planes
with short header
Modified: 05/23/2000
for new decoder structure.
=========================================================*/
/*
 * Decode one TCOEF event for short-video-header (H.263 baseline) streams.
 * Uses the same inter tables as VlcDecTCOEFInter, but the only escape mode
 * is the H.263 fixed-length one: LAST (carried in the sign-bit position),
 * RUN(6) and LEVEL(8), where LEVEL 0 and 128 are forbidden and values
 * above 128 are negative.
 */
PV_STATUS VlcDecTCOEFShortHeader(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint bits;
    const VLCtab2 *entry;

    BitstreamShow13Bits(stream, &bits);

    /* pick the table slice from the code magnitude */
    if (bits >= 1024)
    {
        entry = &PV_DCT3Dtab0[(bits >> 6) - 16];
    }
    else if (bits >= 256)
    {
        entry = &PV_DCT3Dtab1[(bits >> 3) - 32];
    }
    else if (bits >= 16)
    {
        entry = &PV_DCT3Dtab2[(bits >> 1) - 8];
    }
    else
    {
        return PV_FAIL;
    }

    PV_BitstreamFlushBits(stream, entry->len + 1);
    pTcoef->sign  = (bits >> (12 - entry->len)) & 1;
    pTcoef->run   = (uint) entry->run;
    pTcoef->level = (int) entry->level;
    pTcoef->last  = (uint) entry->last;

    if (((entry->run << 4) | (entry->level) | (entry->last << 12)) != VLC_ESCAPE_CODE)
    {
        return PV_SUCCESS;          /* regular (non-escape) event */
    }

    /* H.263 escape: LAST arrived in the sign slot, then RUN(6) + LEVEL(8) */
    pTcoef->last  = pTcoef->sign;
    pTcoef->run   = BitstreamReadBits16_INLINE(stream, 6);
    pTcoef->level = (int) BitstreamReadBits16_INLINE(stream, 8);

    if (pTcoef->level == 0 || pTcoef->level == 128)
    {
        return PV_FAIL;             /* forbidden level codes */
    }
    if (pTcoef->level > 128)
    {
        pTcoef->sign  = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }
    return PV_SUCCESS;
} /* VlcDecTCOEFShortHeader */
#ifdef PV_ANNEX_IJKT_SUPPORT
/*
 * Decode one short-header TCOEF event with Annex I (advanced intra coding)
 * tables (PV_DCT3Dtab6/7/8).  Escape handling is the same fixed-length
 * H.263 mode as VlcDecTCOEFShortHeader.
 */
PV_STATUS VlcDecTCOEFShortHeader_AnnexI(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint bits;
    const VLCtab2 *entry;

    BitstreamShow13Bits(stream, &bits);

    /* pick the Annex-I table slice from the code magnitude */
    if (bits >= 1024)
    {
        entry = &PV_DCT3Dtab6[(bits >> 6) - 16];
    }
    else if (bits >= 256)
    {
        entry = &PV_DCT3Dtab7[(bits >> 3) - 32];
    }
    else if (bits >= 16)
    {
        entry = &PV_DCT3Dtab8[(bits >> 1) - 8];
    }
    else
    {
        return PV_FAIL;
    }

    PV_BitstreamFlushBits(stream, entry->len + 1);
    pTcoef->sign  = (bits >> (12 - entry->len)) & 1;
    pTcoef->run   = (uint) entry->run;
    pTcoef->level = (int) entry->level;
    pTcoef->last  = (uint) entry->last;

    if (((entry->run << 6) | (entry->level) | (entry->last << 12)) != VLC_ESCAPE_CODE)
    {
        return PV_SUCCESS;          /* regular (non-escape) event */
    }

    /* H.263 escape: LAST arrived in the sign slot, then RUN(6) + LEVEL(8) */
    pTcoef->last  = pTcoef->sign;
    pTcoef->run   = BitstreamReadBits16(stream, 6);
    pTcoef->level = (int) BitstreamReadBits16(stream, 8);

    if (pTcoef->level == 0 || pTcoef->level == 128)
    {
        return PV_FAIL;             /* forbidden level codes */
    }
    if (pTcoef->level > 128)
    {
        pTcoef->sign  = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }
    return PV_SUCCESS;
} /* VlcDecTCOEFShortHeader_AnnexI */
/*
 * Decode one short-header TCOEF event with Annex T (modified quantization):
 * same tables and escape as VlcDecTCOEFShortHeader, except LEVEL 128 is not
 * forbidden — it announces an 11-bit extended-level code (bit-swapped, with
 * values above 1024 negative).
 */
PV_STATUS VlcDecTCOEFShortHeader_AnnexT(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint bits;
    const VLCtab2 *entry;

    BitstreamShow13Bits(stream, &bits);

    /* pick the table slice from the code magnitude */
    if (bits >= 1024)
    {
        entry = &PV_DCT3Dtab0[(bits >> 6) - 16];
    }
    else if (bits >= 256)
    {
        entry = &PV_DCT3Dtab1[(bits >> 3) - 32];
    }
    else if (bits >= 16)
    {
        entry = &PV_DCT3Dtab2[(bits >> 1) - 8];
    }
    else
    {
        return PV_FAIL;
    }

    PV_BitstreamFlushBits(stream, entry->len + 1);
    pTcoef->sign  = (bits >> (12 - entry->len)) & 1;
    pTcoef->run   = (uint) entry->run;
    pTcoef->level = (int) entry->level;
    pTcoef->last  = (uint) entry->last;

    if (((entry->run << 4) | (entry->level) | (entry->last << 12)) != VLC_ESCAPE_CODE)
    {
        return PV_SUCCESS;          /* regular (non-escape) event */
    }

    /* H.263 escape: LAST arrived in the sign slot, then RUN(6) + LEVEL(8) */
    pTcoef->last  = pTcoef->sign;
    pTcoef->run   = BitstreamReadBits16(stream, 6);
    pTcoef->level = (int) BitstreamReadBits16(stream, 8);

    if (pTcoef->level == 0)
    {
        return PV_FAIL;
    }
    if (pTcoef->level >= 128)
    {
        pTcoef->sign  = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }

    /* Annex T: level 0x80 selects the extended 11-bit level */
    if (pTcoef->level == 128)
    {
        bits = BitstreamReadBits16(stream, 11);           /* ANNEX_T */
        bits = (bits >> 6 & 0x1F) | (bits << 5 & 0x7ff);  /* swap halves */
        if (bits > 1024)
        {
            pTcoef->sign  = 1;
            pTcoef->level = (2048 - bits);
        }
        else
        {
            pTcoef->sign  = 0;
            pTcoef->level = bits;
        }
    }
    return PV_SUCCESS;
} /* VlcDecTCOEFShortHeader */
/*
 * Decode one short-header TCOEF event with both Annex I tables
 * (PV_DCT3Dtab6/7/8) and Annex T extended-level escape handling.
 */
PV_STATUS VlcDecTCOEFShortHeader_AnnexIT(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint bits;
    const VLCtab2 *entry;

    BitstreamShow13Bits(stream, &bits);

    /* pick the Annex-I table slice from the code magnitude */
    if (bits >= 1024)
    {
        entry = &PV_DCT3Dtab6[(bits >> 6) - 16];
    }
    else if (bits >= 256)
    {
        entry = &PV_DCT3Dtab7[(bits >> 3) - 32];
    }
    else if (bits >= 16)
    {
        entry = &PV_DCT3Dtab8[(bits >> 1) - 8];
    }
    else
    {
        return PV_FAIL;
    }

    PV_BitstreamFlushBits(stream, entry->len + 1);
    pTcoef->sign  = (bits >> (12 - entry->len)) & 1;
    pTcoef->run   = (uint) entry->run;
    pTcoef->level = (int) entry->level;
    pTcoef->last  = (uint) entry->last;

    if (((entry->run << 6) | (entry->level) | (entry->last << 12)) != VLC_ESCAPE_CODE)
    {
        return PV_SUCCESS;          /* regular (non-escape) event */
    }

    /* H.263 escape: LAST arrived in the sign slot, then RUN(6) + LEVEL(8) */
    pTcoef->last  = pTcoef->sign;
    pTcoef->run   = BitstreamReadBits16(stream, 6);
    pTcoef->level = (int) BitstreamReadBits16(stream, 8);

    if (pTcoef->level == 0)
    {
        return PV_FAIL;
    }
    if (pTcoef->level >= 128)
    {
        pTcoef->sign  = 1;
        pTcoef->level = 256 - pTcoef->level;
    }
    else
    {
        pTcoef->sign = 0;
    }

    /* Annex T: level 0x80 selects the extended 11-bit level */
    if (pTcoef->level == 128)
    {
        bits = BitstreamReadBits16(stream, 11);           /* ANNEX_T */
        bits = (bits >> 6 & 0x1F) | (bits << 5 & 0x7ff);  /* swap halves */
        if (bits > 1024)
        {
            pTcoef->sign  = 1;
            pTcoef->level = (2048 - bits);
        }
        else
        {
            pTcoef->sign  = 0;
            pTcoef->level = bits;
        }
    }
    return PV_SUCCESS;
} /* VlcDecTCOEFShortHeader_AnnexI */
#endif
/***********************************************************CommentBegin******
* 3/30/2000 : initial modification to the new PV-Decoder Lib
* format and the change of error-handling method.
* The coefficient is now returned thru a pre-
* initialized parameters for speedup.
*
***********************************************************CommentEnd********/
/*
 * Decode one inter TCOEF event from a Reversible VLC (RVLC) bitstream.
 *
 * A codeword is located by counting the run of identical bits after the
 * first bit (num[] holds the symbol counts); the counts plus the trailing
 * bits then index RvlcDCTtabInter.  Codes whose top 15 shown bits are
 * below 2048 (leading "00000") are the fixed-length escape: LAST(1),
 * RUN(6), a flushed bit, a 12-bit read whose LSB is discarded (11-bit
 * LEVEL), and 5 more bits whose LSB is the sign — the discarded bits are
 * presumably marker bits, not checked here.  Returns PV_FAIL for an
 * invalid RVLC code.
 */
PV_STATUS RvlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
    uint code, mask;
    const VLCtab2 *tab2;
    int count, len, num[2] = {0, 0} /*  01/30/01 */;

    mask = 0x4000;      /* mask  100000000000000 */
    BitstreamShow15Bits(stream, &code);     /*  03/07/01 */

    len = 1;

    /*  09/20/99 Escape mode */
    /// Bitstream Exchange
    if (code < 2048)
    {
        PV_BitstreamFlushBits(stream, 5);
        pTcoef->last = BitstreamRead1Bits_INLINE(stream);
        pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);
        /*  09/20/99 New marker bit */
        PV_BitstreamFlushBits(stream, 1);
        /*  09/20/99 The length for LEVEL used to be 7 in the old version */
        pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 12) >> 1);
        /*  09/20/99 Another new marker bit */
        //  PV_BitstreamFlushBitsCheck(stream, 1);
        pTcoef->sign = BitstreamReadBits16_INLINE(stream, 5) & 0x1;  /* fix  3/13/01 */
        return PV_SUCCESS;
    }

    if (code & mask)
    {
        /* leading '1': scan for the next '1', counting the zeros between */
        count = 1;
        while (mask && count > 0)   /* fix  3/28/01 */
        {
            mask = mask >> 1;
            if (code & mask)
                count--;
            else
                num[0]++;  /* number of zeros in the middle */
            len++;
        }
    }
    else
    {
        /* leading '0': scan for two more '0's, counting the ones between */
        count = 2;
        while (mask && count > 0)   /* fix  3/28/01 */
        {
            mask = mask >> 1;
            if (!(code & mask))
                count--;
            else
                num[count-1]++;  /* number of ones in the middle */
            len++;
        }
    }

    /* keep only the codeword bits, right-aligned */
    code = code & 0x7fff;
    code = code >> (15 - (len + 1));

    /*  1/30/01, add fast decoding algorithm here */
    /* code is in two forms : 0xxxx0xxx00 or 0xxx0xxx01
                              num[1] and num[0] x
               or           : 1xxxxx10 or 1xxxxx11
                              num[0]   x     */
    /* len+1 is the length of the above */

    if (num[1] > 10 || num[0] > 11)  /* invalid RVLC code */
        return PV_FAIL;

    if (code&(1 << len))
        tab2 = RvlcDCTtabInter + 146 + (num[0] << 1) + (code & 1);
    else
        tab2 = RvlcDCTtabInter + ptrRvlcTab[num[1]] + (num[0] << 1) + (code & 1);

    PV_BitstreamFlushBits(stream, (int) tab2->len);
    pTcoef->run = (uint)tab2->run;
    pTcoef->level = (int)tab2->level;
    pTcoef->last = (uint)tab2->last;
    /* sign bit follows the codeword */
    pTcoef->sign = BitstreamRead1Bits_INLINE(stream);

    return PV_SUCCESS;
} /* RvlcDecTCOEFInter */
/* Decode one intra-block DCT coefficient (last, run, level, sign) from
 * `stream` using the intra RVLC tables; identical structure to
 * RvlcDecTCOEFInter except for the table used (RvlcDCTtabIntra).
 * Returns PV_SUCCESS, or PV_FAIL on an invalid RVLC code word. */
PV_STATUS RvlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef)
{
uint code, mask;
const VLCtab2 *tab2;
int count, len, num[2] = {0, 0} /* 01/30/01 */;
mask = 0x4000; /* mask 100000000000000 */
BitstreamShow15Bits(stream, &code);
len = 1;
// 09/20/99 Escape mode
/// Bitstream Exchange
/* Escape coding (lookahead < 2048): LAST(1), RUN(6), marker,
 * LEVEL(11)+marker, sign taken from the low bit of a 5-bit read. */
if (code < 2048)
{
PV_BitstreamFlushBits(stream, 5);
pTcoef->last = BitstreamRead1Bits_INLINE(stream);
pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);
// 09/20/99 New marker bit
PV_BitstreamFlushBits(stream, 1);
// 09/20/99 The length for LEVEL used to be 7 in the old version
pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 12) >> 1);
// 09/20/99 Another new marker bit
// PV_BitstreamFlushBitsCheck(stream, 1);
pTcoef->sign = BitstreamReadBits16_INLINE(stream, 5) & 0x1; /* fix 03/13/01 */
return PV_SUCCESS;
}
/* Scan the RVLC prefix: count zeros (one terminating '1') or ones
 * (two terminating '0's) depending on the leading bit. */
if (code & mask)
{
count = 1;
while (mask && count > 0) /* fix 03/28/01 */
{
mask = mask >> 1;
if (code & mask)
count--;
else
num[0]++; /* number of zeros in the middle */
len++;
}
}
else
{
count = 2;
while (mask && count > 0) /* fix 03/28/01 */
{
mask = mask >> 1;
if (!(code & mask))
count--;
else
num[count-1]++; /* number of ones in the middle */
len++;
}
}
code = code & 0x7fff;
code = code >> (15 - (len + 1));
/* 1/30/01, add fast decoding algorithm here */
/* code is in two forms : 0xxxx0xxx00 or 0xxx0xxx01
num[1] and num[0] x
or : 1xxxxx10 or 1xxxxx11
num[0] x */
/* len+1 is the length of the above */
if (num[1] > 10 || num[0] > 11) /* invalid RVLC code */
return PV_FAIL;
/* Table lookup, same layout as the inter table (146 = '1'-prefixed base). */
if (code & (1 << len))
tab2 = RvlcDCTtabIntra + 146 + (num[0] << 1) + (code & 1);
else
tab2 = RvlcDCTtabIntra + ptrRvlcTab[num[1]] + (num[0] << 1) + (code & 1);
PV_BitstreamFlushBits(stream, (int) tab2->len);
pTcoef->run = (uint)tab2->run;//(tab->val >> 8) & 255;
pTcoef->level = (int)tab2->level;//tab->val & 255;
pTcoef->last = (uint)tab2->last;//(tab->val >> 16) & 1;
pTcoef->sign = BitstreamRead1Bits_INLINE(stream);
return PV_SUCCESS;
} /* RvlcDecTCOEFIntra */
|
/*=========================================================================
Copyright (c) Kitware Inc.
All rights reserved.
=========================================================================*/
// .SECTION Description
// This program illustrates the use of the vtkHyperTreeGrid
// data set and various filters acting upon it.
// It generates output files in VTK format.
//
// .SECTION Usage
//
// .SECTION Thanks
// This program was written by Daniel Aguilera and Philippe Pebay
// This work was supported by Commissariat a l'Energie Atomique (CEA/DIF)
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkPolyDataMapper.h>
#include <vtkPolyData.h>
#include <vtkActor.h>
#include <vtkDataSetSurfaceFilter.h>
#include <vtkUnstructuredGrid.h>
#include <vtkShrinkFilter.h>
#include <vtkProperty.h>
#include <vtkUnstructuredGridWriter.h>
#include <vtkInteractorStyleSwitch.h>
#include "Mesh.h"
#include "Cell.h"
#include "Node.h"
using namespace std;
#define SHIFT_ARGS() for (int j=i;j<(argc-1);j++) argv[j] = argv[j+1]; argc--; i--
#define SHIFT_NARGS(n) for (int j=i;j<(argc-(n));j++) argv[j] = argv[j+(n)]; argc-=(n); i--
// Print command-line help and terminate the program.
// Fix: the usage line previously advertised "-level" and "-refine",
// but main() only accepts "-depth" and "-factor"; the text now matches
// the parser. Also corrects the "Defaut" typos in the output.
void usage ()
{
  std::cout << "Usage : amr [-depth <int>] [-factor <int>] [-nx <int>] [-ny <int>] [-nz <int>] [-write <file>] [-shrink] [-help]" << std::endl;
  std::cout << "   -depth   : Number of refinement levels. Default = 3" << std::endl;
  std::cout << "   -factor  : Refinement branching factor. Default = 3" << std::endl;
  std::cout << "   -n[xyz]  : Number of grid points in each direction. Default = 5" << std::endl;
  std::cout << "   -write   : Output mesh in a VTK unstructured grid file. Default = no output" << std::endl;
  std::cout << "   -shrink  : Apply shrink filter before rendering geometry. Default = do not shrink" << std::endl;
  std::cout << "   -help    : Print available options" << std::endl;
  exit (0);
}
// Build a refined AMR-style mesh, optionally shrink it, optionally write
// it as a VTK unstructured grid, and set up (but do not start) a render
// window showing surface + wireframe views of the result.
int main( int argc, char *argv[] )
{
// Default values
int nx = 5;
int ny = 5;
int nz = 5;
int depth = 3;
int factor = 3;
bool shrink = false;
string datafile = "";
double R = 0.0;
// Parse options; SHIFT_NARGS/SHIFT_ARGS remove consumed arguments from
// argv in place so unrecognized options can be detected below.
for (int i = 1; i < argc; i++)
{
// Refinement depth
if (strcmp (argv[i], "-depth") == 0)
{
if (i+1 < argc) {depth = atoi (argv[i+1]); SHIFT_NARGS(2);}
else usage();
}
// Branch factor
else if (strcmp (argv[i], "-factor") == 0)
{
if (i+1 < argc) {factor = atoi (argv[i+1]); SHIFT_NARGS(2);}
else usage();
}
// Dimensions
else if (strcmp (argv[i], "-nx") == 0)
{
if (i+1 < argc) {nx = atoi (argv[i+1]); SHIFT_NARGS(2);}
else usage();
}
else if (strcmp (argv[i], "-ny") == 0)
{
if (i+1 < argc) {ny = atoi (argv[i+1]); SHIFT_NARGS(2);}
else usage();
}
else if (strcmp (argv[i], "-nz") == 0)
{
if (i+1 < argc) {nz = atoi (argv[i+1]); SHIFT_NARGS(2);}
else usage();
}
else if (strcmp (argv[i], "-write") == 0)
{
if (i+1 < argc) {datafile = argv[i+1]; SHIFT_NARGS(2);}
else usage();
}
else if (strcmp (argv[i], "-shrink") == 0)
{
shrink = true; SHIFT_ARGS();
}
else usage();
}
// If no radius is defined, then take the number of grid points along X axis
// (R is never set by an option here, so this always applies).
if (R == 0.0) R = nx;
Cell::setR(R);
// Corner nodes of the bounding box, passed to the Mesh constructor.
Node * n1 = new Node (0.0, 0.0, 0.0);
Node * n2 = new Node ((double) nx+1, 0.0, 0.0);
Node * n3 = new Node ((double) nx+1, 0.0, (double) nz+1);
Node * n4 = new Node (0.0, 0.0, (double) nz+1);
Node * n5 = new Node (0.0, (double) ny+1, 0.0);
Node * n6 = new Node ((double) nx+1, (double) ny+1, 0.0);
Node * n7 = new Node ((double) nx+1, (double) ny+1, (double) nz+1);
Node * n8 = new Node (0.0, (double) ny+1, (double) nz+1);
// Create mesh
Mesh * mesh = new Mesh (nx, ny, nz, n1, n2, n3, n4, n5, n6, n7, n8);
mesh->setFactor (factor);
// Refine `depth` times with the configured branching factor.
for (int i = 0; i < depth; i++) mesh->refine();
// Reduce points
mesh->mergePoints();
// Generate dataset
vtkDataSet * ds = mesh->getDataSet();
// Reduce cells des mailles
vtkShrinkFilter * shrinkFilter = vtkShrinkFilter::New();
if (shrink)
{
shrinkFilter->SetShrinkFactor (0.9);
shrinkFilter->SetInputData (ds);
shrinkFilter->Update();
ds = shrinkFilter->GetOutput();
}
// Write out dataset
if (datafile != "")
{
vtkUnstructuredGridWriter * writer = vtkUnstructuredGridWriter::New();
writer->SetInputData(ds);
writer->SetFileName (datafile.c_str());
writer->Write();
writer->Delete();
}
// Geometry filter
vtkDataSetSurfaceFilter * dataSetSurfaceFilter = vtkDataSetSurfaceFilter::New();
dataSetSurfaceFilter->SetInputData(ds);
// Mappers: one for the solid surface, one for the wireframe overlay.
vtkPolyDataMapper * polyDataMapper1 = vtkPolyDataMapper::New();
polyDataMapper1->SetInputConnection(dataSetSurfaceFilter->GetOutputPort());
polyDataMapper1->SetResolveCoincidentTopologyToPolygonOffset();
vtkPolyDataMapper * polyDataMapper2 = vtkPolyDataMapper::New();
polyDataMapper2->SetInputConnection(dataSetSurfaceFilter->GetOutputPort());
polyDataMapper2->SetResolveCoincidentTopologyToPolygonOffset();
// Actors
vtkActor *actor1 = vtkActor::New();
actor1->GetProperty()->SetColor(.8,.2,.2);
actor1->SetMapper (polyDataMapper1);
vtkActor *actor2 = vtkActor::New();
actor2->GetProperty()->SetRepresentationToWireframe();
actor2->GetProperty()->SetColor( .5, .5, .5 );
actor2->SetMapper (polyDataMapper2);
// Window and interactor
vtkRenderer * ren = vtkRenderer::New();
ren->SetBackground (1.,1.,1.);
ren->AddActor(actor1);
ren->AddActor(actor2);
vtkRenderWindow * renWindow = vtkRenderWindow::New();
renWindow->SetSize (800,800);
renWindow->AddRenderer(ren);
vtkRenderWindowInteractor * interacteur = vtkRenderWindowInteractor::New();
vtkInteractorStyleSwitch * style = vtkInteractorStyleSwitch::SafeDownCast (interacteur->GetInteractorStyle());
interacteur->SetRenderWindow(renWindow);
if (style) style->SetCurrentStyleToTrackballCamera ();
// Render
//renWindow->Render();
//interacteur->Start();
// Clean up
// NOTE(review): nodes are deleted here as well as the mesh — assumes
// Mesh does not delete its corner nodes; verify against Mesh's dtor.
delete mesh;
delete n1;
delete n2;
delete n3;
delete n4;
delete n5;
delete n6;
delete n7;
delete n8;
shrinkFilter->Delete();
dataSetSurfaceFilter->Delete();
polyDataMapper1->Delete();
polyDataMapper2->Delete();
actor1->Delete();
actor2->Delete();
ren->Delete();
renWindow->Delete();
interacteur->Delete();
return 0;
}
|
/*
* Copyright 2010, Haiku.
* Distributed under the terms of the MIT License.
*
* Authors:
* Clemens Zeidler <haiku@clemens-zeidler.de>
*/
#include "Stacking.h"
#include <Debug.h>
#include "StackAndTilePrivate.h"
#include "Desktop.h"
#include "SATWindow.h"
#include "Window.h"
//#define DEBUG_STACKING
#ifdef DEBUG_STACKING
# define STRACE_STACKING(x...) debug_printf("SAT Stacking: "x)
#else
# define STRACE_STACKING(x...) ;
#endif
using namespace BPrivate;
const float kMaxTabWidth = 165.;
// Handle a stacking protocol message from a client window.
// Reads the message code and its payload from `link`, performs the
// requested stack operation on `sender`'s window area/group, and writes
// the result (status plus any requested data) to `reply`.
// Returns false for malformed messages or unknown codes; returns true
// when the message was consumed (even if the reply carries B_BAD_VALUE).
bool
StackingEventHandler::HandleMessage(SATWindow* sender,
BPrivate::LinkReceiver& link, BPrivate::LinkSender& reply)
{
Desktop* desktop = sender->GetDesktop();
StackAndTile* stackAndTile = sender->GetStackAndTile();
int32 what;
link.Read<int32>(&what);
switch (what) {
case kAddWindowToStack:
{
// Payload: client port/token/team identifying the window to add,
// plus the stack position to insert after (-1 = append at end).
port_id port;
int32 token;
team_id team;
link.Read<port_id>(&port);
link.Read<int32>(&token);
link.Read<team_id>(&team);
int32 position;
if (link.Read<int32>(&position) != B_OK)
return false;
WindowArea* area = sender->GetWindowArea();
if (!area)
return false;
if (position < 0)
position = area->WindowList().CountItems() - 1;
SATWindow* parent = area->WindowList().ItemAt(position);
Window* window = desktop->WindowForClientLooperPort(port);
if (!parent || !window) {
reply.StartMessage(B_BAD_VALUE);
reply.Flush();
break;
}
SATWindow* candidate = stackAndTile->GetSATWindow(window);
if (!candidate)
return false;
if (!parent->StackWindow(candidate))
return false;
reply.StartMessage(B_OK);
reply.Flush();
break;
}
case kRemoveWindowFromStack:
{
// Remove the window identified by port/token/team from sender's group.
port_id port;
int32 token;
team_id team;
link.Read<port_id>(&port);
link.Read<int32>(&token);
if (link.Read<team_id>(&team) != B_OK)
return false;
SATGroup* group = sender->GetGroup();
if (!group)
return false;
Window* window = desktop->WindowForClientLooperPort(port);
if (!window) {
reply.StartMessage(B_BAD_VALUE);
reply.Flush();
break;
}
SATWindow* candidate = stackAndTile->GetSATWindow(window);
if (!candidate)
return false;
if (!group->RemoveWindow(candidate, false))
return false;
break;
}
case kRemoveWindowFromStackAt:
{
// Remove the window at `position` in the stack and report its
// client identifiers back to the caller.
int32 position;
if (link.Read<int32>(&position) != B_OK)
return false;
SATGroup* group = sender->GetGroup();
WindowArea* area = sender->GetWindowArea();
if (!area || !group)
return false;
SATWindow* removeWindow = area->WindowList().ItemAt(position);
if (!removeWindow) {
reply.StartMessage(B_BAD_VALUE);
reply.Flush();
break;
}
if (!group->RemoveWindow(removeWindow, false))
return false;
ServerWindow* window = removeWindow->GetWindow()->ServerWindow();
reply.StartMessage(B_OK);
reply.Attach<port_id>(window->ClientLooperPort());
reply.Attach<int32>(window->ClientToken());
reply.Attach<team_id>(window->ClientTeam());
reply.Flush();
break;
}
case kCountWindowsOnStack:
{
// Reply with the number of windows in sender's stack.
WindowArea* area = sender->GetWindowArea();
if (!area)
return false;
reply.StartMessage(B_OK);
reply.Attach<int32>(area->WindowList().CountItems());
reply.Flush();
break;
}
case kWindowOnStackAt:
{
// Reply with the client identifiers of the window at `position`.
int32 position;
if (link.Read<int32>(&position) != B_OK)
return false;
WindowArea* area = sender->GetWindowArea();
if (!area)
return false;
SATWindow* satWindow = area->WindowList().ItemAt(position);
if (!satWindow) {
reply.StartMessage(B_BAD_VALUE);
reply.Flush();
break;
}
ServerWindow* window = satWindow->GetWindow()->ServerWindow();
reply.StartMessage(B_OK);
reply.Attach<port_id>(window->ClientLooperPort());
reply.Attach<int32>(window->ClientToken());
reply.Attach<team_id>(window->ClientTeam());
reply.Flush();
break;
}
case kStackHasWindow:
{
// Reply with whether the identified window is in sender's stack.
port_id port;
int32 token;
team_id team;
link.Read<port_id>(&port);
link.Read<int32>(&token);
if (link.Read<team_id>(&team) != B_OK)
return false;
Window* window = desktop->WindowForClientLooperPort(port);
if (!window) {
reply.StartMessage(B_BAD_VALUE);
reply.Flush();
break;
}
SATWindow* candidate = stackAndTile->GetSATWindow(window);
if (!candidate)
return false;
WindowArea* area = sender->GetWindowArea();
if (!area)
return false;
reply.StartMessage(B_OK);
reply.Attach<bool>(area->WindowList().HasItem(candidate));
reply.Flush();
break;
}
default:
return false;
}
return true;
}
// Create the stacking behavior for `window`; no stacking-parent
// candidate is selected until FindSnappingCandidates() succeeds.
SATStacking::SATStacking(SATWindow* window)
:
fSATWindow(window),
fStackingParent(NULL)
{
}
SATStacking::~SATStacking()
{
}
// Search `group` for a window whose tab lies under the mouse while this
// window is being dragged by its title bar; if found, remember it as the
// stacking parent, highlight both tabs, and return true.
bool
SATStacking::FindSnappingCandidates(SATGroup* group)
{
_ClearSearchResult();
Window* window = fSATWindow->GetWindow();
if (!window->Decorator())
return false;
BPoint mousePosition;
int32 buttons;
fSATWindow->GetDesktop()->GetLastMouseState(&mousePosition, &buttons);
// Only stack when the drag grabs the title bar.
if (!window->Decorator()->TitleBarRect().Contains(mousePosition))
return false;
// use the upper edge of the candidate window to find the parent window
mousePosition.y = window->Decorator()->TitleBarRect().top;
for (int i = 0; i < group->CountItems(); i++) {
SATWindow* satWindow = group->WindowAt(i);
// search for stacking parent
Window* parentWindow = satWindow->GetWindow();
if (parentWindow == window || parentWindow->Decorator() == NULL)
continue;
// Both windows must have a stackable look (titled/document).
if (_IsStackableWindow(parentWindow) == false
|| _IsStackableWindow(window) == false)
continue;
Decorator::Tab* tab = parentWindow->Decorator()->TabAt(
parentWindow->PositionInStack());
if (tab == NULL)
continue;
if (tab->tabRect.Contains(mousePosition)) {
// remember window as the parent for stacking
fStackingParent = satWindow;
_HighlightWindows(true);
return true;
}
}
return false;
}
bool
SATStacking::JoinCandidates()
{
if (!fStackingParent)
return false;
bool result = fStackingParent->StackWindow(fSATWindow);
_ClearSearchResult();
return result;
}
// After this window leaves `area`, let the first remaining window in
// the area re-run the group layout.
void
SATStacking::RemovedFromArea(WindowArea* area)
{
	const SATWindowList& remaining = area->WindowList();
	if (remaining.CountItems() <= 0)
		return;
	remaining.ItemAt(0)->DoGroupLayout();
}
// React to a window look change: if the window still shares a stack
// with other windows but its new look is not stackable, remove it from
// its group.
void
SATStacking::WindowLookChanged(window_look look)
{
	Window* window = fSATWindow->GetWindow();
	WindowStack* stack = window->GetWindowStack();
	if (stack == NULL)
		return;

	SATGroup* group = fSATWindow->GetGroup();
	if (group == NULL)
		return;

	bool sharesStack = stack->CountWindows() > 1;
	if (sharesStack && !_IsStackableWindow(window))
		group->RemoveWindow(fSATWindow);
}
// Only document- and titled-look windows can participate in a stack.
bool
SATStacking::_IsStackableWindow(Window* window)
{
	window_look look = window->Look();
	return look == B_DOCUMENT_WINDOW_LOOK || look == B_TITLED_WINDOW_LOOK;
}
// Forget the current stacking-parent candidate, removing its tab
// highlight first. No-op when no candidate is set.
void
SATStacking::_ClearSearchResult()
{
	if (fStackingParent == NULL)
		return;

	_HighlightWindows(false);
	fStackingParent = NULL;
}
void
SATStacking::_HighlightWindows(bool highlight)
{
Desktop* desktop = fSATWindow->GetWindow()->Desktop();
if (!desktop)
return;
fStackingParent->HighlightTab(highlight);
fSATWindow->HighlightTab(highlight);
}
|
/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
file Copyright.txt or https://cmake.org/licensing for details. */
#include "cmSystemTools.h"
#include "cmAlgorithms.h"
#include "cmProcessOutput.h"
#include "cm_sys_stat.h"
#if defined(CMAKE_BUILD_WITH_CMAKE)
#include "cmArchiveWrite.h"
#include "cmLocale.h"
#include "cm_libarchive.h"
#ifndef __LA_INT64_T
#define __LA_INT64_T la_int64_t
#endif
#endif
#if defined(CMAKE_BUILD_WITH_CMAKE)
#include "cmCryptoHash.h"
#endif
#if defined(CMAKE_USE_ELF_PARSER)
#include "cmELF.h"
#endif
#if defined(CMAKE_USE_MACH_PARSER)
#include "cmMachO.h"
#endif
#include "cmsys/Directory.hxx"
#include "cmsys/Encoding.hxx"
#include "cmsys/FStream.hxx"
#include "cmsys/RegularExpression.hxx"
#include "cmsys/System.h"
#include "cmsys/Terminal.h"
#include <algorithm>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <iostream>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <utility>
#if defined(_WIN32)
#include <windows.h>
// include wincrypt.h after windows.h
#include <wincrypt.h>
#else
#include <sys/time.h>
#include <unistd.h>
#include <utime.h>
#endif
#if defined(_WIN32) && \
(defined(_MSC_VER) || defined(__WATCOMC__) || defined(__MINGW32__))
#include <io.h>
#endif
#if defined(__APPLE__)
#include <mach-o/dyld.h>
#endif
#ifdef __QNX__
#include <malloc.h> /* for malloc/free on QNX */
#endif
// True when `c` is a 7-bit ASCII whitespace character. The high-bit
// check keeps isspace() away from negative char values, whose behavior
// would be undefined.
static bool cm_isspace(char c)
{
  if (c & 0x80) {
    return false;
  }
  return isspace(c) != 0;
}
// Platform-specific storage for a file's timestamps: the three FILETIME
// values on Windows, a utimbuf (access + modification time) elsewhere.
class cmSystemToolsFileTime
{
public:
#if defined(_WIN32) && !defined(__CYGWIN__)
FILETIME timeCreation;
FILETIME timeLastAccess;
FILETIME timeLastWrite;
#else
struct utimbuf timeBuf;
#endif
};
#if !defined(HAVE_ENVIRON_NOT_REQUIRE_PROTOTYPE)
// For GetEnvironmentVariables
#if defined(_WIN32)
extern __declspec(dllimport) char** environ;
#else
extern char** environ;
#endif
#endif
#if defined(CMAKE_BUILD_WITH_CMAKE)
// Return a libarchive entry's pathname as a narrow string, going through
// the wide-character API when std::wstring support is available.
static std::string cm_archive_entry_pathname(struct archive_entry* entry)
{
#if cmsys_STL_HAS_WSTRING
return cmsys::Encoding::ToNarrow(archive_entry_pathname_w(entry));
#else
return archive_entry_pathname(entry);
#endif
}
// Open `file` for reading with libarchive, preferring the wide-character
// filename overload when std::wstring support is available.
static int cm_archive_read_open_file(struct archive* a, const char* file,
int block_size)
{
#if cmsys_STL_HAS_WSTRING
std::wstring wfile = cmsys::Encoding::ToWide(file);
return archive_read_open_filename_w(a, wfile.c_str(), block_size);
#else
return archive_read_open_filename(a, file, block_size);
#endif
}
#endif
#ifdef _WIN32
// RAII wrapper for a Win32 HANDLE: closes the handle on destruction and
// converts to bool/HANDLE for convenient use in conditions and API calls.
class cmSystemToolsWindowsHandle
{
public:
cmSystemToolsWindowsHandle(HANDLE h)
: handle_(h)
{
}
~cmSystemToolsWindowsHandle()
{
if (this->handle_ != INVALID_HANDLE_VALUE) {
CloseHandle(this->handle_);
}
}
operator bool() const { return this->handle_ != INVALID_HANDLE_VALUE; }
bool operator!() const { return this->handle_ == INVALID_HANDLE_VALUE; }
operator HANDLE() const { return this->handle_; }
private:
HANDLE handle_;
};
#elif defined(__APPLE__)
#include <crt_externs.h>
// macOS: access the environment through _NSGetEnviron() instead of the
// global `environ` symbol.
#define environ (*_NSGetEnviron())
#endif
// Process-wide flags controlling command execution and message output.
bool cmSystemTools::s_RunCommandHideConsole = false;
bool cmSystemTools::s_DisableRunCommandOutput = false;
bool cmSystemTools::s_ErrorOccured = false;
bool cmSystemTools::s_FatalErrorOccured = false;
bool cmSystemTools::s_DisableMessages = false;
bool cmSystemTools::s_ForceUnixPaths = false;
// Optional callbacks (with client data) that redirect messages, stdout,
// stderr, and interrupt polling; NULL means use the default behavior.
cmSystemTools::MessageCallback cmSystemTools::s_MessageCallback;
cmSystemTools::OutputCallback cmSystemTools::s_StdoutCallback;
cmSystemTools::OutputCallback cmSystemTools::s_StderrCallback;
cmSystemTools::InterruptCallback cmSystemTools::s_InterruptCallback;
void* cmSystemTools::s_MessageCallbackClientData;
void* cmSystemTools::s_StdoutCallbackClientData;
void* cmSystemTools::s_StderrCallbackClientData;
void* cmSystemTools::s_InterruptCallbackClientData;
// Replace every occurrence of `replace` with `with` in `source`,
// writing the result back into `source`.
#if defined(_WIN32) && !defined(__CYGWIN__)
// Replace each "[HKEY...]" registry reference in `source` with the value
// read from the registry (using the given WOW64 view), or with the
// literal "/registry" when the value cannot be read.
void cmSystemTools::ExpandRegistryValues(std::string& source, KeyWOW64 view)
{
// Regular expression to match anything inside [...] that begins in HKEY.
// Note that there is a special rule for regular expressions to match a
// close square-bracket inside a list delimited by square brackets.
// The "[^]]" part of this expression will match any character except
// a close square-bracket. The ']' character must be the first in the
// list of characters inside the [^...] block of the expression.
cmsys::RegularExpression regEntry("\\[(HKEY[^]]*)\\]");
// check for black line or comment
while (regEntry.find(source)) {
// the arguments are the second match
std::string key = regEntry.match(1);
std::string val;
if (ReadRegistryValue(key.c_str(), val, view)) {
std::string reg = "[";
reg += key + "]";
cmSystemTools::ReplaceString(source, reg.c_str(), val.c_str());
} else {
std::string reg = "[";
reg += key + "]";
cmSystemTools::ReplaceString(source, reg.c_str(), "/registry");
}
}
}
#else
// Non-Windows: the registry cannot be consulted, so every "[HKEY...]"
// reference collapses to the literal "/registry".
void cmSystemTools::ExpandRegistryValues(std::string& source,
KeyWOW64 /*unused*/)
{
cmsys::RegularExpression regEntry("\\[(HKEY[^]]*)\\]");
while (regEntry.find(source)) {
// the arguments are the second match
std::string key = regEntry.match(1);
std::string reg = "[";
reg += key + "]";
cmSystemTools::ReplaceString(source, reg.c_str(), "/registry");
}
}
#endif
// Return `str` with every double-quote character preceded by a
// backslash. Fix: iterate over the std::string itself instead of its
// c_str(), so a string containing an embedded NUL character is no
// longer silently truncated at that point.
std::string cmSystemTools::EscapeQuotes(const std::string& str)
{
  std::string result;
  result.reserve(str.size());
  for (std::string::size_type i = 0; i < str.size(); ++i) {
    if (str[i] == '"') {
      result += '\\';
    }
    result += str[i];
  }
  return result;
}
// Strip the '<' and '>' placeholder markers from a help topic name so
// it can serve as a file name.
std::string cmSystemTools::HelpFileName(std::string name)
{
  cmSystemTools::ReplaceString(name, ">", "");
  cmSystemTools::ReplaceString(name, "<", "");
  return name;
}
// Return `s` with leading and trailing ASCII whitespace (as classified
// by cm_isspace) removed; may return the empty string.
std::string cmSystemTools::TrimWhitespace(const std::string& s)
{
  std::string::size_type first = 0;
  std::string::size_type last = s.size();
  while (first < last && cm_isspace(s[first])) {
    ++first;
  }
  while (last > first && cm_isspace(s[last - 1])) {
    --last;
  }
  return s.substr(first, last - first);
}
// Report a CMake error assembled from up to four message fragments
// (NULL fragments are skipped): sets the global error flag and forwards
// the text to Message() under the "Error" title.
void cmSystemTools::Error(const char* m1, const char* m2, const char* m3,
                          const char* m4)
{
  std::string message = "CMake Error: ";
  const char* fragments[4] = { m1, m2, m3, m4 };
  for (int i = 0; i < 4; ++i) {
    if (fragments[i]) {
      message += fragments[i];
    }
  }
  cmSystemTools::s_ErrorOccured = true;
  cmSystemTools::Message(message.c_str(), "Error");
}
// Register the callback (plus client data) that GetInterruptFlag() polls
// to ask whether the current operation should be aborted.
void cmSystemTools::SetInterruptCallback(InterruptCallback f, void* clientData)
{
s_InterruptCallback = f;
s_InterruptCallbackClientData = clientData;
}
// Poll the registered interrupt callback; false when none is installed.
bool cmSystemTools::GetInterruptFlag()
{
if (s_InterruptCallback) {
return (*s_InterruptCallback)(s_InterruptCallbackClientData);
}
return false;
}
// Register the callback used by Message() instead of writing to stderr.
void cmSystemTools::SetMessageCallback(MessageCallback f, void* clientData)
{
s_MessageCallback = f;
s_MessageCallbackClientData = clientData;
}
// Register the callback that receives all Stdout() output.
void cmSystemTools::SetStdoutCallback(OutputCallback f, void* clientData)
{
s_StdoutCallback = f;
s_StdoutCallbackClientData = clientData;
}
// Register the callback that receives all Stderr() output.
void cmSystemTools::SetStderrCallback(OutputCallback f, void* clientData)
{
s_StderrCallback = f;
s_StderrCallbackClientData = clientData;
}
// Convenience overloads: forward a NUL-terminated string to the
// length-taking variants below.
void cmSystemTools::Stdout(const char* s)
{
cmSystemTools::Stdout(s, strlen(s));
}
void cmSystemTools::Stderr(const char* s)
{
cmSystemTools::Stderr(s, strlen(s));
}
// Write `length` bytes of `s` to the registered stderr callback, or to
// std::cerr (flushed) when no callback is installed.
void cmSystemTools::Stderr(const char* s, size_t length)
{
if (s_StderrCallback) {
(*s_StderrCallback)(s, length, s_StderrCallbackClientData);
} else {
std::cerr.write(s, length);
std::cerr.flush();
}
}
// Write `length` bytes of `s` to the registered stdout callback, or to
// std::cout (flushed) when no callback is installed.
void cmSystemTools::Stdout(const char* s, size_t length)
{
if (s_StdoutCallback) {
(*s_StdoutCallback)(s, length, s_StdoutCallbackClientData);
} else {
std::cout.write(s, length);
std::cout.flush();
}
}
// Deliver a message with an optional title. Suppressed entirely while
// s_DisableMessages is set; otherwise routed to the message callback if
// one is registered, else printed to stderr.
void cmSystemTools::Message(const char* m1, const char* title)
{
if (s_DisableMessages) {
return;
}
if (s_MessageCallback) {
(*s_MessageCallback)(m1, title, s_DisableMessages,
s_MessageCallbackClientData);
return;
}
std::cerr << m1 << std::endl << std::flush;
}
// Report `msg` followed by the operating system's description of the
// most recent system error (errno / GetLastError via the superclass).
void cmSystemTools::ReportLastSystemError(const char* msg)
{
std::string m = msg;
m += ": System Error: ";
m += Superclass::GetLastSystemError();
cmSystemTools::Error(m.c_str());
}
// True when `val` is the internal marker string "I_ON", compared
// case-insensitively. NULL and strings longer than four characters are
// rejected without further inspection.
bool cmSystemTools::IsInternallyOn(const char* val)
{
  if (!val) {
    return false;
  }
  std::string upper = val;
  if (upper.size() > 4) {
    return false;
  }
  for (std::string::size_type i = 0; i < upper.size(); ++i) {
    upper[i] = static_cast<char>(toupper(upper[i]));
  }
  return upper == "I_ON";
}
// True when `val` is one of the CMake truth words ON/1/YES/TRUE/Y,
// compared case-insensitively. NULL and strings longer than four
// characters are never "on".
bool cmSystemTools::IsOn(const char* val)
{
if (!val) {
return false;
}
size_t len = strlen(val);
if (len > 4) {
return false;
}
std::string v(val, len);
// NOTE(review): lazy check-then-insert initialization of this static
// set is not thread-safe — confirm all callers run on one thread.
static std::set<std::string> onValues;
if (onValues.empty()) {
onValues.insert("ON");
onValues.insert("1");
onValues.insert("YES");
onValues.insert("TRUE");
onValues.insert("Y");
}
// Uppercase the copy before the lookup.
for (std::string::iterator c = v.begin(); c != v.end(); c++) {
*c = static_cast<char>(toupper(*c));
}
return (onValues.count(v) > 0);
}
// True when `val` is exactly "NOTFOUND" or ends in "-NOTFOUND"
// (case-sensitive). `val` must be non-NULL.
bool cmSystemTools::IsNOTFOUND(const char* val)
{
if (strcmp(val, "NOTFOUND") == 0) {
return true;
}
return cmHasLiteralSuffix(val, "-NOTFOUND");
}
// True when `val` is NULL, empty, a *-NOTFOUND value, or one of the
// CMake false words OFF/0/NO/FALSE/N/IGNORE (case-insensitive).
bool cmSystemTools::IsOff(const char* val)
{
if (!val || !*val) {
return true;
}
size_t len = strlen(val);
// Try and avoid toupper() for large strings.
if (len > 6) {
return cmSystemTools::IsNOTFOUND(val);
}
// NOTE(review): same non-thread-safe lazy static init as IsOn().
static std::set<std::string> offValues;
if (offValues.empty()) {
offValues.insert("OFF");
offValues.insert("0");
offValues.insert("NO");
offValues.insert("FALSE");
offValues.insert("N");
offValues.insert("IGNORE");
}
// Try and avoid toupper().
std::string v(val, len);
for (std::string::iterator c = v.begin(); c != v.end(); c++) {
*c = static_cast<char>(toupper(*c));
}
return (offValues.count(v) > 0);
}
// Split `command` into arguments following the MSVC runtime's rules:
// backslashes are literal except when they precede a double quote (2n
// backslashes + quote -> n backslashes and toggle quoting; 2n+1 -> n
// backslashes and a literal quote). Appends the arguments to `args`.
void cmSystemTools::ParseWindowsCommandLine(const char* command,
std::vector<std::string>& args)
{
// See the MSDN document "Parsing C Command-Line Arguments" at
// http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx for rules
// of parsing the windows command line.
bool in_argument = false;
bool in_quotes = false;
int backslashes = 0;
std::string arg;
for (const char* c = command; *c; ++c) {
if (*c == '\\') {
// Defer backslashes until we know whether a quote follows.
++backslashes;
in_argument = true;
} else if (*c == '"') {
int backslash_pairs = backslashes >> 1;
int backslash_escaped = backslashes & 1;
arg.append(backslash_pairs, '\\');
backslashes = 0;
if (backslash_escaped) {
/* An odd number of backslashes precede this quote.
It is escaped. */
arg.append(1, '"');
} else {
/* An even number of backslashes precede this quote.
It is not escaped. */
in_quotes = !in_quotes;
}
in_argument = true;
} else {
// Any pending backslashes are literal before a non-quote char.
arg.append(backslashes, '\\');
backslashes = 0;
if (cm_isspace(*c)) {
if (in_quotes) {
arg.append(1, *c);
} else if (in_argument) {
// Whitespace outside quotes terminates the argument.
args.push_back(arg);
arg = "";
in_argument = false;
}
} else {
in_argument = true;
arg.append(1, *c);
}
}
}
// Flush trailing backslashes and the final argument, if any.
arg.append(backslashes, '\\');
if (in_argument) {
args.push_back(arg);
}
}
// RAII owner for a malloc'd NULL-terminated argv array (as returned by
// cmsysSystem_Parse_CommandForUnix): frees each string and the array on
// destruction, and can copy the strings into a std::vector.
class cmSystemToolsArgV
{
char** ArgV;
public:
cmSystemToolsArgV(char** argv)
: ArgV(argv)
{
}
~cmSystemToolsArgV()
{
for (char** arg = this->ArgV; arg && *arg; ++arg) {
free(*arg);
}
free(this->ArgV);
}
// Append each argument string to `args`.
void Store(std::vector<std::string>& args) const
{
for (char** arg = this->ArgV; arg && *arg; ++arg) {
args.push_back(*arg);
}
}
};
// Split `command` using POSIX shell quoting rules via the KWSys parser
// and append the resulting arguments to `args`.
void cmSystemTools::ParseUnixCommandLine(const char* command,
std::vector<std::string>& args)
{
// Invoke the underlying parser.
cmSystemToolsArgV argv = cmsysSystem_Parse_CommandForUnix(command, 0);
argv.Store(args);
}
// Expand "@file" response-file references in [argBeg, argEnd): each
// argument starting with '@' is replaced by the arguments parsed from
// the named file (platform command-line rules); other arguments are
// passed through unchanged.
std::vector<std::string> cmSystemTools::HandleResponseFile(
std::vector<std::string>::const_iterator argBeg,
std::vector<std::string>::const_iterator argEnd)
{
std::vector<std::string> arg_full;
for (std::vector<std::string>::const_iterator a = argBeg; a != argEnd; ++a) {
std::string const& arg = *a;
if (cmHasLiteralPrefix(arg, "@")) {
cmsys::ifstream responseFile(arg.substr(1).c_str(), std::ios::in);
if (!responseFile) {
std::string error = "failed to open for reading (";
error += cmSystemTools::GetLastSystemError();
error += "):\n ";
error += arg.substr(1);
cmSystemTools::Error(error.c_str());
} else {
// NOTE(review): only the first line of the response file is read
// and parsed — confirm multi-line response files are not expected.
std::string line;
cmSystemTools::GetLineFromStream(responseFile, line);
std::vector<std::string> args2;
#ifdef _WIN32
cmSystemTools::ParseWindowsCommandLine(line.c_str(), args2);
#else
cmSystemTools::ParseUnixCommandLine(line.c_str(), args2);
#endif
arg_full.insert(arg_full.end(), args2.begin(), args2.end());
}
} else {
arg_full.push_back(arg);
}
}
return arg_full;
}
// Split `command` into whitespace-separated arguments, honoring single
// and double quotes (quotes are stripped). A backslash escapes the next
// character, except when the command starts like a Windows path (drive
// letter or UNC prefix), in which case backslashes are kept literally.
std::vector<std::string> cmSystemTools::ParseArguments(const char* command)
{
  std::vector<std::string> args;
  std::string arg;

  // Detect a Windows-style path at the start of the command. Each index
  // is guarded by the string length: the previous code read
  // command[1..3] unconditionally, which runs past the terminating NUL
  // for commands shorter than four characters.
  size_t const len = strlen(command);
  bool win_path = false;
  if ((len >= 3 && command[0] != '/' && command[1] == ':' &&
       command[2] == '\\') ||
      (len >= 4 && command[0] == '\"' && command[1] != '/' &&
       command[2] == ':' && command[3] == '\\') ||
      (len >= 4 && command[0] == '\'' && command[1] != '/' &&
       command[2] == ':' && command[3] == '\\') ||
      (len >= 2 && command[0] == '\\' && command[1] == '\\')) {
    win_path = true;
  }
  // Split the command into an argv array.
  for (const char* c = command; *c;) {
    // Skip over whitespace.
    while (*c == ' ' || *c == '\t') {
      ++c;
    }
    arg = "";
    if (*c == '"') {
      // Parse a double-quoted argument.
      ++c;
      while (*c && *c != '"') {
        arg.append(1, *c);
        ++c;
      }
      if (*c) {
        ++c;
      }
      args.push_back(arg);
    } else if (*c == '\'') {
      // Parse a single-quoted argument.
      ++c;
      while (*c && *c != '\'') {
        arg.append(1, *c);
        ++c;
      }
      if (*c) {
        ++c;
      }
      args.push_back(arg);
    } else if (*c) {
      // Parse an unquoted argument.
      while (*c && *c != ' ' && *c != '\t') {
        if (*c == '\\' && !win_path) {
          // Backslash escapes the following character.
          ++c;
          if (*c) {
            arg.append(1, *c);
            ++c;
          }
        } else {
          arg.append(1, *c);
          ++c;
        }
      }
      args.push_back(arg);
    }
  }
  return args;
}
// Estimate the longest command line the platform can execute:
// 8191 WCHARs on Windows (cmd.exe's limit), PAGE_SIZE*32 on Linux
// (MAX_ARG_STRLEN), 0 (unknown) elsewhere — further capped by
// sysconf(_SC_ARG_MAX) minus a rough 1000-byte environment allowance.
size_t cmSystemTools::CalculateCommandLineLengthLimit()
{
size_t sz =
#ifdef _WIN32
// There's a maximum of 65536 bytes and thus 32768 WCHARs on Windows
// However, cmd.exe itself can only handle 8191 WCHARs and Ninja for
// example uses it to spawn processes.
size_t(8191);
#elif defined(__linux)
// MAX_ARG_STRLEN is the maximum length of a string permissible for
// the execve() syscall on Linux. It's defined as (PAGE_SIZE * 32)
// in Linux's binfmts.h
static_cast<size_t>(sysconf(_SC_PAGESIZE) * 32);
#else
size_t(0);
#endif
#if defined(_SC_ARG_MAX)
// ARG_MAX is the maximum size of the command and environment
// that can be passed to the exec functions on UNIX.
// The value in limits.h does not need to be present and may
// depend upon runtime memory constraints, hence sysconf()
// should be used to query it.
long szArgMax = sysconf(_SC_ARG_MAX);
// A return value of -1 signifies an undetermined limit, but
// it does not imply an infinite limit, and thus is ignored.
if (szArgMax != -1) {
// We estimate the size of the environment block to be 1000.
// This isn't accurate at all, but leaves some headroom.
szArgMax = szArgMax < 1000 ? 0 : szArgMax - 1000;
#if defined(_WIN32) || defined(__linux)
sz = std::min(sz, static_cast<size_t>(szArgMax));
#else
sz = static_cast<size_t>(szArgMax);
#endif
}
#endif
return sz;
}
// Run one child process given as an argv-style vector.
// captureStdOut/captureStdErr: if non-NULL, receive the decoded output of the
//   corresponding pipe (pass the same pointer for both to get merged output).
// retVal: if non-NULL, receives the exit code; otherwise a nonzero exit code
//   makes the function return false.
// dir: working directory for the child (NULL = inherit).
// outputflag: OUTPUT_NONE suppresses echoing, OUTPUT_PASSTHROUGH shares the
//   parent's pipes (disables capture), OUTPUT_MERGE folds stderr into stdout.
// timeout: seconds; <= 0 presumably means no timeout — TODO confirm with
//   cmsysProcess_SetTimeout.
// Returns false on abnormal termination (exception/error/timeout) or, when
// retVal is NULL, on a nonzero exit code.
bool cmSystemTools::RunSingleCommand(std::vector<std::string> const& command,
                                     std::string* captureStdOut,
                                     std::string* captureStdErr, int* retVal,
                                     const char* dir, OutputOption outputflag,
                                     double timeout, Encoding encoding)
{
  // Build a NULL-terminated argv array of borrowed pointers.
  std::vector<const char*> argv;
  for (std::vector<std::string>::const_iterator a = command.begin();
       a != command.end(); ++a) {
    argv.push_back(a->c_str());
  }
  argv.push_back(CM_NULLPTR);

  cmsysProcess* cp = cmsysProcess_New();
  cmsysProcess_SetCommand(cp, &*argv.begin());
  cmsysProcess_SetWorkingDirectory(cp, dir);
  if (cmSystemTools::GetRunCommandHideConsole()) {
    cmsysProcess_SetOption(cp, cmsysProcess_Option_HideWindow, 1);
  }

  if (outputflag == OUTPUT_PASSTHROUGH) {
    // Child writes directly to our stdout/stderr; nothing to capture.
    cmsysProcess_SetPipeShared(cp, cmsysProcess_Pipe_STDOUT, 1);
    cmsysProcess_SetPipeShared(cp, cmsysProcess_Pipe_STDERR, 1);
    captureStdOut = CM_NULLPTR;
    captureStdErr = CM_NULLPTR;
  } else if (outputflag == OUTPUT_MERGE ||
             (captureStdErr && captureStdErr == captureStdOut)) {
    // Merge stderr into the stdout pipe; stderr is no longer read separately.
    cmsysProcess_SetOption(cp, cmsysProcess_Option_MergeOutput, 1);
    captureStdErr = CM_NULLPTR;
  }
  // After normalization the two capture targets are always distinct.
  assert(!captureStdErr || captureStdErr != captureStdOut);

  cmsysProcess_SetTimeout(cp, timeout);
  cmsysProcess_Execute(cp);

  // Raw (undecoded) bytes accumulated per pipe; decoded only at the end so
  // multi-byte sequences split across reads are handled correctly.
  std::vector<char> tempStdOut;
  std::vector<char> tempStdErr;
  char* data;
  int length;
  int pipe;
  cmProcessOutput processOutput(encoding);
  std::string strdata;
  if (outputflag != OUTPUT_PASSTHROUGH &&
      (captureStdOut || captureStdErr || outputflag != OUTPUT_NONE)) {
    // Pump both pipes until the child closes them.
    while ((pipe = cmsysProcess_WaitForData(cp, &data, &length, CM_NULLPTR)) >
           0) {
      // Translate NULL characters in the output into valid text.
      for (int i = 0; i < length; ++i) {
        if (data[i] == '\0') {
          data[i] = ' ';
        }
      }

      if (pipe == cmsysProcess_Pipe_STDOUT) {
        if (outputflag != OUTPUT_NONE) {
          // Echo incrementally (decoder id 1 = stdout stream).
          processOutput.DecodeText(data, length, strdata, 1);
          cmSystemTools::Stdout(strdata.c_str(), strdata.size());
        }
        if (captureStdOut) {
          tempStdOut.insert(tempStdOut.end(), data, data + length);
        }
      } else if (pipe == cmsysProcess_Pipe_STDERR) {
        if (outputflag != OUTPUT_NONE) {
          processOutput.DecodeText(data, length, strdata, 2);
          cmSystemTools::Stderr(strdata.c_str(), strdata.size());
        }
        if (captureStdErr) {
          tempStdErr.insert(tempStdErr.end(), data, data + length);
        }
      }
    }

    if (outputflag != OUTPUT_NONE) {
      // Flush any partial multi-byte sequence left in each decoder.
      processOutput.DecodeText(std::string(), strdata, 1);
      if (!strdata.empty()) {
        cmSystemTools::Stdout(strdata.c_str(), strdata.size());
      }
      processOutput.DecodeText(std::string(), strdata, 2);
      if (!strdata.empty()) {
        cmSystemTools::Stderr(strdata.c_str(), strdata.size());
      }
    }
  }

  cmsysProcess_WaitForExit(cp, CM_NULLPTR);

  // Decode the full captured byte streams into the caller's strings.
  if (captureStdOut) {
    captureStdOut->assign(tempStdOut.begin(), tempStdOut.end());
    processOutput.DecodeText(*captureStdOut, *captureStdOut);
  }
  if (captureStdErr) {
    captureStdErr->assign(tempStdErr.begin(), tempStdErr.end());
    processOutput.DecodeText(*captureStdErr, *captureStdErr);
  }

  bool result = true;
  if (cmsysProcess_GetState(cp) == cmsysProcess_State_Exited) {
    if (retVal) {
      // Caller inspects the exit code; the run itself is a success.
      *retVal = cmsysProcess_GetExitValue(cp);
    } else {
      if (cmsysProcess_GetExitValue(cp) != 0) {
        result = false;
      }
    }
  } else if (cmsysProcess_GetState(cp) == cmsysProcess_State_Exception) {
    const char* exception_str = cmsysProcess_GetExceptionString(cp);
    if (outputflag != OUTPUT_NONE) {
      std::cerr << exception_str << std::endl;
    }
    if (captureStdErr) {
      captureStdErr->append(exception_str, strlen(exception_str));
    }
    result = false;
  } else if (cmsysProcess_GetState(cp) == cmsysProcess_State_Error) {
    const char* error_str = cmsysProcess_GetErrorString(cp);
    if (outputflag != OUTPUT_NONE) {
      std::cerr << error_str << std::endl;
    }
    if (captureStdErr) {
      captureStdErr->append(error_str, strlen(error_str));
    }
    result = false;
  } else if (cmsysProcess_GetState(cp) == cmsysProcess_State_Expired) {
    const char* error_str = "Process terminated due to timeout\n";
    if (outputflag != OUTPUT_NONE) {
      std::cerr << error_str << std::endl;
    }
    if (captureStdErr) {
      captureStdErr->append(error_str, strlen(error_str));
    }
    result = false;
  }

  cmsysProcess_Delete(cp);

  return result;
}
// Convenience overload: parse a whole command line into arguments and run it
// via the vector overload.  Returns false for an unparsable/empty command.
bool cmSystemTools::RunSingleCommand(const char* command,
                                     std::string* captureStdOut,
                                     std::string* captureStdErr, int* retVal,
                                     const char* dir, OutputOption outputflag,
                                     double timeout)
{
  // A global switch can force all run-command output off.
  if (s_DisableRunCommandOutput) {
    outputflag = OUTPUT_NONE;
  }

  std::vector<std::string> parsed = cmSystemTools::ParseArguments(command);
  return !parsed.empty() &&
    cmSystemTools::RunSingleCommand(parsed, captureStdOut, captureStdErr,
                                    retVal, dir, outputflag, timeout);
}
// Render a command vector as a human-readable string: each argument wrapped
// in double quotes, arguments separated by single spaces.  An empty command
// prints as the empty string.
std::string cmSystemTools::PrintSingleCommand(
  std::vector<std::string> const& command)
{
  std::string printable;
  if (!command.empty()) {
    printable = cmWrap('"', command, '"', " ");
  }
  return printable;
}
// Return true if "<name>.<ext>" exists on disk for any extension in
// headerExts.  Extensions are supplied without the leading dot.
bool cmSystemTools::DoesFileExistWithExtensions(
  const char* name, const std::vector<std::string>& headerExts)
{
  std::vector<std::string>::const_iterator it = headerExts.begin();
  const std::vector<std::string>::const_iterator end = headerExts.end();
  for (; it != end; ++it) {
    std::string candidate = name;
    candidate += ".";
    candidate += *it;
    if (cmSystemTools::FileExists(candidate.c_str())) {
      return true;
    }
  }
  return false;
}
// Search for a file named fname in directory and then in each parent
// directory, stopping once the directory path becomes shorter than toplevel.
// Returns the full path of the first hit, or "" if nothing was found.
std::string cmSystemTools::FileExistsInParentDirectories(const char* fname,
                                                         const char* directory,
                                                         const char* toplevel)
{
  std::string file = fname;
  cmSystemTools::ConvertToUnixSlashes(file);
  std::string dir = directory;
  cmSystemTools::ConvertToUnixSlashes(dir);
  std::string lastDir;
  // GetParentDirectory eventually yields the same path again; that (or the
  // toplevel length bound) terminates the walk.
  while (dir != lastDir) {
    const std::string candidate = dir + "/" + file;
    if (cmSystemTools::FileExists(candidate.c_str())) {
      return candidate;
    }
    if (dir.size() < strlen(toplevel)) {
      break;
    }
    lastDir = dir;
    dir = cmSystemTools::GetParentDirectory(dir);
  }
  return "";
}
// Copy source to destination unconditionally; thin wrapper over KWSys.
bool cmSystemTools::cmCopyFile(const char* source, const char* destination)
{
  return Superclass::CopyFileAlways(source, destination);
}
// Copy source to destination only when their contents differ; thin wrapper
// over KWSys.
bool cmSystemTools::CopyFileIfDifferent(const char* source,
                                        const char* destination)
{
  return Superclass::CopyFileIfDifferent(source, destination);
}
#ifdef _WIN32
// Return the retry count/delay used for racy Windows file operations.
// Values may be overridden via the registry (HKCU then HKLM) under
// Software\Kitware\CMake\Config with DWORD values FilesystemRetryCount and
// FilesystemRetryDelay; defaults are 5 retries at 500 ms.  Computed once and
// cached in a function-local static.
cmSystemTools::WindowsFileRetry cmSystemTools::GetWindowsFileRetry()
{
  static WindowsFileRetry retry = { 0, 0 };
  if (!retry.Count) {
    unsigned int data[2] = { 0, 0 };
    HKEY const keys[2] = { HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE };
    wchar_t const* const values[2] = { L"FilesystemRetryCount",
                                       L"FilesystemRetryDelay" };
    for (int k = 0; k < 2; ++k) {
      HKEY hKey;
      if (RegOpenKeyExW(keys[k], L"Software\\Kitware\\CMake\\Config", 0,
                        KEY_QUERY_VALUE, &hKey) == ERROR_SUCCESS) {
        for (int v = 0; v < 2; ++v) {
          DWORD dwData, dwType, dwSize = 4;
          // First hive (HKCU) that defines a value wins; !data[v] skips
          // values already found.
          if (!data[v] &&
              RegQueryValueExW(hKey, values[v], 0, &dwType, (BYTE*)&dwData,
                               &dwSize) == ERROR_SUCCESS &&
              dwType == REG_DWORD && dwSize == 4) {
            data[v] = static_cast<unsigned int>(dwData);
          }
        }
        RegCloseKey(hKey);
      }
    }
    retry.Count = data[0] ? data[0] : 5;
    retry.Delay = data[1] ? data[1] : 500;
  }
  return retry;
}
#endif
// Rename oldname to newname, replacing any existing destination.
// On UNIX this is a single atomic rename(2).  On Windows it retries
// MoveFileEx, clearing a read-only destination attribute or delaying between
// attempts, because the destination may be transiently locked by another
// process.  Returns true on success.
bool cmSystemTools::RenameFile(const char* oldname, const char* newname)
{
#ifdef _WIN32
#ifndef INVALID_FILE_ATTRIBUTES
#define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
#endif
  /* Windows MoveFileEx may not replace read-only or in-use files.  If it
     fails then remove the read-only attribute from any existing destination.
     Try multiple times since we may be racing against another process
     creating/opening the destination file just before our MoveFileEx.  */
  WindowsFileRetry retry = cmSystemTools::GetWindowsFileRetry();
  while (
    !MoveFileExW(SystemTools::ConvertToWindowsExtendedPath(oldname).c_str(),
                 SystemTools::ConvertToWindowsExtendedPath(newname).c_str(),
                 MOVEFILE_REPLACE_EXISTING) &&
    --retry.Count) {
    DWORD last_error = GetLastError();
    // Try again only if failure was due to access/sharing permissions.
    if (last_error != ERROR_ACCESS_DENIED &&
        last_error != ERROR_SHARING_VIOLATION) {
      return false;
    }
    DWORD attrs = GetFileAttributesW(
      SystemTools::ConvertToWindowsExtendedPath(newname).c_str());
    if ((attrs != INVALID_FILE_ATTRIBUTES) &&
        (attrs & FILE_ATTRIBUTE_READONLY)) {
      // Remove the read-only attribute from the destination file.
      SetFileAttributesW(
        SystemTools::ConvertToWindowsExtendedPath(newname).c_str(),
        attrs & ~FILE_ATTRIBUTE_READONLY);
    } else {
      // The file may be temporarily in use so wait a bit.
      cmSystemTools::Delay(retry.Delay);
    }
  }
  // Count reaches 0 only when every attempt failed.
  return retry.Count > 0;
#else
  /* On UNIX we have an OS-provided call to do this atomically. */
  return rename(oldname, newname) == 0;
#endif
}
// Compute the MD5 of the file at 'source' and write its 32-character hex
// digest into md5out.  Returns false (and reports an error in bootstrap
// builds) on failure.  NOTE: strncpy writes exactly 32 bytes on success and
// does NOT NUL-terminate; md5out must be at least a 32-char buffer and
// callers must not treat it as a C string.
bool cmSystemTools::ComputeFileMD5(const std::string& source, char* md5out)
{
#if defined(CMAKE_BUILD_WITH_CMAKE)
  cmCryptoHash md5(cmCryptoHash::AlgoMD5);
  std::string const str = md5.HashFile(source);
  strncpy(md5out, str.c_str(), 32);
  // HashFile returns "" on failure to read the file.
  return !str.empty();
#else
  (void)source;
  (void)md5out;
  cmSystemTools::Message("md5sum not supported in bootstrapping mode",
                         "Error");
  return false;
#endif
}
// Compute the MD5 hex digest of 'input'.  Returns "" (and reports an error)
// in bootstrap builds where the crypto-hash support is unavailable.
std::string cmSystemTools::ComputeStringMD5(const std::string& input)
{
#if defined(CMAKE_BUILD_WITH_CMAKE)
  cmCryptoHash md5(cmCryptoHash::AlgoMD5);
  return md5.HashString(input);
#else
  (void)input;
  cmSystemTools::Message("md5sum not supported in bootstrapping mode",
                         "Error");
  return "";
#endif
}
// Compute the SHA-1 thumbprint of a PFX certificate file (Windows-only, full
// CMake build).  Reads the whole file, validates it as a PFX blob, imports it
// as a certificate store, takes the first certificate's CERT_HASH_PROP_ID
// (20 bytes) and formats it as 40 uppercase hex characters.  Returns "" on
// any failure.
std::string cmSystemTools::ComputeCertificateThumbprint(
  const std::string& source)
{
  std::string thumbprint;

#if defined(CMAKE_BUILD_WITH_CMAKE) && defined(_WIN32)
  BYTE* certData = NULL;
  CRYPT_INTEGER_BLOB cryptBlob;
  HCERTSTORE certStore = NULL;
  PCCERT_CONTEXT certContext = NULL;

  HANDLE certFile = CreateFileW(
    cmsys::Encoding::ToWide(source.c_str()).c_str(), GENERIC_READ,
    FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);

  if (certFile != INVALID_HANDLE_VALUE && certFile != NULL) {
    DWORD fileSize = GetFileSize(certFile, NULL);
    if (fileSize != INVALID_FILE_SIZE) {
      certData = new BYTE[fileSize];
      if (certData != NULL) {
        DWORD dwRead = 0;
        if (ReadFile(certFile, certData, fileSize, &dwRead, NULL)) {
          cryptBlob.cbData = fileSize;
          cryptBlob.pbData = certData;

          // Verify that this is a valid cert
          if (PFXIsPFXBlob(&cryptBlob)) {
            // Open the certificate as a store
            certStore = PFXImportCertStore(&cryptBlob, NULL, CRYPT_EXPORTABLE);
            if (certStore != NULL) {
              // There should only be 1 cert.
              certContext =
                CertEnumCertificatesInStore(certStore, certContext);
              if (certContext != NULL) {
                // The hash is 20 bytes
                BYTE hashData[20];
                DWORD hashLength = 20;

                // Buffer to print the hash. Each byte takes 2 chars +
                // terminating character
                char hashPrint[41];
                char* pHashPrint = hashPrint;
                // Get the hash property from the certificate
                if (CertGetCertificateContextProperty(
                      certContext, CERT_HASH_PROP_ID, hashData, &hashLength)) {
                  for (DWORD i = 0; i < hashLength; i++) {
                    // Convert each byte to hexadecimal
                    sprintf(pHashPrint, "%02X", hashData[i]);
                    pHashPrint += 2;
                  }
                  *pHashPrint = '\0';
                  thumbprint = hashPrint;
                }
                CertFreeCertificateContext(certContext);
              }
              CertCloseStore(certStore, 0);
            }
          }
        }
        delete[] certData;
      }
    }
    CloseHandle(certFile);
  }
#else
  (void)source;
  cmSystemTools::Message("ComputeCertificateThumbprint is not implemented",
                         "Error");
#endif

  return thumbprint;
}
void cmSystemTools::Glob(const std::string& directory,
const std::string& regexp,
std::vector<std::string>& files)
{
cmsys::Directory d;
cmsys::RegularExpression reg(regexp.c_str());
if (d.Load(directory)) {
size_t numf;
unsigned int i;
numf = d.GetNumberOfFiles();
for (i = 0; i < numf; i++) {
std::string fname = d.GetFile(i);
if (reg.find(fname)) {
files.push_back(fname);
}
}
}
}
void cmSystemTools::GlobDirs(const std::string& path,
std::vector<std::string>& files)
{
std::string::size_type pos = path.find("/*");
if (pos == std::string::npos) {
files.push_back(path);
return;
}
std::string startPath = path.substr(0, pos);
std::string finishPath = path.substr(pos + 2);
cmsys::Directory d;
if (d.Load(startPath)) {
for (unsigned int i = 0; i < d.GetNumberOfFiles(); ++i) {
if ((std::string(d.GetFile(i)) != ".") &&
(std::string(d.GetFile(i)) != "..")) {
std::string fname = startPath;
fname += "/";
fname += d.GetFile(i);
if (cmSystemTools::FileIsDirectory(fname)) {
fname += finishPath;
cmSystemTools::GlobDirs(fname, files);
}
}
}
}
}
// Expand each ";"-separated list in 'arguments' and append the resulting
// elements to 'newargs', preserving order.
void cmSystemTools::ExpandList(std::vector<std::string> const& arguments,
                               std::vector<std::string>& newargs)
{
  std::vector<std::string>::const_iterator it = arguments.begin();
  const std::vector<std::string>::const_iterator end = arguments.end();
  while (it != end) {
    cmSystemTools::ExpandListArgument(*it, newargs);
    ++it;
  }
}
// Split a CMake list string into its elements and append them to 'newargs'.
// Elements are separated by unescaped semicolons that are not nested inside
// square brackets; "\;" produces a literal ";" inside an element.  When
// emptyArgs is true, empty elements (including an empty input) are kept;
// otherwise they are dropped.
void cmSystemTools::ExpandListArgument(const std::string& arg,
                                       std::vector<std::string>& newargs,
                                       bool emptyArgs)
{
  // If argument is empty, it is an empty list.
  if (!emptyArgs && arg.empty()) {
    return;
  }
  // if there are no ; in the name then just copy the current string
  if (arg.find(';') == std::string::npos) {
    newargs.push_back(arg);
    return;
  }
  std::string newArg;
  // 'last' marks the start of the segment not yet copied into newArg.
  const char* last = arg.c_str();
  // Break the string at non-escaped semicolons not nested in [].
  int squareNesting = 0;
  for (const char* c = last; *c; ++c) {
    switch (*c) {
      case '\\': {
        // We only want to allow escaping of semicolons. Other
        // escapes should not be processed here.
        const char* next = c + 1;
        if (*next == ';') {
          // Copy everything before the backslash, then resume at the ';'
          // so it is kept as a literal character.
          newArg.append(last, c - last);
          // Skip over the escape character
          last = c = next;
        }
      } break;
      case '[': {
        ++squareNesting;
      } break;
      case ']': {
        --squareNesting;
      } break;
      case ';': {
        // Break the string here if we are not nested inside square
        // brackets.
        if (squareNesting == 0) {
          newArg.append(last, c - last);
          // Skip over the semicolon
          last = c + 1;
          if (!newArg.empty() || emptyArgs) {
            // Add the last argument if the string is not empty.
            newargs.push_back(newArg);
            newArg = "";
          }
        }
      } break;
      default: {
        // Just append this character.
      } break;
    }
  }
  // Append the trailing segment after the final separator.
  newArg.append(last);
  if (!newArg.empty() || emptyArgs) {
    // Add the last argument if the string is not empty.
    newargs.push_back(newArg);
  }
}
// Collect directory entries matching a simple "prefix*" glob into 'files'.
// Only patterns ending in '*' are supported.  'type' filters the results:
// >0 keeps only files, <0 keeps only directories, 0 keeps both.
// Returns true when at least one entry matched.
bool cmSystemTools::SimpleGlob(const std::string& glob,
                               std::vector<std::string>& files,
                               int type /* = 0 */)
{
  files.clear();
  // Reject an empty pattern first: indexing glob[glob.size() - 1] on an
  // empty string would read out of bounds.
  if (glob.empty() || glob[glob.size() - 1] != '*') {
    return false;
  }
  std::string path = cmSystemTools::GetFilenamePath(glob);
  std::string ppath = cmSystemTools::GetFilenameName(glob);
  // Drop the trailing '*' to get the filename prefix to match.
  ppath = ppath.substr(0, ppath.size() - 1);
  if (path.empty()) {
    path = "/";
  }

  bool res = false;
  cmsys::Directory d;
  if (d.Load(path)) {
    for (unsigned int i = 0; i < d.GetNumberOfFiles(); ++i) {
      if ((std::string(d.GetFile(i)) != ".") &&
          (std::string(d.GetFile(i)) != "..")) {
        std::string fname = path;
        if (path[path.size() - 1] != '/') {
          fname += "/";
        }
        fname += d.GetFile(i);
        std::string sfname = d.GetFile(i);
        // Apply the file/directory filter.
        if (type > 0 && cmSystemTools::FileIsDirectory(fname)) {
          continue;
        }
        if (type < 0 && !cmSystemTools::FileIsDirectory(fname)) {
          continue;
        }
        // Keep entries whose name starts with the pattern prefix.
        if (sfname.size() >= ppath.size() &&
            sfname.substr(0, ppath.size()) == ppath) {
          files.push_back(fname);
          res = true;
        }
      }
    }
  }
  return res;
}
// Classify a file extension (with or without a leading dot) into a coarse
// FileFormat category.  Comparison is case-sensitive ("c" is C, "C" is C++).
// NULL or empty input yields NO_FILE_FORMAT; anything unrecognized yields
// UNKNOWN_FILE_FORMAT.
cmSystemTools::FileFormat cmSystemTools::GetFileFormat(const char* cext)
{
  if (!cext || *cext == 0) {
    return cmSystemTools::NO_FILE_FORMAT;
  }
  // Every extension is accepted with or without one leading dot; strip it
  // once so each comparison below is against the bare extension.
  std::string ext = cext;
  if (ext[0] == '.') {
    ext = ext.substr(1);
  }
  if (ext == "c" || ext == "m") {
    return cmSystemTools::C_FILE_FORMAT;
  }
  if (ext == "C" || ext == "M" || ext == "c++" || ext == "cc" ||
      ext == "cpp" || ext == "cxx" || ext == "mm") {
    return cmSystemTools::CXX_FILE_FORMAT;
  }
  if (ext == "f" || ext == "F" || ext == "f77" || ext == "f90" ||
      ext == "for" || ext == "f95") {
    return cmSystemTools::FORTRAN_FILE_FORMAT;
  }
  if (ext == "java") {
    return cmSystemTools::JAVA_FILE_FORMAT;
  }
  if (ext == "H" || ext == "h" || ext == "h++" || ext == "hm" ||
      ext == "hpp" || ext == "hxx" || ext == "in" || ext == "txx") {
    return cmSystemTools::HEADER_FILE_FORMAT;
  }
  if (ext == "rc") {
    return cmSystemTools::RESOURCE_FILE_FORMAT;
  }
  if (ext == "def") {
    return cmSystemTools::DEFINITION_FILE_FORMAT;
  }
  if (ext == "lib" || ext == "a") {
    return cmSystemTools::STATIC_LIBRARY_FILE_FORMAT;
  }
  if (ext == "o" || ext == "obj") {
    return cmSystemTools::OBJECT_FILE_FORMAT;
  }
#ifdef __APPLE__
  if (ext == "dylib") {
    return cmSystemTools::SHARED_LIBRARY_FILE_FORMAT;
  }
  if (ext == "so" || ext == "bundle") {
    return cmSystemTools::MODULE_FILE_FORMAT;
  }
#else  // __APPLE__
  if (ext == "so" || ext == "sl" || ext == "dll") {
    return cmSystemTools::SHARED_LIBRARY_FILE_FORMAT;
  }
#endif // __APPLE__

  return cmSystemTools::UNKNOWN_FILE_FORMAT;
}
// Split 's' and APPEND the pieces to 'l'.  The split is done into a
// temporary first so that any entries already in 'l' are preserved.
bool cmSystemTools::Split(const char* s, std::vector<std::string>& l)
{
  std::vector<std::string> temp;
  bool res = Superclass::Split(s, temp);
  l.insert(l.end(), temp.begin(), temp.end());
  return res;
}
// Convert a path to the form used in generated output files: Windows-style
// on Windows unless s_ForceUnixPaths is set, UNIX-style everywhere else.
std::string cmSystemTools::ConvertToOutputPath(const char* path)
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  if (s_ForceUnixPaths) {
    return cmSystemTools::ConvertToUnixOutputPath(path);
  }
  return cmSystemTools::ConvertToWindowsOutputPath(path);
#else
  return cmSystemTools::ConvertToUnixOutputPath(path);
#endif
}
// Rewrite 'path' in place with the slash style used for output: on Windows
// (unless s_ForceUnixPaths) every '/' becomes '\'; elsewhere a no-op.
void cmSystemTools::ConvertToOutputSlashes(std::string& path)
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  if (!s_ForceUnixPaths) {
    // Convert to windows slashes.
    std::string::size_type pos = 0;
    while ((pos = path.find('/', pos)) != std::string::npos) {
      path[pos++] = '\\';
    }
  }
#else
  static_cast<void>(path);
#endif
}
// Convert a path to the form expected when running commands: always
// Windows-style on Windows (regardless of s_ForceUnixPaths), UNIX-style
// elsewhere.
std::string cmSystemTools::ConvertToRunCommandPath(const char* path)
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  return cmSystemTools::ConvertToWindowsOutputPath(path);
#else
  return cmSystemTools::ConvertToUnixOutputPath(path);
#endif
}
// compute the relative path from here to there
// Both arguments must be full paths; violations are reported via Error() but
// the computation still proceeds and is delegated to KWSys.
std::string cmSystemTools::RelativePath(const char* local, const char* remote)
{
  if (!cmSystemTools::FileIsFullPath(local)) {
    cmSystemTools::Error("RelativePath must be passed a full path to local: ",
                         local);
  }
  if (!cmSystemTools::FileIsFullPath(remote)) {
    cmSystemTools::Error("RelativePath must be passed a full path to remote: ",
                         remote);
  }
  return cmsys::SystemTools::RelativePath(local, remote);
}
// Join 'dir' and a relative 'file', collapsing leading ".." components of
// 'file' against trailing components of 'dir'.  An absolute 'file' is
// returned unchanged, as is 'dir' when 'file' has no components.
std::string cmSystemTools::CollapseCombinedPath(std::string const& dir,
                                                std::string const& file)
{
  if (dir.empty() || dir == ".") {
    return file;
  }

  std::vector<std::string> dirParts;
  std::vector<std::string> fileParts;
  cmSystemTools::SplitPath(dir, dirParts);
  cmSystemTools::SplitPath(file, fileParts);

  if (fileParts.empty()) {
    return dir;
  }
  if (!fileParts[0].empty()) {
    // A non-empty root component means 'file' is not relative.
    return file;
  }

  // Cancel each leading ".." against the last directory component, keeping
  // at least the root component of 'dir'.
  std::vector<std::string>::iterator rest = fileParts.begin() + 1;
  while (rest != fileParts.end() && *rest == ".." && dirParts.size() > 1) {
    dirParts.pop_back();
    ++rest;
  }
  dirParts.insert(dirParts.end(), rest, fileParts.end());
  return cmSystemTools::JoinPath(dirParts);
}
#ifdef CMAKE_BUILD_WITH_CMAKE
// Remove 'value' from the environment.  Where unsetenv() is unavailable,
// fall back to setting "NAME=" (empty value) via PutEnv.
bool cmSystemTools::UnsetEnv(const char* value)
{
#if !defined(HAVE_UNSETENV)
  std::string var = value;
  var += "=";
  return cmSystemTools::PutEnv(var.c_str());
#else
  unsetenv(value);
  return true;
#endif
}
// Snapshot the current process environment as a vector of "NAME=value"
// strings, in the order they appear in 'environ'.
std::vector<std::string> cmSystemTools::GetEnvironmentVariables()
{
  std::vector<std::string> env;
  for (int i = 0; environ[i]; ++i) {
    env.push_back(environ[i]);
  }
  return env;
}
// Apply each "NAME=value" entry in 'env' to the current process environment.
void cmSystemTools::AppendEnv(std::vector<std::string> const& env)
{
  std::vector<std::string>::const_iterator it = env.begin();
  const std::vector<std::string>::const_iterator end = env.end();
  while (it != end) {
    cmSystemTools::PutEnv(*it);
    ++it;
  }
}
// RAII: capture the current environment; the destructor restores it.
cmSystemTools::SaveRestoreEnvironment::SaveRestoreEnvironment()
{
  this->Env = cmSystemTools::GetEnvironmentVariables();
}
// Restore the environment captured at construction: unset every variable
// currently set, then re-apply the saved "NAME=value" entries.
cmSystemTools::SaveRestoreEnvironment::~SaveRestoreEnvironment()
{
  // First clear everything in the current environment:
  std::vector<std::string> currentEnv = GetEnvironmentVariables();
  for (std::vector<std::string>::const_iterator eit = currentEnv.begin();
       eit != currentEnv.end(); ++eit) {
    // Extract the variable name from "NAME=value".
    std::string var(*eit);

    std::string::size_type pos = var.find('=');
    if (pos != std::string::npos) {
      var = var.substr(0, pos);
    }

    cmSystemTools::UnsetEnv(var.c_str());
  }

  // Then put back each entry from the original environment:
  cmSystemTools::AppendEnv(this->Env);
}
#endif
// Configure the environment so Visual Studio build tools send their output
// to the console where CMake can capture it.  No-op off Windows.
void cmSystemTools::EnableVSConsoleOutput()
{
#ifdef _WIN32
  // Visual Studio 8 2005 (devenv.exe or VCExpress.exe) will not
  // display output to the console unless this environment variable is
  // set.  We need it to capture the output of these build tools.
  // Note for future work that one could pass "/out \\.\pipe\NAME" to
  // either of these executables where NAME is created with
  // CreateNamedPipe.  This would bypass the internal buffering of the
  // output and allow it to be captured on the fly.
  cmSystemTools::PutEnv("vsconsoleoutput=1");

#ifdef CMAKE_BUILD_WITH_CMAKE
  // VS sets an environment variable to tell MS tools like "cl" to report
  // output through a backdoor pipe instead of stdout/stderr.  Unset the
  // environment variable to close this backdoor for any path of process
  // invocations that passes through CMake so we can capture the output.
  cmSystemTools::UnsetEnv("VS_UNICODE_OUTPUT");
#endif
#endif
}
// Return true if 'path' is a full path ending in ".framework".
bool cmSystemTools::IsPathToFramework(const char* path)
{
  return (cmSystemTools::FileIsFullPath(path) &&
          cmHasLiteralSuffix(path, ".framework"));
}
// Create an archive at outFileName containing 'files' (paths are made
// relative to the current working directory when given as full paths).
// compressType selects gzip/bzip2/xz/none; 'format' selects the libarchive
// format name (default "paxr"); 'mtime' optionally overrides entry mtimes.
// Returns true on success; errors are reported via cmSystemTools::Error.
// Unavailable (returns false) in bootstrap builds.
bool cmSystemTools::CreateTar(const char* outFileName,
                              const std::vector<std::string>& files,
                              cmTarCompression compressType, bool verbose,
                              std::string const& mtime,
                              std::string const& format)
{
#if defined(CMAKE_BUILD_WITH_CMAKE)
  std::string cwd = cmSystemTools::GetCurrentWorkingDirectory();
  cmsys::ofstream fout(outFileName, std::ios::out | std::ios::binary);
  if (!fout) {
    std::string e = "Cannot open output file \"";
    e += outFileName;
    e += "\": ";
    e += cmSystemTools::GetLastSystemError();
    cmSystemTools::Error(e.c_str());
    return false;
  }
  cmArchiveWrite::Compress compress = cmArchiveWrite::CompressNone;
  switch (compressType) {
    case TarCompressGZip:
      compress = cmArchiveWrite::CompressGZip;
      break;
    case TarCompressBZip2:
      compress = cmArchiveWrite::CompressBZip2;
      break;
    case TarCompressXZ:
      compress = cmArchiveWrite::CompressXZ;
      break;
    case TarCompressNone:
      compress = cmArchiveWrite::CompressNone;
      break;
  }

  cmArchiveWrite a(fout, compress, format.empty() ? "paxr" : format);

  a.SetMTime(mtime);
  a.SetVerbose(verbose);
  for (std::vector<std::string>::const_iterator i = files.begin();
       i != files.end(); ++i) {
    std::string path = *i;
    if (cmSystemTools::FileIsFullPath(path.c_str())) {
      // Get the relative path to the file.
      path = cmSystemTools::RelativePath(cwd.c_str(), path.c_str());
    }
    if (!a.Add(path)) {
      break;
    }
  }
  if (!a) {
    cmSystemTools::Error(a.GetError().c_str());
    return false;
  }
  return true;
#else
  // Fix: silence *all* parameters in the bootstrap build; compressType,
  // mtime, and format were previously left unreferenced, producing
  // unused-parameter warnings.
  (void)outFileName;
  (void)files;
  (void)compressType;
  (void)verbose;
  (void)mtime;
  (void)format;
  return false;
#endif
}
#if defined(CMAKE_BUILD_WITH_CMAKE)
namespace {
#define BSDTAR_FILESIZE_PRINTF "%lu"
#define BSDTAR_FILESIZE_TYPE unsigned long
// Print one archive entry to 'out' in "ls -l"-like verbose form:
// mode, link count, owner, group, size (or device numbers), mtime, name,
// and link target.  Column widths start from guesses and only grow
// (static state), since entries are streamed rather than pre-scanned.
void list_item_verbose(FILE* out, struct archive_entry* entry)
{
  char tmp[100];
  size_t w;
  const char* p;
  const char* fmt;
  time_t tim;
  static time_t now;
  size_t u_width = 6;
  size_t gs_width = 13;

  /*
   * We avoid collecting the entire list in memory at once by
   * listing things as we see them.  However, that also means we can't
   * just pre-compute the field widths.  Instead, we start with guesses
   * and just widen them as necessary.  These numbers are completely
   * arbitrary.
   */
  if (!now) {
    time(&now);
  }
  fprintf(out, "%s %d ", archive_entry_strmode(entry),
          archive_entry_nlink(entry));

  /* Use uname if it's present, else uid. */
  p = archive_entry_uname(entry);
  if ((p == CM_NULLPTR) || (*p == '\0')) {
    sprintf(tmp, "%lu ", (unsigned long)archive_entry_uid(entry));
    p = tmp;
  }
  w = strlen(p);
  if (w > u_width) {
    u_width = w;
  }
  fprintf(out, "%-*s ", (int)u_width, p);
  /* Use gname if it's present, else gid. */
  p = archive_entry_gname(entry);
  if (p != CM_NULLPTR && p[0] != '\0') {
    fprintf(out, "%s", p);
    w = strlen(p);
  } else {
    sprintf(tmp, "%lu", (unsigned long)archive_entry_gid(entry));
    w = strlen(tmp);
    fprintf(out, "%s", tmp);
  }

  /*
   * Print device number or file size, right-aligned so as to make
   * total width of group and devnum/filesize fields be gs_width.
   * If gs_width is too small, grow it.
   */
  if (archive_entry_filetype(entry) == AE_IFCHR ||
      archive_entry_filetype(entry) == AE_IFBLK) {
    sprintf(tmp, "%lu,%lu", (unsigned long)archive_entry_rdevmajor(entry),
            (unsigned long)archive_entry_rdevminor(entry));
  } else {
    /*
     * Note the use of platform-dependent macros to format
     * the filesize here.  We need the format string and the
     * corresponding type for the cast.
     */
    sprintf(tmp, BSDTAR_FILESIZE_PRINTF,
            (BSDTAR_FILESIZE_TYPE)archive_entry_size(entry));
  }
  if (w + strlen(tmp) >= gs_width) {
    gs_width = w + strlen(tmp) + 1;
  }
  fprintf(out, "%*s", (int)(gs_width - w), tmp);

  /* Format the time using 'ls -l' conventions. */
  tim = archive_entry_mtime(entry);
#define HALF_YEAR ((time_t)365 * 86400 / 2)
#if defined(_WIN32) && !defined(__CYGWIN__)
/* Windows' strftime function does not support %e format. */
#define DAY_FMT "%d"
#else
#define DAY_FMT "%e" /* Day number without leading zeros */
#endif
  // Old or future timestamps show the year instead of the time of day.
  if (tim < now - HALF_YEAR || tim > now + HALF_YEAR) {
    fmt = DAY_FMT " %b  %Y";
  } else {
    fmt = DAY_FMT " %b %H:%M";
  }
  strftime(tmp, sizeof(tmp), fmt, localtime(&tim));
  fprintf(out, " %s ", tmp);
  fprintf(out, "%s", cm_archive_entry_pathname(entry).c_str());

  /* Extra information for links. */
  if (archive_entry_hardlink(entry)) /* Hard link */
  {
    fprintf(out, " link to %s", archive_entry_hardlink(entry));
  } else if (archive_entry_symlink(entry)) /* Symbolic link */
  {
    fprintf(out, " -> %s", archive_entry_symlink(entry));
  }
  fflush(out);
}
// Stream every data block of the current entry from read-archive 'ar' to
// write-archive 'aw'.  Returns ARCHIVE_OK at end of the entry's data, or the
// first libarchive error code encountered.
long copy_data(struct archive* ar, struct archive* aw)
{
  long r;
  const void* buff;
  size_t size;
#if defined(ARCHIVE_VERSION_NUMBER) && ARCHIVE_VERSION_NUMBER >= 3000000
  __LA_INT64_T offset;
#else
  off_t offset;
#endif

  for (;;) {
    // Zero-copy read: libarchive hands back its own buffer and the block's
    // offset within the entry (preserves sparse-file holes on write).
    r = archive_read_data_block(ar, &buff, &size, &offset);
    if (r == ARCHIVE_EOF) {
      return (ARCHIVE_OK);
    }
    if (r != ARCHIVE_OK) {
      return (r);
    }
    r = archive_write_data_block(aw, buff, size, offset);
    if (r != ARCHIVE_OK) {
      cmSystemTools::Message("archive_write_data_block()",
                             archive_error_string(aw));
      return (r);
    }
  }
#if !defined(__clang__) && !defined(__HP_aCC)
  return r; /* this should not happen but it quiets some compilers */
#endif
}
// Open the archive at outFileName and either extract its entries to disk
// (extract=true) or list them (extract=false); 'verbose' selects ls-style
// listing / per-file "x" progress.  Returns true when the whole archive was
// processed without error.
bool extract_tar(const char* outFileName, bool verbose, bool extract)
{
  // Pin a sane locale for libarchive's pathname handling (restored by RAII).
  cmLocaleRAII localeRAII;
  static_cast<void>(localeRAII);
  struct archive* a = archive_read_new();
  struct archive* ext = archive_write_disk_new();
  archive_read_support_filter_all(a);
  archive_read_support_format_all(a);
  struct archive_entry* entry;
  int r = cm_archive_read_open_file(a, outFileName, 10240);
  if (r) {
    cmSystemTools::Error("Problem with archive_read_open_file(): ",
                         archive_error_string(a));
    archive_write_free(ext);
    archive_read_close(a);
    // Fix: archive_read_close() does not release the archive object; the
    // read archive was leaked on this early-error path (the normal path
    // below does call archive_read_free()).
    archive_read_free(a);
    return false;
  }
  for (;;) {
    r = archive_read_next_header(a, &entry);
    if (r == ARCHIVE_EOF) {
      break;
    }
    if (r != ARCHIVE_OK) {
      cmSystemTools::Error("Problem with archive_read_next_header(): ",
                           archive_error_string(a));
      break;
    }
    // Report the entry: verbose extract prints "x <name>", verbose list
    // prints an ls-style line, non-verbose list prints just the name.
    if (verbose) {
      if (extract) {
        cmSystemTools::Stdout("x ");
        cmSystemTools::Stdout(cm_archive_entry_pathname(entry).c_str());
      } else {
        list_item_verbose(stdout, entry);
      }
      cmSystemTools::Stdout("\n");
    } else if (!extract) {
      cmSystemTools::Stdout(cm_archive_entry_pathname(entry).c_str());
      cmSystemTools::Stdout("\n");
    }
    if (extract) {
      // Restore file times on extracted entries.
      r = archive_write_disk_set_options(ext, ARCHIVE_EXTRACT_TIME);
      if (r != ARCHIVE_OK) {
        cmSystemTools::Error("Problem with archive_write_disk_set_options(): ",
                             archive_error_string(ext));
        break;
      }

      r = archive_write_header(ext, entry);
      if (r == ARCHIVE_OK) {
        copy_data(a, ext);
        r = archive_write_finish_entry(ext);
        if (r != ARCHIVE_OK) {
          cmSystemTools::Error("Problem with archive_write_finish_entry(): ",
                               archive_error_string(ext));
          break;
        }
      }
#ifdef _WIN32
      else if (const char* linktext = archive_entry_symlink(entry)) {
        // Symlinks may not be creatable on Windows; warn and continue.
        std::cerr << "cmake -E tar: warning: skipping symbolic link \""
                  << cm_archive_entry_pathname(entry) << "\" -> \"" << linktext
                  << "\"." << std::endl;
      }
#endif
      else {
        cmSystemTools::Error("Problem with archive_write_header(): ",
                             archive_error_string(ext));
        cmSystemTools::Error("Current file: ",
                             cm_archive_entry_pathname(entry).c_str());
        break;
      }
    }
  }
  archive_write_free(ext);
  archive_read_close(a);
  archive_read_free(a);
  // Success means we either hit EOF or the last operation was clean.
  return r == ARCHIVE_EOF || r == ARCHIVE_OK;
}
}
#endif
// Extract the archive at outFileName into the current directory.
// Unavailable (returns false) in bootstrap builds.
bool cmSystemTools::ExtractTar(const char* outFileName, bool verbose)
{
#if defined(CMAKE_BUILD_WITH_CMAKE)
  return extract_tar(outFileName, verbose, true);
#else
  (void)outFileName;
  (void)verbose;
  return false;
#endif
}
// List the contents of the archive at outFileName without extracting.
// Unavailable (returns false) in bootstrap builds.
bool cmSystemTools::ListTar(const char* outFileName, bool verbose)
{
#if defined(CMAKE_BUILD_WITH_CMAKE)
  return extract_tar(outFileName, verbose, false);
#else
  (void)outFileName;
  (void)verbose;
  return false;
#endif
}
// Wait for the next complete line from either pipe of 'process'.
// 'out'/'err' are caller-owned carry-over buffers of undecoded bytes between
// calls.  Returns cmsysProcess_Pipe_STDOUT or _STDERR with the line (without
// its newline, CRLF normalized) in 'line', _Timeout if 'timeout' expires, or
// _None once both pipes are closed and the buffers are drained.
int cmSystemTools::WaitForLine(cmsysProcess* process, std::string& line,
                               double timeout, std::vector<char>& out,
                               std::vector<char>& err)
{
  line = "";
  std::vector<char>::iterator outiter = out.begin();
  std::vector<char>::iterator erriter = err.begin();
  cmProcessOutput processOutput;
  std::string strdata;
  while (true) {
    // Check for a newline in stdout.
    for (; outiter != out.end(); ++outiter) {
      // A trailing '\r' may be half of a CRLF; wait for more data.
      if ((*outiter == '\r') && ((outiter + 1) == out.end())) {
        break;
      }
      if (*outiter == '\n' || *outiter == '\0') {
        std::vector<char>::size_type length = outiter - out.begin();
        // Drop the '\r' of a CRLF ending.
        if (length > 1 && *(outiter - 1) == '\r') {
          --length;
        }
        if (length > 0) {
          line.append(&out[0], length);
        }
        out.erase(out.begin(), outiter + 1);
        return cmsysProcess_Pipe_STDOUT;
      }
    }

    // Check for a newline in stderr.
    for (; erriter != err.end(); ++erriter) {
      if ((*erriter == '\r') && ((erriter + 1) == err.end())) {
        break;
      }
      if (*erriter == '\n' || *erriter == '\0') {
        std::vector<char>::size_type length = erriter - err.begin();
        if (length > 1 && *(erriter - 1) == '\r') {
          --length;
        }
        if (length > 0) {
          line.append(&err[0], length);
        }
        err.erase(err.begin(), erriter + 1);
        return cmsysProcess_Pipe_STDERR;
      }
    }

    // No newlines found.  Wait for more data from the process.
    int length;
    char* data;
    int pipe = cmsysProcess_WaitForData(process, &data, &length, &timeout);
    if (pipe == cmsysProcess_Pipe_Timeout) {
      // Timeout has been exceeded.
      return pipe;
    }
    if (pipe == cmsysProcess_Pipe_STDOUT) {
      processOutput.DecodeText(data, length, strdata, 1);
      // Append to the stdout buffer.
      std::vector<char>::size_type size = out.size();
      out.insert(out.end(), strdata.begin(), strdata.end());
      // Resume scanning where the old data ended (insert invalidated the
      // iterator).
      outiter = out.begin() + size;
    } else if (pipe == cmsysProcess_Pipe_STDERR) {
      processOutput.DecodeText(data, length, strdata, 2);
      // Append to the stderr buffer.
      std::vector<char>::size_type size = err.size();
      err.insert(err.end(), strdata.begin(), strdata.end());
      erriter = err.begin() + size;
    } else if (pipe == cmsysProcess_Pipe_None) {
      // Both stdout and stderr pipes have broken.  Return leftover data.
      processOutput.DecodeText(std::string(), strdata, 1);
      if (!strdata.empty()) {
        std::vector<char>::size_type size = out.size();
        out.insert(out.end(), strdata.begin(), strdata.end());
        outiter = out.begin() + size;
      }
      processOutput.DecodeText(std::string(), strdata, 2);
      if (!strdata.empty()) {
        std::vector<char>::size_type size = err.size();
        err.insert(err.end(), strdata.begin(), strdata.end());
        erriter = err.begin() + size;
      }
      // Flush remaining buffered data as final (newline-less) lines,
      // stdout first.
      if (!out.empty()) {
        line.append(&out[0], outiter - out.begin());
        out.erase(out.begin(), out.end());
        return cmsysProcess_Pipe_STDOUT;
      }
      if (!err.empty()) {
        line.append(&err[0], erriter - err.begin());
        err.erase(err.begin(), err.end());
        return cmsysProcess_Pipe_STDERR;
      }
      return cmsysProcess_Pipe_None;
    }
  }
}
// On Windows, replace the inheritable stdout/stderr handles with
// non-inheritable duplicates so child processes do not inherit them.
// Skipped when attached to a console (output would stop appearing in the
// shell).  No-op elsewhere.
void cmSystemTools::DoNotInheritStdPipes()
{
#ifdef _WIN32
  // Check to see if we are attached to a console
  // if so, then do not stop the inherited pipes
  // or stdout and stderr will not show up in dos
  // shell windows
  CONSOLE_SCREEN_BUFFER_INFO hOutInfo;
  HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE);
  if (GetConsoleScreenBufferInfo(hOut, &hOutInfo)) {
    return;
  }
  {
    // DUPLICATE_CLOSE_SOURCE closes the original inheritable handle.
    HANDLE out = GetStdHandle(STD_OUTPUT_HANDLE);
    DuplicateHandle(GetCurrentProcess(), out, GetCurrentProcess(), &out, 0,
                    FALSE, DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE);
    SetStdHandle(STD_OUTPUT_HANDLE, out);
  }
  {
    HANDLE out = GetStdHandle(STD_ERROR_HANDLE);
    DuplicateHandle(GetCurrentProcess(), out, GetCurrentProcess(), &out, 0,
                    FALSE, DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE);
    SetStdHandle(STD_ERROR_HANDLE, out);
  }
#endif
}
// Copy the timestamps of fromFile onto toFile (Windows: creation, last
// access, last write via file handles; UNIX: atime/mtime via utime).
// Returns true on success.
bool cmSystemTools::CopyFileTime(const char* fromFile, const char* toFile)
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  // RAII handles; FILE_FLAG_BACKUP_SEMANTICS allows opening directories too.
  cmSystemToolsWindowsHandle hFrom = CreateFileW(
    SystemTools::ConvertToWindowsExtendedPath(fromFile).c_str(), GENERIC_READ,
    FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0);
  cmSystemToolsWindowsHandle hTo = CreateFileW(
    SystemTools::ConvertToWindowsExtendedPath(toFile).c_str(),
    FILE_WRITE_ATTRIBUTES, 0, 0, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0);
  if (!hFrom || !hTo) {
    return false;
  }
  FILETIME timeCreation;
  FILETIME timeLastAccess;
  FILETIME timeLastWrite;
  if (!GetFileTime(hFrom, &timeCreation, &timeLastAccess, &timeLastWrite)) {
    return false;
  }
  return SetFileTime(hTo, &timeCreation, &timeLastAccess, &timeLastWrite) != 0;
#else
  struct stat fromStat;
  if (stat(fromFile, &fromStat) < 0) {
    return false;
  }

  struct utimbuf buf;
  buf.actime = fromStat.st_atime;
  buf.modtime = fromStat.st_mtime;
  return utime(toFile, &buf) >= 0;
#endif
}
// Allocate an opaque file-time holder; release with FileTimeDelete.
cmSystemToolsFileTime* cmSystemTools::FileTimeNew()
{
  return new cmSystemToolsFileTime;
}
// Free a holder allocated by FileTimeNew.
void cmSystemTools::FileTimeDelete(cmSystemToolsFileTime* t)
{
  delete t;
}
// Read fname's timestamps into 't' (for later replay via FileTimeSet).
// Returns false if the file cannot be opened/stat'ed.
bool cmSystemTools::FileTimeGet(const char* fname, cmSystemToolsFileTime* t)
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  cmSystemToolsWindowsHandle h = CreateFileW(
    SystemTools::ConvertToWindowsExtendedPath(fname).c_str(), GENERIC_READ,
    FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0);
  if (!h) {
    return false;
  }
  if (!GetFileTime(h, &t->timeCreation, &t->timeLastAccess,
                   &t->timeLastWrite)) {
    return false;
  }
#else
  struct stat st;
  if (stat(fname, &st) < 0) {
    return false;
  }
  t->timeBuf.actime = st.st_atime;
  t->timeBuf.modtime = st.st_mtime;
#endif
  return true;
}
// Apply the timestamps stored in 't' (by FileTimeGet) to 'fname'.
// Returns false if the file cannot be opened or the update fails.
bool cmSystemTools::FileTimeSet(const char* fname, cmSystemToolsFileTime* t)
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  cmSystemToolsWindowsHandle h = CreateFileW(
    SystemTools::ConvertToWindowsExtendedPath(fname).c_str(),
    FILE_WRITE_ATTRIBUTES, 0, 0, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0);
  if (!h) {
    return false;
  }
  return SetFileTime(h, &t->timeCreation, &t->timeLastAccess,
                     &t->timeLastWrite) != 0;
#else
  return utime(fname, &t->timeBuf) >= 0;
#endif
}
#ifdef _WIN32
#ifndef CRYPT_SILENT
#define CRYPT_SILENT 0x40 /* Not defined by VS 6 version of header. */
#endif
// Fill 'data' with 'size' cryptographically random bytes using the
// Windows CryptoAPI.  Returns 1 on success, 0 on failure.
static int WinCryptRandom(void* data, size_t size)
{
  int result = 0;
  HCRYPTPROV hProvider = 0;
  // CRYPT_VERIFYCONTEXT: no persistent key container needed;
  // CRYPT_SILENT: never display UI.
  if (CryptAcquireContextW(&hProvider, 0, 0, PROV_RSA_FULL,
                           CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) {
    result = CryptGenRandom(hProvider, (DWORD)size, (BYTE*)data) ? 1 : 0;
    CryptReleaseContext(hProvider, 0);
  }
  return result;
}
#endif
// Produce a seed for pseudo-random number generation.  Prefers a real
// random source (CryptGenRandom on Windows, /dev/urandom elsewhere) and
// falls back to mixing the current time with the process id.
unsigned int cmSystemTools::RandomSeed()
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  unsigned int seed = 0;
  // Try using a real random source.
  if (WinCryptRandom(&seed, sizeof(seed))) {
    return seed;
  }
  // Fall back to the time and pid.
  FILETIME ft;
  GetSystemTimeAsFileTime(&ft);
  unsigned int t1 = static_cast<unsigned int>(ft.dwHighDateTime);
  unsigned int t2 = static_cast<unsigned int>(ft.dwLowDateTime);
  unsigned int pid = static_cast<unsigned int>(GetCurrentProcessId());
  return t1 ^ t2 ^ pid;
#else
  // Union lets us fill the integer seed via a byte-wise stream read.
  union
  {
    unsigned int integer;
    char bytes[sizeof(unsigned int)];
  } seed;
  // Try using a real random source.
  cmsys::ifstream fin;
  fin.rdbuf()->pubsetbuf(CM_NULLPTR, 0); // Unbuffered read.
  fin.open("/dev/urandom");
  if (fin.good() && fin.read(seed.bytes, sizeof(seed)) &&
      fin.gcount() == sizeof(seed)) {
    return seed.integer;
  }
  // Fall back to the time and pid.
  struct timeval t;
  gettimeofday(&t, CM_NULLPTR);
  unsigned int pid = static_cast<unsigned int>(getpid());
  unsigned int tv_sec = static_cast<unsigned int>(t.tv_sec);
  unsigned int tv_usec = static_cast<unsigned int>(t.tv_usec);
  // Since tv_usec never fills more than 11 bits we shift it to fill
  // in the slow-changing high-order bits of tv_sec.
  return tv_sec ^ (tv_usec << 21) ^ pid;
#endif
}
static std::string cmSystemToolsCMakeCommand;
static std::string cmSystemToolsCTestCommand;
static std::string cmSystemToolsCPackCommand;
static std::string cmSystemToolsCMakeCursesCommand;
static std::string cmSystemToolsCMakeGUICommand;
static std::string cmSystemToolsCMClDepsCommand;
static std::string cmSystemToolsCMakeRoot;
// Locate the cmake, ctest, cpack, ccmake, cmake-gui and cmcldeps
// executables and the CMake resource root relative to the running
// executable, storing the results in the file-static variables read by
// the Get*Command()/GetCMakeRoot() accessors.  Tools that do not exist
// next to the executable are recorded as empty strings.
void cmSystemTools::FindCMakeResources(const char* argv0)
{
  std::string exe_dir;
#if defined(_WIN32) && !defined(__CYGWIN__)
  (void)argv0; // ignore this on windows
  wchar_t modulepath[_MAX_PATH];
  // GetModuleFileNameW takes the buffer size in WCHARs, not bytes;
  // passing sizeof(modulepath) would claim twice the buffer's real
  // capacity and risk an overrun for long paths.
  ::GetModuleFileNameW(NULL, modulepath,
                       sizeof(modulepath) / sizeof(modulepath[0]));
  exe_dir =
    cmSystemTools::GetFilenamePath(cmsys::Encoding::ToNarrow(modulepath));
#elif defined(__APPLE__)
  (void)argv0; // ignore this on OS X
#define CM_EXE_PATH_LOCAL_SIZE 16384
  char exe_path_local[CM_EXE_PATH_LOCAL_SIZE];
#if defined(MAC_OS_X_VERSION_10_3) && !defined(MAC_OS_X_VERSION_10_4)
  unsigned long exe_path_size = CM_EXE_PATH_LOCAL_SIZE;
#else
  uint32_t exe_path_size = CM_EXE_PATH_LOCAL_SIZE;
#endif
#undef CM_EXE_PATH_LOCAL_SIZE
  char* exe_path = exe_path_local;
  // _NSGetExecutablePath returns <0 when the buffer is too small and
  // updates exe_path_size with the required size; retry on the heap.
  if (_NSGetExecutablePath(exe_path, &exe_path_size) < 0) {
    exe_path = (char*)malloc(exe_path_size);
    _NSGetExecutablePath(exe_path, &exe_path_size);
  }
  exe_dir =
    cmSystemTools::GetFilenamePath(cmSystemTools::GetRealPath(exe_path));
  if (exe_path != exe_path_local) {
    free(exe_path);
  }
  if (cmSystemTools::GetFilenameName(exe_dir) == "MacOS") {
    // The executable is inside an application bundle.
    // Look for ..<CMAKE_BIN_DIR> (install tree) and then fall back to
    // ../../../bin (build tree).
    exe_dir = cmSystemTools::GetFilenamePath(exe_dir);
    if (cmSystemTools::FileExists(exe_dir + CMAKE_BIN_DIR "/cmake")) {
      exe_dir += CMAKE_BIN_DIR;
    } else {
      exe_dir = cmSystemTools::GetFilenamePath(exe_dir);
      exe_dir = cmSystemTools::GetFilenamePath(exe_dir);
    }
  }
#else
  std::string errorMsg;
  std::string exe;
  if (cmSystemTools::FindProgramPath(argv0, exe, errorMsg)) {
    // remove symlinks
    exe = cmSystemTools::GetRealPath(exe);
    exe_dir = cmSystemTools::GetFilenamePath(exe);
  } else {
    // ???
  }
#endif
  exe_dir = cmSystemTools::GetActualCaseForPath(exe_dir);
  cmSystemToolsCMakeCommand = exe_dir;
  cmSystemToolsCMakeCommand += "/cmake";
  cmSystemToolsCMakeCommand += cmSystemTools::GetExecutableExtension();
#ifndef CMAKE_BUILD_WITH_CMAKE
  // The bootstrap cmake does not provide the other tools,
  // so use the directory where they are about to be built.
  exe_dir = CMAKE_BOOTSTRAP_BINARY_DIR "/bin";
#endif
  cmSystemToolsCTestCommand = exe_dir;
  cmSystemToolsCTestCommand += "/ctest";
  cmSystemToolsCTestCommand += cmSystemTools::GetExecutableExtension();
  cmSystemToolsCPackCommand = exe_dir;
  cmSystemToolsCPackCommand += "/cpack";
  cmSystemToolsCPackCommand += cmSystemTools::GetExecutableExtension();
  cmSystemToolsCMakeGUICommand = exe_dir;
  cmSystemToolsCMakeGUICommand += "/cmake-gui";
  cmSystemToolsCMakeGUICommand += cmSystemTools::GetExecutableExtension();
  if (!cmSystemTools::FileExists(cmSystemToolsCMakeGUICommand.c_str())) {
    cmSystemToolsCMakeGUICommand = "";
  }
  cmSystemToolsCMakeCursesCommand = exe_dir;
  cmSystemToolsCMakeCursesCommand += "/ccmake";
  cmSystemToolsCMakeCursesCommand += cmSystemTools::GetExecutableExtension();
  if (!cmSystemTools::FileExists(cmSystemToolsCMakeCursesCommand.c_str())) {
    cmSystemToolsCMakeCursesCommand = "";
  }
  cmSystemToolsCMClDepsCommand = exe_dir;
  cmSystemToolsCMClDepsCommand += "/cmcldeps";
  cmSystemToolsCMClDepsCommand += cmSystemTools::GetExecutableExtension();
  if (!cmSystemTools::FileExists(cmSystemToolsCMClDepsCommand.c_str())) {
    cmSystemToolsCMClDepsCommand = "";
  }
#ifdef CMAKE_BUILD_WITH_CMAKE
  // Install tree has
  // - "<prefix><CMAKE_BIN_DIR>/cmake"
  // - "<prefix><CMAKE_DATA_DIR>"
  if (cmHasSuffix(exe_dir, CMAKE_BIN_DIR)) {
    std::string const prefix =
      exe_dir.substr(0, exe_dir.size() - strlen(CMAKE_BIN_DIR));
    cmSystemToolsCMakeRoot = prefix + CMAKE_DATA_DIR;
  }
  if (cmSystemToolsCMakeRoot.empty() ||
      !cmSystemTools::FileExists(
        (cmSystemToolsCMakeRoot + "/Modules/CMake.cmake").c_str())) {
    // Build tree has "<build>/bin[/<config>]/cmake" and
    // "<build>/CMakeFiles/CMakeSourceDir.txt".
    std::string dir = cmSystemTools::GetFilenamePath(exe_dir);
    std::string src_dir_txt = dir + "/CMakeFiles/CMakeSourceDir.txt";
    cmsys::ifstream fin(src_dir_txt.c_str());
    std::string src_dir;
    if (fin && cmSystemTools::GetLineFromStream(fin, src_dir) &&
        cmSystemTools::FileIsDirectory(src_dir)) {
      cmSystemToolsCMakeRoot = src_dir;
    } else {
      // Try one level higher for multi-config ("<build>/bin/<config>").
      dir = cmSystemTools::GetFilenamePath(dir);
      src_dir_txt = dir + "/CMakeFiles/CMakeSourceDir.txt";
      cmsys::ifstream fin2(src_dir_txt.c_str());
      if (fin2 && cmSystemTools::GetLineFromStream(fin2, src_dir) &&
          cmSystemTools::FileIsDirectory(src_dir)) {
        cmSystemToolsCMakeRoot = src_dir;
      }
    }
  }
#else
  // Bootstrap build knows its source.
  cmSystemToolsCMakeRoot = CMAKE_BOOTSTRAP_SOURCE_DIR;
#endif
}
// Full path to the cmake executable, set by FindCMakeResources().
std::string const& cmSystemTools::GetCMakeCommand()
{
  return cmSystemToolsCMakeCommand;
}
// Full path to the ctest executable, set by FindCMakeResources().
std::string const& cmSystemTools::GetCTestCommand()
{
  return cmSystemToolsCTestCommand;
}
// Full path to the cpack executable, set by FindCMakeResources().
std::string const& cmSystemTools::GetCPackCommand()
{
  return cmSystemToolsCPackCommand;
}
// Full path to ccmake, or empty if it was not found next to cmake.
std::string const& cmSystemTools::GetCMakeCursesCommand()
{
  return cmSystemToolsCMakeCursesCommand;
}
// Full path to cmake-gui, or empty if it was not found next to cmake.
std::string const& cmSystemTools::GetCMakeGUICommand()
{
  return cmSystemToolsCMakeGUICommand;
}
// Full path to cmcldeps, or empty if it was not found next to cmake.
std::string const& cmSystemTools::GetCMClDepsCommand()
{
  return cmSystemToolsCMClDepsCommand;
}
// Path to the CMake resource root (Modules/ etc.), set by
// FindCMakeResources().
std::string const& cmSystemTools::GetCMakeRoot()
{
  return cmSystemToolsCMakeRoot;
}
// Print 'message' to stdout, optionally colored and optionally followed
// by a newline.  Color escapes are suppressed during dashboard builds,
// detected via environment variables.
void cmSystemTools::MakefileColorEcho(int color, const char* message,
                                      bool newline, bool enabled)
{
  // On some platforms (an MSYS prompt) cmsysTerminal may not be able
  // to determine whether the stream is displayed on a tty.  In this
  // case it assumes no unless we tell it otherwise.  Since we want
  // color messages to be displayed for users we will assume yes.
  // However, we can test for some situations when the answer is most
  // likely no.
  int assumeTTY = cmsysTerminal_Color_AssumeTTY;
  if (cmSystemTools::HasEnv("DART_TEST_FROM_DART") ||
      cmSystemTools::HasEnv("DASHBOARD_TEST_FROM_CTEST") ||
      cmSystemTools::HasEnv("CTEST_INTERACTIVE_DEBUG_MODE")) {
    // Avoid printing color escapes during dashboard builds.
    assumeTTY = 0;
  }
  if (enabled && color != cmsysTerminal_Color_Normal) {
    // Print with color.  Delay the newline until later so that
    // all color restore sequences appear before it.
    cmsysTerminal_cfprintf(color | assumeTTY, stdout, "%s", message);
  } else {
    // Color is disabled.  Print without color.
    fprintf(stdout, "%s", message);
  }
  if (newline) {
    fprintf(stdout, "\n");
  }
}
// Guess the soname of a shared library.  Prefer the real ELF SONAME
// when the ELF parser is available; otherwise, if 'fullPath' is a
// symlink whose target is the same base name extended (e.g.
// libfoo.so -> libfoo.so.1), report that target as the soname.
bool cmSystemTools::GuessLibrarySOName(std::string const& fullPath,
                                       std::string& soname)
{
// For ELF shared libraries use a real parser to get the correct
// soname.
#if defined(CMAKE_USE_ELF_PARSER)
  cmELF elf(fullPath.c_str());
  if (elf) {
    return elf.GetSOName(soname);
  }
#endif
  // If the file is not a symlink we have no guess for its soname.
  if (!cmSystemTools::FileIsSymlink(fullPath)) {
    return false;
  }
  if (!cmSystemTools::ReadSymlink(fullPath, soname)) {
    return false;
  }
  // If the symlink has a path component we have no guess for the soname.
  if (!cmSystemTools::GetFilenamePath(soname).empty()) {
    return false;
  }
  // If the symlink points at an extended version of the same name
  // assume it is the soname.
  std::string name = cmSystemTools::GetFilenameName(fullPath);
  return soname.length() > name.length() &&
    soname.compare(0, name.length(), name) == 0;
}
// Read the install name of a Mach-O shared library when the Mach-O
// parser is available; otherwise always fail.
bool cmSystemTools::GuessLibraryInstallName(std::string const& fullPath,
                                            std::string& soname)
{
#if defined(CMAKE_USE_MACH_PARSER)
  cmMachO macho(fullPath.c_str());
  if (macho) {
    return macho.GetInstallName(soname);
  }
#else
  (void)fullPath;
  (void)soname;
#endif
  return false;
}
#if defined(CMAKE_USE_ELF_PARSER)
// Locate 'want' as a whole entry of the colon-separated rpath list
// 'have'.  Returns the index where the matching entry starts, or
// std::string::npos when no exact entry matches.
std::string::size_type cmSystemToolsFindRPath(std::string const& have,
                                              std::string const& want)
{
  for (std::string::size_type search = 0; search < have.size();) {
    // Find the next textual occurrence of the wanted path.
    std::string::size_type const hit = have.find(want, search);
    if (hit == std::string::npos) {
      break;
    }
    // A real entry must be bounded by the list edges or ':' separators.
    bool const sepBefore = (hit == 0) || (have[hit - 1] == ':');
    std::string::size_type const after = hit + want.size();
    bool const sepAfter = (after >= have.size()) || (have[after] == ':');
    if (sepBefore && sepAfter) {
      return hit;
    }
    search = hit + 1;
  }
  // The desired rpath was not found as a whole entry.
  return std::string::npos;
}
#endif
#if defined(CMAKE_USE_ELF_PARSER)
// Record of one RPATH/RUNPATH string entry scheduled for rewriting.
struct cmSystemToolsRPathInfo
{
  unsigned long Position; // file offset of the string table entry
  unsigned long Size;     // allocated size of the entry in the file
  std::string Name;       // "RPATH" or "RUNPATH" (for error messages)
  std::string Value;      // replacement value to store
};
#endif
// Replace 'oldRPath' with 'newRPath' inside the RPATH/RUNPATH entries of
// the ELF file 'file', rewriting the strings in place (the new value
// must fit in the existing string table entry).  If the result would be
// empty the entry is removed entirely via RemoveRPath().  On error a
// message is stored in '*emsg' (if given); '*changed' (if given) reports
// whether the file was modified.  Without the ELF parser this is a stub
// that always fails.
bool cmSystemTools::ChangeRPath(std::string const& file,
                                std::string const& oldRPath,
                                std::string const& newRPath, std::string* emsg,
                                bool* changed)
{
#if defined(CMAKE_USE_ELF_PARSER)
  if (changed) {
    *changed = false;
  }
  int rp_count = 0;
  bool remove_rpath = true;
  cmSystemToolsRPathInfo rp[2];
  {
    // Parse the ELF binary.
    cmELF elf(file.c_str());
    // Get the RPATH and RUNPATH entries from it.
    int se_count = 0;
    cmELF::StringEntry const* se[2] = { CM_NULLPTR, CM_NULLPTR };
    const char* se_name[2] = { CM_NULLPTR, CM_NULLPTR };
    if (cmELF::StringEntry const* se_rpath = elf.GetRPath()) {
      se[se_count] = se_rpath;
      se_name[se_count] = "RPATH";
      ++se_count;
    }
    if (cmELF::StringEntry const* se_runpath = elf.GetRunPath()) {
      se[se_count] = se_runpath;
      se_name[se_count] = "RUNPATH";
      ++se_count;
    }
    if (se_count == 0) {
      if (newRPath.empty()) {
        // The new rpath is empty and there is no rpath anyway so it is
        // okay.
        return true;
      }
      if (emsg) {
        *emsg = "No valid ELF RPATH or RUNPATH entry exists in the file; ";
        *emsg += elf.GetErrorMessage();
      }
      return false;
    }
    for (int i = 0; i < se_count; ++i) {
      // If both RPATH and RUNPATH refer to the same string literal it
      // needs to be changed only once.
      if (rp_count && rp[0].Position == se[i]->Position) {
        continue;
      }
      // Make sure the current rpath contains the old rpath.
      std::string::size_type pos =
        cmSystemToolsFindRPath(se[i]->Value, oldRPath);
      if (pos == std::string::npos) {
        // If it contains the new rpath instead then it is okay.
        if (cmSystemToolsFindRPath(se[i]->Value, newRPath) !=
            std::string::npos) {
          remove_rpath = false;
          continue;
        }
        if (emsg) {
          std::ostringstream e;
          /* clang-format off */
          e << "The current " << se_name[i] << " is:\n"
            << "  " << se[i]->Value << "\n"
            << "which does not contain:\n"
            << "  " << oldRPath << "\n"
            << "as was expected.";
          /* clang-format on */
          *emsg = e.str();
        }
        return false;
      }
      // Store information about the entry in the file.
      rp[rp_count].Position = se[i]->Position;
      rp[rp_count].Size = se[i]->Size;
      rp[rp_count].Name = se_name[i];
      std::string::size_type prefix_len = pos;
      // If oldRPath was at the end of the file's RPath, and newRPath is empty,
      // we should remove the unnecessary ':' at the end.
      if (newRPath.empty() && pos > 0 && se[i]->Value[pos - 1] == ':' &&
          pos + oldRPath.length() == se[i]->Value.length()) {
        prefix_len--;
      }
      // Construct the new value which preserves the part of the path
      // not being changed.
      rp[rp_count].Value = se[i]->Value.substr(0, prefix_len);
      rp[rp_count].Value += newRPath;
      rp[rp_count].Value += se[i]->Value.substr(pos + oldRPath.length());
      if (!rp[rp_count].Value.empty()) {
        remove_rpath = false;
      }
      // Make sure there is enough room to store the new rpath and at
      // least one null terminator.
      if (rp[rp_count].Size < rp[rp_count].Value.length() + 1) {
        if (emsg) {
          *emsg = "The replacement path is too long for the ";
          *emsg += se_name[i];
          *emsg += " entry.";
        }
        return false;
      }
      // This entry is ready for update.
      ++rp_count;
    }
  }
  // If no runtime path needs to be changed, we are done.
  if (rp_count == 0) {
    return true;
  }
  // If the resulting rpath is empty, just remove the entire entry instead.
  if (remove_rpath) {
    return cmSystemTools::RemoveRPath(file, emsg, changed);
  }
  {
    // Open the file for update.
    cmsys::ofstream f(file.c_str(),
                      std::ios::in | std::ios::out | std::ios::binary);
    if (!f) {
      if (emsg) {
        *emsg = "Error opening file for update.";
      }
      return false;
    }
    // Store the new RPATH and RUNPATH strings.
    for (int i = 0; i < rp_count; ++i) {
      // Seek to the RPATH position.
      if (!f.seekp(rp[i].Position)) {
        if (emsg) {
          *emsg = "Error seeking to ";
          *emsg += rp[i].Name;
          *emsg += " position.";
        }
        return false;
      }
      // Write the new rpath.  Follow it with enough null terminators to
      // fill the string table entry.
      f << rp[i].Value;
      for (unsigned long j = rp[i].Value.length(); j < rp[i].Size; ++j) {
        f << '\0';
      }
      // Make sure it wrote correctly.
      if (!f) {
        if (emsg) {
          *emsg = "Error writing the new ";
          *emsg += rp[i].Name;
          *emsg += " string to the file.";
        }
        return false;
      }
    }
  }
  // Everything was updated successfully.
  if (changed) {
    *changed = true;
  }
  return true;
#else
  (void)file;
  (void)oldRPath;
  (void)newRPath;
  (void)emsg;
  (void)changed;
  return false;
#endif
}
// Compare two dotted decimal version strings component-by-component and
// report whether the relation selected by 'op' (a bitmask of OP_LESS /
// OP_EQUAL / OP_GREATER) holds.
bool cmSystemTools::VersionCompare(cmSystemTools::CompareOp op,
                                   const char* lhss, const char* rhss)
{
  const char* lp = lhss;
  const char* rp = rhss;
  // Keep consuming components while either side still has digits; a
  // missing component parses as 0, so "1.2" == "1.2.0".
  while (((*lp >= '0') && (*lp <= '9')) || ((*rp >= '0') && (*rp <= '9'))) {
    unsigned long const lnum = strtoul(lp, const_cast<char**>(&lp), 10);
    unsigned long const rnum = strtoul(rp, const_cast<char**>(&rp), 10);
    if (lnum != rnum) {
      // The first unequal component decides the ordering.
      return lnum < rnum ? (op & cmSystemTools::OP_LESS) != 0
                         : (op & cmSystemTools::OP_GREATER) != 0;
    }
    // Advance past the component separators.
    if (*rp == '.') {
      ++rp;
    }
    if (*lp == '.') {
      ++lp;
    }
  }
  // All components were equal.
  return (op & cmSystemTools::OP_EQUAL) != 0;
}
// Convenience wrapper: lhs == rhs under component-wise version compare.
bool cmSystemTools::VersionCompareEqual(std::string const& lhs,
                                        std::string const& rhs)
{
  return cmSystemTools::VersionCompare(cmSystemTools::OP_EQUAL, lhs.c_str(),
                                       rhs.c_str());
}
// Convenience wrapper: lhs > rhs under component-wise version compare.
bool cmSystemTools::VersionCompareGreater(std::string const& lhs,
                                          std::string const& rhs)
{
  return cmSystemTools::VersionCompare(cmSystemTools::OP_GREATER, lhs.c_str(),
                                       rhs.c_str());
}
// Convenience wrapper: lhs >= rhs under component-wise version compare.
bool cmSystemTools::VersionCompareGreaterEq(std::string const& lhs,
                                            std::string const& rhs)
{
  return cmSystemTools::VersionCompare(cmSystemTools::OP_GREATER_EQUAL,
                                       lhs.c_str(), rhs.c_str());
}
/* Return the index of the first position where 'lhs' and 'rhs' differ,
   or of the shared null terminator when they are equal.  The difference
   may lie on the null terminator of the shorter string. */
static size_t cm_strverscmp_find_first_difference_or_end(const char* lhs,
                                                         const char* rhs)
{
  size_t idx = 0;
  for (; lhs[idx] != 0 && lhs[idx] == rhs[idx]; ++idx) {
  }
  return idx;
}
/* Step back from index 'i' to the start of the maximal digit run that
   ends just before it; returns 'i' unchanged if s[i-1] is not a digit. */
static size_t cm_strverscmp_find_digits_begin(const char* s, size_t i)
{
  /* Cast through unsigned char: passing a plain (possibly negative)
     char to isdigit() is undefined behavior per the C standard. */
  while (i > 0 && isdigit(static_cast<unsigned char>(s[i - 1]))) {
    --i;
  }
  return i;
}
/* Step forward from index 'i' past the digit run starting there;
   returns 'i' unchanged if s[i] is not a digit. */
static size_t cm_strverscmp_find_digits_end(const char* s, size_t i)
{
  /* Cast through unsigned char: passing a plain (possibly negative)
     char to isdigit() is undefined behavior per the C standard. */
  while (isdigit(static_cast<unsigned char>(s[i]))) {
    ++i;
  }
  return i;
}
/* Count the leading zeros of the digit run beginning at index 'b'.
   A lone "0" counts as zero leading zeros: a zero only counts when it
   is followed by another digit. */
static size_t cm_strverscmp_count_leading_zeros(const char* s, size_t b)
{
  size_t i = b;
  /* Cast through unsigned char: passing a plain (possibly negative)
     char to isdigit() is undefined behavior per the C standard. */
  while (s[i] == '0' && isdigit(static_cast<unsigned char>(s[i + 1]))) {
    ++i;
  }
  return i - b;
}
/* Compare two strings treating embedded digit runs as numbers (GNU
   strverscmp-style ordering): more leading zeros order first (treated
   as a fractional part), otherwise longer digit runs order later.
   Returns <0, 0 or >0 like strcmp. */
static int cm_strverscmp(const char* lhs, const char* rhs)
{
  size_t const i = cm_strverscmp_find_first_difference_or_end(lhs, rhs);
  if (lhs[i] != rhs[i]) {
    /* The strings differ starting at 'i'.  Check for a digit sequence.
       isdigit() takes an unsigned char value; plain char may be negative. */
    size_t const b = cm_strverscmp_find_digits_begin(lhs, i);
    if (b != i ||
        (isdigit(static_cast<unsigned char>(lhs[i])) &&
         isdigit(static_cast<unsigned char>(rhs[i])))) {
      /* A digit sequence starts at 'b', preceding or at 'i'. */
      /* Look for leading zeros, implying a leading decimal point. */
      size_t const lhs_zeros = cm_strverscmp_count_leading_zeros(lhs, b);
      size_t const rhs_zeros = cm_strverscmp_count_leading_zeros(rhs, b);
      if (lhs_zeros != rhs_zeros) {
        /* The side with more leading zeros orders first. */
        return rhs_zeros > lhs_zeros ? 1 : -1;
      }
      if (lhs_zeros == 0) {
        /* No leading zeros; compare digit sequence lengths. */
        size_t const lhs_end = cm_strverscmp_find_digits_end(lhs, i);
        size_t const rhs_end = cm_strverscmp_find_digits_end(rhs, i);
        if (lhs_end != rhs_end) {
          /* The side with fewer digits orders first. */
          return lhs_end > rhs_end ? 1 : -1;
        }
      }
    }
  }
  /* Ordering was not decided by digit sequence lengths; compare bytes. */
  return lhs[i] - rhs[i];
}
// Public wrapper over cm_strverscmp: GNU strverscmp-style ordering for
// std::string arguments; returns <0, 0 or >0 like strcmp.
int cmSystemTools::strverscmp(std::string const& lhs, std::string const& rhs)
{
  return cm_strverscmp(lhs.c_str(), rhs.c_str());
}
// Remove all RPATH and RUNPATH entries from the ELF file 'file': the
// DYNAMIC table entries are erased (later entries shift up) and the
// string table bytes are zeroed.  On error a message is stored in
// '*emsg' (if given); '*removed' (if given) reports whether the file was
// modified.  Without the ELF parser this is a stub that always fails.
bool cmSystemTools::RemoveRPath(std::string const& file, std::string* emsg,
                                bool* removed)
{
#if defined(CMAKE_USE_ELF_PARSER)
  if (removed) {
    *removed = false;
  }
  int zeroCount = 0;
  unsigned long zeroPosition[2] = { 0, 0 };
  unsigned long zeroSize[2] = { 0, 0 };
  unsigned long bytesBegin = 0;
  std::vector<char> bytes;
  {
    // Parse the ELF binary.
    cmELF elf(file.c_str());
    // Get the RPATH and RUNPATH entries from it and sort them by index
    // in the dynamic section header.
    int se_count = 0;
    cmELF::StringEntry const* se[2] = { CM_NULLPTR, CM_NULLPTR };
    if (cmELF::StringEntry const* se_rpath = elf.GetRPath()) {
      se[se_count++] = se_rpath;
    }
    if (cmELF::StringEntry const* se_runpath = elf.GetRunPath()) {
      se[se_count++] = se_runpath;
    }
    if (se_count == 0) {
      // There is no RPATH or RUNPATH anyway.
      return true;
    }
    if (se_count == 2 && se[1]->IndexInSection < se[0]->IndexInSection) {
      std::swap(se[0], se[1]);
    }
    // Obtain a copy of the dynamic entries
    cmELF::DynamicEntryList dentries = elf.GetDynamicEntries();
    if (dentries.empty()) {
      // This should happen only for invalid ELF files where a DT_NULL
      // appears before the end of the table.
      if (emsg) {
        *emsg = "DYNAMIC section contains a DT_NULL before the end.";
      }
      return false;
    }
    // Save information about the string entries to be zeroed.
    zeroCount = se_count;
    for (int i = 0; i < se_count; ++i) {
      zeroPosition[i] = se[i]->Position;
      zeroSize[i] = se[i]->Size;
    }
    // Get size of one DYNAMIC entry
    unsigned long const sizeof_dentry =
      elf.GetDynamicEntryPosition(1) - elf.GetDynamicEntryPosition(0);
    // Adjust the entry list as necessary to remove the run path
    unsigned long entriesErased = 0;
    for (cmELF::DynamicEntryList::iterator it = dentries.begin();
         it != dentries.end();) {
      if (it->first == cmELF::TagRPath || it->first == cmELF::TagRunPath) {
        it = dentries.erase(it);
        entriesErased++;
        continue;
      }
      if (cmELF::TagMipsRldMapRel != 0 &&
          it->first == cmELF::TagMipsRldMapRel) {
        // Background: debuggers need to know the "linker map" which contains
        // the addresses each dynamic object is loaded at. Most arches use
        // the DT_DEBUG tag which the dynamic linker writes to (directly) and
        // contain the location of the linker map, however on MIPS the
        // .dynamic section is always read-only so this is not possible. MIPS
        // objects instead contain a DT_MIPS_RLD_MAP tag which contains the
        // address where the dynamic linker will write to (an indirect
        // version of DT_DEBUG). Since this doesn't work when using PIE, a
        // relative equivalent was created - DT_MIPS_RLD_MAP_REL. Since this
        // version contains a relative offset, moving it changes the
        // calculated address. This may cause the dynamic linker to write
        // into memory it should not be changing.
        //
        // To fix this, we adjust the value of DT_MIPS_RLD_MAP_REL here. If
        // we move it up by n bytes, we add n bytes to the value of this tag.
        it->second += entriesErased * sizeof_dentry;
      }
      it++;
    }
    // Encode new entries list
    bytes = elf.EncodeDynamicEntries(dentries);
    bytesBegin = elf.GetDynamicEntryPosition(0);
  }
  // Open the file for update.
  cmsys::ofstream f(file.c_str(),
                    std::ios::in | std::ios::out | std::ios::binary);
  if (!f) {
    if (emsg) {
      *emsg = "Error opening file for update.";
    }
    return false;
  }
  // Write the new DYNAMIC table header.
  if (!f.seekp(bytesBegin)) {
    if (emsg) {
      *emsg = "Error seeking to DYNAMIC table header for RPATH.";
    }
    return false;
  }
  if (!f.write(&bytes[0], bytes.size())) {
    if (emsg) {
      *emsg = "Error replacing DYNAMIC table header.";
    }
    return false;
  }
  // Fill the RPATH and RUNPATH strings with zero bytes.
  for (int i = 0; i < zeroCount; ++i) {
    if (!f.seekp(zeroPosition[i])) {
      if (emsg) {
        *emsg = "Error seeking to RPATH position.";
      }
      return false;
    }
    for (unsigned long j = 0; j < zeroSize[i]; ++j) {
      f << '\0';
    }
    if (!f) {
      if (emsg) {
        *emsg = "Error writing the empty rpath string to the file.";
      }
      return false;
    }
  }
  // Everything was updated successfully.
  if (removed) {
    *removed = true;
  }
  return true;
#else
  (void)file;
  (void)emsg;
  (void)removed;
  return false;
#endif
}
// Return true if the ELF file's rpath already matches 'newRPath':
// either newRPath is empty and no RPATH/RUNPATH entry exists, or an
// existing entry contains newRPath as one of its ':'-separated
// components.  Without the ELF parser this always returns false.
bool cmSystemTools::CheckRPath(std::string const& file,
                               std::string const& newRPath)
{
#if defined(CMAKE_USE_ELF_PARSER)
  // Parse the ELF binary.
  cmELF elf(file.c_str());
  // Get the RPATH or RUNPATH entry from it.
  cmELF::StringEntry const* se = elf.GetRPath();
  if (!se) {
    se = elf.GetRunPath();
  }
  // Make sure the current rpath contains the new rpath.
  if (newRPath.empty()) {
    if (!se) {
      return true;
    }
  } else {
    if (se &&
        cmSystemToolsFindRPath(se->Value, newRPath) != std::string::npos) {
      return true;
    }
  }
  return false;
#else
  (void)file;
  (void)newRPath;
  return false;
#endif
}
// Remove a directory tree, retrying up to 10 times with a Delay(100)
// pause between attempts (presumably milliseconds — see Delay()).
void-free retry loop exists because Windows can hold transient locks.
bool cmSystemTools::RepeatedRemoveDirectory(const char* dir)
{
  // Windows sometimes locks files temporarily so try a few times.
  for (int i = 0; i < 10; ++i) {
    if (cmSystemTools::RemoveADirectory(dir)) {
      return true;
    }
    cmSystemTools::Delay(100);
  }
  return false;
}
// Split 'str' on runs of any characters from 'sep'.  Separator runs at
// the start/end produce no empty tokens; when no non-separator
// character exists at all, a single empty token is returned.
std::vector<std::string> cmSystemTools::tokenize(const std::string& str,
                                                 const std::string& sep)
{
  std::vector<std::string> result;
  std::string::size_type scan = 0;
  for (;;) {
    // Skip leading separators to find the start of the next token.
    std::string::size_type const start = str.find_first_not_of(sep, scan);
    if (start == std::string::npos) {
      break; // no more tokens
    }
    scan = str.find_first_of(sep, start);
    if (scan == std::string::npos) {
      // Last token runs to the end of the string.
      result.push_back(str.substr(start));
      break;
    }
    result.push_back(str.substr(start, scan - start));
  }
  if (result.empty()) {
    result.push_back("");
  }
  return result;
}
// Parse 'str' as a base-10 long into '*value'.  Succeeds only when the
// entire string was consumed, at least one digit was read, and strtol
// reported no range error.
bool cmSystemTools::StringToLong(const char* str, long* value)
{
  errno = 0;
  char* stop;
  *value = strtol(str, &stop, 10);
  return stop != str && *stop == '\0' && errno == 0;
}
// Parse 'str' as a base-10 unsigned long into '*value'.  Succeeds only
// when the entire string was consumed, at least one digit was read, and
// strtoul reported no range error.
// NOTE(review): like strtoul itself, a leading '-' is accepted and the
// value wraps — confirm callers never pass negative strings.
bool cmSystemTools::StringToULong(const char* str, unsigned long* value)
{
  errno = 0;
  char* stop;
  *value = strtoul(str, &stop, 10);
  return stop != str && *stop == '\0' && errno == 0;
}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <iostream>
#include "PositionMap.h"
// Dump a position-mapping file: for each position entry print
// "<class>.<method><file>:<line> => <parent>" to stdout.  The mapping
// file format and read_map() come from PositionMap.h.
int main(int argc, char** argv) {
  if (argc < 2) {
    std::cerr << "Usage: linemapdump mapping_file\n";
    // Hard failure (abort, not exit) on bad usage.
    abort();
  }
  auto map = read_map(argv[1]);
  for (size_t i = 0; i < map->positions_size; ++i) {
    auto pi = map->positions[i];
    std::cout << map->string_pool[pi.class_id] << "."
              << map->string_pool[pi.method_id] << map->string_pool[pi.file_id]
              << ":" << pi.line << " => " << pi.parent << std::endl;
  }
}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "protocol.h"
#include "util.h"
#include "netbase.h"
#include "main.h"
#ifndef WIN32
# include <arpa/inet.h>
#endif
// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ascii, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
// Public testnet message start
static unsigned char pchMessageStartTest[4] = { 0xfc, 0xc1, 0xb7, 0xdc };
// Fitcoin message start (same as Litecoin's)
static unsigned char pchMessageStartLitecoin[4] = { 0xfb, 0xc0, 0xb6, 0xdb };
// Copy the 4-byte network magic into pchMessageStart: the testnet magic
// when fTestNet is set, otherwise the Litecoin-style magic.  The
// fPersistent parameter is not used by this implementation.
void GetMessageStart(unsigned char pchMessageStart[], bool fPersistent)
{
    if (fTestNet)
        memcpy(pchMessageStart, pchMessageStartTest, sizeof(pchMessageStartTest));
    else
        memcpy(pchMessageStart, pchMessageStartLitecoin, sizeof(pchMessageStartLitecoin));
}
// Inventory type names indexed by CInv::type; index 0 is a placeholder
// for the invalid/error type (see CInv::IsKnownType).
static const char* ppszTypeName[] =
{
    "ERROR",
    "tx",
    "block",
    "filtered block"
};
// Default-construct a header with the current network magic and no
// command.  pchCommand[1] is set to a non-zero byte after a zero byte,
// which presumably makes a default header fail IsValid()'s
// "all zeros after the first zero" check deliberately — confirm against
// upstream intent.
CMessageHeader::CMessageHeader()
{
    GetMessageStart(pchMessageStart);
    memset(pchCommand, 0, sizeof(pchCommand));
    pchCommand[1] = 1;
    // -1 wraps to the maximum unsigned value, marking the size unknown.
    nMessageSize = -1;
    nChecksum = 0;
}
// Construct a header for the given command and payload size.  strncpy
// zero-pads short commands; a command of exactly COMMAND_SIZE bytes is
// stored without a null terminator (GetCommand handles both cases).
CMessageHeader::CMessageHeader(const char* pszCommand, unsigned int nMessageSizeIn)
{
    GetMessageStart(pchMessageStart);
    strncpy(pchCommand, pszCommand, COMMAND_SIZE);
    nMessageSize = nMessageSizeIn;
    nChecksum = 0;
}
// Return the command as a string; pchCommand may legitimately lack a
// null terminator when all COMMAND_SIZE bytes are used.
std::string CMessageHeader::GetCommand() const
{
    if (pchCommand[COMMAND_SIZE-1] == 0)
        return std::string(pchCommand, pchCommand + strlen(pchCommand));
    else
        return std::string(pchCommand, pchCommand + COMMAND_SIZE);
}
// Sanity-check a header: correct network magic, a well-formed command
// string (printable ASCII, zero-padded after the first zero), and a
// message size within MAX_SIZE.
bool CMessageHeader::IsValid() const
{
    // Check start string
    unsigned char pchMessageStartProtocol[4];
    GetMessageStart(pchMessageStartProtocol);
    if (memcmp(pchMessageStart, pchMessageStartProtocol, sizeof(pchMessageStart)) != 0)
        return false;
    // Check the command string for errors
    for (const char* p1 = pchCommand; p1 < pchCommand + COMMAND_SIZE; p1++)
    {
        if (*p1 == 0)
        {
            // Must be all zeros after the first zero
            for (; p1 < pchCommand + COMMAND_SIZE; p1++)
                if (*p1 != 0)
                    return false;
        }
        // Reject control characters and non-ASCII bytes.
        else if (*p1 < ' ' || *p1 > 0x7E)
            return false;
    }
    // Message size
    if (nMessageSize > MAX_SIZE)
    {
        printf("CMessageHeader::IsValid() : (%s, %u bytes) nMessageSize > MAX_SIZE\n", GetCommand().c_str(), nMessageSize);
        return false;
    }
    return true;
}
// Default-construct an address with default service/time fields.
CAddress::CAddress() : CService()
{
    Init();
}
// Construct from an endpoint and explicit service flags (the flags
// overwrite the NODE_NETWORK default set by Init()).
CAddress::CAddress(CService ipIn, uint64 nServicesIn) : CService(ipIn)
{
    Init();
    nServices = nServicesIn;
}
// Reset to defaults: NODE_NETWORK services, a fixed initial timestamp
// value (100000000), and no last-try time.
void CAddress::Init()
{
    nServices = NODE_NETWORK;
    nTime = 100000000;
    nLastTry = 0;
}
// Default inventory item: type 0 ("ERROR") with a zero hash.
CInv::CInv()
{
    type = 0;
    hash = 0;
}
// Construct from a numeric type and a hash (type is not validated here;
// see IsKnownType).
CInv::CInv(int typeIn, const uint256& hashIn)
{
    type = typeIn;
    hash = hashIn;
}
// Construct from a type name string; throws std::out_of_range for an
// unknown name.  The scan starts at index 1 so "ERROR" is rejected.
CInv::CInv(const std::string& strType, const uint256& hashIn)
{
    unsigned int i;
    for (i = 1; i < ARRAYLEN(ppszTypeName); i++)
    {
        if (strType == ppszTypeName[i])
        {
            type = i;
            break;
        }
    }
    if (i == ARRAYLEN(ppszTypeName))
        throw std::out_of_range(strprintf("CInv::CInv(string, uint256) : unknown type '%s'", strType.c_str()));
    hash = hashIn;
}
// Strict weak ordering for inventory items: order by type first, then
// by hash within the same type.
bool operator<(const CInv& a, const CInv& b)
{
    if (a.type != b.type)
        return a.type < b.type;
    return a.hash < b.hash;
}
// A type is known when it indexes a real entry of ppszTypeName
// (index 0, "ERROR", is excluded).
bool CInv::IsKnownType() const
{
    return (type >= 1 && type < (int)ARRAYLEN(ppszTypeName));
}
// Return the protocol name for this inventory type; throws
// std::out_of_range when the type is unknown.
const char* CInv::GetCommand() const
{
    if (!IsKnownType())
        throw std::out_of_range(strprintf("CInv::GetCommand() : type=%d unknown type", type));
    return ppszTypeName[type];
}
// Human-readable "<type-name> <hash>" form; may throw via GetCommand()
// for an unknown type.
std::string CInv::ToString() const
{
    return strprintf("%s %s", GetCommand(), hash.ToString().c_str());
}
// Debug helper: print this inventory item to stdout.
void CInv::print() const
{
    printf("CInv(%s)\n", ToString().c_str());
}
|
//===----------------------------------------------------------------------===//
// DuckDB
//
// duckdb/execution/operator/join/physical_delim_join.hpp
//
//
//===----------------------------------------------------------------------===//
#pragma once
#include "duckdb/common/types/chunk_collection.hpp"
#include "duckdb/execution/physical_sink.hpp"
namespace duckdb {
class PhysicalHashAggregate;
//! PhysicalDelimJoin represents a join where the LHS will be duplicate eliminated and pushed into a
//! PhysicalChunkCollectionScan in the RHS.
class PhysicalDelimJoin : public PhysicalSink {
public:
	PhysicalDelimJoin(vector<LogicalType> types, unique_ptr<PhysicalOperator> original_join,
	                  vector<PhysicalOperator *> delim_scans);
	//! The join executed after the LHS has been duplicate eliminated
	unique_ptr<PhysicalOperator> join;
	//! Hash aggregate presumably performing the duplicate elimination
	//! of the LHS — confirm against the constructor implementation
	unique_ptr<PhysicalHashAggregate> distinct;
	//! Scan operators on the RHS that consume the eliminated chunks
	vector<PhysicalOperator *> delim_scans;
public:
	// PhysicalSink interface
	unique_ptr<GlobalOperatorState> GetGlobalState(ClientContext &context) override;
	unique_ptr<LocalSinkState> GetLocalSinkState(ExecutionContext &context) override;
	void Sink(ExecutionContext &context, GlobalOperatorState &state, LocalSinkState &lstate, DataChunk &input) override;
	void Combine(ExecutionContext &context, GlobalOperatorState &state, LocalSinkState &lstate) override;
	void Finalize(Pipeline &pipeline, ClientContext &context, unique_ptr<GlobalOperatorState> state) override;
	// PhysicalOperator interface
	void GetChunkInternal(ExecutionContext &context, DataChunk &chunk, PhysicalOperatorState *state) override;
	unique_ptr<PhysicalOperatorState> GetOperatorState() override;
	string ParamsToString() const override;
};
} // namespace duckdb
|
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkImageReverseIterator_hxx
#define itkImageReverseIterator_hxx
#include "itkImageReverseIterator.h"
namespace itk
{
// Construct an iterator over 'region' of image 'ptr', delegating to the
// const reverse-region-iterator base.
template< typename TImage >
ImageReverseIterator< TImage >
::ImageReverseIterator(ImageType *ptr, const RegionType & region):
  ImageRegionReverseConstIterator< TImage >(ptr, region)
{}
// Convert from any ImageIteratorWithIndex over the same image type.
template< typename TImage >
ImageReverseIterator< TImage >
::ImageReverseIterator(const ImageIteratorWithIndex< TImage > & it):
  ImageRegionReverseConstIterator< TImage >(it)
{}
// Convert from the const reverse-region-iterator base type.
template< typename TImage >
ImageReverseIterator< TImage >
::ImageReverseIterator(const ImageRegionReverseConstIterator< TImage > & it):
  ImageRegionReverseConstIterator< TImage >(it)
{}
template< typename TImage >
ImageReverseIterator< TImage > &
ImageReverseIterator< TImage >
::operator=(const ImageRegionReverseConstIterator< TImage > & it)
{
this->ImageRegionReverseConstIterator< TImage >::operator=(it);
return *this;
}
} // end namespace itk
#endif
|
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file MemoryDB.cpp
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#include <libdevcore/Common.h>
#include "MemoryDB.h"
using namespace std;
using namespace dev;
namespace dev
{
// Log-channel names used by the logging framework; both the normal and the
// warning channel for the trie database report as "TDB".
const char* DBChannel::name() { return "TDB"; }
const char* DBWarn::name() { return "TDB"; }
std::unordered_map<h256, std::string> MemoryDB::get() const
{
#if DEV_GUARDED_DB
ReadGuard l(x_this);
#endif
std::unordered_map<h256, std::string> ret;
for (auto const& i: m_main)
if (!m_enforceRefs || i.second.second > 0)
ret.insert(make_pair(i.first, i.second.first));
return ret;
}
// Deep-copy assignment: clones the other DB's main and aux stores.
// Self-assignment is a no-op.
MemoryDB& MemoryDB::operator=(MemoryDB const& _c)
{
    if (this == &_c)
        return *this;
#if DEV_GUARDED_DB
    // Read-lock the source, write-lock ourselves.
    // NOTE(review): two threads assigning each DB to the other concurrently
    // would acquire these locks in opposite orders and could deadlock —
    // confirm callers never do cross-assignment.
    ReadGuard l(_c.x_this);
    WriteGuard l2(x_this);
#endif
    m_main = _c.m_main;
    m_aux = _c.m_aux;
    return *this;
}
// Fetch the value stored under _h, or an empty string when the key is absent
// or (under reference enforcement) its refcount has reached zero.
std::string MemoryDB::lookup(h256 const& _h) const
{
#if DEV_GUARDED_DB
    ReadGuard l(x_this);
#endif
    auto const entry = m_main.find(_h);
    if (entry == m_main.end())
        return std::string();
    if (m_enforceRefs && entry->second.second <= 0)
    {
        // A lookup on a zero-refcount node suggests trie bookkeeping is broken.
        cwarn << "Lookup required for value with refcount == 0. This is probably a critical trie issue" << _h;
        return std::string();
    }
    return entry->second.first;
}
// True iff _h is present and, when reference enforcement is on, still has a
// positive refcount.
bool MemoryDB::exists(h256 const& _h) const
{
#if DEV_GUARDED_DB
    ReadGuard l(x_this);
#endif
    auto const entry = m_main.find(_h);
    return entry != m_main.end()
        && (!m_enforceRefs || entry->second.second > 0);
}
// Store _v under _h. A fresh key starts with refcount 1; re-inserting an
// existing key overwrites the value and bumps its refcount.
void MemoryDB::insert(h256 const& _h, bytesConstRef _v)
{
#if DEV_GUARDED_DB
    WriteGuard l(x_this);
#endif
    auto const entry = m_main.find(_h);
    if (entry == m_main.end())
        m_main[_h] = make_pair(_v.toString(), 1);
    else
    {
        entry->second.first = _v.toString();
        entry->second.second++;
    }
#if ETH_PARANOIA
    dbdebug << "INST" << _h << "=>" << m_main[_h].second;
#endif
}
// Release one reference to _h. Returns true iff a live reference was
// actually decremented; the entry itself is only removed later by purge().
//
// Fix: this method mutates m_main (refcount decrement), so it must take the
// write lock. The previous code took only a ReadGuard, allowing concurrent
// kill()/insert() calls to race on the refcount. The #if ETH_PARANOIA
// region, which previously straddled brace boundaries, is also restructured
// into well-nested blocks with identical debug output.
bool MemoryDB::kill(h256 const& _h)
{
#if DEV_GUARDED_DB
    WriteGuard l(x_this);
#endif
    auto it = m_main.find(_h);
    if (it != m_main.end())
    {
        if (it->second.second > 0)
        {
            it->second.second--;
            return true;
        }
#if ETH_PARANOIA
        // If we get to this point, then there was probably a node in the level DB which we need to remove and which we have previously
        // used as part of the memory-based MemoryDB. Nothing to be worried about *as long as the node exists in the DB*.
        dbdebug << "NOKILL-WAS" << _h;
        dbdebug << "KILL" << _h << "=>" << it->second.second;
#endif
    }
#if ETH_PARANOIA
    else
        dbdebug << "NOKILL" << _h;
#endif
    return false;
}
// Fetch auxiliary data for _h; empty bytes when absent or (under reference
// enforcement) flagged as removed.
bytes MemoryDB::lookupAux(h256 const& _h) const
{
#if DEV_GUARDED_DB
    ReadGuard l(x_this);
#endif
    auto const entry = m_aux.find(_h);
    if (entry == m_aux.end())
        return bytes();
    if (m_enforceRefs && !entry->second.second)
        return bytes();
    return entry->second.first;
}
// Mark the auxiliary entry for _h as removed (its data stays until purged).
//
// Fix: the previous implementation used m_aux[_h], which default-inserts an
// {empty bytes, false} entry when _h is absent — removing a nonexistent key
// silently grew the map. Using find() keeps the observable behaviour
// (lookupAux of an absent or removed key returns empty bytes either way)
// without the spurious allocation.
void MemoryDB::removeAux(h256 const& _h)
{
#if DEV_GUARDED_DB
    WriteGuard l(x_this);
#endif
    auto it = m_aux.find(_h);
    if (it != m_aux.end())
        it->second.second = false;
}
// Store auxiliary data for _h, marking the entry live (second == true).
void MemoryDB::insertAux(h256 const& _h, bytesConstRef _v)
{
#if DEV_GUARDED_DB
    WriteGuard l(x_this);
#endif
    m_aux[_h] = make_pair(_v.toBytes(), true);
}
// Erase every main-store entry whose reference count has dropped to zero.
void MemoryDB::purge()
{
#if DEV_GUARDED_DB
    WriteGuard l(x_this);
#endif
    // Erase-while-iterating: advance only when keeping an entry; otherwise
    // continue from the iterator returned by erase().
    for (auto it = m_main.begin(); it != m_main.end(); )
        if (it->second.second)
            ++it;
        else
            it = m_main.erase(it);
}
// Collect the hashes of every live (refcount > 0) entry in the main store.
h256Hash MemoryDB::keys() const
{
#if DEV_GUARDED_DB
    ReadGuard l(x_this);
#endif
    h256Hash live;
    for (auto const& entry: m_main)
    {
        if (!entry.second.second)
            continue;
        live.insert(entry.first);
    }
    return live;
}
}
|
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2019, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#include <gtest/gtest.h>
#include <resources.hxx>
#include "CoreFixture.hpp"
#include "../UnzipFile.hpp"
#include "../ZipFile.hpp"
#ifdef Q_OS_WIN
// Let gtest print openstudio::path values in failure messages on Windows
// (presumably path's native string there lacks a narrow-stream operator<< —
// confirm against the openstudio::path typedef).
std::ostream& operator<<(std::ostream& t_o, const openstudio::path &t_path)
{
  return t_o << openstudio::toString(t_path);
}
#endif
// Opening a zip archive path that does not exist must throw.
TEST_F(CoreFixture, Unzip_NonExistTest)
{
  openstudio::path p = resourcesPath()/openstudio::toPath("utilities/Zip/blarg.zip");
  ASSERT_ANY_THROW(openstudio::UnzipFile f(p));
}
// Listing the entries of the test1.zip fixture: six entries, the first two
// of which are spot-checked by name.
TEST_F(CoreFixture, Unzip_DirListTest)
{
  openstudio::path p = resourcesPath()/openstudio::toPath("utilities/Zip/test1.zip");
  openstudio::UnzipFile uf(p);
  std::vector<openstudio::path> list = uf.listFiles();
  // Consistency fix: gtest's convention (and every other assertion in this
  // file, e.g. ASSERT_EQ(4u, createdFiles.size())) puts the expected value
  // first; these three had the arguments reversed, which garbles the
  // "expected vs actual" labels in failure output.
  ASSERT_EQ(6u, list.size());
  EXPECT_EQ(openstudio::toPath("file1.txt"), list[0]);
  EXPECT_EQ(openstudio::toPath("file2.txt"), list[1]);
}
// Extract individual entries from the test1.zip fixture and verify their
// on-disk paths, contents, and sizes.
TEST_F(CoreFixture, Unzip_ExtractFileTest)
{
  openstudio::path p = resourcesPath()/openstudio::toPath("utilities/Zip/test1.zip");
  openstudio::UnzipFile uf(p);
  // Extract into a clean temp dir so leftovers from a prior run cannot mask
  // a failure.
  openstudio::path outpath = openstudio::tempDir() / openstudio::toPath("ExtractFileTest");
  openstudio::filesystem::remove_all(outpath);
  openstudio::path outfile1 = outpath / openstudio::toPath("file2.txt");
  openstudio::path outfile2 = outpath / openstudio::toPath("testdir1/testdir2/file3.txt");
  openstudio::path outfile3 = outpath / openstudio::toPath("testdir1/testdir2/testpat.db");
  // extractFile returns the path it wrote to.
  EXPECT_EQ(outfile1, uf.extractFile(openstudio::toPath("file2.txt"), outpath));
  ASSERT_TRUE(openstudio::filesystem::exists(outfile1));
  std::ifstream ifs(openstudio::toSystemFilename(outfile1));
  std::string line;
  std::getline(ifs, line);
  // First line of file2.txt as stored in the fixture archive.
  EXPECT_EQ("18 bytes of data.", line);
  // Nested directories are created as needed during extraction.
  EXPECT_EQ(outfile2, uf.extractFile(openstudio::toPath("testdir1/testdir2/file3.txt"), outpath));
  EXPECT_EQ(outfile3, uf.extractFile(openstudio::toPath("testdir1/testdir2/testpat.db"), outpath));
  ASSERT_TRUE(openstudio::filesystem::exists(outfile2));
  ASSERT_TRUE(openstudio::filesystem::exists(outfile3));
  // file3.txt is empty; testpat.db has a known fixed size in the fixture.
  EXPECT_EQ(0u, openstudio::filesystem::file_size(outfile2));
  EXPECT_EQ(112640u, openstudio::filesystem::file_size(outfile3));
}
// extractAllFiles must materialise every entry of the fixture archive on
// disk and report each created path.
TEST_F(CoreFixture, Unzip_ExtractAllFilesTest)
{
  openstudio::path p = resourcesPath()/openstudio::toPath("utilities/Zip/test1.zip");
  openstudio::UnzipFile uf(p);
  // Extract into a clean temp dir so leftovers from a prior run cannot mask
  // a failure.
  openstudio::path outpath = openstudio::tempDir() / openstudio::toPath("ExtractAllFilesTest");
  openstudio::filesystem::remove_all(outpath);
  std::vector<openstudio::path> createdFiles = uf.extractAllFiles(outpath);
  ASSERT_EQ(4u, createdFiles.size());
  // Idiom: range-for replaces the verbose explicit-iterator loop.
  for (const openstudio::path& created : createdFiles) {
    EXPECT_TRUE(openstudio::filesystem::exists(created));
  }
}
// Create a fresh zip containing one file and verify a round-trip: the
// extracted copy must match the original byte count.
TEST_F(CoreFixture, Zip_CreateFile)
{
  openstudio::path p = resourcesPath()/openstudio::toPath("utilities/Zip/test1.zip");
  openstudio::path outpath = openstudio::tempDir() / openstudio::toPath("CreateFileTest");
  openstudio::path outzip = outpath / openstudio::toPath("new.zip");
  openstudio::filesystem::remove_all(outpath);
  {
    // Scope the ZipFile so its destructor finalises the archive before we
    // try to read it back.
    openstudio::filesystem::create_directories(outzip.parent_path());
    openstudio::ZipFile zf(outzip, false);
    zf.addFile(p, openstudio::toPath("added.zip"));
  }
  openstudio::UnzipFile uf(outzip);
  std::vector<openstudio::path> createdFiles = uf.extractAllFiles(outpath);
  ASSERT_EQ(1u, createdFiles.size());
  ASSERT_TRUE(openstudio::filesystem::exists(createdFiles[0]));
  EXPECT_EQ(openstudio::filesystem::file_size(p), openstudio::filesystem::file_size(createdFiles[0]));
}
// Re-open an existing zip in append mode (second ctor arg true) and add a
// second entry in a nested subdirectory; both entries must survive.
TEST_F(CoreFixture, Zip_AppendFile)
{
  openstudio::path p = resourcesPath()/openstudio::toPath("utilities/Zip/test1.zip");
  openstudio::path outpath = openstudio::tempDir() / openstudio::toPath("AppendFileTest");
  openstudio::path outzip = outpath / openstudio::toPath("new.zip");
  openstudio::filesystem::remove_all(outpath);
  {
    // First pass: create the archive with a single entry.
    openstudio::filesystem::create_directories(outzip.parent_path());
    openstudio::ZipFile zf(outzip, false);
    zf.addFile(p, openstudio::toPath("added.zip"));
  }
  {
    // Second pass: append (true) rather than truncate.
    openstudio::ZipFile zf(outzip, true);
    zf.addFile(p, openstudio::toPath("in/some/subdir/added2.zip"));
  }
  openstudio::UnzipFile uf(outzip);
  std::vector<openstudio::path> createdFiles = uf.extractAllFiles(outpath);
  ASSERT_EQ(2u, createdFiles.size());
  ASSERT_TRUE(openstudio::filesystem::exists(createdFiles[0]));
  ASSERT_TRUE(openstudio::filesystem::exists(createdFiles[1]));
  // Both extracted copies are the same source file, so sizes must match it.
  EXPECT_EQ(openstudio::filesystem::file_size(p), openstudio::filesystem::file_size(createdFiles[0]));
  EXPECT_EQ(openstudio::filesystem::file_size(p), openstudio::filesystem::file_size(createdFiles[1]));
  EXPECT_EQ(outpath / openstudio::toPath("in/some/subdir/added2.zip"), createdFiles[1]);
}
|
#include "AppHdr.h"
#include "ng-wanderer.h"
#include "item-prop.h"
#include "ng-setup.h"
#include "potion-type.h"
#include "randbook.h"
#include "random.h"
#include "skills.h"
#include "spl-book.h" // you_can_memorise
#include "spl-util.h"
// Give the wanderer a starting weapon (and, for launchers, ammo) matching
// wpn_skill. `plus` is the enchantment used for the "good equipment" gift
// (0 for the decent version); for SK_THROWING it also upgrades the darts to
// curare.
static void _give_wanderer_weapon(skill_type wpn_skill, int plus)
{
    if (wpn_skill == SK_THROWING)
    {
        // Plus is set if we are getting a good item. In that case, we
        // get curare here.
        if (plus)
        {
            newgame_make_item(OBJ_MISSILES, MI_DART, 1 + random2(4),
                              0, SPMSL_CURARE);
        }
        // Otherwise, we just get some poisoned darts.
        else
        {
            newgame_make_item(OBJ_MISSILES, MI_DART, 5 + roll_dice(2, 5),
                              0, SPMSL_POISONED);
        }
    }
    // NOTE(review): there is no early return above, so a SK_THROWING
    // wanderer falls through to the default case below and also receives a
    // dagger. Possibly an intentional melee fallback — confirm against
    // upstream before "fixing".
    weapon_type sub_type;
    // Now fill in the type according to the random wpn_skill.
    switch (wpn_skill)
    {
    case SK_SHORT_BLADES:
        sub_type = WPN_SHORT_SWORD;
        break;
    case SK_LONG_BLADES:
        sub_type = WPN_FALCHION;
        break;
    case SK_MACES_FLAILS:
        sub_type = WPN_MACE;
        break;
    case SK_AXES:
        sub_type = WPN_HAND_AXE;
        break;
    case SK_POLEARMS:
        sub_type = WPN_SPEAR;
        break;
    case SK_STAVES:
        sub_type = WPN_QUARTERSTAFF;
        break;
    case SK_BOWS:
        sub_type = WPN_SHORTBOW;
        break;
    case SK_CROSSBOWS:
        sub_type = WPN_HAND_CROSSBOW;
        break;
    default:
        sub_type = WPN_DAGGER;
        break;
    }
    newgame_make_item(OBJ_WEAPONS, sub_type, 1, plus);
    // Launchers come with a matching stack of ammunition.
    if (sub_type == WPN_SHORTBOW)
        newgame_make_item(OBJ_MISSILES, MI_ARROW, 15 + random2avg(21, 5));
    else if (sub_type == WPN_HAND_CROSSBOW)
        newgame_make_item(OBJ_MISSILES, MI_BOLT, 15 + random2avg(21, 5));
}
// The overall role choice for wanderers is a weighted chance based on
// stats: roll a point uniformly in [0, str + int + dex) and return the role
// whose stat interval it lands in.
static stat_type _wanderer_choose_role()
{
    int stat_sum = 0;
    for (int s = 0; s < NUM_STATS; ++s)
        stat_sum += you.stat(static_cast<stat_type>(s));

    const int roll = random2(stat_sum);
    if (roll < you.strength())
        return STAT_STR;
    if (roll < you.strength() + you.dex())
        return STAT_DEX;
    return STAT_INT;
}
// Pick one entry from skill_array at random, weighting each skill by the
// species' aptitude for it (a smaller aptitude factor means a larger
// weight). Returns NUM_SKILLS only if the probe overruns every region,
// which cannot happen now that the weights are computed exactly once.
static skill_type _apt_weighted_choice(const skill_type * skill_array,
                                       unsigned arr_size)
{
    // Fix: the original recomputed 100 / species_apt_factor() in both
    // passes, wasting work and silently relying on the two passes agreeing.
    // Cache each weight on the first pass and reuse it on the second.
    vector<int> weights(arr_size);
    int total_apt = 0;
    for (unsigned i = 0; i < arr_size; ++i)
    {
        weights[i] = 100 / species_apt_factor(skill_array[i]);
        total_apt += weights[i];
    }

    unsigned probe = random2(total_apt);
    unsigned region_covered = 0;
    for (unsigned i = 0; i < arr_size; ++i)
    {
        region_covered += weights[i];
        if (probe < region_covered)
            return skill_array[i];
    }
    return NUM_SKILLS;
}
// Pick one skill to train for a single level, based on the chosen role.
// sk_1/sk_2 are the role's pre-rolled weapon skill (and, for casters, a
// second spell school).
static skill_type _wanderer_role_skill_select(stat_type selected_role,
                                              skill_type sk_1,
                                              skill_type sk_2)
{
    skill_type selected_skill = SK_NONE;
    switch (selected_role)
    {
    case STAT_DEX:
        // Duplicates are intentional.
        selected_skill = random_choose(SK_FIGHTING, SK_FIGHTING,
                                       SK_DODGING,
                                       SK_STEALTH,
                                       sk_1, sk_1);
        break;
    case STAT_STR:
        // Reroll until we land on a skill this species can actually use
        // (e.g. armour for species that can't wear it).
        do
        {
            selected_skill = random_choose(SK_FIGHTING, sk_1, SK_ARMOUR);
        }
        while (is_useless_skill(selected_skill));
        break;
    case STAT_INT:
        selected_skill = random_choose(SK_SPELLCASTING, sk_1, sk_2);
        break;
    default:
        die("bad skill_type %d", selected_role);
    }
    // NUM_SKILLS means the weapon-skill roll found nothing usable; that only
    // happens for species with no weapon options, which fall back to UC.
    if (selected_skill == NUM_SKILLS)
    {
        ASSERT(you.species == SP_FELID || you.species == SP_CRUSTACEAN || you.species == SP_HYDRA);
        selected_skill = SK_UNARMED_COMBAT;
    }
    return selected_skill;
}
// Choose an aptitude-weighted weapon skill (or, for casters, spell school)
// appropriate to the given role.
static skill_type _wanderer_role_weapon_select(stat_type role)
{
    const skill_type str_weapons[] =
        { SK_AXES, SK_MACES_FLAILS, SK_BOWS, SK_CROSSBOWS };
    const skill_type dex_weapons[] =
        { SK_SHORT_BLADES, SK_LONG_BLADES, SK_STAVES, SK_UNARMED_COMBAT,
          SK_POLEARMS };
    const skill_type casting_schools[] =
        { SK_SUMMONINGS, SK_NECROMANCY, SK_TRANSLOCATIONS,
          SK_TRANSMUTATIONS, SK_POISON_MAGIC, SK_CONJURATIONS,
          SK_HEXES, SK_CHARMS, SK_FIRE_MAGIC, SK_ICE_MAGIC,
          SK_AIR_MAGIC, SK_EARTH_MAGIC };

    // An explicit default replaces the original's (int) cast, which existed
    // only to silence the unhandled-enumerator warning.
    switch (role)
    {
    case STAT_STR:
        return _apt_weighted_choice(str_weapons, ARRAYSZ(str_weapons));
    case STAT_DEX:
        return _apt_weighted_choice(dex_weapons, ARRAYSZ(dex_weapons));
    case STAT_INT:
        return _apt_weighted_choice(casting_schools, ARRAYSZ(casting_schools));
    default:
        return NUM_SKILLS;
    }
}
// Award `levels` skill levels for the given role, re-rolling the trained
// skill for each level from the role's pool.
static void _wanderer_role_skill(stat_type role, int levels)
{
    // Fix: the local was previously named `weapon_type`, shadowing the enum
    // type of the same name used elsewhere in this file.
    const skill_type wpn_skill = _wanderer_role_weapon_select(role);
    skill_type spell2 = NUM_SKILLS;
    if (role == STAT_INT)
        spell2 = _wanderer_role_weapon_select(role);

    for (int i = 0; i < levels; ++i)
    {
        // Declared inside the loop: the value is never needed after the
        // iteration that produced it.
        const skill_type chosen = _wanderer_role_skill_select(role, wpn_skill,
                                                              spell2);
        you.skills[chosen]++;
    }
}
// Select a random skill from all skills we have at least 1 level in,
// weighted by the number of levels held in each.
static skill_type _weighted_skill_roll()
{
    int level_sum = 0;
    for (unsigned sk = 0; sk < NUM_SKILLS; ++sk)
        level_sum += you.skills[sk];

    // Walk the skills, consuming the roll until it goes negative; the skill
    // that exhausts it is the weighted pick.
    int remaining = random2(level_sum);
    for (unsigned sk = 0; sk < NUM_SKILLS; ++sk)
    {
        remaining -= you.skills[sk];
        if (remaining < 0)
            return skill_type(sk);
    }
    return NUM_SKILLS;
}
// Give a starting spellbook matching the gifted magic skill. Unknown or
// generic skills fall back to the Book of Minor Magic.
static void _give_wanderer_book(skill_type skill)
{
    book_type book;
    switch (skill)
    {
    default:
    case SK_SPELLCASTING:
        book = BOOK_MINOR_MAGIC;
        break;
    case SK_CONJURATIONS:
        // minor magic should have only half the likelihood of conj
        book = random_choose(BOOK_MINOR_MAGIC,
                             BOOK_CONJURATIONS, BOOK_CONJURATIONS);
        break;
    case SK_SUMMONINGS:
        book = random_choose(BOOK_MINOR_MAGIC, BOOK_CALLINGS);
        break;
    case SK_NECROMANCY:
        book = BOOK_NECROMANCY;
        break;
    case SK_TRANSLOCATIONS:
        book = BOOK_SPATIAL_TRANSLOCATIONS;
        break;
    case SK_TRANSMUTATIONS:
        book = random_choose(BOOK_GEOMANCY, BOOK_CHANGES);
        break;
    case SK_FIRE_MAGIC:
        book = BOOK_FLAMES;
        break;
    case SK_ICE_MAGIC:
        book = random_choose(BOOK_FROST, BOOK_FROST2);
        break;
    case SK_AIR_MAGIC:
        book = BOOK_AIR;
        break;
    case SK_EARTH_MAGIC:
        book = BOOK_GEOMANCY;
        break;
    case SK_POISON_MAGIC:
        book = BOOK_YOUNG_POISONERS;
        break;
    case SK_HEXES:
        book = BOOK_MALEDICT;
        break;
    case SK_CHARMS:
        book = BOOK_BATTLE;
        break;
    }
    newgame_make_item(OBJ_BOOKS, book);
}
/**
 * Can we include the given spell in our themed spellbook?
 *
 * Used to guarantee the finished book holds exactly two spells of total
 * spell level 4 (i.e. 2+2 or 1+3).
 *
 * XXX: strongly consider caching this - currently we're n^2 over all spells,
 * which seems excessive.
 *
 * @param discipline_1 The first spellschool of the book.
 * @param discipline_2 The second spellschool of the book.
 * @param agent The entity creating the book; possibly a god.
 * @param prev A list of spells already chosen for the book.
 * @param spell The spell to be filtered.
 * @return Whether the spell can be included.
 */
static bool exact_level_spell_filter(spschool discipline_1,
                                     spschool discipline_2,
                                     int agent,
                                     const vector<spell_type> &prev,
                                     spell_type spell)
{
    // Must pass the generic theme filter and be castable by this character.
    if (!basic_themed_spell_filter(discipline_1, discipline_2, agent, prev,
                                   spell))
    {
        return false;
    }
    if (!you_can_memorise(spell))
        return false;
    static const int TOTAL_LEVELS = 4;
    const int spell_level = spell_difficulty(spell);
    // Second pick: its level must bring the pair to exactly TOTAL_LEVELS.
    if (prev.size())
        return TOTAL_LEVELS == spell_level + spell_difficulty(prev[0]);
    // we need to check to see there is some possible second spell; otherwise
    // we could be walking into a trap, if we select e.g. a level 2 spell when
    // there's only one player-castable level 2 spell in the school.
    // (Recursive call with `spell` as the provisional first pick.)
    const vector<spell_type> incl_spell = {spell};
    for (int s = 0; s < NUM_SPELLS; ++s)
    {
        const spell_type second_spell = static_cast<spell_type>(s);
        if (exact_level_spell_filter(discipline_1, discipline_2,
                                     agent, incl_spell, second_spell))
        {
            return true;
        }
    }
    return false;
}
// Give the wanderer a randart book containing two spells of total level 4.
// The theme of the book is the spell school of the chosen skill.
static void _give_wanderer_minor_book(skill_type skill)
{
    // SK_SPELLCASTING has no school of its own, so reroll until a concrete
    // magic school comes up (rejection loop, because laziness).
    while (skill == SK_SPELLCASTING)
    {
        const int n_schools = SK_LAST_MAGIC - SK_FIRST_MAGIC_SCHOOL + 1;
        skill = skill_type(SK_FIRST_MAGIC_SCHOOL + random2(n_schools));
    }

    const spschool school = skill2spell_type(skill);
    item_def* book = newgame_make_item(OBJ_BOOKS, BOOK_RANDART_THEME);
    if (!book)
        return;

    build_themed_book(*book, exact_level_spell_filter,
                      forced_book_theme(school), 2);
}
/**
 * Create a consumable as a "good item".
 *
 * Shouldn't ever create an useless consumable for the player's species.
 * e.g., potions for Mu, heal wounds for VS, blinking for Fo.
 */
static void _good_potion_or_scroll()
{
    // vector of weighted {object_class_type, subtype} pairs
    // xxx: could we use is_useless_item here? (not without dummy items...?)
    // A weight of 0 removes an option entirely for species it's useless for.
    const vector<pair<pair<object_class_type, int>, int>> options = {
        { { OBJ_SCROLLS, SCR_FEAR }, 1 },
        { { OBJ_SCROLLS, SCR_BLINKING },
            you.species == SP_FORMICID ? 0 : 1 },
        { { OBJ_POTIONS, POT_HEAL_WOUNDS },
            (you.species == SP_MUMMY
             || you.species == SP_VINE_STALKER) ? 0 : 1 },
        { { OBJ_POTIONS, POT_HASTE },
            you.species == SP_MUMMY ? 0 : 1 },
        { { OBJ_POTIONS, POT_BERSERK_RAGE },
            (you.species == SP_FORMICID
             || you.is_lifeless_undead(false)) ? 0 : 1},
    };

    const pair<object_class_type, int> *option
        = random_choose_weighted(options);
    // At least one option always has nonzero weight (SCR_FEAR), so the
    // weighted choice cannot fail.
    ASSERT(option);
    newgame_make_item(option->first, option->second);
}
/**
 * Make a 'decent' consumable for a wanderer to start with.
 *
 * Shouldn't ever create a completely useless item for the player's species.
 */
static void _decent_potion_or_scroll()
{
    // vector of weighted {object_class_type, subtype} pairs
    // xxx: could we use is_useless_item here? (not without dummy items...?)
    // A weight of 0 removes an option entirely for species it's useless for.
    const vector<pair<pair<object_class_type, int>, int>> options = {
        { { OBJ_SCROLLS, SCR_TELEPORTATION },
            you.species == SP_FORMICID ? 0 : 1 },
        { { OBJ_POTIONS, POT_CURING },
            you.species == SP_MUMMY ? 0 : 1 },
        { { OBJ_POTIONS, POT_LIGNIFY },
            you.is_lifeless_undead(false) ? 0 : 1 },
    };

    const pair<object_class_type, int> *option
        = random_choose_weighted(options);
    // NOTE(review): a lifeless-undead Formicid Mummy-style combination that
    // zeroed every weight would trip this assert; presumably no such species
    // combination exists — confirm.
    ASSERT(option);
    newgame_make_item(option->first, option->second);
}
// Create a random wand in the inventory.
static void _wanderer_random_evokable()
{
if (one_chance_in(3))
{
int selected_evoker =
random_choose(MISC_BOX_OF_BEASTS, MISC_LAMP_OF_FIRE,
MISC_FAN_OF_GALES, MISC_PHIAL_OF_FLOODS);
newgame_make_item(OBJ_MISCELLANY, selected_evoker, 1);
}
else
{
wand_type selected_wand =
random_choose(WAND_ENSLAVEMENT, WAND_PARALYSIS, WAND_FLAME);
newgame_make_item(OBJ_WANDS, selected_wand, 1, 15);
}
}
// Hand out the wanderer's single "good" equipment gift keyed off `skill`.
// `skill` is a reference: SK_FIGHTING is normalised here to the player's
// best weapon skill, and the caller records the final value so the same
// skill isn't gifted twice.
static void _wanderer_good_equipment(skill_type & skill)
{
    const skill_type combined_weapon_skills[] =
        { SK_AXES, SK_MACES_FLAILS, SK_BOWS, SK_CROSSBOWS,
          SK_SHORT_BLADES, SK_LONG_BLADES, SK_STAVES, SK_UNARMED_COMBAT,
          SK_POLEARMS };
    int total_weapons = ARRAYSZ(combined_weapon_skills);
    // Normalise the input type.
    if (skill == SK_FIGHTING)
    {
        // Pick the highest-trained weapon skill; >= means ties go to the
        // later entry in the array.
        int max_sklev = 0;
        skill_type max_skill = SK_NONE;
        for (int i = 0; i < total_weapons; ++i)
        {
            if (you.skills[combined_weapon_skills[i]] >= max_sklev)
            {
                max_skill = combined_weapon_skills[i];
                max_sklev = you.skills[max_skill];
            }
        }
        skill = max_skill;
    }
    switch (skill)
    {
    case SK_MACES_FLAILS:
    case SK_AXES:
    case SK_POLEARMS:
    case SK_THROWING:
    case SK_SHORT_BLADES:
    case SK_LONG_BLADES:
    case SK_BOWS:
    case SK_STAVES:
    case SK_CROSSBOWS:
        // An enchanted (+2) weapon of the matching type.
        _give_wanderer_weapon(skill, 2);
        break;
    case SK_ARMOUR:
        newgame_make_item(OBJ_ARMOUR, ARM_SCALE_MAIL, 1, 2);
        break;
    case SK_DODGING:
        // +2 leather armour or +0 leather armour and also 2-4 nets
        if (coinflip())
            newgame_make_item(OBJ_ARMOUR, ARM_LEATHER_ARMOUR, 1, 2);
        else
        {
            newgame_make_item(OBJ_ARMOUR, ARM_LEATHER_ARMOUR);
            newgame_make_item(OBJ_MISSILES, MI_THROWING_NET, 2 + random2(3));
        }
        break;
    case SK_STEALTH:
        // +2 dagger and a good consumable
        newgame_make_item(OBJ_WEAPONS, WPN_DAGGER, 1, 2);
        _good_potion_or_scroll();
        break;
    case SK_SHIELDS:
        newgame_make_item(OBJ_ARMOUR, ARM_SHIELD);
        break;
    case SK_SPELLCASTING:
    case SK_CONJURATIONS:
    case SK_SUMMONINGS:
    case SK_NECROMANCY:
    case SK_TRANSLOCATIONS:
    case SK_TRANSMUTATIONS:
    case SK_FIRE_MAGIC:
    case SK_ICE_MAGIC:
    case SK_AIR_MAGIC:
    case SK_EARTH_MAGIC:
    case SK_POISON_MAGIC:
    case SK_HEXES:
    case SK_CHARMS:
        // Magic skills get a full starting spellbook for the school.
        _give_wanderer_book(skill);
        break;
    case SK_UNARMED_COMBAT:
    {
        // 2 random good potions/scrolls
        _good_potion_or_scroll();
        _good_potion_or_scroll();
        break;
    }
    case SK_EVOCATIONS:
        // Random wand
        _wanderer_random_evokable();
        break;
    default:
        break;
    }
}
// Hand out one "decent" (unenchanted) equipment gift keyed off `skill`.
// Like _wanderer_good_equipment, SK_FIGHTING is normalised to the best
// weapon skill; additionally, a skill that already produced a gift is
// demoted to SK_NONE so the player gets a consumable instead.
static void _wanderer_decent_equipment(skill_type & skill,
                                       set<skill_type> & gift_skills)
{
    const skill_type combined_weapon_skills[] =
        { SK_AXES, SK_MACES_FLAILS, SK_BOWS, SK_CROSSBOWS,
          SK_SHORT_BLADES, SK_LONG_BLADES, SK_STAVES, SK_UNARMED_COMBAT,
          SK_POLEARMS };
    int total_weapons = ARRAYSZ(combined_weapon_skills);
    // If fighting comes up, give something from the highest weapon
    // skill.
    if (skill == SK_FIGHTING)
    {
        // >= means ties go to the later entry in the array.
        int max_sklev = 0;
        skill_type max_skill = SK_NONE;
        for (int i = 0; i < total_weapons; ++i)
        {
            if (you.skills[combined_weapon_skills[i]] >= max_sklev)
            {
                max_skill = combined_weapon_skills[i];
                max_sklev = you.skills[max_skill];
            }
        }
        skill = max_skill;
    }
    // Don't give a gift from the same skill twice; just default to
    // a decent consumable
    if (gift_skills.count(skill))
        skill = SK_NONE;
    switch (skill)
    {
    case SK_MACES_FLAILS:
    case SK_AXES:
    case SK_POLEARMS:
    case SK_BOWS:
    case SK_CROSSBOWS:
    case SK_THROWING:
    case SK_STAVES:
    case SK_SHORT_BLADES:
    case SK_LONG_BLADES:
        // Plain (+0) weapon of the matching type.
        _give_wanderer_weapon(skill, 0);
        break;
    case SK_ARMOUR:
        newgame_make_item(OBJ_ARMOUR, ARM_RING_MAIL);
        break;
    case SK_SHIELDS:
        newgame_make_item(OBJ_ARMOUR, ARM_BUCKLER);
        break;
    case SK_SPELLCASTING:
    case SK_CONJURATIONS:
    case SK_SUMMONINGS:
    case SK_NECROMANCY:
    case SK_TRANSLOCATIONS:
    case SK_TRANSMUTATIONS:
    case SK_FIRE_MAGIC:
    case SK_ICE_MAGIC:
    case SK_AIR_MAGIC:
    case SK_EARTH_MAGIC:
    case SK_POISON_MAGIC:
        // A two-spell randart book rather than a full school book.
        _give_wanderer_minor_book(skill);
        break;
    case SK_EVOCATIONS:
        newgame_make_item(OBJ_WANDS, WAND_RANDOM_EFFECTS, 1, 15);
        break;
    case SK_DODGING:
    case SK_STEALTH:
    case SK_UNARMED_COMBAT:
    case SK_NONE:
        _decent_potion_or_scroll();
        break;
    default:
        break;
    }
}
// We don't actually want to send adventurers wandering naked into the
// dungeon.
static void _wanderer_cover_equip_holes()
{
if (you.equip[EQ_BODY_ARMOUR] == -1)
newgame_make_item(OBJ_ARMOUR, ARM_ROBE);
if (you.equip[EQ_WEAPON] == -1)
{
newgame_make_item(OBJ_WEAPONS,
you.dex() > you.strength() ? WPN_DAGGER : WPN_CLUB);
}
// Give a dagger if you have stealth skill. Maybe this is
// unnecessary?
if (you.skills[SK_STEALTH] > 1)
{
bool has_dagger = false;
for (const item_def& i : you.inv)
{
if (i.is_type(OBJ_WEAPONS, WPN_DAGGER))
{
has_dagger = true;
break;
}
}
if (!has_dagger)
newgame_make_item(OBJ_WEAPONS, WPN_DAGGER);
}
}
// New style wanderers are supposed to be decent in terms of skill
// levels/equipment, but pretty randomised.
//
// Overall flow: pick two stat-weighted "roles", allocate skill levels for
// them plus a couple of utility skills, then hand out one good item, two
// decent items, and finally cover any equipment holes. Every step draws
// from wn_rng, so the ORDER of calls below is part of seed stability —
// do not reorder.
void create_wanderer()
{
    // intentionally create the subgenerator either way, so that this has the
    // same impact on the current main rng for all chars.
    rng::subgenerator wn_rng;
    if (you.char_class != JOB_WANDERER)
        return;

    // Decide what our character roles are.
    stat_type primary_role = _wanderer_choose_role();
    stat_type secondary_role = _wanderer_choose_role();

    // Regardless of roles, players get a couple levels in these skills.
    const skill_type util_skills[] =
        { SK_THROWING, SK_STEALTH, SK_SHIELDS, SK_EVOCATIONS };
    int util_size = ARRAYSZ(util_skills);

    // Maybe too many skill levels, given the level 1 floor on skill
    // levels for wanderers?
    int primary_skill_levels = 5;
    int secondary_skill_levels = 3;

    // Allocate main skill levels.
    _wanderer_role_skill(primary_role, primary_skill_levels);
    _wanderer_role_skill(secondary_role, secondary_skill_levels);

    skill_type util_skill1 = _apt_weighted_choice(util_skills, util_size);
    skill_type util_skill2 = _apt_weighted_choice(util_skills, util_size);

    // And a couple levels of utility skills.
    you.skills[util_skill1]++;
    you.skills[util_skill2]++;

    // Keep track of what skills we got items from, mostly to prevent
    // giving a good and then a normal version of the same weapon.
    set<skill_type> gift_skills;

    // Wanderers get 1 good thing, a couple average things, and then
    // 1 last stage to fill any glaring equipment holes (no clothes,
    // etc.).
    skill_type good_equipment = _weighted_skill_roll();

    // The first of these goes through the whole role/aptitude weighting
    // thing again. It's quite possible that this will give something
    // we have no skill in.
    const stat_type selected_role = one_chance_in(3) ? secondary_role
                                                     : primary_role;
    const skill_type sk_1 = _wanderer_role_weapon_select(selected_role);
    skill_type sk_2 = SK_NONE;

    if (selected_role == STAT_INT)
        sk_2 = _wanderer_role_weapon_select(selected_role);

    skill_type decent_1 = _wanderer_role_skill_select(selected_role,
                                                      sk_1, sk_2);
    skill_type decent_2 = _weighted_skill_roll();

    _wanderer_good_equipment(good_equipment);
    gift_skills.insert(good_equipment);

    _wanderer_decent_equipment(decent_1, gift_skills);
    gift_skills.insert(decent_1);
    _wanderer_decent_equipment(decent_2, gift_skills);
    gift_skills.insert(decent_2);

    _wanderer_cover_equip_holes();
}
// If the player got only one level 1 spell, memorise it. Otherwise, let the
// player choose which spell(s) to memorise and don't memorise any.
void memorise_wanderer_spell()
{
    auto const available_spells = get_sorted_spell_list(true, true);

    int num_level_one_spells = 0;
    // Fix: the original left which_spell uninitialised, relying solely on
    // the counter guard below to avoid reading an indeterminate value.
    // Give it a deterministic (never meaningfully read) initial value.
    spell_type which_spell = spell_type(0);
    for (spell_type spell : available_spells)
        if (spell_difficulty(spell) == 1)
        {
            num_level_one_spells += 1;
            which_spell = spell;
        }
    // (The original's `if (available_spells.size())` wrapper was redundant:
    // an empty list leaves the counter at 0 and we fall through harmlessly.)
    if (num_level_one_spells == 1)
        add_spell_to_memory(which_spell);
}
|
/****************************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++98, c++03
// <forward_list>
// template <class... Args>
// iterator emplace_after(const_iterator p, Args&&... args);
#include <forward_list>
#include <cassert>
#include "libcxx_tc_common.h"
#include "Emplaceable.h"
// Exercises forward_list::emplace_after: each call must construct the
// element in place at the position AFTER the given iterator, return an
// iterator to the new element, and leave all prior elements untouched.
int tc_libcxx_containers_forwardlist_modifiers_emplace_after(void)
{
    {
        typedef Emplaceable T;
        typedef std::forward_list<T> C;
        typedef C::iterator I;
        C c;
        // Emplace before-begin: default-constructed element becomes front.
        I i = c.emplace_after(c.cbefore_begin());
        TC_ASSERT_EXPR(i == c.begin());
        TC_ASSERT_EXPR(c.front() == Emplaceable());
        TC_ASSERT_EXPR(distance(c.begin(), c.end()) == 1);
        // Emplace after the first element, forwarding (1, 2.5).
        i = c.emplace_after(c.cbegin(), 1, 2.5);
        TC_ASSERT_EXPR(i == next(c.begin()));
        TC_ASSERT_EXPR(c.front() == Emplaceable());
        TC_ASSERT_EXPR(*next(c.begin()) == Emplaceable(1, 2.5));
        TC_ASSERT_EXPR(distance(c.begin(), c.end()) == 2);
        // Emplace at the tail.
        i = c.emplace_after(next(c.cbegin()), 2, 3.5);
        TC_ASSERT_EXPR(i == next(c.begin(), 2));
        TC_ASSERT_EXPR(c.front() == Emplaceable());
        TC_ASSERT_EXPR(*next(c.begin()) == Emplaceable(1, 2.5));
        TC_ASSERT_EXPR(*next(c.begin(), 2) == Emplaceable(2, 3.5));
        TC_ASSERT_EXPR(distance(c.begin(), c.end()) == 3);
        // Emplace in the middle: later elements shift one position down.
        i = c.emplace_after(c.cbegin(), 3, 4.5);
        TC_ASSERT_EXPR(i == next(c.begin()));
        TC_ASSERT_EXPR(c.front() == Emplaceable());
        TC_ASSERT_EXPR(*next(c.begin(), 1) == Emplaceable(3, 4.5));
        TC_ASSERT_EXPR(*next(c.begin(), 2) == Emplaceable(1, 2.5));
        TC_ASSERT_EXPR(*next(c.begin(), 3) == Emplaceable(2, 3.5));
        TC_ASSERT_EXPR(distance(c.begin(), c.end()) == 4);
    }
    TC_SUCCESS_RESULT();
    return 0;
}
|
/**
* @file VertexSet_test.cpp
* @brief Unit tests for the VertexSet class.
* @author Dominique LaSalle <dominique@solidlake.com>
* Copyright 2017-2018
* @version 1
* @date 2018-01-14
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "graph/VertexSet.hpp"
#include "solidutils/UnitTest.hpp"
#include <vector>
namespace poros
{
// Verify that a VertexSet constructed over the range [0, 10) reports 10 vertices.
UNITTEST(VertexSet, Size)
{
  VertexSet set(0, 10);
  testEqual(set.size(), 10U);
}
// Verify that iterating a VertexSet over [0, 10) visits vertex indices
// 0 through 9 in ascending order.
UNITTEST(VertexSet, Iterator)
{
  VertexSet set(0, 10);
  vtx_type expected = 0;
  for (Vertex const & vertex : set) {
    testEqual(vertex.index, expected);
    ++expected;
  }
}
}
|
// Copyright (c) 2014-2016 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Copyright (c) 2017-2020 The Adeptio developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "spork.h"
#include "base58.h"
#include "key.h"
#include "main.h"
#include "masternode-budget.h"
#include "net.h"
#include "protocol.h"
#include "sync.h"
#include "sporkdb.h"
#include "util.h"
#include <boost/lexical_cast.hpp>
using namespace std;
using namespace boost;
class CSporkMessage;
class CSporkManager;
// Singleton manager used for signing, verifying and relaying sporks.
CSporkManager sporkManager;
// All spork messages ever seen, keyed by message hash (for relay lookups).
std::map<uint256, CSporkMessage> mapSporks;
// The currently-active spork message per spork ID.
std::map<int, CSporkMessage> mapSporksActive;
// ADE: on startup load spork values from previous session if they exist in the sporkDB
// ADE: on startup load spork values from previous session if they exist in the sporkDB
// Restores every known spork from the on-disk spork database into the
// in-memory maps (mapSporks / mapSporksActive) so the node resumes with
// the spork state it had when it last shut down.
void LoadSporksFromDB()
{
    for (int i = SPORK_START; i <= SPORK_END; ++i) {
        // Since not all spork IDs are in use, we have to exclude undefined IDs
        std::string strSpork = sporkManager.GetSporkNameByID(i);
        if (strSpork == "Unknown") continue;
        // attempt to read spork from sporkDB
        CSporkMessage spork;
        if (!pSporkDB->ReadSpork(i, spork)) {
            LogPrintf("%s : no previous value for %s found in database\n", __func__, strSpork);
            continue;
        }
        // add spork to memory
        mapSporks[spork.GetHash()] = spork;
        mapSporksActive[spork.nSporkID] = spork;
        std::time_t result = spork.nValue;
        // If SPORK Value is greater than 1,000,000 assume it's actually a Date and then convert to a more readable format
        if (spork.nValue > 1000000) {
            // No trailing '\n' here: std::ctime() output already ends with a newline.
            LogPrintf("%s : loaded spork %s with value %d : %s", __func__,
                sporkManager.GetSporkNameByID(spork.nSporkID), spork.nValue,
                std::ctime(&result));
        } else {
            LogPrintf("%s : loaded spork %s with value %d\n", __func__,
                sporkManager.GetSporkNameByID(spork.nSporkID), spork.nValue);
        }
    }
}
// Network message handler for "spork" and "getsporks".
// "spork": validates a received spork (freshness + signature), then stores,
// relays, and persists it. "getsporks": replies with every active spork.
// Consensus-sensitive: validation order and acceptance rules must not change.
void ProcessSpork(CNode* pfrom, std::string& strCommand, CDataStream& vRecv)
{
    if (fLiteMode) return; //disable all obfuscation/masternode related functionality
    if (strCommand == "spork") {
        //LogPrintf("ProcessSpork::spork\n");
        // NOTE(review): vMsg is a copy of the stream that is never used below — confirm it can be removed.
        CDataStream vMsg(vRecv);
        CSporkMessage spork;
        vRecv >> spork;
        if (chainActive.Tip() == NULL) return;
        // Ignore spork messages about unknown/deleted sporks
        std::string strSpork = sporkManager.GetSporkNameByID(spork.nSporkID);
        if (strSpork == "Unknown") return;
        uint256 hash = spork.GetHash();
        // Drop the message if we already hold an equally-new or newer value for this spork ID.
        if (mapSporksActive.count(spork.nSporkID)) {
            if (mapSporksActive[spork.nSporkID].nTimeSigned >= spork.nTimeSigned) {
                if (fDebug) LogPrintf("%s : seen %s block %d \n", __func__, hash.ToString(), chainActive.Tip()->nHeight);
                return;
            } else {
                if (fDebug) LogPrintf("%s : got updated spork %s block %d \n", __func__, hash.ToString(), chainActive.Tip()->nHeight);
            }
        }
        LogPrintf("%s : new %s ID %d Time %d bestHeight %d\n", __func__, hash.ToString(), spork.nSporkID, spork.nValue, chainActive.Tip()->nHeight);
        // Messages signed after the new-spork-key activation must verify against the new key specifically.
        if (spork.nTimeSigned >= Params().NewSporkStart()) {
            if (!sporkManager.CheckSignature(spork, true)) {
                LogPrintf("%s : Invalid Signature\n", __func__);
                Misbehaving(pfrom->GetId(), 100);
                return;
            }
        }
        // NOTE(review): this second check runs unconditionally, re-verifying even
        // messages that just passed the strict check above — confirm intent.
        if (!sporkManager.CheckSignature(spork)) {
            LogPrintf("%s : Invalid Signature\n", __func__);
            Misbehaving(pfrom->GetId(), 100);
            return;
        }
        // Accept: remember, mark active, and propagate to peers.
        mapSporks[hash] = spork;
        mapSporksActive[spork.nSporkID] = spork;
        sporkManager.Relay(spork);
        // ADE: add to spork database.
        pSporkDB->WriteSpork(spork.nSporkID, spork);
    }
    if (strCommand == "getsporks") {
        // Send every currently-active spork back to the requesting peer.
        std::map<int, CSporkMessage>::iterator it = mapSporksActive.begin();
        while (it != mapSporksActive.end()) {
            pfrom->PushMessage("spork", it->second);
            it++;
        }
    }
}
// grab the value of the spork on the network, or the default
// grab the value of the spork on the network, or the default
// Returns the network-advertised value for nSporkID when one is active,
// otherwise the compiled-in default; -1 (with a log line) for unknown IDs.
int64_t GetSporkValue(int nSporkID)
{
    if (mapSporksActive.count(nSporkID)) {
        return mapSporksActive[nSporkID].nValue;
    }
    int64_t nDefault = -1;
    switch (nSporkID) {
        case SPORK_2_HYPERSEND:                        nDefault = SPORK_2_HYPERSEND_DEFAULT; break;
        case SPORK_3_HYPERSEND_BLOCK_FILTERING:        nDefault = SPORK_3_HYPERSEND_BLOCK_FILTERING_DEFAULT; break;
        case SPORK_5_MAX_VALUE:                        nDefault = SPORK_5_MAX_VALUE_DEFAULT; break;
        case SPORK_7_MASTERNODE_SCANNING:              nDefault = SPORK_7_MASTERNODE_SCANNING_DEFAULT; break;
        case SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT:   nDefault = SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT_DEFAULT; break;
        case SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT:    nDefault = SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT_DEFAULT; break;
        case SPORK_10_MASTERNODE_PAY_UPDATED_NODES:    nDefault = SPORK_10_MASTERNODE_PAY_UPDATED_NODES_DEFAULT; break;
        case SPORK_13_ENABLE_SUPERBLOCKS:              nDefault = SPORK_13_ENABLE_SUPERBLOCKS_DEFAULT; break;
        case SPORK_14_NEW_PROTOCOL_ENFORCEMENT:        nDefault = SPORK_14_NEW_PROTOCOL_ENFORCEMENT_DEFAULT; break;
        case SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2:      nDefault = SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2_DEFAULT; break;
        case SPORK_16_ZEROCOIN_MAINTENANCE_MODE:       nDefault = SPORK_16_ZEROCOIN_MAINTENANCE_MODE_DEFAULT; break;
    }
    if (nDefault == -1) LogPrintf("%s : Unknown Spork %d\n", __func__, nSporkID);
    return nDefault;
}
// grab the spork value, and see if it's off
// grab the spork value, and see if it's off
// A spork value encodes an activation timestamp: the spork is "active"
// once that timestamp lies in the past. Unknown sporks (-1) are inactive.
bool IsSporkActive(int nSporkID)
{
    const int64_t nValue = GetSporkValue(nSporkID);
    return (nValue != -1) && (nValue < GetTime());
}
// Re-evaluates recently rejected blocks and re-runs chain activation over the
// last nBlocks blocks, e.g. after a spork change alters validation rules.
void ReprocessBlocks(int nBlocks)
{
    // First pass: reconsider blocks rejected within roughly the last
    // nBlocks-worth of time (window is deliberately twice the usual span).
    std::map<uint256, int64_t>::iterator it = mapRejectedBlocks.begin();
    while (it != mapRejectedBlocks.end()) {
        //use a window twice as large as is usual for the nBlocks we want to reset
        if ((*it).second > GetTime() - (nBlocks * 60 * 5)) {
            BlockMap::iterator mi = mapBlockIndex.find((*it).first);
            if (mi != mapBlockIndex.end() && (*mi).second) {
                LOCK(cs_main);
                CBlockIndex* pindex = (*mi).second;
                LogPrintf("ReprocessBlocks - %s\n", (*it).first.ToString());
                CValidationState state;
                ReconsiderBlock(state, pindex);
            }
        }
        ++it;
    }
    // Second pass: disconnect and reprocess the chain tip.
    // NOTE(review): this `state` is default-constructed and never passed to
    // DisconnectBlocksAndReprocess, so IsValid() below is always true — confirm intent.
    CValidationState state;
    {
        LOCK(cs_main);
        DisconnectBlocksAndReprocess(nBlocks);
    }
    if (state.IsValid()) {
        ActivateBestChain(state);
    }
}
// Verifies a spork's signature against the current spork key. When
// fCheckSigner is false and the transition window is still open, a signature
// from the legacy spork key is also accepted.
bool CSporkManager::CheckSignature(CSporkMessage& spork, bool fCheckSigner)
{
    //note: need to investigate why this is failing
    const std::string strMessage = boost::lexical_cast<std::string>(spork.nSporkID) +
                                   boost::lexical_cast<std::string>(spork.nValue) +
                                   boost::lexical_cast<std::string>(spork.nTimeSigned);
    std::string strError = "";
    CPubKey pubkeyNew(ParseHex(Params().SporkKey()));
    const bool fNewKeyValid = obfuScationSigner.VerifyMessage(pubkeyNew, spork.vchSig, strMessage, strError);
    if (fCheckSigner && !fNewKeyValid)
        return false;
    // See if window is open that allows for old spork key to sign messages
    if (!fNewKeyValid && GetAdjustedTime() < Params().RejectOldSporkKey()) {
        CPubKey pubkeyOld(ParseHex(Params().SporkKeyOld()));
        return obfuScationSigner.VerifyMessage(pubkeyOld, spork.vchSig, strMessage, strError);
    }
    return fNewKeyValid;
}
// Signs a spork message with the configured master private key and verifies
// the resulting signature. Returns false (with a log line) on any failure.
// Fixes: the failure log messages were missing their trailing '\n' (LogPrintf
// does not append one, so consecutive log lines were glued together), and the
// log prefix wrongly said "CMasternodePayments::Sign" (copy-paste error).
bool CSporkManager::Sign(CSporkMessage& spork)
{
    // The signed payload is the concatenation of ID, value, and timestamp.
    std::string strMessage = boost::lexical_cast<std::string>(spork.nSporkID) + boost::lexical_cast<std::string>(spork.nValue) + boost::lexical_cast<std::string>(spork.nTimeSigned);
    CKey key2;
    CPubKey pubkey2;
    std::string errorMessage = "";
    if (!obfuScationSigner.SetKey(strMasterPrivKey, errorMessage, key2, pubkey2)) {
        LogPrintf("CSporkManager::Sign - ERROR: Invalid masternodeprivkey: '%s'\n", errorMessage);
        return false;
    }
    if (!obfuScationSigner.SignMessage(strMessage, errorMessage, spork.vchSig, key2)) {
        LogPrintf("CSporkManager::Sign - Sign message failed\n");
        return false;
    }
    // Round-trip check: the fresh signature must verify with our own pubkey.
    if (!obfuScationSigner.VerifyMessage(pubkey2, spork.vchSig, strMessage, errorMessage)) {
        LogPrintf("CSporkManager::Sign - Verify message failed\n");
        return false;
    }
    return true;
}
// Builds a new spork message for (nSporkID, nValue), signs it, and on success
// broadcasts it and records it in the local maps. Returns false if signing fails.
bool CSporkManager::UpdateSpork(int nSporkID, int64_t nValue)
{
    CSporkMessage msg;
    msg.nSporkID = nSporkID;
    msg.nValue = nValue;
    msg.nTimeSigned = GetTime();
    if (!Sign(msg))
        return false;
    Relay(msg);
    mapSporks[msg.GetHash()] = msg;
    mapSporksActive[nSporkID] = msg;
    return true;
}
// Announces the spork message to all peers via its inventory hash.
void CSporkManager::Relay(CSporkMessage& msg)
{
    RelayInv(CInv(MSG_SPORK, msg.GetHash()));
}
// Installs strPrivKey as the spork master key, then proves it works by
// signing a dummy message and verifying it against the official spork pubkey.
bool CSporkManager::SetPrivKey(std::string strPrivKey)
{
    CSporkMessage msg;
    strMasterPrivKey = strPrivKey;
    Sign(msg);
    if (!CheckSignature(msg, true))
        return false;
    LogPrintf("CSporkManager::SetPrivKey - Successfully initialized as spork signer\n");
    return true;
}
// Maps a spork's symbolic name to its numeric ID; -1 for unrecognized names.
int CSporkManager::GetSporkIDByName(std::string strName)
{
    int nID = -1;
    if      (strName == "SPORK_2_HYPERSEND")                      nID = SPORK_2_HYPERSEND;
    else if (strName == "SPORK_3_HYPERSEND_BLOCK_FILTERING")      nID = SPORK_3_HYPERSEND_BLOCK_FILTERING;
    else if (strName == "SPORK_5_MAX_VALUE")                      nID = SPORK_5_MAX_VALUE;
    else if (strName == "SPORK_7_MASTERNODE_SCANNING")            nID = SPORK_7_MASTERNODE_SCANNING;
    else if (strName == "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT") nID = SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT;
    else if (strName == "SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT")  nID = SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT;
    else if (strName == "SPORK_10_MASTERNODE_PAY_UPDATED_NODES")  nID = SPORK_10_MASTERNODE_PAY_UPDATED_NODES;
    else if (strName == "SPORK_13_ENABLE_SUPERBLOCKS")            nID = SPORK_13_ENABLE_SUPERBLOCKS;
    else if (strName == "SPORK_14_NEW_PROTOCOL_ENFORCEMENT")      nID = SPORK_14_NEW_PROTOCOL_ENFORCEMENT;
    else if (strName == "SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2")    nID = SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2;
    else if (strName == "SPORK_16_ZEROCOIN_MAINTENANCE_MODE")     nID = SPORK_16_ZEROCOIN_MAINTENANCE_MODE;
    return nID;
}
// Maps a numeric spork ID to its symbolic name; "Unknown" for unused IDs.
std::string CSporkManager::GetSporkNameByID(int id)
{
    switch (id) {
        case SPORK_2_HYPERSEND:                      return "SPORK_2_HYPERSEND";
        case SPORK_3_HYPERSEND_BLOCK_FILTERING:      return "SPORK_3_HYPERSEND_BLOCK_FILTERING";
        case SPORK_5_MAX_VALUE:                      return "SPORK_5_MAX_VALUE";
        case SPORK_7_MASTERNODE_SCANNING:            return "SPORK_7_MASTERNODE_SCANNING";
        case SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT: return "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT";
        case SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT:  return "SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT";
        case SPORK_10_MASTERNODE_PAY_UPDATED_NODES:  return "SPORK_10_MASTERNODE_PAY_UPDATED_NODES";
        case SPORK_13_ENABLE_SUPERBLOCKS:            return "SPORK_13_ENABLE_SUPERBLOCKS";
        case SPORK_14_NEW_PROTOCOL_ENFORCEMENT:      return "SPORK_14_NEW_PROTOCOL_ENFORCEMENT";
        case SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2:    return "SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2";
        case SPORK_16_ZEROCOIN_MAINTENANCE_MODE:     return "SPORK_16_ZEROCOIN_MAINTENANCE_MODE";
    }
    return "Unknown";
}
|
#include <graphene/chain/wasm_constraints.hpp>
#include <graphene/chain/wasm_validation.hpp>
#include <graphene/chain/wasm_binary_ops.hpp>
#include <fc/exception/exception.hpp>
#include <graphene/chain/exceptions.hpp>
#include "IR/Module.h"
#include "IR/Operators.h"
#include "WASM/WASM.h"
namespace graphene { namespace chain { namespace wasm_validations {
using namespace IR;
// Deliberate no-op validator: used where a validation hook is required
// but no checks need to run.
void noop_validation_visitor::validate( const Module& m ) {
   // just pass
}
// Rejects modules whose declared initial linear-memory size exceeds the
// configured budget. The minimum size is expressed in 64 KiB WASM pages.
void memories_validation_visitor::validate( const Module& m ) {
   const auto max_pages = wasm_constraints::maximum_linear_memory / (64*1024);
   if ( !m.memories.defs.empty() && m.memories.defs[0].type.size.min > max_pages )
      FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract initial memory size must be less than or equal to ${k}KiB",
                         ("k", wasm_constraints::maximum_linear_memory/1024));
}
// Ensures each data segment has a constant i32 base offset and lies entirely
// within the first maximum_linear_memory_init bytes of linear memory.
void data_segments_validation_visitor::validate(const Module& m ) {
   for ( const DataSegment& segment : m.dataSegments ) {
      if ( segment.baseOffset.type != InitializerExpression::Type::i32_const )
         FC_THROW_EXCEPTION( wasm_execution_error, "Smart contract has unexpected memory base offset type" );
      const auto segment_end = static_cast<uint32_t>( segment.baseOffset.i32 ) + segment.data.size();
      if ( segment_end > wasm_constraints::maximum_linear_memory_init )
         FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract data segments must lie in first ${k}KiB",
                            ("k", wasm_constraints::maximum_linear_memory_init/1024));
   }
}
// Rejects modules whose first table declares more elements than allowed.
void tables_validation_visitor::validate( const Module& m ) {
   const bool has_table = !m.tables.defs.empty();
   if ( has_table && m.tables.defs[0].type.size.min > wasm_constraints::maximum_table_elements )
      FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract table limited to ${t} elements",
                         ("t", wasm_constraints::maximum_table_elements));
}
// Sums the byte size of all mutable globals and rejects modules that exceed
// the mutable-globals budget. 64-bit globals count 8 bytes, 32-bit ones 4.
void globals_validation_visitor::validate( const Module& m ) {
   unsigned mutable_globals_total_size = 0;
   for(const GlobalDef& global_def : m.globals.defs) {
      if(!global_def.type.isMutable)
         continue;
      switch(global_def.type.valueType) {
         case ValueType::any:
         case ValueType::num:
            FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract has unexpected global definition value type");
         case ValueType::i64:
         case ValueType::f64:
            mutable_globals_total_size += 4;
            // intentional fallthrough: 64-bit types add 4 bytes here and
            // 4 more below, for 8 bytes total
         case ValueType::i32:
         case ValueType::f32:
            mutable_globals_total_size += 4;
      }
   }
   if(mutable_globals_total_size > wasm_constraints::maximum_mutable_globals)
      FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract has more than ${k} bytes of mutable globals",
                         ("k", wasm_constraints::maximum_mutable_globals));
}
// Bounds each function's stack frame: the combined byte size of its declared
// locals and its parameters must stay within maximum_func_local_bytes.
void maximum_function_stack_visitor::validate( const IR::Module& m ) {
   for(const FunctionDef& func : m.functions.defs) {
      unsigned total_bytes = 0;
      for(const ValueType& local : func.nonParameterLocalTypes)
         total_bytes += getTypeBitWidth(local) / 8;
      for(const ValueType& param : m.types[func.type.index]->parameters)
         total_bytes += getTypeBitWidth(param) / 8;
      if(total_bytes > wasm_constraints::maximum_func_local_bytes)
         FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract function has more than ${k} bytes of stack usage",
                            ("k", wasm_constraints::maximum_func_local_bytes));
   }
}
// Requires the module to export a function named "apply" whose signature is
// (i64, i64, i64) -> void; throws otherwise.
void ensure_apply_exported_visitor::validate( const IR::Module& m ) {
   for(const Export& exprt : m.exports) {
      if(exprt.kind != ObjectKind::function || exprt.name != "apply")
         continue;
      if(m.types[m.functions.getType(exprt.index).index] == FunctionType::get(ResultType::none, {ValueType::i64, ValueType::i64, ValueType::i64}))
         return; // found a correctly-typed apply export
   }
   FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract's apply function not exported; non-existent; or wrong type");
}
// Out-of-line storage for nested_validator's static state: the current
// nesting depth counter and the global switch that disables nesting checks.
uint16_t nested_validator::depth = 0;
bool nested_validator::disabled = false;
}}} // namespace graphene chain validation
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 Bitcoin developers
// Copyright (c) 2014-2015 Dash developers
// Copyright (c) 2015-2017 PIVX developers
// Copyright (c) 2018-2019 Genero developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/genero-config.h"
#endif
#include "net.h"
#include "addrman.h"
#include "chainparams.h"
#include "clientversion.h"
#include "miner.h"
#include "Darksend.h"
#include "primitives/transaction.h"
#include "ui_interface.h"
#include "wallet.h"
#ifdef WIN32
#include <string.h>
#else
#include <fcntl.h>
#endif
#ifdef USE_UPNP
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/miniwget.h>
#include <miniupnpc/upnpcommands.h>
#include <miniupnpc/upnperrors.h>
#endif
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
// Dump addresses to peers.dat every 15 minutes (900s)
#define DUMP_ADDRESSES_INTERVAL 900
#if !defined(HAVE_MSG_NOSIGNAL) && !defined(MSG_NOSIGNAL)
#define MSG_NOSIGNAL 0
#endif
// Fix for ancient MinGW versions, that don't have defined these in ws2tcpip.h.
// Todo: Can be removed when our pull-tester is upgraded to a modern MinGW version.
#ifdef WIN32
#ifndef PROTECTION_LEVEL_UNRESTRICTED
#define PROTECTION_LEVEL_UNRESTRICTED 10
#endif
#ifndef IPV6_PROTECTION_LEVEL
#define IPV6_PROTECTION_LEVEL 23
#endif
#endif
using namespace boost;
using namespace std;
namespace
{
// Hard cap on the number of outbound connections this node will initiate.
const int MAX_OUTBOUND_CONNECTIONS = 16;
// A bound listening socket paired with whether peers accepted on it
// should be treated as whitelisted.
struct ListenSocket {
    SOCKET socket;
    bool whitelisted;
    ListenSocket(SOCKET socket, bool whitelisted) : socket(socket), whitelisted(whitelisted) {}
};
}
//
// Global state variables
//
// Discovery of our own addresses / whether we accept inbound connections.
bool fDiscover = true;
bool fListen = true;
// Service bits we advertise to peers.
uint64_t nLocalServices = NODE_NETWORK;
// Known local addresses and per-network reachability/limited flags,
// all guarded by cs_mapLocalHost.
CCriticalSection cs_mapLocalHost;
map<CNetAddr, LocalServiceInfo> mapLocalHost;
static bool vfReachable[NET_MAX] = {};
static bool vfLimited[NET_MAX] = {};
static CNode* pnodeLocalHost = NULL;
// Nonce placed in our "version" message to detect self-connections.
uint64_t nLocalHostNonce = 0;
static std::vector<ListenSocket> vhListenSocket;
CAddrMan addrman;
int nMaxConnections = DEFAULT_MAX_PEER_CONNECTIONS;
bool fAddressesInitialized = false;
// Connected peers, guarded by cs_vNodes.
vector<CNode*> vNodes;
CCriticalSection cs_vNodes;
// Inventory relay memory with its expiration queue, guarded by cs_mapRelay.
map<CInv, CDataStream> mapRelay;
deque<pair<int64_t, CInv> > vRelayExpiration;
CCriticalSection cs_mapRelay;
limitedmap<CInv, int64_t> mapAlreadyAskedFor(MAX_INV_SZ);
// One-shot connection targets, guarded by cs_vOneShots.
static deque<string> vOneShots;
CCriticalSection cs_vOneShots;
set<CNetAddr> setservAddNodeAddresses;
CCriticalSection cs_setservAddNodeAddresses;
// -addnode entries, guarded by cs_vAddedNodes.
vector<std::string> vAddedNodes;
CCriticalSection cs_vAddedNodes;
// Monotonically increasing peer id, guarded by cs_nLastNodeId.
NodeId nLastNodeId = 0;
CCriticalSection cs_nLastNodeId;
// Limits simultaneous outbound connection attempts.
static CSemaphore* semOutbound = NULL;
// Wakes the message-handler thread when a complete message arrives.
boost::condition_variable messageHandlerCondition;
// Signals for message handling
static CNodeSignals g_signals;
CNodeSignals& GetNodeSignals() { return g_signals; }
// Queue a destination for a single connection attempt by the
// connection-opening thread.
void AddOneShot(string strDest)
{
    LOCK(cs_vOneShots);
    vOneShots.push_back(strDest);
}
// The TCP port we listen on: the -port override if given, otherwise the
// active chain's default port.
unsigned short GetListenPort()
{
    const int64_t nPort = GetArg("-port", Params().GetDefaultPort());
    return (unsigned short)nPort;
}
// find 'best' local address for a particular peer
// find 'best' local address for a particular peer
// Picks the local address with the best (reachability, score) ordering as
// seen from paddrPeer; returns false when listening is off or none is known.
bool GetLocal(CService& addr, const CNetAddr* paddrPeer)
{
    if (!fListen)
        return false;
    int nBestScore = -1;
    int nBestReachability = -1;
    {
        LOCK(cs_mapLocalHost);
        for (map<CNetAddr, LocalServiceInfo>::iterator it = mapLocalHost.begin(); it != mapLocalHost.end(); ++it) {
            const int nScore = it->second.nScore;
            const int nReachability = it->first.GetReachabilityFrom(paddrPeer);
            const bool fBetter = (nReachability > nBestReachability) ||
                                 (nReachability == nBestReachability && nScore > nBestScore);
            if (fBetter) {
                addr = CService(it->first, it->second.nPort);
                nBestReachability = nReachability;
                nBestScore = nScore;
            }
        }
    }
    return nBestScore >= 0;
}
// get best local address for a particular peer as a CAddress
// Otherwise, return the unroutable 0.0.0.0 but filled in with
// the normal parameters, since the IP may be changed to a useful
// one by discovery.
// Build the CAddress we advertise to paddrPeer: our best local address if one
// is known, otherwise an unroutable 0.0.0.0 placeholder carrying our service
// bits and current adjusted time.
CAddress GetLocalAddress(const CNetAddr* paddrPeer)
{
    CAddress ret(CService("0.0.0.0", GetListenPort()), 0);
    CService addr;
    if (GetLocal(addr, paddrPeer)) {
        ret = CAddress(addr);
    }
    ret.nServices = nLocalServices;
    ret.nTime = GetAdjustedTime();
    return ret;
}
// Read one CR-terminated line from hSocket into strLine (one byte at a time).
// Returns true when a line (or a non-empty partial line at EOF/error, or a
// 9000-byte oversized line) was collected; false on EOF/error with no data.
bool RecvLine(SOCKET hSocket, string& strLine)
{
    strLine = "";
    while (true) {
        char c;
        int nBytes = recv(hSocket, &c, 1, 0);
        if (nBytes > 0) {
            if (c == '\n')       // LF is skipped; CR ends the line
                continue;
            if (c == '\r')
                return true;
            strLine += c;
            if (strLine.size() >= 9000)  // cap line length defensively
                return true;
        } else if (nBytes <= 0) {
            boost::this_thread::interruption_point();
            if (nBytes < 0) {
                int nErr = WSAGetLastError();
                if (nErr == WSAEMSGSIZE)
                    continue;
                // Transient conditions: back off briefly and retry.
                if (nErr == WSAEWOULDBLOCK || nErr == WSAEINTR || nErr == WSAEINPROGRESS) {
                    MilliSleep(10);
                    continue;
                }
            }
            // Connection ended: deliver whatever partial line we have.
            if (!strLine.empty())
                return true;
            if (nBytes == 0) {
                // socket closed
                LogPrint("net", "socket closed\n");
                return false;
            } else {
                // socket error
                int nErr = WSAGetLastError();
                LogPrint("net", "recv failed: %s\n", NetworkErrorString(nErr));
                return false;
            }
        }
    }
}
int GetnScore(const CService& addr)
{
LOCK(cs_mapLocalHost);
if (mapLocalHost.count(addr) == LOCAL_NONE)
return 0;
return mapLocalHost[addr].nScore;
}
// Is our peer's addrLocal potentially useful as an external IP source?
// Is our peer's addrLocal potentially useful as an external IP source?
// True only when discovery is on, both the peer and its view of us are
// routable, and that network is not limited.
bool IsPeerAddrLocalGood(CNode* pnode)
{
    return fDiscover && pnode->addr.IsRoutable() && pnode->addrLocal.IsRoutable() &&
           !IsLimited(pnode->addrLocal.GetNetwork());
}
// pushes our own address to a peer
// pushes our own address to a peer
// Occasionally substitutes the address the peer sees us as (addrLocal) for
// our own guess, since the peer may have a better view of our external IP.
void AdvertizeLocal(CNode* pnode)
{
    if (fListen && pnode->fSuccessfullyConnected) {
        CAddress addrLocal = GetLocalAddress(&pnode->addr);
        // If discovery is enabled, sometimes give our peer the address it
        // tells us that it sees us as in case it has a better idea of our
        // address than we do.
        if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() ||
                                              GetRand((GetnScore(addrLocal) > LOCAL_MANUAL) ? 8 : 2) == 0)) {
            addrLocal.SetIP(pnode->addrLocal);
        }
        if (addrLocal.IsRoutable()) {
            pnode->PushAddress(addrLocal);
        }
    }
}
// Record whether a network class is reachable. Reaching IPv6 implies IPv4
// is reachable as well (dual-stack assumption).
void SetReachable(enum Network net, bool fFlag)
{
    LOCK(cs_mapLocalHost);
    vfReachable[net] = fFlag;
    if (net == NET_IPV6 && fFlag)
        vfReachable[NET_IPV4] = true;
}
// learn a new local address
// learn a new local address
// Records addr as one of our own addresses with the given score; repeated
// sightings of an already-known address bump its score by one. Rejects
// unroutable/limited addresses, and discovered (non-manual) ones when
// discovery is disabled.
bool AddLocal(const CService& addr, int nScore)
{
    if (!addr.IsRoutable())
        return false;
    if (!fDiscover && nScore < LOCAL_MANUAL)
        return false;
    if (IsLimited(addr))
        return false;
    LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);
    {
        LOCK(cs_mapLocalHost);
        bool fAlready = mapLocalHost.count(addr) > 0;
        LocalServiceInfo& info = mapLocalHost[addr];
        if (!fAlready || nScore >= info.nScore) {
            // +1 keeps a re-confirmed address ahead of a fresh one at equal score
            info.nScore = nScore + (fAlready ? 1 : 0);
            info.nPort = addr.GetPort();
        }
        SetReachable(addr.GetNetwork());
    }
    return true;
}
// Convenience overload: pair the bare address with our listen port, then record it.
bool AddLocal(const CNetAddr& addr, int nScore)
{
    return AddLocal(CService(addr, GetListenPort()), nScore);
}
/** Make a particular network entirely off-limits (no automatic connects to it) */
void SetLimited(enum Network net, bool fLimited)
{
if (net == NET_UNROUTABLE)
return;
LOCK(cs_mapLocalHost);
vfLimited[net] = fLimited;
}
bool IsLimited(enum Network net)
{
LOCK(cs_mapLocalHost);
return vfLimited[net];
}
// Convenience overload: limited-ness of the network class addr belongs to.
bool IsLimited(const CNetAddr& addr)
{
    return IsLimited(addr.GetNetwork());
}
/** vote for a local address */
bool SeenLocal(const CService& addr)
{
{
LOCK(cs_mapLocalHost);
if (mapLocalHost.count(addr) == 0)
return false;
mapLocalHost[addr].nScore++;
}
return true;
}
/** check whether a given address is potentially local */
bool IsLocal(const CService& addr)
{
LOCK(cs_mapLocalHost);
return mapLocalHost.count(addr) > 0;
}
/** check whether a given network is one we can probably connect to */
bool IsReachable(enum Network net)
{
LOCK(cs_mapLocalHost);
return vfReachable[net] && !vfLimited[net];
}
/** check whether a given address is in a network we can probably connect to */
bool IsReachable(const CNetAddr& addr)
{
enum Network net = addr.GetNetwork();
return IsReachable(net);
}
// Inform the address manager that we are currently connected to addr,
// refreshing its last-seen bookkeeping.
void AddressCurrentlyConnected(const CService& addr)
{
    addrman.Connected(addr);
}
// Process-wide traffic counters and the locks guarding them.
uint64_t CNode::nTotalBytesRecv = 0;
uint64_t CNode::nTotalBytesSent = 0;
CCriticalSection CNode::cs_totalBytesRecv;
CCriticalSection CNode::cs_totalBytesSent;
// Find a connected peer by bare IP (port ignored); NULL if none.
CNode* FindNode(const CNetAddr& ip)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes)
        if ((CNetAddr)pnode->addr == ip)
            return (pnode);
    return NULL;
}
// Find a connected peer by its recorded address name (e.g. -connect target); NULL if none.
CNode* FindNode(const std::string& addrName)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes)
        if (pnode->addrName == addrName)
            return (pnode);
    return NULL;
}
// Find a connected peer by full address (IP + port). On regtest all peers
// share one IP, so only the IP part is compared there. NULL if none.
CNode* FindNode(const CService& addr)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if (Params().NetworkID() == CBaseChainParams::REGTEST) {
            //if using regtest, just check the IP
            if ((CNetAddr)pnode->addr == (CNetAddr)addr)
                return (pnode);
        } else {
            if (pnode->addr == addr)
                return (pnode);
        }
    }
    return NULL;
}
// Establish (or reuse) a connection to addrConnect / pszDest.
// Returns a referenced CNode* on success, NULL on failure. When no pszDest
// is given, an existing connection to the same address is reused instead of
// opening a duplicate. DarKsendMaster marks the peer as a mixing master and
// permits connecting to a local address.
CNode* ConnectNode(CAddress addrConnect, const char* pszDest, bool DarKsendMaster)
{
    if (pszDest == NULL) {
        // we clean masternode connections in CMasternodeMan::ProcessMasternodeConnections()
        // so should be safe to skip this and connect to local Hot MN on CActiveMasternode::ManageStatus()
        if (IsLocal(addrConnect) && !DarKsendMaster)
            return NULL;
        // Look for an existing connection
        CNode* pnode = FindNode((CService)addrConnect);
        if (pnode) {
            pnode->fDarKsendMaster = DarKsendMaster;
            pnode->AddRef();
            return pnode;
        }
    }
    /// debug print
    LogPrint("net", "trying connection %s lastseen=%.1fhrs\n",
        pszDest ? pszDest : addrConnect.ToString(),
        pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0);
    // Connect
    SOCKET hSocket;
    bool proxyConnectionFailed = false;
    if (pszDest ? ConnectSocketByName(addrConnect, hSocket, pszDest, Params().GetDefaultPort(), nConnectTimeout, &proxyConnectionFailed) :
                  ConnectSocket(addrConnect, hSocket, nConnectTimeout, &proxyConnectionFailed)) {
        // Sockets beyond FD_SETSIZE cannot be handled by our select() loop.
        if (!IsSelectableSocket(hSocket)) {
            LogPrintf("Cannot create connection: non-selectable socket created (fd >= FD_SETSIZE ?)\n");
            CloseSocket(hSocket);
            return NULL;
        }
        addrman.Attempt(addrConnect);
        // Add node
        CNode* pnode = new CNode(hSocket, addrConnect, pszDest ? pszDest : "", false);
        pnode->AddRef();
        {
            LOCK(cs_vNodes);
            vNodes.push_back(pnode);
        }
        pnode->nTimeConnected = GetTime();
        if (DarKsendMaster) pnode->fDarKsendMaster = true;
        return pnode;
    } else if (!proxyConnectionFailed) {
        // If connecting to the node failed, and failure is not caused by a problem connecting to
        // the proxy, mark this as an attempt.
        addrman.Attempt(addrConnect);
    }
    return NULL;
}
// Flag this peer for disconnection and close its socket; drains the receive
// buffer if it can be locked without blocking.
void CNode::CloseSocketDisconnect()
{
    fDisconnect = true;
    if (hSocket != INVALID_SOCKET) {
        LogPrint("net", "disconnecting peer=%d\n", id);
        CloseSocket(hSocket);
    }
    // in case this fails, we'll empty the recv buffer when the CNode is deleted
    TRY_LOCK(cs_vRecvMsg, lockRecv);
    if (lockRecv)
        vRecvMsg.clear();
}
// If the peer's protocol version is below nVersionRequired, send a reject
// for strLastCommand and flag it for disconnection. Returns the resulting
// fDisconnect state (true = will be disconnected).
bool CNode::DisconnectOldProtocol(int nVersionRequired, string strLastCommand)
{
    fDisconnect = false;
    if (nVersion < nVersionRequired) {
        LogPrintf("%s : peer=%d using obsolete version %i; disconnecting\n", __func__, id, nVersion);
        PushMessage("reject", strLastCommand, REJECT_OBSOLETE, strprintf("Version must be %d or greater", ActiveProtocol()));
        fDisconnect = true;
    }
    return fDisconnect;
}
// Send our "version" handshake message to this peer, including our best
// height, advertised addresses, and a fresh anti-self-connection nonce.
void CNode::PushVersion()
{
    int nBestHeight = g_signals.GetHeight().get_value_or(0);
    /// when NTP implemented, change to just nTime = GetAdjustedTime()
    int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime());
    // Only advertise the peer's address back when it is routable and not a proxy.
    CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0", 0)));
    CAddress addrMe = GetLocalAddress(&addr);
    GetRandBytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
    if (fLogIPs)
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), addrYou.ToString(), id);
    else
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), id);
    PushMessage("version", PROTOCOL_VERSION, nLocalServices, nTime, addrYou, addrMe,
        nLocalHostNonce, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<string>()), nBestHeight, true);
}
// Ban list (address -> ban expiry time) and the lock guarding it.
std::map<CNetAddr, int64_t> CNode::setBanned;
CCriticalSection CNode::cs_setBanned;
void CNode::ClearBanned()
{
setBanned.clear();
}
// True while ip has an unexpired entry in the ban list.
bool CNode::IsBanned(CNetAddr ip)
{
    LOCK(cs_setBanned);
    std::map<CNetAddr, int64_t>::iterator it = setBanned.find(ip);
    if (it == setBanned.end())
        return false;
    const int64_t nBanUntil = it->second;
    return GetTime() < nBanUntil;
}
// Ban addr until now + -bantime seconds (default 24h); an existing later
// expiry is never shortened. Always returns true.
bool CNode::Ban(const CNetAddr& addr)
{
    const int64_t banTime = GetTime() + GetArg("-bantime", 60 * 60 * 24); // Default 24-hour ban
    LOCK(cs_setBanned);
    int64_t& storedTime = setBanned[addr];
    if (storedTime < banTime)
        storedTime = banTime;
    return true;
}
// Subnets whose peers are whitelisted, and the lock guarding the list.
std::vector<CSubNet> CNode::vWhitelistedRange;
CCriticalSection CNode::cs_vWhitelistedRange;
// True when addr falls inside any -whitelist subnet.
bool CNode::IsWhitelistedRange(const CNetAddr& addr)
{
    LOCK(cs_vWhitelistedRange);
    BOOST_FOREACH (const CSubNet& subnet, vWhitelistedRange) {
        if (subnet.Match(addr))
            return true;
    }
    return false;
}
// Register an additional whitelisted subnet (from -whitelist).
void CNode::AddWhitelistedRange(const CSubNet& subnet)
{
    LOCK(cs_vWhitelistedRange);
    vWhitelistedRange.push_back(subnet);
}
#undef X
#define X(name) stats.name = name
// Snapshot this peer's state into a CNodeStats record (for RPC/UI display).
// The X(name) macro copies the member `name` into `stats.name` verbatim.
void CNode::copyStats(CNodeStats& stats)
{
    stats.nodeid = this->GetId();
    X(nServices);
    X(nLastSend);
    X(nLastRecv);
    X(nTimeConnected);
    X(addrName);
    X(nVersion);
    X(cleanSubVer);
    X(fInbound);
    X(nStartingHeight);
    X(nSendBytes);
    X(nRecvBytes);
    X(fWhitelisted);
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    int64_t nPingUsecWait = 0;
    if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) {
        nPingUsecWait = GetTimeMicros() - nPingUsecStart;
    }
    // Raw ping time is in microseconds, but show it to user as whole seconds (GNRO users should be well used to small numbers with many decimal places by now :)
    stats.dPingTime = (((double)nPingUsecTime) / 1e6);
    stats.dPingWait = (((double)nPingUsecWait) / 1e6);
    // Leave string empty if addrLocal invalid (not filled in yet)
    stats.addrLocal = addrLocal.IsValid() ? addrLocal.ToString() : "";
}
#undef X
// requires LOCK(cs_vRecvMsg)
// requires LOCK(cs_vRecvMsg)
// Feed nBytes of raw network data into the per-peer message queue, splitting
// it into CNetMessage header/payload pieces. Returns false on a malformed
// header or an oversized message (caller disconnects the peer).
bool CNode::ReceiveMsgBytes(const char* pch, unsigned int nBytes)
{
    while (nBytes > 0) {
        // get current incomplete message, or create a new one
        if (vRecvMsg.empty() ||
            vRecvMsg.back().complete())
            vRecvMsg.push_back(CNetMessage(SER_NETWORK, nRecvVersion));
        CNetMessage& msg = vRecvMsg.back();
        // absorb network data
        int handled;
        if (!msg.in_data)
            handled = msg.readHeader(pch, nBytes);
        else
            handled = msg.readData(pch, nBytes);
        if (handled < 0)
            return false;
        if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) {
            LogPrint("net", "Oversized message from peer=%i, disconnecting", GetId());
            return false;
        }
        pch += handled;
        nBytes -= handled;
        if (msg.complete()) {
            // Stamp arrival time and wake the message-handler thread.
            msg.nTime = GetTimeMicros();
            messageHandlerCondition.notify_one();
        }
    }
    return true;
}
// Accumulate bytes into the 24-byte message header buffer. Returns the number
// of input bytes consumed, or -1 on a corrupt/oversized header.
// (24 is the serialized wire size of the message header — presumably
// CMessageHeader; confirm against protocol.h.)
int CNetMessage::readHeader(const char* pch, unsigned int nBytes)
{
    // copy data to temporary parsing buffer
    unsigned int nRemaining = 24 - nHdrPos;
    unsigned int nCopy = std::min(nRemaining, nBytes);
    memcpy(&hdrbuf[nHdrPos], pch, nCopy);
    nHdrPos += nCopy;
    // if header incomplete, exit
    if (nHdrPos < 24)
        return nCopy;
    // deserialize to CMessageHeader
    try {
        hdrbuf >> hdr;
    } catch (const std::exception&) {
        return -1;
    }
    // reject messages larger than MAX_SIZE
    if (hdr.nMessageSize > MAX_SIZE)
        return -1;
    // switch state to reading message data
    in_data = true;
    return nCopy;
}
// Append payload bytes to the message body, growing the buffer in 256 KiB
// steps (capped at the declared message size). Returns bytes consumed.
int CNetMessage::readData(const char* pch, unsigned int nBytes)
{
    unsigned int nRemaining = hdr.nMessageSize - nDataPos;
    unsigned int nCopy = std::min(nRemaining, nBytes);
    if (vRecv.size() < nDataPos + nCopy) {
        // Allocate up to 256 KiB ahead, but never more than the total message size.
        vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
    }
    memcpy(&vRecv[nDataPos], pch, nCopy);
    nDataPos += nCopy;
    return nCopy;
}
// requires LOCK(cs_vSend)
void SocketSendData(CNode* pnode)
{
std::deque<CSerializeData>::iterator it = pnode->vSendMsg.begin();
while (it != pnode->vSendMsg.end()) {
const CSerializeData& data = *it;
assert(data.size() > pnode->nSendOffset);
int nBytes = send(pnode->hSocket, &data[pnode->nSendOffset], data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
if (nBytes > 0) {
pnode->nLastSend = GetTime();
pnode->nSendBytes += nBytes;
pnode->nSendOffset += nBytes;
pnode->RecordBytesSent(nBytes);
if (pnode->nSendOffset == data.size()) {
pnode->nSendOffset = 0;
pnode->nSendSize -= data.size();
it++;
} else {
// could not send full message; stop sending more
break;
}
} else {
if (nBytes < 0) {
// error
int nErr = WSAGetLastError();
if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
LogPrintf("socket send error %s\n", NetworkErrorString(nErr));
pnode->CloseSocketDisconnect();
}
}
// couldn't send anything at all
break;
}
}
if (it == pnode->vSendMsg.end()) {
assert(pnode->nSendOffset == 0);
assert(pnode->nSendSize == 0);
}
pnode->vSendMsg.erase(pnode->vSendMsg.begin(), it);
}
static list<CNode*> vNodesDisconnected;
// Main network I/O loop. Each iteration: reap disconnected peers, select()
// over every socket (listen + peer), accept inbound connections, then service
// each peer's receive/send buffers and run inactivity checks.
void ThreadSocketHandler()
{
    unsigned int nPrevNodeCount = 0;
    while (true) {
        //
        // Disconnect nodes
        //
        {
            LOCK(cs_vNodes);
            // Disconnect unused nodes
            vector<CNode*> vNodesCopy = vNodes;
            BOOST_FOREACH (CNode* pnode, vNodesCopy) {
                if (pnode->fDisconnect ||
                    (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0 && pnode->ssSend.empty())) {
                    // remove from vNodes
                    vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end());
                    // release outbound grant (if any)
                    pnode->grantOutbound.Release();
                    // close socket and cleanup
                    pnode->CloseSocketDisconnect();
                    // hold in disconnected pool until all refs are released
                    if (pnode->fNetworkNode || pnode->fInbound)
                        pnode->Release();
                    vNodesDisconnected.push_back(pnode);
                }
            }
        }
        {
            // Delete disconnected nodes
            list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
            BOOST_FOREACH (CNode* pnode, vNodesDisconnectedCopy) {
                // wait until threads are done using it
                if (pnode->GetRefCount() <= 0) {
                    bool fDelete = false;
                    {
                        // only delete when no other thread holds any of the
                        // node's locks (try-locks all fail-fast)
                        TRY_LOCK(pnode->cs_vSend, lockSend);
                        if (lockSend) {
                            TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                            if (lockRecv) {
                                TRY_LOCK(pnode->cs_inventory, lockInv);
                                if (lockInv)
                                    fDelete = true;
                            }
                        }
                    }
                    if (fDelete) {
                        vNodesDisconnected.remove(pnode);
                        delete pnode;
                    }
                }
            }
        }
        if (vNodes.size() != nPrevNodeCount) {
            // notify the UI only when the connection count actually changed
            nPrevNodeCount = vNodes.size();
            uiInterface.NotifyNumConnectionsChanged(nPrevNodeCount);
        }
        //
        // Find which sockets have data to receive
        //
        struct timeval timeout;
        timeout.tv_sec = 0;
        timeout.tv_usec = 50000; // frequency to poll pnode->vSend
        fd_set fdsetRecv;
        fd_set fdsetSend;
        fd_set fdsetError;
        FD_ZERO(&fdsetRecv);
        FD_ZERO(&fdsetSend);
        FD_ZERO(&fdsetError);
        SOCKET hSocketMax = 0;
        bool have_fds = false;
        BOOST_FOREACH (const ListenSocket& hListenSocket, vhListenSocket) {
            FD_SET(hListenSocket.socket, &fdsetRecv);
            hSocketMax = max(hSocketMax, hListenSocket.socket);
            have_fds = true;
        }
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodes) {
                if (pnode->hSocket == INVALID_SOCKET)
                    continue;
                FD_SET(pnode->hSocket, &fdsetError);
                hSocketMax = max(hSocketMax, pnode->hSocket);
                have_fds = true;
                // Implement the following logic:
                // * If there is data to send, select() for sending data. As this only
                //   happens when optimistic write failed, we choose to first drain the
                //   write buffer in this case before receiving more. This avoids
                //   needlessly queueing received data, if the remote peer is not themselves
                //   receiving data. This means properly utilizing TCP flow control signalling.
                // * Otherwise, if there is no (complete) message in the receive buffer,
                //   or there is space left in the buffer, select() for receiving data.
                // * (if neither of the above applies, there is certainly one message
                //   in the receiver buffer ready to be processed).
                // Together, that means that at least one of the following is always possible,
                // so we don't deadlock:
                // * We send some data.
                // * We wait for data to be received (and disconnect after timeout).
                // * We process a message in the buffer (message handler thread).
                {
                    TRY_LOCK(pnode->cs_vSend, lockSend);
                    if (lockSend && !pnode->vSendMsg.empty()) {
                        FD_SET(pnode->hSocket, &fdsetSend);
                        continue;
                    }
                }
                {
                    TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                    if (lockRecv && (pnode->vRecvMsg.empty() || !pnode->vRecvMsg.front().complete() ||
                                        pnode->GetTotalRecvSize() <= ReceiveFloodSize()))
                        FD_SET(pnode->hSocket, &fdsetRecv);
                }
            }
        }
        int nSelect = select(have_fds ? hSocketMax + 1 : 0,
            &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
        boost::this_thread::interruption_point();
        if (nSelect == SOCKET_ERROR) {
            if (have_fds) {
                // on select() failure, pretend everything is readable so the
                // per-socket recv() below surfaces the real error
                int nErr = WSAGetLastError();
                LogPrintf("socket select error %s\n", NetworkErrorString(nErr));
                for (unsigned int i = 0; i <= hSocketMax; i++)
                    FD_SET(i, &fdsetRecv);
            }
            FD_ZERO(&fdsetSend);
            FD_ZERO(&fdsetError);
            MilliSleep(timeout.tv_usec / 1000);
        }
        //
        // Accept new connections
        //
        BOOST_FOREACH (const ListenSocket& hListenSocket, vhListenSocket) {
            if (hListenSocket.socket != INVALID_SOCKET && FD_ISSET(hListenSocket.socket, &fdsetRecv)) {
                struct sockaddr_storage sockaddr;
                socklen_t len = sizeof(sockaddr);
                SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr*)&sockaddr, &len);
                CAddress addr;
                int nInbound = 0;
                if (hSocket != INVALID_SOCKET)
                    if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr))
                        LogPrintf("Warning: Unknown socket family\n");
                bool whitelisted = hListenSocket.whitelisted || CNode::IsWhitelistedRange(addr);
                {
                    LOCK(cs_vNodes);
                    BOOST_FOREACH (CNode* pnode, vNodes)
                        if (pnode->fInbound)
                            nInbound++;
                }
                // reject the connection if it is unusable, over the inbound
                // limit, or from a banned (and not whitelisted) address
                if (hSocket == INVALID_SOCKET) {
                    int nErr = WSAGetLastError();
                    if (nErr != WSAEWOULDBLOCK)
                        LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr));
                } else if (!IsSelectableSocket(hSocket)) {
                    LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString());
                    CloseSocket(hSocket);
                } else if (nInbound >= nMaxConnections - MAX_OUTBOUND_CONNECTIONS) {
                    LogPrint("net", "connection from %s dropped (full)\n", addr.ToString());
                    CloseSocket(hSocket);
                } else if (CNode::IsBanned(addr) && !whitelisted) {
                    LogPrintf("connection from %s dropped (banned)\n", addr.ToString());
                    CloseSocket(hSocket);
                } else {
                    CNode* pnode = new CNode(hSocket, addr, "", true);
                    pnode->AddRef();
                    pnode->fWhitelisted = whitelisted;
                    {
                        LOCK(cs_vNodes);
                        vNodes.push_back(pnode);
                    }
                }
            }
        }
        //
        // Service each socket
        //
        // snapshot + AddRef so nodes stay alive while we work without
        // holding cs_vNodes
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH (CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }
        BOOST_FOREACH (CNode* pnode, vNodesCopy) {
            boost::this_thread::interruption_point();
            //
            // Receive
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetRecv) || FD_ISSET(pnode->hSocket, &fdsetError)) {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv) {
                    {
                        // typical socket buffer is 8K-64K
                        char pchBuf[0x10000];
                        int nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
                        if (nBytes > 0) {
                            if (!pnode->ReceiveMsgBytes(pchBuf, nBytes))
                                pnode->CloseSocketDisconnect();
                            pnode->nLastRecv = GetTime();
                            pnode->nRecvBytes += nBytes;
                            pnode->RecordBytesRecv(nBytes);
                        } else if (nBytes == 0) {
                            // socket closed gracefully
                            if (!pnode->fDisconnect)
                                LogPrint("net", "socket closed\n");
                            pnode->CloseSocketDisconnect();
                        } else if (nBytes < 0) {
                            // error
                            int nErr = WSAGetLastError();
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
                                if (!pnode->fDisconnect)
                                    LogPrintf("socket recv error %s\n", NetworkErrorString(nErr));
                                pnode->CloseSocketDisconnect();
                            }
                        }
                    }
                }
            }
            //
            // Send
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetSend)) {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    SocketSendData(pnode);
            }
            //
            // Inactivity checking
            //
            int64_t nTime = GetTime();
            if (nTime - pnode->nTimeConnected > 60) {
                if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) {
                    LogPrint("net", "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
                    pnode->fDisconnect = true;
                } else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) {
                    LogPrintf("socket sending timeout: %is\n", nTime - pnode->nLastSend);
                    pnode->fDisconnect = true;
                } else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90 * 60)) {
                    LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv);
                    pnode->fDisconnect = true;
                } else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) {
                    LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart));
                    pnode->fDisconnect = true;
                }
            }
        }
        {
            // release the references taken before servicing
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodesCopy)
                pnode->Release();
        }
    }
}
#ifdef USE_UPNP
void ThreadMapPort()
{
std::string port = strprintf("%u", GetListenPort());
const char* multicastif = 0;
const char* minissdpdpath = 0;
struct UPNPDev* devlist = 0;
char lanaddr[64];
#ifndef UPNPDISCOVER_SUCCESS
/* miniupnpc 1.5 */
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0);
#elif MINIUPNPC_API_VERSION < 14
/* miniupnpc 1.6 */
int error = 0;
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
#else
/* miniupnpc 1.9.20150730 */
int error = 0;
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error);
#endif
struct UPNPUrls urls;
struct IGDdatas data;
int r;
r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr));
if (r == 1) {
if (fDiscover) {
char externalIPAddress[40];
r = UPNP_GetExternalIPAddress(urls.controlURL, data.first.servicetype, externalIPAddress);
if (r != UPNPCOMMAND_SUCCESS)
LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r);
else {
if (externalIPAddress[0]) {
LogPrintf("UPnP: ExternalIPAddress = %s\n", externalIPAddress);
AddLocal(CNetAddr(externalIPAddress), LOCAL_UPNP);
} else
LogPrintf("UPnP: GetExternalIPAddress failed.\n");
}
}
string strDesc = "GNRO " + FormatFullVersion();
try {
while (true) {
#ifndef UPNPDISCOVER_SUCCESS
/* miniupnpc 1.5 */
r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0);
#else
/* miniupnpc 1.6 */
r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0");
#endif
if (r != UPNPCOMMAND_SUCCESS)
LogPrintf("AddPortMapping(%s, %s, %s) failed with code %d (%s)\n",
port, port, lanaddr, r, strupnperror(r));
else
LogPrintf("UPnP Port Mapping successful.\n");
;
MilliSleep(20 * 60 * 1000); // Refresh every 20 minutes
}
} catch (boost::thread_interrupted) {
r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0);
LogPrintf("UPNP_DeletePortMapping() returned : %d\n", r);
freeUPNPDevlist(devlist);
devlist = 0;
FreeUPNPUrls(&urls);
throw;
}
} else {
LogPrintf("No valid UPnP IGDs found\n");
freeUPNPDevlist(devlist);
devlist = 0;
if (r != 0)
FreeUPNPUrls(&urls);
}
}
// Start or stop the UPnP port-mapping thread. At most one thread exists at a
// time; enabling while one is already running restarts it, disabling
// interrupts and joins the running thread.
void MapPort(bool fUseUPnP)
{
    static boost::thread* upnp_thread = NULL;
    if (fUseUPnP) {
        if (upnp_thread) {
            // restart: stop the old thread first
            upnp_thread->interrupt();
            upnp_thread->join();
            delete upnp_thread;
        }
        upnp_thread = new boost::thread(boost::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort));
    } else if (upnp_thread) {
        upnp_thread->interrupt();
        upnp_thread->join();
        delete upnp_thread;
        upnp_thread = NULL;
    }
}
#else
void MapPort(bool)
{
    // Intentionally left blank: built without UPnP support (USE_UPNP not defined).
}
#endif
// Query the chain's DNS seeds to bootstrap the address manager. Runs once;
// skipped when we already have addresses and peers unless -forcednsseed is set.
void ThreadDNSAddressSeed()
{
    // goal: only query DNS seeds if address need is acute
    if ((addrman.size() > 0) &&
        (!GetBoolArg("-forcednsseed", false))) {
        // give connections a moment to come up before deciding
        MilliSleep(11 * 1000);
        LOCK(cs_vNodes);
        if (vNodes.size() >= 2) {
            LogPrintf("P2P peers available. Skipped DNS seeding.\n");
            return;
        }
    }
    const vector<CDNSSeedData>& vSeeds = Params().DNSSeeds();
    int found = 0;
    LogPrintf("Loading addresses from DNS seeds (could take a while)\n");
    BOOST_FOREACH (const CDNSSeedData& seed, vSeeds) {
        if (HaveNameProxy()) {
            // with a name proxy we cannot resolve locally; connect by name later
            AddOneShot(seed.host);
        } else {
            vector<CNetAddr> vIPs;
            vector<CAddress> vAdd;
            if (LookupHost(seed.host.c_str(), vIPs)) {
                BOOST_FOREACH (CNetAddr& ip, vIPs) {
                    int nOneDay = 24 * 3600;
                    CAddress addr = CAddress(CService(ip, Params().GetDefaultPort()));
                    addr.nTime = GetTime() - 3 * nOneDay - GetRand(4 * nOneDay); // use a random age between 3 and 7 days old
                    vAdd.push_back(addr);
                    found++;
                }
            }
            // attribute the new addresses to the seed they came from
            addrman.Add(vAdd, CNetAddr(seed.name, true));
        }
    }
    LogPrintf("%d addresses found from DNS seeds\n", found);
}
void DumpAddresses()
{
int64_t nStart = GetTimeMillis();
CAddrDB adb;
adb.Write(addrman);
LogPrint("net", "Flushed %d addresses to peers.dat %dms\n",
addrman.size(), GetTimeMillis() - nStart);
}
// Pop one pending "one-shot" destination (added via AddOneShot) and try to
// connect to it; on failure the destination is re-queued for a later attempt.
void static ProcessOneShot()
{
    string strDest;
    {
        // copy the destination out, then drop the lock before networking
        LOCK(cs_vOneShots);
        if (vOneShots.empty())
            return;
        strDest = vOneShots.front();
        vOneShots.pop_front();
    }
    CAddress addr;
    // fTry=true: do not block if no outbound slot is free
    CSemaphoreGrant grant(*semOutbound, true);
    if (grant) {
        if (!OpenNetworkConnection(addr, &grant, strDest.c_str(), true))
            AddOneShot(strDest);
    }
}
// Outbound connection maintenance loop. With -connect it only connects to the
// configured addresses; otherwise it repeatedly picks candidates from addrman
// (one peer per network group) and opens connections up to the outbound limit.
void ThreadOpenConnections()
{
    // Connect to specific addresses
    if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0) {
        // this branch never returns: keep retrying the -connect list forever
        for (int64_t nLoop = 0;; nLoop++) {
            ProcessOneShot();
            BOOST_FOREACH (string strAddr, mapMultiArgs["-connect"]) {
                CAddress addr;
                OpenNetworkConnection(addr, NULL, strAddr.c_str());
                // back off progressively (up to 5s) after repeated passes
                for (int i = 0; i < 10 && i < nLoop; i++) {
                    MilliSleep(500);
                }
            }
            MilliSleep(500);
        }
    }
    // Initiate network connections
    int64_t nStart = GetTime();
    while (true) {
        ProcessOneShot();
        MilliSleep(500);
        // blocks here until an outbound slot frees up
        CSemaphoreGrant grant(*semOutbound);
        boost::this_thread::interruption_point();
        // Add seed nodes if DNS seeds are all down (an infrastructure attack?).
        if (addrman.size() == 0 && (GetTime() - nStart > 60)) {
            static bool done = false;
            if (!done) {
                LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n");
                addrman.Add(Params().FixedSeeds(), CNetAddr("127.0.0.1"));
                done = true;
            }
        }
        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;
        // Only connect out to one peer per network group (/16 for IPv4).
        // Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
        int nOutbound = 0;
        set<vector<unsigned char> > setConnected;
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodes) {
                if (!pnode->fInbound) {
                    setConnected.insert(pnode->addr.GetGroup());
                    nOutbound++;
                }
            }
        }
        int64_t nANow = GetAdjustedTime();
        int nTries = 0;
        while (true) {
            CAddress addr = addrman.Select();
            // if we selected an invalid address, restart
            if (!addr.IsValid() || setConnected.count(addr.GetGroup()) || IsLocal(addr))
                break;
            // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
            // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
            // already-connected network ranges, ...) before trying new addrman addresses.
            nTries++;
            if (nTries > 100)
                break;
            if (IsLimited(addr))
                continue;
            // only consider very recently tried nodes after 30 failed attempts
            if (nANow - addr.nLastTry < 600 && nTries < 30)
                continue;
            // do not allow non-default ports, unless after 50 invalid addresses selected already
            if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50)
                continue;
            addrConnect = addr;
            break;
        }
        if (addrConnect.IsValid())
            OpenNetworkConnection(addrConnect, &grant);
    }
}
// Maintain connections to the -addnode peers: resolve each entry, skip those
// we are already connected to, and retry the rest every 2 minutes.
void ThreadOpenAddedConnections()
{
    {
        LOCK(cs_vAddedNodes);
        vAddedNodes = mapMultiArgs["-addnode"];
    }
    if (HaveNameProxy()) {
        // with a name proxy we cannot resolve locally: connect by hostname
        while (true) {
            list<string> lAddresses(0);
            {
                LOCK(cs_vAddedNodes);
                BOOST_FOREACH (string& strAddNode, vAddedNodes)
                    lAddresses.push_back(strAddNode);
            }
            BOOST_FOREACH (string& strAddNode, lAddresses) {
                CAddress addr;
                CSemaphoreGrant grant(*semOutbound);
                OpenNetworkConnection(addr, &grant, strAddNode.c_str());
                MilliSleep(500);
            }
            MilliSleep(120000); // Retry every 2 minutes
        }
    }
    for (unsigned int i = 0; true; i++) {
        list<string> lAddresses(0);
        {
            LOCK(cs_vAddedNodes);
            BOOST_FOREACH (string& strAddNode, vAddedNodes)
                lAddresses.push_back(strAddNode);
        }
        // resolve every addnode entry to its list of services
        list<vector<CService> > lservAddressesToAdd(0);
        BOOST_FOREACH (string& strAddNode, lAddresses) {
            vector<CService> vservNode(0);
            if (Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0)) {
                lservAddressesToAdd.push_back(vservNode);
                {
                    LOCK(cs_setservAddNodeAddresses);
                    BOOST_FOREACH (CService& serv, vservNode)
                        setservAddNodeAddresses.insert(serv);
                }
            }
        }
        // Attempt to connect to each IP for each addnode entry until at least one is successful per addnode entry
        // (keeping in mind that addnode entries can have many IPs if fNameLookup)
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodes)
                for (list<vector<CService> >::iterator it = lservAddressesToAdd.begin(); it != lservAddressesToAdd.end(); it++)
                    // NOTE(review): if the erased element is the first in the
                    // list, the `it--` below decrements begin(), which is
                    // formally undefined behavior; it happens to work on
                    // common std::list implementations but should be
                    // rewritten to use the iterator returned by erase().
                    BOOST_FOREACH (CService& addrNode, *(it))
                        if (pnode->addr == addrNode) {
                            it = lservAddressesToAdd.erase(it);
                            it--;
                            break;
                        }
        }
        BOOST_FOREACH (vector<CService>& vserv, lservAddressesToAdd) {
            // rotate through the resolved IPs across retries via i
            CSemaphoreGrant grant(*semOutbound);
            OpenNetworkConnection(CAddress(vserv[i % vserv.size()]), &grant);
            MilliSleep(500);
        }
        MilliSleep(120000); // Retry every 2 minutes
    }
}
// if successful, this moves the passed grant to the constructed node
// Open a single outbound connection to addrConnect (or the hostname pszDest).
// Returns false when the destination is local, banned, already connected, or
// the connect itself fails; the caller keeps the grant in that case.
bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant* grantOutbound, const char* pszDest, bool fOneShot)
{
    //
    // Initiate outbound network connection
    //
    boost::this_thread::interruption_point();
    if (!pszDest) {
        // connecting by address: refuse self, duplicates and banned peers
        if (IsLocal(addrConnect) ||
            FindNode((CNetAddr)addrConnect) || CNode::IsBanned(addrConnect) ||
            FindNode(addrConnect.ToStringIPPort()))
            return false;
    } else if (FindNode(pszDest))
        return false;
    CNode* pnode = ConnectNode(addrConnect, pszDest);
    boost::this_thread::interruption_point();
    if (!pnode)
        return false;
    if (grantOutbound)
        grantOutbound->MoveTo(pnode->grantOutbound);
    pnode->fNetworkNode = true;
    if (fOneShot)
        pnode->fOneShot = true;
    return true;
}
// Message-processing loop: for every connected peer, process queued received
// messages and let the protocol layer send replies. Sleeps briefly (or until
// signalled) when no peer has work pending.
void ThreadMessageHandler()
{
    boost::mutex condition_mutex;
    boost::unique_lock<boost::mutex> lock(condition_mutex);
    SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
    while (true) {
        // snapshot + AddRef so nodes stay alive without holding cs_vNodes
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH (CNode* pnode, vNodesCopy) {
                pnode->AddRef();
            }
        }
        // Poll the connected nodes for messages
        // one random "trickle" node per pass gets its full address/inv flush
        CNode* pnodeTrickle = NULL;
        if (!vNodesCopy.empty())
            pnodeTrickle = vNodesCopy[GetRand(vNodesCopy.size())];
        bool fSleep = true;
        BOOST_FOREACH (CNode* pnode, vNodesCopy) {
            if (pnode->fDisconnect)
                continue;
            // Receive messages
            {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv) {
                    if (!g_signals.ProcessMessages(pnode))
                        pnode->CloseSocketDisconnect();
                    // skip the end-of-pass sleep if this peer still has a
                    // complete message queued and room in its send buffer
                    if (pnode->nSendSize < SendBufferSize()) {
                        if (!pnode->vRecvGetData.empty() || (!pnode->vRecvMsg.empty() && pnode->vRecvMsg[0].complete())) {
                            fSleep = false;
                        }
                    }
                }
            }
            boost::this_thread::interruption_point();
            // Send messages
            {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    g_signals.SendMessages(pnode, pnode == pnodeTrickle || pnode->fWhitelisted);
            }
            boost::this_thread::interruption_point();
        }
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH (CNode* pnode, vNodesCopy)
                pnode->Release();
        }
        if (fSleep)
            messageHandlerCondition.timed_wait(lock, boost::posix_time::microsec_clock::universal_time() + boost::posix_time::milliseconds(100));
    }
}
// ppcoin: stake minter thread
void static ThreadStakeMinter()
{
boost::this_thread::interruption_point();
LogPrintf("ThreadStakeMinter started\n");
CWallet* pwallet = pwalletMain;
try {
BitcoinMiner(pwallet, true);
boost::this_thread::interruption_point();
} catch (std::exception& e) {
LogPrintf("ThreadStakeMinter() exception \n");
} catch (...) {
LogPrintf("ThreadStakeMinter() error \n");
}
LogPrintf("ThreadStakeMinter exiting,\n");
}
// Create, configure, bind and listen on a socket for addrBind. On success the
// socket is appended to vhListenSocket and true is returned; on any failure
// strError is filled with a human-readable message and false is returned.
bool BindListenPort(const CService& addrBind, string& strError, bool fWhitelisted)
{
    strError = "";
    int nOne = 1;
    // Create socket for listening for incoming connections
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
        strError = strprintf("Error: Bind address family for %s not supported", addrBind.ToString());
        LogPrintf("%s\n", strError);
        return false;
    }
    SOCKET hListenSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
    if (hListenSocket == INVALID_SOCKET) {
        strError = strprintf("Error: Couldn't open socket for incoming connections (socket returned error %s)", NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError);
        return false;
    }
    // the select()-based handler cannot service descriptors beyond FD_SETSIZE
    if (!IsSelectableSocket(hListenSocket)) {
        strError = "Error: Couldn't create a listenable socket for incoming connections";
        LogPrintf("%s\n", strError);
        return false;
    }
#ifndef WIN32
#ifdef SO_NOSIGPIPE
    // Different way of disabling SIGPIPE on BSD
    setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&nOne, sizeof(int));
#endif
    // Allow binding if the port is still in TIME_WAIT state after
    // the program was closed and restarted. Not an issue on windows!
    setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void*)&nOne, sizeof(int));
#endif
    // Set to non-blocking, incoming connections will also inherit this
    if (!SetSocketNonBlocking(hListenSocket, true)) {
        strError = strprintf("BindListenPort: Setting listening socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError);
        return false;
    }
    // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option
    // and enable it by default or not. Try to enable it, if possible.
    if (addrBind.IsIPv6()) {
#ifdef IPV6_V6ONLY
#ifdef WIN32
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&nOne, sizeof(int));
#else
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&nOne, sizeof(int));
#endif
#endif
#ifdef WIN32
        int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED;
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int));
#endif
    }
    if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) {
        int nErr = WSAGetLastError();
        if (nErr == WSAEADDRINUSE)
            strError = strprintf(_("Unable to bind to %s on this computer. Genero Core is probably already running."), addrBind.ToString());
        else
            strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr));
        LogPrintf("%s\n", strError);
        CloseSocket(hListenSocket);
        return false;
    }
    LogPrintf("Bound to %s\n", addrBind.ToString());
    // Listen for incoming connections
    if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) {
        strError = strprintf(_("Error: Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError);
        CloseSocket(hListenSocket);
        return false;
    }
    vhListenSocket.push_back(ListenSocket(hListenSocket, fWhitelisted));
    // advertise the bound address unless it is a whitelist-only binding
    if (addrBind.IsRoutable() && fDiscover && !fWhitelisted)
        AddLocal(addrBind, LOCAL_BIND);
    return true;
}
// Enumerate this host's own network addresses (hostname lookup on Windows,
// getifaddrs elsewhere) and register the routable ones as local addresses.
void static Discover(boost::thread_group& threadGroup)
{
    if (!fDiscover)
        return;
#ifdef WIN32
    // Get local host IP
    char pszHostName[256] = "";
    if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) {
        vector<CNetAddr> vaddr;
        if (LookupHost(pszHostName, vaddr)) {
            BOOST_FOREACH (const CNetAddr& addr, vaddr) {
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString());
            }
        }
    }
#else
    // Get local host ip
    struct ifaddrs* myaddrs;
    if (getifaddrs(&myaddrs) == 0) {
        for (struct ifaddrs* ifa = myaddrs; ifa != NULL; ifa = ifa->ifa_next) {
            // skip interfaces that are down, loopback, or have no address
            if (ifa->ifa_addr == NULL) continue;
            if ((ifa->ifa_flags & IFF_UP) == 0) continue;
            if (strcmp(ifa->ifa_name, "lo") == 0) continue;
            if (strcmp(ifa->ifa_name, "lo0") == 0) continue;
            if (ifa->ifa_addr->sa_family == AF_INET) {
                struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
                CNetAddr addr(s4->sin_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString());
            } else if (ifa->ifa_addr->sa_family == AF_INET6) {
                struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
                CNetAddr addr(s6->sin6_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString());
            }
        }
        freeifaddrs(myaddrs);
    }
#endif
}
// Bring the networking layer up: load peers.dat, create the outbound
// semaphore and local pseudo-node, discover local addresses, and launch all
// network worker threads in threadGroup.
void StartNode(boost::thread_group& threadGroup)
{
    uiInterface.InitMessage(_("Loading addresses..."));
    // Load addresses for peers.dat
    int64_t nStart = GetTimeMillis();
    {
        CAddrDB adb;
        if (!adb.Read(addrman))
            LogPrintf("Invalid or missing peers.dat; recreating\n");
    }
    LogPrintf("Loaded %i addresses from peers.dat  %dms\n",
        addrman.size(), GetTimeMillis() - nStart);
    fAddressesInitialized = true;
    if (semOutbound == NULL) {
        // initialize semaphore
        int nMaxOutbound = min(MAX_OUTBOUND_CONNECTIONS, nMaxConnections);
        semOutbound = new CSemaphore(nMaxOutbound);
    }
    if (pnodeLocalHost == NULL)
        pnodeLocalHost = new CNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), nLocalServices));
    Discover(threadGroup);
    //
    // Start threads
    //
    if (!GetBoolArg("-dnsseed", true))
        LogPrintf("DNS seeding disabled\n");
    else
        threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "dnsseed", &ThreadDNSAddressSeed));
    // Map ports with UPnP
    MapPort(GetBoolArg("-upnp", DEFAULT_UPNP));
    // Send and receive from sockets, accept connections
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "net", &ThreadSocketHandler));
    // Initiate outbound connections from -addnode
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "addcon", &ThreadOpenAddedConnections));
    // Initiate outbound connections
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "opencon", &ThreadOpenConnections));
    // Process messages
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "msghand", &ThreadMessageHandler));
    // Dump network addresses
    threadGroup.create_thread(boost::bind(&LoopForever<void (*)()>, "dumpaddr", &DumpAddresses, DUMP_ADDRESSES_INTERVAL * 1000));
    // ppcoin:mint proof-of-stake blocks in the background
    if (GetBoolArg("-staking", true))
        threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "stakemint", &ThreadStakeMinter));
}
// Shut the networking layer down: stop UPnP, unblock every thread waiting on
// an outbound slot, and persist the address manager one last time.
bool StopNode()
{
    LogPrintf("StopNode()\n");
    MapPort(false);
    if (semOutbound)
        // wake up all ThreadOpenConnections waiters so they can exit
        for (int i = 0; i < MAX_OUTBOUND_CONNECTIONS; i++)
            semOutbound->post();
    if (fAddressesInitialized) {
        DumpAddresses();
        fAddressesInitialized = false;
    }
    return true;
}
// Static-destructor helper: a single global instance tears the networking
// globals down at program exit (sockets, node objects, semaphore, WinSock).
class CNetCleanup
{
public:
    CNetCleanup() {}
    ~CNetCleanup()
    {
        // Close sockets
        BOOST_FOREACH (CNode* pnode, vNodes)
            if (pnode->hSocket != INVALID_SOCKET)
                CloseSocket(pnode->hSocket);
        BOOST_FOREACH (ListenSocket& hListenSocket, vhListenSocket)
            if (hListenSocket.socket != INVALID_SOCKET)
                if (!CloseSocket(hListenSocket.socket))
                    LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError()));
        // clean up some globals (to help leak detection)
        BOOST_FOREACH (CNode* pnode, vNodes)
            delete pnode;
        BOOST_FOREACH (CNode* pnode, vNodesDisconnected)
            delete pnode;
        vNodes.clear();
        vNodesDisconnected.clear();
        vhListenSocket.clear();
        delete semOutbound;
        semOutbound = NULL;
        delete pnodeLocalHost;
        pnodeLocalHost = NULL;
#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
} instance_of_cnetcleanup;
void CExplicitNetCleanup::callCleanup()
{
// Explicit call to destructor of CNetCleanup because it's not implicitly called
// when the wallet is restarted from within the wallet itself.
CNetCleanup* tmp = new CNetCleanup();
delete tmp; // Stroustrup's gonna kill me for that
}
// Serialize the transaction and hand it to the two-argument overload below,
// which caches the serialized form in the relay map and notifies peers.
void RelayTransaction(const CTransaction& tx)
{
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    // pre-size the buffer to avoid repeated reallocation while serializing
    ss.reserve(10000);
    ss << tx;
    RelayTransaction(tx, ss);
}
// Cache the serialized transaction in mapRelay (expiring after 15 minutes)
// and push the corresponding inventory notice to every relaying peer whose
// bloom filter (if any) matches.
void RelayTransaction(const CTransaction& tx, const CDataStream& ss)
{
    CInv inv(MSG_TX, tx.GetHash());
    {
        LOCK(cs_mapRelay);
        // Expire old relay messages
        while (!vRelayExpiration.empty() && vRelayExpiration.front().first < GetTime()) {
            mapRelay.erase(vRelayExpiration.front().second);
            vRelayExpiration.pop_front();
        }
        // Save original serialized message so newer versions are preserved
        mapRelay.insert(std::make_pair(inv, ss));
        vRelayExpiration.push_back(std::make_pair(GetTime() + 15 * 60, inv));
    }
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if (!pnode->fRelayTxes)
            continue;
        LOCK(pnode->cs_filter);
        if (pnode->pfilter) {
            // SPV peer: only relay if the transaction matches its filter
            if (pnode->pfilter->IsRelevantAndUpdate(tx))
                pnode->PushInventory(inv);
        } else
            pnode->PushInventory(inv);
    }
}
// Broadcast an instant-send ("ix") lock request for the transaction to peers;
// with relayToAll set, peers that opted out of tx relay are included too.
void RelayTransactionLockReq(const CTransaction& tx, bool relayToAll)
{
    CInv inv(MSG_TXLOCK_REQUEST, tx.GetHash());
    //broadcast the new lock
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if (!relayToAll && !pnode->fRelayTxes)
            continue;
        pnode->PushMessage("ix", tx);
    }
}
// Forward an inventory notice to every connected peer on a recent enough
// protocol version, skipping bloom-light peers for masternode-only types.
void RelayInv(CInv& inv)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH (CNode* pnode, vNodes) {
        if ((pnode->nServices == NODE_BLOOM_WITHOUT_MN) && inv.IsMasterNodeType())
            continue;
        if (pnode->nVersion >= ActiveProtocol())
            pnode->PushInventory(inv);
    }
}
// Add to the total received-byte counter under its dedicated lock.
void CNode::RecordBytesRecv(uint64_t bytes)
{
    LOCK(cs_totalBytesRecv);
    nTotalBytesRecv += bytes;
}
// Add to the total sent-byte counter under its dedicated lock.
void CNode::RecordBytesSent(uint64_t bytes)
{
    LOCK(cs_totalBytesSent);
    nTotalBytesSent += bytes;
}
// Thread-safe read of the total received-byte counter.
uint64_t CNode::GetTotalBytesRecv()
{
    LOCK(cs_totalBytesRecv);
    return nTotalBytesRecv;
}
// Thread-safe read of the total sent-byte counter.
uint64_t CNode::GetTotalBytesSent()
{
    LOCK(cs_totalBytesSent);
    return nTotalBytesSent;
}
// Test-only message corruption: with probability 1/nChance, randomly mutate
// the pending outbound stream (ssSend) in one of three ways, then recurse
// with a 1/2 chance so multiple mutations are exponentially less likely.
void CNode::Fuzz(int nChance)
{
    if (!fSuccessfullyConnected) return; // Don't fuzz initial handshake
    if (GetRand(nChance) != 0) return; // Fuzz 1 of every nChance messages
    switch (GetRand(3)) {
    case 0:
        // xor a random byte with a random value:
        if (!ssSend.empty()) {
            CDataStream::size_type pos = GetRand(ssSend.size());
            ssSend[pos] ^= (unsigned char)(GetRand(256));
        }
        break;
    case 1:
        // delete a random byte:
        if (!ssSend.empty()) {
            CDataStream::size_type pos = GetRand(ssSend.size());
            ssSend.erase(ssSend.begin() + pos);
        }
        break;
    case 2:
        // insert a random byte at a random position
        // NOTE(review): unlike cases 0/1 this branch has no empty-stream
        // guard; GetRand(0)'s behavior on an empty ssSend should be confirmed.
        {
            CDataStream::size_type pos = GetRand(ssSend.size());
            char ch = (char)GetRand(256);
            ssSend.insert(ssSend.begin() + pos, ch);
        }
        break;
    }
    // Chance of more than one change half the time:
    // (more changes exponentially less likely):
    Fuzz(2);
}
//
// CAddrDB
//
// Resolve the on-disk location of the peer database once at construction.
CAddrDB::CAddrDB()
{
    pathAddr = GetDataDir() / "peers.dat";
}
// Serialize the address manager to peers.dat: network magic, address data,
// then a SHA256d checksum over everything written so far (verified by Read).
// Returns false with an error log on any open/serialize/IO failure.
bool CAddrDB::Write(const CAddrMan& addr)
{
    // serialize addresses, checksum data up to that point, then append csum
    CDataStream ssPeers(SER_DISK, CLIENT_VERSION);
    ssPeers << FLATDATA(Params().MessageStart());
    ssPeers << addr;
    uint256 hash = Hash(ssPeers.begin(), ssPeers.end());
    ssPeers << hash;
    // open output file, and associate with CAutoFile.
    // Uses the member pathAddr set in the constructor; the previous code
    // shadowed it with an identical local and generated an unused random
    // temp filename.
    // TODO(review): writing peers.dat in place is not atomic - a crash
    // mid-write corrupts the file. Upstream writes to a temp file and
    // renames it over the target; restore that pattern when a portable
    // rename-over helper is available here.
    FILE* file = fopen(pathAddr.string().c_str(), "wb");
    CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s : Failed to open file %s", __func__, pathAddr.string());
    // Write and commit header, data
    try {
        fileout << ssPeers;
    } catch (std::exception& e) {
        return error("%s : Serialize or I/O error - %s", __func__, e.what());
    }
    FileCommit(fileout.Get());
    fileout.fclose();
    return true;
}
// Load peers.dat into the address manager, verifying the trailing SHA256d
// checksum and the network magic before deserializing. Returns false with an
// error log on open/read/checksum/magic/deserialize failure.
bool CAddrDB::Read(CAddrMan& addr)
{
    // open input file, and associate with CAutoFile
    FILE* file = fopen(pathAddr.string().c_str(), "rb");
    CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s : Failed to open file %s", __func__, pathAddr.string());
    // use file size to size memory buffer
    int fileSize = boost::filesystem::file_size(pathAddr);
    int dataSize = fileSize - sizeof(uint256);
    // Don't try to resize to a negative number if file is small
    if (dataSize < 0)
        dataSize = 0;
    vector<unsigned char> vchData;
    vchData.resize(dataSize);
    uint256 hashIn;
    // read data and checksum from file
    try {
        // guard the buffer read: &vchData[0] on an empty vector (file no
        // larger than the checksum) is undefined behavior
        if (dataSize > 0)
            filein.read((char*)&vchData[0], dataSize);
        filein >> hashIn;
    } catch (std::exception& e) {
        return error("%s : Deserialize or I/O error - %s", __func__, e.what());
    }
    filein.fclose();
    CDataStream ssPeers(vchData, SER_DISK, CLIENT_VERSION);
    // verify stored checksum matches input data
    uint256 hashTmp = Hash(ssPeers.begin(), ssPeers.end());
    if (hashIn != hashTmp)
        return error("%s : Checksum mismatch, data corrupted", __func__);
    unsigned char pchMsgTmp[4];
    try {
        // de-serialize file header (network specific magic number) and ..
        ssPeers >> FLATDATA(pchMsgTmp);
        // ... verify the network matches ours
        if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
            return error("%s : Invalid network magic number", __func__);
        // de-serialize address data into one CAddrMan object
        ssPeers >> addr;
    } catch (std::exception& e) {
        return error("%s : Deserialize or I/O error - %s", __func__, e.what());
    }
    return true;
}
// Upper bound (bytes) on a peer's queued receive data; -maxreceivebuffer is
// given in kB, default 5000 kB.
unsigned int ReceiveFloodSize()
{
    return GetArg("-maxreceivebuffer", 5 * 1000) * 1000;
}
// Upper bound (bytes) on a peer's queued send data; -maxsendbuffer is given
// in kB, default 1000 kB.
unsigned int SendBufferSize()
{
    return GetArg("-maxsendbuffer", 1 * 1000) * 1000;
}
// Construct per-peer state for a new connection, assign a unique node id,
// and (for outbound peers) push our version message immediately.
CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fInboundIn) : ssSend(SER_NETWORK, INIT_PROTO_VERSION), setAddrKnown(5000)
{
    nServices = 0;
    hSocket = hSocketIn;
    nRecvVersion = INIT_PROTO_VERSION;
    nLastSend = 0;
    nLastRecv = 0;
    nSendBytes = 0;
    nRecvBytes = 0;
    nTimeConnected = GetTime();
    addr = addrIn;
    // fall back to the peer's IP:port when no name was supplied
    addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
    nVersion = 0;
    strSubVer = "";
    fWhitelisted = false;
    fOneShot = false;
    fClient = false; // set by version message
    fInbound = fInboundIn;
    fNetworkNode = false;
    fSuccessfullyConnected = false;
    fDisconnect = false;
    nRefCount = 0;
    nSendSize = 0;
    nSendOffset = 0;
    hashContinue = 0;
    nStartingHeight = -1;
    fGetAddr = false;
    fRelayTxes = false;
    setInventoryKnown.max_size(SendBufferSize() / 1000);
    pfilter = new CBloomFilter();
    nPingNonceSent = 0;
    nPingUsecStart = 0;
    nPingUsecTime = 0;
    fPingQueued = false;
    fDarKsendMaster = false;
    {
        // unique, monotonically increasing peer id
        LOCK(cs_nLastNodeId);
        id = nLastNodeId++;
    }
    if (fLogIPs)
        LogPrint("net", "Added connection to %s peer=%d\n", addrName, id);
    else
        LogPrint("net", "Added connection peer=%d\n", id);
    // Be shy and don't send version until we hear
    if (hSocket != INVALID_SOCKET && !fInbound)
        PushVersion();
    GetNodeSignals().InitializeNode(GetId(), this);
}
// Tear down the peer: close the socket, release the optional bloom filter,
// and notify listeners that this node is going away.
CNode::~CNode()
{
    CloseSocket(hSocket);
    delete pfilter; // deleting a null pointer is a harmless no-op
    GetNodeSignals().FinalizeNode(GetId());
}
// Queue a request for inventory item `inv`. Requests are rate-limited so each
// retry of the same item is scheduled at least two minutes after the previous
// attempt, and request times are kept strictly increasing so queue order is
// stable.
void CNode::AskFor(const CInv& inv)
{
    // Drop the request entirely once the per-node queue is saturated.
    if (mapAskFor.size() > MAPASKFOR_MAX_SZ)
        return;
    // We're using mapAskFor as a priority queue,
    // the key is the earliest time the request can be sent
    int64_t nRequestTime;
    limitedmap<CInv, int64_t>::const_iterator it = mapAlreadyAskedFor.find(inv);
    if (it != mapAlreadyAskedFor.end())
        nRequestTime = it->second;
    else
        nRequestTime = 0;
    LogPrint("net", "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime / 1000000), id);
    // Make sure not to reuse time indexes to keep things in the same order
    int64_t nNow = GetTimeMicros() - 1000000;
    static int64_t nLastTime; // last time index handed out (monotonic across calls)
    ++nLastTime;
    nNow = std::max(nNow, nLastTime);
    nLastTime = nNow;
    // Each retry is 2 minutes after the last
    nRequestTime = std::max(nRequestTime + 2 * 60 * 1000000, nNow);
    if (it != mapAlreadyAskedFor.end())
        mapAlreadyAskedFor.update(it, nRequestTime);
    else
        mapAlreadyAskedFor.insert(std::make_pair(inv, nRequestTime));
    mapAskFor.insert(std::make_pair(nRequestTime, inv));
}
// Start composing an outbound message: acquires cs_vSend (released later by
// EndMessage or AbortMessage) and writes a placeholder header whose payload
// size and checksum are patched in by EndMessage().
void CNode::BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend)
{
    ENTER_CRITICAL_SECTION(cs_vSend);
    // Any previous message must have been finished or aborted.
    assert(ssSend.size() == 0);
    ssSend << CMessageHeader(pszCommand, 0);
    LogPrint("net", "sending: %s ", SanitizeString(pszCommand));
}
// Discard the message currently being composed and release cs_vSend
// (the lock taken by BeginMessage).
void CNode::AbortMessage() UNLOCK_FUNCTION(cs_vSend)
{
    ssSend.clear();
    LEAVE_CRITICAL_SECTION(cs_vSend);
    LogPrint("net", "(aborted)\n");
}
// Finish the message started by BeginMessage(): patch the header's payload
// size and checksum in place, move the serialized bytes onto the send queue,
// optionally attempt an immediate write, and release cs_vSend.
void CNode::EndMessage() UNLOCK_FUNCTION(cs_vSend)
{
    // The -*messagestest options are intentionally not documented in the help message,
    // since they are only used during development to debug the networking code and are
    // not intended for end-users.
    if (mapArgs.count("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 2)) == 0) {
        LogPrint("net", "dropmessages DROPPING SEND MESSAGE\n");
        AbortMessage();
        return;
    }
    if (mapArgs.count("-fuzzmessagestest"))
        Fuzz(GetArg("-fuzzmessagestest", 10));
    if (ssSend.size() == 0)
        return;
    // Set the size
    unsigned int nSize = ssSend.size() - CMessageHeader::HEADER_SIZE;
    memcpy((char*)&ssSend[CMessageHeader::MESSAGE_SIZE_OFFSET], &nSize, sizeof(nSize));
    // Set the checksum (first 4 bytes of the double-SHA256 of the payload)
    uint256 hash = Hash(ssSend.begin() + CMessageHeader::HEADER_SIZE, ssSend.end());
    unsigned int nChecksum = 0;
    memcpy(&nChecksum, &hash, sizeof(nChecksum));
    assert(ssSend.size() >= CMessageHeader::CHECKSUM_OFFSET + sizeof(nChecksum));
    memcpy((char*)&ssSend[CMessageHeader::CHECKSUM_OFFSET], &nChecksum, sizeof(nChecksum));
    LogPrint("net", "(%d bytes) peer=%d\n", nSize, id);
    // Move the completed bytes onto the per-node send queue.
    std::deque<CSerializeData>::iterator it = vSendMsg.insert(vSendMsg.end(), CSerializeData());
    ssSend.GetAndClear(*it);
    nSendSize += (*it).size();
    // If write queue empty, attempt "optimistic write"
    if (it == vSendMsg.begin())
        SocketSendData(this);
    LEAVE_CRITICAL_SECTION(cs_vSend);
}
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <locale>
// class num_get<charT, InputIterator>
// iter_type get(iter_type in, iter_type end, ios_base&,
// ios_base::iostate& err, bool& v) const;
#include <locale>
#include <ios>
#include <cassert>
#include <streambuf>
#include "test_macros.h"
#include "test_iterators.h"
// Facet under test: num_get reading through a cpp17_input_iterator, so the
// parser only gets single-pass access to the characters.
typedef std::num_get<char, cpp17_input_iterator<const char*> > F;
// Exposes the protected num_get constructor for direct instantiation.
class my_facet
    : public F
{
public:
    explicit my_facet(std::size_t refs = 0)
        : F(refs) {}
};
// Punctuation facet whose truename ("a") is a proper prefix of its falsename
// ("abb"), exercising ambiguous-prefix handling in boolalpha parsing.
class p1
    : public std::numpunct<char>
{
public:
    p1() : std::numpunct<char>() {}
protected:
    virtual string_type do_truename() const {return "a";}
    virtual string_type do_falsename() const {return "abb";}
};
// Like p1, but with the shorter falsename "ab" so the ambiguity is resolved
// one character earlier.
class p2
    : public std::numpunct<char>
{
public:
    p2() : std::numpunct<char>() {}
protected:
    virtual string_type do_truename() const {return "a";}
    virtual string_type do_falsename() const {return "ab";}
};
// Each scope parses one input string with num_get::get(..., bool&) and checks
// the stop position of the iterator, the resulting stream state, and the
// parsed bool value.
int main(int, char**)
{
    const my_facet f(1);
    std::ios ios(0);
    // --- Default (numeric) mode: only "0" and "1" are valid bools. ---
    {
        const char str[] = "1";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+sizeof(str)-1);
        assert(err == ios.goodbit);
        assert(b == true);
    }
    {
        const char str[] = "0";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+sizeof(str)-1);
        assert(err == ios.goodbit);
        assert(b == false);
    }
    // "12" parses as the number 12, which is not a valid bool -> failbit.
    {
        const char str[] = "12";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+sizeof(str)-1);
        assert(err == ios.failbit);
        assert(b == true);
    }
    // Leading junk: nothing consumed, failbit set.
    {
        const char str[] = "*12";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+0);
        assert(err == ios.failbit);
        assert(b == false);
    }
    // --- boolalpha mode: parse against truename()/falsename(). ---
    std::boolalpha(ios);
    // "1" no longer matches either name -> failbit, nothing consumed.
    {
        const char str[] = "1";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+0);
        assert(err == ios.failbit);
        assert(b == false);
    }
    {
        const char str[] = "true";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+sizeof(str)-1);
        assert(err == ios.goodbit);
        assert(b == true);
    }
    {
        const char str[] = "false";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+sizeof(str)),
                  ios, err, b);
        assert(iter.base() == str+sizeof(str)-1);
        assert(err == ios.goodbit);
        assert(b == false);
    }
    // --- Custom names: truename "a" is a prefix of falsename "abb". ---
    ios.imbue(std::locale(ios.getloc(), new p1));
    // Input ends while falsename is still possible -> eofbit, truename wins.
    {
        const char str[] = "a";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+1),
                  ios, err, b);
        assert(iter.base() == str+1);
        assert(err == ios.eofbit);
        assert(b == true);
    }
    // "ab" commits to falsename "abb", but "abc" mismatches -> failbit.
    {
        const char str[] = "abc";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+3),
                  ios, err, b);
        assert(iter.base() == str+2);
        assert(err == ios.failbit);
        assert(b == false);
    }
    // Second char rules out falsename -> truename matched, stop after "a".
    {
        const char str[] = "acc";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+3),
                  ios, err, b);
        assert(iter.base() == str+1);
        assert(err == ios.goodbit);
        assert(b == true);
    }
    // --- Custom names: truename "a", falsename "ab". ---
    ios.imbue(std::locale(ios.getloc(), new p2));
    {
        const char str[] = "a";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+1),
                  ios, err, b);
        assert(iter.base() == str+1);
        assert(err == ios.eofbit);
        assert(b == true);
    }
    // Full falsename consumed exactly at end -> eofbit, false.
    {
        const char str[] = "ab";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+2),
                  ios, err, b);
        assert(iter.base() == str+2);
        assert(err == ios.eofbit);
        assert(b == false);
    }
    // Falsename matched with input remaining -> goodbit, false.
    {
        const char str[] = "abc";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+3),
                  ios, err, b);
        assert(iter.base() == str+2);
        assert(err == ios.goodbit);
        assert(b == false);
    }
    // Truename matched, falsename ruled out by 'c' -> goodbit, true.
    {
        const char str[] = "ac";
        std::ios_base::iostate err = ios.goodbit;
        bool b;
        cpp17_input_iterator<const char*> iter =
            f.get(cpp17_input_iterator<const char*>(str),
                  cpp17_input_iterator<const char*>(str+2),
                  ios, err, b);
        assert(iter.base() == str+1);
        assert(err == ios.goodbit);
        assert(b == true);
    }
    return 0;
}
|
//
// detail/win_iocp_operation.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_WIN_IOCP_OPERATION_HPP
#define ASIO_DETAIL_WIN_IOCP_OPERATION_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "../../../../../asio/asio/include/asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/handler_tracking.hpp"
#include "asio/detail/op_queue.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/error_code.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class win_iocp_io_context;
// Base class for all operations. A function pointer is used instead of virtual
// functions to avoid the associated overhead.
// Base class for all operations. A function pointer is used instead of virtual
// functions to avoid the associated overhead. Derives from OVERLAPPED so the
// object itself can be handed to the Win32 I/O completion port APIs.
class win_iocp_operation
  : public OVERLAPPED
    ASIO_ALSO_INHERIT_TRACKED_HANDLER
{
public:
  typedef win_iocp_operation operation_type;

  // Invoke the completion trampoline; a non-null owner means the result
  // should be delivered to the user's handler.
  void complete(void* owner, const asio::error_code& ec,
      std::size_t bytes_transferred)
  {
    func_(owner, this, ec, bytes_transferred);
  }

  // A null owner tells the trampoline to destroy the operation without
  // running the handler.
  void destroy()
  {
    func_(0, this, asio::error_code(), 0);
  }

protected:
  typedef void (*func_type)(
      void*, win_iocp_operation*,
      const asio::error_code&, std::size_t);

  win_iocp_operation(func_type func)
    : next_(0),
      func_(func)
  {
    reset();
  }

  // Prevents deletion through this type.
  ~win_iocp_operation()
  {
  }

  // Clear the inherited OVERLAPPED fields so the operation can be (re)issued.
  void reset()
  {
    Internal = 0;
    InternalHigh = 0;
    Offset = 0;
    OffsetHigh = 0;
    hEvent = 0;
    ready_ = 0;
  }

private:
  friend class op_queue_access;
  friend class win_iocp_io_context;
  win_iocp_operation* next_;  // intrusive link used by op_queue
  func_type func_;            // completion/destruction trampoline
  // NOTE(review): manipulated by win_iocp_io_context (a friend); exact
  // semantics are defined there — confirm before relying on it.
  long ready_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_WIN_IOCP_OPERATION_HPP
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/strided_slice.h"
#include <vector>
#include "nnacl/strided_slice.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_StridedSlice;
namespace mindspore::kernel {
namespace {
// When begins/ends/strides arrive as tensors rather than attributes, the op
// has four inputs laid out as [data, begins, ends, strides].
constexpr size_t kMultiInputsSize = 4;
constexpr size_t kBeginsIndex = 1;
constexpr size_t kEndsIndex = 2;
constexpr size_t kStridesInex = 3;  // NOTE(review): "Inex" is a typo for "Index"
} // namespace
// Defer all setup until shape inference has produced concrete shapes; once it
// has, ReSize() populates the parameter from the input tensor.
int StridedSliceCPUKernel::Init() { return InferShapeDone() ? ReSize() : RET_OK; }
// Re-derive the parameter fields that depend on the (now inferred) input
// tensor: the element data type and the input shape.
int StridedSliceCPUKernel::ReSize() {
  auto input = in_tensors_.at(0);
  auto parameter = reinterpret_cast<StridedSliceParameter *>(op_parameter_);
  MS_ASSERT(input);
  MS_ASSERT(parameter);
  // Only int8 vs float is distinguished here; all other types map to float.
  parameter->data_type = input->data_type() == kNumberTypeInt8 ? kDataTypeInt8 : kDataTypeFloat;
  auto input_shape = input->shape();
  // NOTE(review): assumes input_shape.size() fits in parameter->in_shape_
  // (bounded by the framework's max dimension) — confirm upstream validation.
  for (size_t i = 0; i < input_shape.size(); ++i) {
    parameter->in_shape_[i] = input_shape[i];
  }
  parameter->in_shape_length_ = static_cast<int>(input_shape.size());
  return RET_OK;
}
int StridedSliceCPUKernel::HandleMultiInputs() {
if (in_tensors_.size() != kMultiInputsSize) {
MS_LOG(ERROR) << "Inputs size should be " << kMultiInputsSize << ", got " << in_tensors_.size();
return RET_ERROR;
}
auto param = reinterpret_cast<StridedSliceParameter *>(op_parameter_);
if (param == nullptr) {
MS_LOG(ERROR) << "StridedSliceParamater cast nullptr";
return RET_ERROR;
}
auto begins = in_tensors_.at(kBeginsIndex);
MS_ASSERT(begins != nullptr);
int axis_num = begins->ElementsNum();
if (axis_num > DIMENSION_6D) {
MS_LOG(ERROR) << "StridedSlice supports max dimension " << DIMENSION_6D << ", input begins dim is " << axis_num;
return RET_ERROR;
}
memcpy(param->begins_, begins->MutableData(), axis_num * sizeof(int));
auto ends = in_tensors_.at(kEndsIndex);
MS_ASSERT(ends != nullptr);
MS_ASSERT(axis_num == ends->ElementsNum());
memcpy(param->ends_, ends->MutableData(), axis_num * sizeof(int));
auto strides = in_tensors_.at(kStridesInex);
MS_ASSERT(strides != nullptr);
MS_ASSERT(axis_num == strides->ElementsNum());
memcpy(param->strides_, strides->MutableData(), axis_num * sizeof(int));
param->num_axes_ = axis_num;
return RET_OK;
}
// Execute the strided slice: optionally refresh begins/ends/strides from the
// auxiliary input tensors, then delegate to the nnacl DoStridedSlice kernel.
int StridedSliceCPUKernel::Run() {
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
    return ret;
  }
  auto input = in_tensors_.at(0);
  auto output = out_tensors_.at(0);
  MS_ASSERT(input);
  MS_ASSERT(output);
  // Four inputs means begins/ends/strides arrive as tensors rather than
  // attributes; copy them into the parameter first.
  if (in_tensors().size() == kMultiInputsSize) {
    ret = HandleMultiInputs();
    if (ret != RET_OK) {
      return ret;
    }
  }
  ret = DoStridedSlice(input->MutableData(), output->MutableData(),
                       reinterpret_cast<StridedSliceParameter *>(op_parameter_));
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "StridedSlice error error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}
// Factory registered with the kernel registry: constructs and initializes a
// StridedSliceCPUKernel, returning nullptr (and freeing/deleting what it
// owns) on any failure.
kernel::LiteKernel *CpuStridedSliceKernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                                 const lite::InnerContext *ctx, const kernel::KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
  MS_ASSERT(desc.type == schema::PrimitiveType_StridedSlice);
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "opParameter null pointer dereferencing.";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) StridedSliceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "New kernel fails.";
    // The kernel never took ownership, so release the parameter here.
    free(opParameter);
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    // Deleting the kernel releases opParameter, which it now owns.
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_StridedSlice, CpuStridedSliceKernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_StridedSlice, CpuStridedSliceKernelCreator)
} // namespace mindspore::kernel
|
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/proxy/proxy_resolver_winhttp.h"
#include <windows.h>
#include <winhttp.h>
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "net/base/net_errors.h"
#include "net/proxy/proxy_info.h"
#include "net/proxy/proxy_resolver.h"
#include "url/gurl.h"
#pragma comment(lib, "winhttp.lib")
using base::TimeDelta;
using base::TimeTicks;
namespace net {
namespace {
// Free the proxy strings that WinHttpGetProxyForUrl allocated into the
// WINHTTP_PROXY_INFO out-parameter (the API requires GlobalFree).
static void FreeInfo(WINHTTP_PROXY_INFO* info) {
  if (info->lpszProxy)
    GlobalFree(info->lpszProxy);
  if (info->lpszProxyBypass)
    GlobalFree(info->lpszProxyBypass);
}
// Synchronous ProxyResolver that delegates PAC evaluation to WinHTTP.
// Because resolution completes inline, the asynchronous interface methods
// (CancelRequest, GetLoadState, ...) are stubbed out with NOTREACHED().
class ProxyResolverWinHttp : public ProxyResolver {
 public:
  ProxyResolverWinHttp(
      const scoped_refptr<ProxyResolverScriptData>& script_data);
  ~ProxyResolverWinHttp() override;

  // ProxyResolver implementation:
  int GetProxyForURL(const GURL& url,
                     ProxyInfo* results,
                     const CompletionCallback& /*callback*/,
                     RequestHandle* /*request*/,
                     const BoundNetLog& /*net_log*/) override;
  void CancelRequest(RequestHandle request) override;
  LoadState GetLoadState(RequestHandle request) const override;
  void CancelSetPacScript() override;
  int SetPacScript(const scoped_refptr<ProxyResolverScriptData>& script_data,
                   const CompletionCallback& /*callback*/) override;

 private:
  bool OpenWinHttpSession();
  void CloseWinHttpSession();

  // Proxy configuration is cached on the session handle.
  HINTERNET session_handle_;

  // Location of the PAC script used for every lookup.
  const GURL pac_url_;

  DISALLOW_COPY_AND_ASSIGN(ProxyResolverWinHttp);
};
// Auto-detect requests map to the conventional WPAD location, since we bypass
// WinHTTP's own auto-detection (see GetProxyForURL for the rationale).
ProxyResolverWinHttp::ProxyResolverWinHttp(
    const scoped_refptr<ProxyResolverScriptData>& script_data)
    : ProxyResolver(false /*expects_pac_bytes*/),
      session_handle_(NULL),
      pac_url_(script_data->type() == ProxyResolverScriptData::TYPE_AUTO_DETECT
                   ? GURL("http://wpad/wpad.dat")
                   : script_data->url()) {
}
ProxyResolverWinHttp::~ProxyResolverWinHttp() {
  // Release the cached WinHTTP session handle, if one was opened.
  CloseWinHttpSession();
}
// Resolve the proxy for |query_url| synchronously via WinHttpGetProxyForUrl,
// retrying once with auto-logon when the first attempt fails with a login
// error. On success fills |results| with either DIRECT or the named proxy
// list returned by the PAC script.
int ProxyResolverWinHttp::GetProxyForURL(const GURL& query_url,
                                         ProxyInfo* results,
                                         const CompletionCallback& /*callback*/,
                                         RequestHandle* /*request*/,
                                         const BoundNetLog& /*net_log*/) {
  // If we don't have a WinHTTP session, then create a new one.
  if (!session_handle_ && !OpenWinHttpSession())
    return ERR_FAILED;

  // If we have been given an empty PAC url, then use auto-detection.
  //
  // NOTE: We just use DNS-based auto-detection here like Firefox. We do this
  // to avoid WinHTTP's auto-detection code, which while more featureful (it
  // supports DHCP based auto-detection) also appears to have issues.
  //
  WINHTTP_AUTOPROXY_OPTIONS options = {0};
  options.fAutoLogonIfChallenged = FALSE;
  options.dwFlags = WINHTTP_AUTOPROXY_CONFIG_URL;
  base::string16 pac_url16 = base::ASCIIToUTF16(pac_url_.spec());
  options.lpszAutoConfigUrl = pac_url16.c_str();

  WINHTTP_PROXY_INFO info = {0};
  DCHECK(session_handle_);

  // Per http://msdn.microsoft.com/en-us/library/aa383153(VS.85).aspx, it is
  // necessary to first try resolving with fAutoLogonIfChallenged set to false.
  // Otherwise, we fail over to trying it with a value of true. This way we
  // get good performance in the case where WinHTTP uses an out-of-process
  // resolver. This is important for Vista and Win2k3.
  BOOL ok = WinHttpGetProxyForUrl(session_handle_,
                                  base::ASCIIToUTF16(query_url.spec()).c_str(),
                                  &options, &info);
  if (!ok) {
    if (ERROR_WINHTTP_LOGIN_FAILURE == GetLastError()) {
      // Retry once with auto-logon enabled.
      options.fAutoLogonIfChallenged = TRUE;
      ok = WinHttpGetProxyForUrl(
          session_handle_, base::ASCIIToUTF16(query_url.spec()).c_str(),
          &options, &info);
    }
    if (!ok) {
      DWORD error = GetLastError();
      // If we got here because of RPC timeout during out of process PAC
      // resolution, no further requests on this session are going to work.
      if (ERROR_WINHTTP_TIMEOUT == error ||
          ERROR_WINHTTP_AUTO_PROXY_SERVICE_ERROR == error) {
        CloseWinHttpSession();
      }
      return ERR_FAILED;  // TODO(darin): Bug 1189288: translate error code.
    }
  }

  int rv = OK;
  // Translate the WinHTTP result into a ProxyInfo.
  switch (info.dwAccessType) {
    case WINHTTP_ACCESS_TYPE_NO_PROXY:
      results->UseDirect();
      break;
    case WINHTTP_ACCESS_TYPE_NAMED_PROXY:
      // According to MSDN:
      //
      // The proxy server list contains one or more of the following strings
      // separated by semicolons or whitespace.
      //
      // ([<scheme>=][<scheme>"://"]<server>[":"<port>])
      //
      // Based on this description, ProxyInfo::UseNamedProxy() isn't
      // going to handle all the variations (in particular <scheme>=).
      //
      // However in practice, it seems that WinHTTP is simply returning
      // things like "foopy1:80;foopy2:80". It strips out the non-HTTP
      // proxy types, and stops the list when PAC encounters a "DIRECT".
      // So UseNamedProxy() should work OK.
      results->UseNamedProxy(base::UTF16ToASCII(info.lpszProxy));
      break;
    default:
      NOTREACHED();
      rv = ERR_FAILED;
  }

  // Free the strings WinHTTP allocated into |info|.
  FreeInfo(&info);
  return rv;
}
// The methods below exist only to satisfy the ProxyResolver interface; this
// resolver is fully synchronous, so none of them should ever be reached.
void ProxyResolverWinHttp::CancelRequest(RequestHandle request) {
  // This is a synchronous ProxyResolver; no possibility for async requests.
  NOTREACHED();
}

LoadState ProxyResolverWinHttp::GetLoadState(RequestHandle request) const {
  NOTREACHED();
  return LOAD_STATE_IDLE;
}

void ProxyResolverWinHttp::CancelSetPacScript() {
  NOTREACHED();
}

int ProxyResolverWinHttp::SetPacScript(
    const scoped_refptr<ProxyResolverScriptData>& script_data,
    const CompletionCallback& /*callback*/) {
  NOTREACHED();
  return ERR_NOT_IMPLEMENTED;
}
// Lazily create the WinHTTP session used for PAC resolution. Returns false
// if WinHttpOpen fails.
bool ProxyResolverWinHttp::OpenWinHttpSession() {
  DCHECK(!session_handle_);
  session_handle_ = WinHttpOpen(NULL,
                                WINHTTP_ACCESS_TYPE_NO_PROXY,
                                WINHTTP_NO_PROXY_NAME,
                                WINHTTP_NO_PROXY_BYPASS,
                                0);
  if (!session_handle_)
    return false;

  // Since this session handle will never be used for WinHTTP connections,
  // these timeouts don't really mean much individually. However, WinHTTP's
  // out of process PAC resolution will use a combined (sum of all timeouts)
  // value to wait for an RPC reply.
  BOOL rv = WinHttpSetTimeouts(session_handle_, 10000, 10000, 5000, 5000);
  DCHECK(rv);

  return true;
}
// Close and clear the cached session handle; safe to call when none is open.
void ProxyResolverWinHttp::CloseWinHttpSession() {
  if (session_handle_) {
    WinHttpCloseHandle(session_handle_);
    session_handle_ = NULL;
  }
}
} // namespace
// The produced resolvers fetch the PAC script by URL themselves, hence
// expects_pac_bytes is false.
ProxyResolverFactoryWinHttp::ProxyResolverFactoryWinHttp()
    : ProxyResolverFactory(false /*expects_pac_bytes*/) {
}
// Creation is synchronous: the resolver is produced immediately and the
// callback/request out-parameters are never used.
int ProxyResolverFactoryWinHttp::CreateProxyResolver(
    const scoped_refptr<ProxyResolverScriptData>& pac_script,
    scoped_ptr<ProxyResolver>* resolver,
    const CompletionCallback& callback,
    scoped_ptr<Request>* request) {
  resolver->reset(new ProxyResolverWinHttp(pac_script));
  return OK;
}
} // namespace net
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#include "pybind11/pybind11.h"
#include "tink/util/fake_kms_client.h"
#include "tink/util/statusor.h"
#include "tink/cc/pybind/status_casters.h"
namespace crypto {
namespace tink {
namespace test {
// Exposes FakeKmsClient::RegisterNewClient to Python as
// register_fake_kms_client_testonly, so tests can install a fake KMS backend
// for a given key URI.
void PybindRegisterCcFakeKmsClientTestonly(pybind11::module* module) {
  namespace py = pybind11;
  py::module& m = *module;
  m.def(
      "register_fake_kms_client_testonly",
      [](const std::string& key_uri,
         const std::string& credentials_path) -> util::Status {
        return FakeKmsClient::RegisterNewClient(key_uri, credentials_path);
      },
      py::arg("key_uri"), "URI of the key which should be used.",
      py::arg("credentials_path"), "Path to the credentials for the client.");
}
} // namespace test
} // namespace tink
} // namespace crypto
|
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/js-heap-broker.h"
#include "src/common/globals.h"
#include "src/compiler/heap-refs.h"
#ifdef ENABLE_SLOW_DCHECKS
#include <algorithm>
#endif
#include "include/v8-fast-api-calls.h"
#include "src/api/api-inl.h"
#include "src/ast/modules.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/access-info.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/execution/protectors-inl.h"
#include "src/init/bootstrapper.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils/utils.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(broker, x) TRACE_BROKER(broker, x)
#define TRACE_MISSING(broker, x) TRACE_BROKER_MISSING(broker, x)
#define FORWARD_DECL(Name) class Name##Data;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
// removed.
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
// There are five kinds of ObjectData values.
//
// kSmi: The underlying V8 object is a Smi and the data is an instance of the
// base class (ObjectData), i.e. it's basically just the handle. Because the
// object is a Smi, it's safe to access the handle in order to extract the
// number value, and AsSmi() does exactly that.
//
// kSerializedHeapObject: The underlying V8 object is a HeapObject and the
// data is an instance of the corresponding (most-specific) subclass, e.g.
// JSFunctionData, which provides serialized information about the object.
//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
// data is an instance of the base class (ObjectData), i.e. it basically
// carries no information other than the handle.
//
// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
// mutable) HeapObject and the data is an instance of ObjectData. Its handle
// must be persistent so that the GC can update it at a safepoint. Via this
// handle, the object can be accessed concurrently to the main thread. To be
// used the flag --turbo-direct-heap-access must be on.
//
// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
// HeapObject and the data is an instance of ObjectData. For
// ReadOnlyHeapObjects, it is OK to access heap even from off-thread, so
// these objects need not be serialized.
// The precise meaning of each kind is described in the block comment above.
enum ObjectDataKind {
  kSmi,
  kSerializedHeapObject,
  kUnserializedHeapObject,
  kNeverSerializedHeapObject,
  kUnserializedReadOnlyHeapObject
};
// RAII helper: permits handle allocation only for the object kind that needs
// it (kUnserializedHeapObject), and DCHECKs that a fully serialized broker
// never reaches a kind that would require main-thread handle allocation.
class AllowHandleAllocationIfNeeded {
 public:
  explicit AllowHandleAllocationIfNeeded(ObjectDataKind kind,
                                         JSHeapBroker::BrokerMode mode,
                                         bool direct_heap_access = false) {
    DCHECK_IMPLIES(mode == JSHeapBroker::BrokerMode::kSerialized,
                   kind == kUnserializedReadOnlyHeapObject ||
                       kind == kNeverSerializedHeapObject ||
                       (direct_heap_access && kind == kSerializedHeapObject));
    if (kind == kUnserializedHeapObject) maybe_allow_handle_.emplace();
  }

 private:
  base::Optional<AllowHandleAllocation> maybe_allow_handle_;
};

// RAII helper: permits handle dereference for kinds whose data lives behind
// the handle (unserialized and read-only heap objects).
class AllowHandleDereferenceIfNeeded {
 public:
  explicit AllowHandleDereferenceIfNeeded(ObjectDataKind kind,
                                          JSHeapBroker::BrokerMode mode,
                                          bool direct_heap_access = false)
      : AllowHandleDereferenceIfNeeded(kind) {
    DCHECK_IMPLIES(mode == JSHeapBroker::BrokerMode::kSerialized,
                   kind == kUnserializedReadOnlyHeapObject ||
                       kind == kNeverSerializedHeapObject ||
                       (direct_heap_access && kind == kSerializedHeapObject));
  }

  explicit AllowHandleDereferenceIfNeeded(ObjectDataKind kind) {
    if (kind == kUnserializedHeapObject ||
        kind == kUnserializedReadOnlyHeapObject) {
      maybe_allow_handle_.emplace();
    }
  }

 private:
  base::Optional<AllowHandleDereference> maybe_allow_handle_;
};

// RAII helper: permits heap allocation only for kUnserializedHeapObject; a
// serialized broker may only ever reach read-only objects here.
class AllowHeapAllocationIfNeeded {
 public:
  explicit AllowHeapAllocationIfNeeded(ObjectDataKind kind,
                                       JSHeapBroker::BrokerMode mode) {
    DCHECK_IMPLIES(mode == JSHeapBroker::BrokerMode::kSerialized,
                   kind == kUnserializedReadOnlyHeapObject);
    if (kind == kUnserializedHeapObject) maybe_allow_handle_.emplace();
  }

 private:
  base::Optional<AllowHeapAllocation> maybe_allow_handle_;
};
namespace {
// True when `object` may be read without serialization even off the main
// thread: builtin Code objects and anything living in the read-only heap.
bool IsReadOnlyHeapObject(Object object) {
  DisallowHeapAllocation no_gc;
  return (object.IsCode() && Code::cast(object).is_builtin()) ||
         (object.IsHeapObject() &&
          ReadOnlyHeap::Contains(HeapObject::cast(object)));
}
} // namespace
// Base class for all broker-side object data; holds the handle and the
// ObjectDataKind that governs how the object may be accessed (see the kind
// taxonomy in the comment above).
class ObjectData : public ZoneObject {
 public:
  ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
             ObjectDataKind kind)
      : object_(object), kind_(kind) {
    // This assignment ensures we don't end up inserting the same object
    // in an endless recursion.
    *storage = this;
    TRACE(broker, "Creating data " << this << " for handle " << object.address()
                                   << " (" << Brief(*object) << ")");
    // It is safe to access read only heap objects and builtins from a
    // background thread. When we read fields of these objects, we may create
    // ObjectData on the background thread even without a canonical handle
    // scope. This is safe too since we don't create handles but just get
    // handles from read only root table or builtins table which is what
    // canonical scope uses as well. For all other objects we should have
    // created ObjectData in canonical handle scope on the main thread.
    CHECK_IMPLIES(
        broker->mode() == JSHeapBroker::kDisabled ||
            broker->mode() == JSHeapBroker::kSerializing,
        broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
    CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
                  (kind == kUnserializedReadOnlyHeapObject &&
                   IsReadOnlyHeapObject(*object)) ||
                      kind == kNeverSerializedHeapObject);
  }

  // Generated Is*/As* accessors for every serialized data subclass.
#define DECLARE_IS(Name) bool Is##Name() const;
  HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
  HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
#undef DECLARE_IS

#define DECLARE_AS(Name) Name##Data* As##Name();
  HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
  // TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
  // removed.
  HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
#undef DECLARE_AS

  Handle<Object> object() const { return object_; }
  ObjectDataKind kind() const { return kind_; }
  bool is_smi() const { return kind_ == kSmi; }
  // True when reads should go straight to the heap rather than through
  // serialized data.
  bool should_access_heap() const {
    return kind_ == kUnserializedHeapObject ||
           kind_ == kNeverSerializedHeapObject ||
           kind_ == kUnserializedReadOnlyHeapObject;
  }

#ifdef DEBUG
  // Debug-only bookkeeping of how this data entry has been consumed.
  enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
  mutable Usage used_status = Usage::kUnused;
#endif  // DEBUG

 private:
  Handle<Object> const object_;
  ObjectDataKind const kind_;
};
// Serialized data for a HeapObject: caches the object's map and its boolean
// value.
class HeapObjectData : public ObjectData {
 public:
  HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
                 Handle<HeapObject> object);

  bool boolean_value() const { return boolean_value_; }
  ObjectData* map() const { return map_; }
  InstanceType GetMapInstanceType() const;

  static HeapObjectData* Serialize(JSHeapBroker* broker,
                                   Handle<HeapObject> object);

 private:
  bool const boolean_value_;
  ObjectData* const map_;
};
// Serialized data for a PropertyCell: the property details plus, once
// Serialize() has run, the cell's value.
class PropertyCellData : public HeapObjectData {
 public:
  PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
                   Handle<PropertyCell> object);

  PropertyDetails property_details() const { return property_details_; }

  void Serialize(JSHeapBroker* broker);
  ObjectData* value() const { return value_; }

 private:
  PropertyDetails const property_details_;
  ObjectData* value_ = nullptr;  // filled in by Serialize()
};
// TODO(mslekova): Once we have real-world usage data, we might want to
// reimplement this as sorted vector instead, to reduce the memory overhead.
typedef ZoneMap<ObjectData*, HolderLookupResult> KnownReceiversMap;

// Serialized data for a FunctionTemplateInfo, including fast-API call
// metadata (c_function/c_signature) and cached per-receiver lookup results.
class FunctionTemplateInfoData : public HeapObjectData {
 public:
  FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
                           Handle<FunctionTemplateInfo> object);

  bool is_signature_undefined() const { return is_signature_undefined_; }
  bool accept_any_receiver() const { return accept_any_receiver_; }
  bool has_call_code() const { return has_call_code_; }

  void SerializeCallCode(JSHeapBroker* broker);
  ObjectData* call_code() const { return call_code_; }
  Address c_function() const { return c_function_; }
  const CFunctionInfo* c_signature() const { return c_signature_; }
  KnownReceiversMap& known_receivers() { return known_receivers_; }

 private:
  bool is_signature_undefined_ = false;
  bool accept_any_receiver_ = false;
  bool has_call_code_ = false;
  ObjectData* call_code_ = nullptr;  // filled in by SerializeCallCode()
  const Address c_function_;
  const CFunctionInfo* const c_signature_;
  KnownReceiversMap known_receivers_;
};
// Serialized representation of a CallHandlerInfo. The callback address is
// captured eagerly; the data field is serialized lazily via Serialize().
class CallHandlerInfoData : public HeapObjectData {
 public:
  CallHandlerInfoData(JSHeapBroker* broker, ObjectData** storage,
                      Handle<CallHandlerInfo> object);

  Address callback() const { return callback_; }

  // Serializes the data field. Idempotent.
  void Serialize(JSHeapBroker* broker);
  // nullptr until Serialize() has been called.
  ObjectData* data() const { return data_; }

 private:
  Address const callback_;
  ObjectData* data_ = nullptr;
};
// Eagerly captures the C function/signature and the cheap boolean flags.
// Call code itself is serialized later via SerializeCallCode().
FunctionTemplateInfoData::FunctionTemplateInfoData(
    JSHeapBroker* broker, ObjectData** storage,
    Handle<FunctionTemplateInfo> object)
    : HeapObjectData(broker, storage, object),
      c_function_(v8::ToCData<Address>(object->GetCFunction())),
      c_signature_(v8::ToCData<CFunctionInfo*>(object->GetCSignature())),
      known_receivers_(broker->zone()) {
  // {object} is already a Handle<FunctionTemplateInfo>; the previous
  // Handle<FunctionTemplateInfo>::cast(object) round-trip was redundant.
  is_signature_undefined_ = object->signature().IsUndefined(broker->isolate());
  accept_any_receiver_ = object->accept_any_receiver();

  CallOptimization call_optimization(broker->isolate(), object);
  has_call_code_ = call_optimization.is_simple_api_call();
}
// Eagerly captures the callback address; the data field is serialized
// lazily via Serialize().
CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
                                         ObjectData** storage,
                                         Handle<CallHandlerInfo> object)
    : HeapObjectData(broker, storage, object),
      callback_(v8::ToCData<Address>(object->callback())) {
  // This data class is only used when not reading directly from the heap.
  DCHECK(!FLAG_turbo_direct_heap_access);
}
// These definitions are here in order to please the linker, which in debug mode
// sometimes requires static constants to be defined in .cc files.
const uint32_t JSHeapBroker::kMinimalRefsBucketCount;
const uint32_t JSHeapBroker::kInitialRefsBucketCount;

// Adjust the indentation level used by the broker's trace output.
void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
// Eagerly captures only the property details; the cell's value is
// serialized lazily via Serialize().
PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
                                   Handle<PropertyCell> object)
    : HeapObjectData(broker, storage, object),
      property_details_(object->property_details()) {}
// Lazily serializes the cell's value. A non-null value_ means this has
// already run, so the method is idempotent.
void PropertyCellData::Serialize(JSHeapBroker* broker) {
  if (value_ != nullptr) return;
  TraceScope tracer(broker, this, "PropertyCellData::Serialize");
  Handle<PropertyCell> property_cell = Handle<PropertyCell>::cast(object());
  value_ = broker->GetOrCreateData(property_cell->value());
}
// Lazily serializes the call code and, transitively, its data field.
// Idempotent: a non-null call_code_ means serialization already happened.
void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
  if (call_code_ != nullptr) return;
  TraceScope tracer(broker, this,
                    "FunctionTemplateInfoData::SerializeCallCode");
  Handle<FunctionTemplateInfo> info =
      Handle<FunctionTemplateInfo>::cast(object());
  call_code_ = broker->GetOrCreateData(info->call_code(kAcquireLoad));
  if (!call_code_->should_access_heap()) {
    call_code_->AsCallHandlerInfo()->Serialize(broker);
    return;
  }
  // TODO(mvstanton): When ObjectRef is in the never serialized list, this
  // code can be removed.
  broker->GetOrCreateData(
      Handle<CallHandlerInfo>::cast(call_code_->object())->data());
}
// Lazily serializes the data field. Idempotent.
void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
  if (data_ != nullptr) return;
  TraceScope tracer(broker, this, "CallHandlerInfoData::Serialize");
  Handle<CallHandlerInfo> info = Handle<CallHandlerInfo>::cast(object());
  data_ = broker->GetOrCreateData(info->data());
}
// A tagged union describing one in-object field of a serialized JSObject:
// either an unboxed double (stored as its raw 64 bit pattern) or serialized
// object data. The discriminant is whether object_ is nullptr.
class JSObjectField {
 public:
  bool IsDouble() const { return object_ == nullptr; }
  // Raw bit pattern of the double; CHECK-fails if this field holds an object.
  uint64_t AsBitsOfDouble() const {
    CHECK(IsDouble());
    return number_bits_;
  }
  double AsDouble() const {
    CHECK(IsDouble());
    return bit_cast<double>(number_bits_);
  }

  bool IsObject() const { return object_ != nullptr; }
  // CHECK-fails if this field holds a double.
  ObjectData* AsObject() const {
    CHECK(IsObject());
    return object_;
  }

  explicit JSObjectField(uint64_t value_bits) : number_bits_(value_bits) {}
  explicit JSObjectField(ObjectData* value) : object_(value) {}

 private:
  ObjectData* object_ = nullptr;
  uint64_t number_bits_ = 0;
};
// Serialized representation of a JSReceiver. Adds no state of its own; it
// exists to mirror the JSReceiver position in the heap-object hierarchy.
class JSReceiverData : public HeapObjectData {
 public:
  JSReceiverData(JSHeapBroker* broker, ObjectData** storage,
                 Handle<JSReceiver> object)
      : HeapObjectData(broker, storage, object) {}
};
// Serialized representation of a JSObject: in-object fields, elements, the
// object-create map, plus lazily populated caches of constant elements and
// own data properties.
class JSObjectData : public JSReceiverData {
 public:
  JSObjectData(JSHeapBroker* broker, ObjectData** storage,
               Handle<JSObject> object);

  // Recursive serialization of all reachable JSObjects.
  void SerializeAsBoilerplate(JSHeapBroker* broker);
  const JSObjectField& GetInobjectField(int property_index) const;

  // Shallow serialization of {elements}.
  void SerializeElements(JSHeapBroker* broker);
  bool serialized_elements() const { return serialized_elements_; }
  ObjectData* elements() const;

  void SerializeObjectCreateMap(JSHeapBroker* broker);

  ObjectData* object_create_map(
      JSHeapBroker* broker) const {  // Can be nullptr.
    if (!serialized_object_create_map_) {
      DCHECK_NULL(object_create_map_);
      TRACE_MISSING(broker, "object_create_map on " << this);
    }
    return object_create_map_;
  }

  // Returns the cached (possibly nullptr) answer for {index}, serializing
  // from the heap if {policy} permits.
  ObjectData* GetOwnConstantElement(
      JSHeapBroker* broker, uint32_t index,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
  ObjectData* GetOwnDataProperty(
      JSHeapBroker* broker, Representation representation,
      FieldIndex field_index,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);

  // This method is only used to assert our invariants.
  bool cow_or_empty_elements_tenured() const;

 private:
  void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);

  ObjectData* elements_ = nullptr;
  bool cow_or_empty_elements_tenured_ = false;
  // The {serialized_as_boilerplate} flag is set when all recursively
  // reachable JSObjects are serialized.
  bool serialized_as_boilerplate_ = false;
  bool serialized_elements_ = false;

  ZoneVector<JSObjectField> inobject_fields_;

  bool serialized_object_create_map_ = false;
  ObjectData* object_create_map_ = nullptr;

  // Elements (indexed properties) that either
  // (1) are known to exist directly on the object as non-writable and
  // non-configurable, or (2) are known not to (possibly they don't exist at
  // all). In case (2), the second pair component is nullptr.
  ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
  // Properties that either:
  // (1) are known to exist directly on the object, or
  // (2) are known not to (possibly they don't exist at all).
  // In case (2), the second pair component is nullptr.
  // For simplicity, this may in theory overlap with inobject_fields_.
  // The keys of the map are the property_index() values of the
  // respective property FieldIndex'es.
  ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
// Lazily serializes the object-create map, if this object's map is a
// prototype map with PrototypeInfo carrying one. Idempotent.
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
  if (serialized_object_create_map_) return;
  serialized_object_create_map_ = true;

  TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
  Handle<JSObject> receiver = Handle<JSObject>::cast(object());
  if (!receiver->map().is_prototype_map()) return;

  Handle<Object> maybe_proto_info(receiver->map().prototype_info(),
                                  broker->isolate());
  if (!maybe_proto_info->IsPrototypeInfo()) return;

  Handle<PrototypeInfo> proto_info =
      Handle<PrototypeInfo>::cast(maybe_proto_info);
  if (!proto_info->HasObjectCreateMap()) return;

  DCHECK_NULL(object_create_map_);
  object_create_map_ = broker->GetOrCreateData(proto_info->ObjectCreateMap());
}
namespace {

// Looks up the own element {index} directly on the heap. Returns it wrapped
// in an ObjectRef if it is a data property and, when {constant_only} is set,
// is additionally read-only and non-configurable; nullopt otherwise.
base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
                                                Handle<Object> receiver,
                                                uint32_t index,
                                                bool constant_only) {
  LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
  if (it.state() != LookupIterator::DATA) return base::nullopt;
  // Preserve short-circuit order: only query the attributes when needed.
  if (constant_only && !(it.IsReadOnly() && !it.IsConfigurable())) {
    return base::nullopt;
  }
  return ObjectRef(broker,
                   broker->CanonicalPersistentHandle(it.GetDataValue()));
}

// Reads the fast property at {field_index} straight off the heap object and
// wraps it in an ObjectRef.
ObjectRef GetOwnDataPropertyFromHeap(JSHeapBroker* broker,
                                     Handle<JSObject> receiver,
                                     Representation representation,
                                     FieldIndex field_index) {
  return ObjectRef(broker, JSObject::FastPropertyAt(receiver, representation,
                                                    field_index));
}

}  // namespace
// Returns the cached answer for constant element {index}, or (if {policy}
// permits) fetches it from the heap and caches it. A cached nullptr records
// a negative result.
ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
                                                uint32_t index,
                                                SerializationPolicy policy) {
  for (auto const& entry : own_constant_elements_) {
    if (entry.first == index) return entry.second;
  }
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
    return nullptr;
  }
  base::Optional<ObjectRef> element =
      GetOwnElementFromHeap(broker, object(), index, true);
  ObjectData* result = nullptr;
  if (element.has_value()) result = element->data();
  own_constant_elements_.push_back({index, result});
  return result;
}
// Returns the cached answer for the own data property at {field_index}, or
// (if {policy} permits) reads it from the heap and caches it.
ObjectData* JSObjectData::GetOwnDataProperty(JSHeapBroker* broker,
                                             Representation representation,
                                             FieldIndex field_index,
                                             SerializationPolicy policy) {
  int const property_index = field_index.property_index();
  auto it = own_properties_.find(property_index);
  if (it != own_properties_.end()) return it->second;

  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_MISSING(broker, "knowledge about property with index "
                              << property_index << " on " << this);
    return nullptr;
  }

  ObjectRef property = GetOwnDataPropertyFromHeap(
      broker, Handle<JSObject>::cast(object()), representation, field_index);
  ObjectData* result = property.data();
  own_properties_.insert(std::make_pair(property_index, result));
  return result;
}
// Serialized representation of a JSTypedArray. Length, on-heap flag, and the
// raw data pointer are captured eagerly; the backing buffer is serialized
// lazily via Serialize().
class JSTypedArrayData : public JSObjectData {
 public:
  JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
                   Handle<JSTypedArray> object);

  bool is_on_heap() const { return is_on_heap_; }
  size_t length() const { return length_; }
  void* data_ptr() const { return data_ptr_; }

  void Serialize(JSHeapBroker* broker);
  bool serialized() const { return serialized_; }

  // Only set (by Serialize) for off-heap typed arrays.
  ObjectData* buffer() const { return buffer_; }

 private:
  bool const is_on_heap_;
  size_t const length_;
  void* const data_ptr_;
  bool serialized_ = false;
  ObjectData* buffer_ = nullptr;
};
// Eagerly captures the cheap scalar properties of the typed array.
JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
                                   Handle<JSTypedArray> object)
    : JSObjectData(broker, storage, object),
      is_on_heap_(object->is_on_heap()),
      length_(object->length()),
      data_ptr_(object->DataPtr()) {}
// Lazily serializes the backing JSArrayBuffer for off-heap typed arrays.
// On-heap typed arrays need no extra data. Idempotent.
void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return;
  serialized_ = true;

  TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
  if (is_on_heap()) return;

  DCHECK_NULL(buffer_);
  Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
  buffer_ = broker->GetOrCreateData(typed_array->buffer());
}
// Serialized representation of an ArrayBoilerplateDescription; only caches
// the length of the constant elements.
class ArrayBoilerplateDescriptionData : public HeapObjectData {
 public:
  ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
                                  Handle<ArrayBoilerplateDescription> object)
      : HeapObjectData(broker, storage, object),
        constants_elements_length_(object->constant_elements().length()) {
    // Only used when not reading directly from the heap.
    DCHECK(!FLAG_turbo_direct_heap_access);
  }

  int constants_elements_length() const { return constants_elements_length_; }

 private:
  int const constants_elements_length_;
};
// Serialized representation of an ObjectBoilerplateDescription; only caches
// its size.
class ObjectBoilerplateDescriptionData : public HeapObjectData {
 public:
  ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
                                   Handle<ObjectBoilerplateDescription> object)
      : HeapObjectData(broker, storage, object), size_(object->size()) {
    // Only used when not reading directly from the heap.
    DCHECK(!FLAG_turbo_direct_heap_access);
  }

  int size() const { return size_; }

 private:
  int const size_;
};
// Serialized representation of a JSDataView; only caches the byte length.
class JSDataViewData : public JSObjectData {
 public:
  JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
                 Handle<JSDataView> object);

  size_t byte_length() const { return byte_length_; }

 private:
  size_t const byte_length_;
};
// Serialized representation of a JSBoundFunction. Target, bound this, and
// bound arguments are serialized lazily via Serialize().
class JSBoundFunctionData : public JSObjectData {
 public:
  JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
                      Handle<JSBoundFunction> object);

  // Returns whether serialization succeeded (see the definition for details).
  bool Serialize(JSHeapBroker* broker);
  bool serialized() const { return serialized_; }

  // The following are nullptr until Serialize() has been called.
  ObjectData* bound_target_function() const { return bound_target_function_; }
  ObjectData* bound_this() const { return bound_this_; }
  ObjectData* bound_arguments() const { return bound_arguments_; }

 private:
  bool serialized_ = false;
  ObjectData* bound_target_function_ = nullptr;
  ObjectData* bound_this_ = nullptr;
  ObjectData* bound_arguments_ = nullptr;
};
// Serialized representation of a JSFunction. Boolean flags are captured
// eagerly; the referenced objects (context, shared info, feedback, code,
// etc.) are serialized lazily via Serialize().
class JSFunctionData : public JSObjectData {
 public:
  JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
                 Handle<JSFunction> object);

  bool has_feedback_vector() const { return has_feedback_vector_; }
  bool has_initial_map() const { return has_initial_map_; }
  bool has_prototype() const { return has_prototype_; }
  bool HasAttachedOptimizedCode() const { return has_attached_optimized_code_; }
  bool PrototypeRequiresRuntimeLookup() const {
    return PrototypeRequiresRuntimeLookup_;
  }

  void Serialize(JSHeapBroker* broker);
  bool serialized() const { return serialized_; }

  // The following are nullptr until Serialize() has been called.
  ObjectData* context() const { return context_; }
  ObjectData* native_context() const { return native_context_; }
  ObjectData* initial_map() const { return initial_map_; }
  ObjectData* prototype() const { return prototype_; }
  ObjectData* shared() const { return shared_; }
  ObjectData* raw_feedback_cell() const { return feedback_cell_; }
  ObjectData* feedback_vector() const { return feedback_vector_; }
  ObjectData* code() const { return code_; }
  int initial_map_instance_size_with_min_slack() const {
    CHECK(serialized_);
    return initial_map_instance_size_with_min_slack_;
  }

 private:
  bool has_feedback_vector_;
  bool has_initial_map_;
  bool has_prototype_;
  bool has_attached_optimized_code_;
  // NOTE(review): PascalCase member name is inconsistent with the
  // surrounding snake_case members; the constructor that initializes it is
  // defined further down in this file, so renaming must be done jointly.
  bool PrototypeRequiresRuntimeLookup_;

  bool serialized_ = false;

  ObjectData* context_ = nullptr;
  ObjectData* native_context_ = nullptr;
  ObjectData* initial_map_ = nullptr;
  ObjectData* prototype_ = nullptr;
  ObjectData* shared_ = nullptr;
  ObjectData* feedback_vector_ = nullptr;
  ObjectData* feedback_cell_ = nullptr;
  ObjectData* code_ = nullptr;
  int initial_map_instance_size_with_min_slack_;
};
// Serialized representation of a JSRegExp. All referenced objects are
// serialized lazily via SerializeAsRegExpBoilerplate().
class JSRegExpData : public JSObjectData {
 public:
  JSRegExpData(JSHeapBroker* broker, ObjectData** storage,
               Handle<JSRegExp> object)
      : JSObjectData(broker, storage, object) {}

  void SerializeAsRegExpBoilerplate(JSHeapBroker* broker);

  // The following are nullptr until SerializeAsRegExpBoilerplate has run.
  ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
  ObjectData* data() const { return data_; }
  ObjectData* source() const { return source_; }
  ObjectData* flags() const { return flags_; }
  ObjectData* last_index() const { return last_index_; }

 private:
  bool serialized_as_reg_exp_boilerplate_ = false;

  ObjectData* raw_properties_or_hash_ = nullptr;
  ObjectData* data_ = nullptr;
  ObjectData* source_ = nullptr;
  ObjectData* flags_ = nullptr;
  ObjectData* last_index_ = nullptr;
};
// Serialized representation of a HeapNumber; caches only the double value.
class HeapNumberData : public HeapObjectData {
 public:
  HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
                 Handle<HeapNumber> object)
      : HeapObjectData(broker, storage, object), value_(object->value()) {
  }

  double value() const { return value_; }

 private:
  double const value_;
};
// Serialized representation of a Context. The previous context and
// individual slots are serialized lazily, on demand.
class ContextData : public HeapObjectData {
 public:
  ContextData(JSHeapBroker* broker, ObjectData** storage,
              Handle<Context> object);

  // Returns the serialized previous context, or nullptr if not yet
  // serialized, unless {policy} is {kSerializeIfNeeded}.
  ObjectData* previous(
      JSHeapBroker* broker,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);

  // Returns nullptr if the slot index isn't valid or wasn't serialized,
  // unless {policy} is {kSerializeIfNeeded}.
  ObjectData* GetSlot(
      JSHeapBroker* broker, int index,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);

 private:
  // Lazily populated cache of serialized context slots, keyed by slot index.
  ZoneMap<int, ObjectData*> slots_;
  ObjectData* previous_ = nullptr;
};
// Captures nothing eagerly; slots and the previous context are serialized
// on demand.
ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
                         Handle<Context> object)
    : HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
// Returns the serialized previous context, serializing it lazily when
// {policy} permits; otherwise may return nullptr.
ObjectData* ContextData::previous(JSHeapBroker* broker,
                                  SerializationPolicy policy) {
  if (previous_ == nullptr &&
      policy == SerializationPolicy::kSerializeIfNeeded) {
    TraceScope tracer(broker, this, "ContextData::previous");
    Handle<Context> context = Handle<Context>::cast(object());
    previous_ = broker->GetOrCreateData(context->unchecked_previous());
  }
  return previous_;
}
// Returns the serialized slot {index}, serializing it lazily when {policy}
// permits and the index is in range; otherwise returns nullptr.
ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
                                 SerializationPolicy policy) {
  CHECK_GE(index, 0);
  auto it = slots_.find(index);
  if (it != slots_.end()) return it->second;

  if (policy != SerializationPolicy::kSerializeIfNeeded) return nullptr;

  Handle<Context> context = Handle<Context>::cast(object());
  if (index >= context->length()) return nullptr;

  TraceScope tracer(broker, this, "ContextData::GetSlot");
  TRACE(broker, "Serializing context slot " << index);
  ObjectData* slot_data = broker->GetOrCreateData(context->get(index));
  slots_.insert(std::make_pair(index, slot_data));
  return slot_data;
}
// Serialized representation of a NativeContext. One accessor/member pair is
// generated per field listed in BROKER_NATIVE_CONTEXT_FIELDS; everything is
// populated by Serialize().
class NativeContextData : public ContextData {
 public:
#define DECL_ACCESSOR(type, name) \
  ObjectData* name() const { return name##_; }
  BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR

  // Only valid after Serialize() has been called.
  const ZoneVector<ObjectData*>& function_maps() const {
    CHECK(serialized_);
    return function_maps_;
  }

  // Only valid after Serialize() has been called.
  ObjectData* scope_info() const {
    CHECK(serialized_);
    return scope_info_;
  }

  NativeContextData(JSHeapBroker* broker, ObjectData** storage,
                    Handle<NativeContext> object);
  void Serialize(JSHeapBroker* broker);

 private:
  bool serialized_ = false;
#define DECL_MEMBER(type, name) ObjectData* name##_ = nullptr;
  BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
  ZoneVector<ObjectData*> function_maps_;
  ObjectData* scope_info_ = nullptr;
};
// Serialized representation of a Name (common base for strings and symbols);
// adds no state of its own.
class NameData : public HeapObjectData {
 public:
  NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
      : HeapObjectData(broker, storage, object) {}
};
// Serialized representation of a String. Length, first character, cheap
// to-number conversion, and representation flags are captured eagerly;
// individual characters are cached lazily via GetCharAsString().
class StringData : public NameData {
 public:
  StringData(JSHeapBroker* broker, ObjectData** storage, Handle<String> object);

  int length() const { return length_; }
  uint16_t first_char() const { return first_char_; }
  // Cheap numeric value of the string, if it could be computed without
  // heap allocation (see StringToDouble below).
  base::Optional<double> to_number() const { return to_number_; }
  bool is_external_string() const { return is_external_string_; }
  bool is_seq_string() const { return is_seq_string_; }

  ObjectData* GetCharAsString(
      JSHeapBroker* broker, uint32_t index,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);

 private:
  int const length_;
  uint16_t const first_char_;
  base::Optional<double> to_number_;
  bool const is_external_string_;
  bool const is_seq_string_;

  // Known individual characters as strings, corresponding to the semantics of
  // element access (s[i]). The first pair component is always less than
  // {length_}. The second component is never nullptr.
  ZoneVector<std::pair<uint32_t, ObjectData*>> chars_as_strings_;
};
// Serialized representation of a Symbol; adds no state of its own.
class SymbolData : public NameData {
 public:
  SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
      : NameData(broker, storage, object) {
    // Only used when not reading directly from the heap.
    DCHECK(!FLAG_turbo_direct_heap_access);
  }
};
namespace {

// String-to-double conversion that avoids heap allocation by flattening the
// string into a stack buffer. Returns nullopt for strings longer than the
// conversion limit.
base::Optional<double> StringToDouble(Handle<String> object) {
  const int kMaxLengthForDoubleConversion = 23;
  String string = *object;
  int length = string.length();
  if (length > kMaxLengthForDoubleConversion) return base::nullopt;

  const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
  uc16 buffer[kMaxLengthForDoubleConversion];
  String::WriteToFlat(*object, buffer, 0, length);
  Vector<const uc16> v(buffer, length);
  // Delegates to the Vector-based StringToDouble overload.
  return StringToDouble(v, flags);
}

}  // namespace
// Eagerly captures the cheap-to-compute string properties; the per-character
// cache starts empty and is filled by GetCharAsString().
StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
                       Handle<String> object)
    : NameData(broker, storage, object),
      length_(object->length()),
      first_char_(length_ > 0 ? object->Get(0) : 0),
      to_number_(StringToDouble(object)),
      is_external_string_(object->IsExternalString()),
      is_seq_string_(object->IsSeqString()),
      chars_as_strings_(broker->zone()) {}
// Serialized representation of an InternalizedString.
class InternalizedStringData : public StringData {
 public:
  InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
                         Handle<InternalizedString> object);

  uint32_t array_index() const { return array_index_; }

 private:
  // Default-initialized: the constructor never assigns this member, so
  // without an initializer array_index() would return an indeterminate
  // value (undefined behavior).
  uint32_t array_index_ = 0;
};
// Returns the cached single-character string at {index} (semantics of s[i]),
// fetching from the heap and caching when {policy} permits. Out-of-range
// indices yield nullptr immediately.
ObjectData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
                                        SerializationPolicy policy) {
  if (index >= static_cast<uint32_t>(length())) return nullptr;

  for (auto const& cached : chars_as_strings_) {
    if (cached.first == index) return cached.second;
  }
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
    return nullptr;
  }

  base::Optional<ObjectRef> element =
      GetOwnElementFromHeap(broker, object(), index, true);
  ObjectData* result = nullptr;
  if (element.has_value()) result = element->data();
  chars_as_strings_.push_back({index, result});
  return result;
}
// Nothing beyond the StringData payload is captured here.
InternalizedStringData::InternalizedStringData(
    JSHeapBroker* broker, ObjectData** storage,
    Handle<InternalizedString> object)
    : StringData(broker, storage, object) {}
namespace {

// Recursively checks whether {boilerplate} (and every JSObject reachable
// from it, up to {max_depth} levels and *max_properties total fields) is
// simple enough to be deep-copied by the fast literal path. Decrements
// *max_properties as fields are visited; returns false as soon as any
// limit is exceeded or an unsupported shape is found.
bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
                         int* max_properties) {
  DCHECK_GE(max_depth, 0);
  DCHECK_GE(*max_properties, 0);

  Isolate* const isolate = boilerplate->GetIsolate();

  // Make sure the boilerplate map is not deprecated.
  if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false;

  // Check for too deep nesting.
  if (max_depth == 0) return false;

  // Check the elements.
  Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
  if (elements->length() > 0 &&
      elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
    if (boilerplate->HasSmiOrObjectElements()) {
      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
      int length = elements->length();
      for (int i = 0; i < length; i++) {
        if ((*max_properties)-- == 0) return false;
        Handle<Object> value(fast_elements->get(i), isolate);
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          // Recurse into nested object literals.
          if (!IsFastLiteralHelper(value_object, max_depth - 1,
                                   max_properties)) {
            return false;
          }
        }
      }
    } else if (boilerplate->HasDoubleElements()) {
      // Double elements are copied wholesale; bound the copy size.
      if (elements->Size() > kMaxRegularHeapObjectSize) return false;
    } else {
      return false;
    }
  }

  // TODO(turbofan): Do we want to support out-of-object properties?
  if (!(boilerplate->HasFastProperties() &&
        boilerplate->property_array().length() == 0)) {
    return false;
  }

  // Check the in-object properties.
  Handle<DescriptorArray> descriptors(
      boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
  for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
    PropertyDetails details = descriptors->GetDetails(i);
    if (details.location() != kField) continue;
    DCHECK_EQ(kData, details.kind());
    if ((*max_properties)-- == 0) return false;
    FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
    // Unboxed doubles need no nested check.
    if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
    Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
        return false;
      }
    }
  }
  return true;
}

// Maximum depth and total number of elements and properties for literal
// graphs to be considered for fast deep-copying. The limit is chosen to
// match the maximum number of inobject properties, to ensure that the
// performance of using object literals is not worse than using constructor
// functions, see crbug.com/v8/6211 for details.
const int kMaxFastLiteralDepth = 3;
const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;

// Determines whether the given array or object literal boilerplate satisfies
// all limits to be considered for fast deep-copying and computes the total
// size of all objects that are part of the graph.
bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
  int max_properties = kMaxFastLiteralProperties;
  return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
                             &max_properties);
}

}  // namespace
// Serialized representation of an AccessorInfo; adds no state of its own.
class AccessorInfoData : public HeapObjectData {
 public:
  AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
                   Handle<AccessorInfo> object);
};
// Serialized representation of an AllocationSite. Scalars are captured
// eagerly; the boilerplate object graph is serialized lazily via
// SerializeBoilerplate().
class AllocationSiteData : public HeapObjectData {
 public:
  AllocationSiteData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<AllocationSite> object);
  void SerializeBoilerplate(JSHeapBroker* broker);

  bool PointsToLiteral() const { return PointsToLiteral_; }
  AllocationType GetAllocationType() const { return GetAllocationType_; }
  ObjectData* nested_site() const { return nested_site_; }
  // Only meaningful when PointsToLiteral is true.
  bool IsFastLiteral() const { return IsFastLiteral_; }
  ObjectData* boilerplate() const { return boilerplate_; }

  // These are only valid if PointsToLiteral is false.
  ElementsKind GetElementsKind() const { return GetElementsKind_; }
  bool CanInlineCall() const { return CanInlineCall_; }

 private:
  bool const PointsToLiteral_;
  AllocationType const GetAllocationType_;
  ObjectData* nested_site_ = nullptr;
  bool IsFastLiteral_ = false;
  ObjectData* boilerplate_ = nullptr;
  ElementsKind GetElementsKind_ = NO_ELEMENTS;
  bool CanInlineCall_ = false;
  bool serialized_boilerplate_ = false;
};
// Serialized representation of a BigInt; caches only the (possibly
// truncated) uint64 value.
class BigIntData : public HeapObjectData {
 public:
  BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
      : HeapObjectData(broker, storage, object),
        as_uint64_(object->AsUint64(nullptr)) {
  }

  uint64_t AsUint64() const { return as_uint64_; }

 private:
  const uint64_t as_uint64_;
};
// Only used in JSNativeContextSpecialization.
// Serialized representation of a ScriptContextTable; adds no state.
class ScriptContextTableData : public HeapObjectData {
 public:
  ScriptContextTableData(JSHeapBroker* broker, ObjectData** storage,
                         Handle<ScriptContextTable> object)
      : HeapObjectData(broker, storage, object) {}
};
// Plain aggregate describing one serialized own descriptor of a map: the
// property's key/value data plus its details and field location.
struct PropertyDescriptor {
  ObjectData* key = nullptr;
  ObjectData* value = nullptr;
  PropertyDetails details = PropertyDetails::Empty();
  FieldIndex field_index;
  // Map that owns the field, and the field's type, when applicable.
  ObjectData* field_owner = nullptr;
  ObjectData* field_type = nullptr;
  bool is_unboxed_double_field = false;
};
// Serialized representation of a Map. The scalar bit fields and derived
// booleans are captured eagerly in the constructor; descriptors,
// constructor, back pointer, prototype, root map, and element-transition
// data are serialized lazily via the Serialize* methods.
class MapData : public HeapObjectData {
 public:
  MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object);

  InstanceType instance_type() const { return instance_type_; }
  int instance_size() const { return instance_size_; }
  byte bit_field() const { return bit_field_; }
  byte bit_field2() const { return bit_field2_; }
  uint32_t bit_field3() const { return bit_field3_; }
  bool can_be_deprecated() const { return can_be_deprecated_; }
  bool can_transition() const { return can_transition_; }
  // Only meaningful for JSObject maps; CHECK-guarded accordingly.
  int in_object_properties_start_in_words() const {
    CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
    return in_object_properties_start_in_words_;
  }
  int in_object_properties() const {
    CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
    return in_object_properties_;
  }
  int constructor_function_index() const { return constructor_function_index_; }
  int NextFreePropertyIndex() const { return next_free_property_index_; }
  int UnusedPropertyFields() const { return unused_property_fields_; }
  bool supports_fast_array_iteration() const {
    return supports_fast_array_iteration_;
  }
  bool supports_fast_array_resize() const {
    return supports_fast_array_resize_;
  }
  bool is_abandoned_prototype_map() const {
    return is_abandoned_prototype_map_;
  }

  // Extra information.
  void SerializeElementsKindGeneralizations(JSHeapBroker* broker);
  const ZoneVector<ObjectData*>& elements_kind_generalizations() const {
    CHECK(serialized_elements_kind_generalizations_);
    return elements_kind_generalizations_;
  }

  // Serialize a single (or all) own slot(s) of the descriptor array and recurse
  // on field owner(s).
  void SerializeOwnDescriptor(JSHeapBroker* broker,
                              InternalIndex descriptor_index);
  void SerializeOwnDescriptors(JSHeapBroker* broker);
  ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
  // TODO(neis): This code needs to be changed to allow for ObjectData* instance
  // descriptors. However, this is likely to require a non-trivial refactoring
  // of how maps are serialized because actual instance descriptors don't
  // contain information about owner maps.
  DescriptorArrayData* instance_descriptors() const {
    return instance_descriptors_;
  }

  void SerializeRootMap(JSHeapBroker* broker);
  ObjectData* FindRootMap() const;

  void SerializeConstructor(JSHeapBroker* broker);
  ObjectData* GetConstructor() const {
    CHECK(serialized_constructor_);
    return constructor_;
  }

  void SerializeBackPointer(JSHeapBroker* broker);
  ObjectData* GetBackPointer() const {
    CHECK(serialized_backpointer_);
    return backpointer_;
  }

  void SerializePrototype(JSHeapBroker* broker);
  bool serialized_prototype() const { return serialized_prototype_; }
  ObjectData* prototype() const {
    CHECK(serialized_prototype_);
    return prototype_;
  }

  void SerializeForElementLoad(JSHeapBroker* broker);

  void SerializeForElementStore(JSHeapBroker* broker);

 private:
  InstanceType const instance_type_;
  int const instance_size_;
  byte const bit_field_;
  byte const bit_field2_;
  uint32_t const bit_field3_;
  bool const can_be_deprecated_;
  bool const can_transition_;
  int const in_object_properties_start_in_words_;
  int const in_object_properties_;
  int const constructor_function_index_;
  int const next_free_property_index_;
  int const unused_property_fields_;
  bool const supports_fast_array_iteration_;
  bool const supports_fast_array_resize_;
  bool const is_abandoned_prototype_map_;

  // Each lazily-serialized datum below is paired with a "serialized_" flag
  // so that a legitimately-null result can be distinguished from
  // "not yet serialized".
  bool serialized_elements_kind_generalizations_ = false;
  ZoneVector<ObjectData*> elements_kind_generalizations_;

  bool serialized_own_descriptors_ = false;
  DescriptorArrayData* instance_descriptors_ = nullptr;

  bool serialized_constructor_ = false;
  ObjectData* constructor_ = nullptr;

  bool serialized_backpointer_ = false;
  ObjectData* backpointer_ = nullptr;

  bool serialized_prototype_ = false;
  ObjectData* prototype_ = nullptr;

  bool serialized_root_map_ = false;
  ObjectData* root_map_ = nullptr;

  bool serialized_for_element_load_ = false;

  bool serialized_for_element_store_ = false;
};
// Nothing beyond the HeapObjectData payload is captured here.
AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
                                   Handle<AccessorInfo> object)
    : HeapObjectData(broker, storage, object) {
  // Only used when not reading directly from the heap.
  DCHECK(!FLAG_turbo_direct_heap_access);
}
// Eagerly captures the literal/allocation flags. For literal sites, decides
// up front whether the boilerplate qualifies for fast deep-copying; for
// non-literal sites, records the elements kind and inlinability instead.
AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
                                       ObjectData** storage,
                                       Handle<AllocationSite> object)
    : HeapObjectData(broker, storage, object),
      PointsToLiteral_(object->PointsToLiteral()),
      GetAllocationType_(object->GetAllocationType()) {
  if (PointsToLiteral_) {
    IsFastLiteral_ = IsInlinableFastLiteral(
        handle(object->boilerplate(), broker->isolate()));
  } else {
    GetElementsKind_ = object->GetElementsKind();
    CanInlineCall_ = object->CanInlineCall();
  }
}
// Lazily serializes the boilerplate object graph and the nested site chain.
// Requires the site to have been classified as a fast literal. Idempotent.
void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
  if (serialized_boilerplate_) return;
  serialized_boilerplate_ = true;

  TraceScope tracer(broker, this, "AllocationSiteData::SerializeBoilerplate");
  Handle<AllocationSite> site = Handle<AllocationSite>::cast(object());

  CHECK(IsFastLiteral_);
  DCHECK_NULL(boilerplate_);
  boilerplate_ = broker->GetOrCreateData(site->boilerplate());
  if (!boilerplate_->should_access_heap()) {
    boilerplate_->AsJSObject()->SerializeAsBoilerplate(broker);
  }

  DCHECK_NULL(nested_site_);
  nested_site_ = broker->GetOrCreateData(site->nested_site());
  bool const serialize_nested = nested_site_->IsAllocationSite() &&
                                !nested_site_->should_access_heap();
  if (serialize_nested) {
    nested_site_->AsAllocationSite()->SerializeBoilerplate(broker);
  }
}
// Eagerly captures the boolean coercion value and the map. Only valid while
// the broker is in serializing mode.
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
                               Handle<HeapObject> object)
    : ObjectData(broker, storage, object, kSerializedHeapObject),
      boolean_value_(object->BooleanValue(broker->isolate())),
      // We have to use a raw cast below instead of AsMap() because of
      // recursion. AsMap() would call IsMap(), which accesses the
      // instance_type_ member. In the case of constructing the MapData for the
      // meta map (whose map is itself), this member has not yet been
      // initialized.
      map_(broker->GetOrCreateData(object->map())) {
  CHECK_EQ(broker->mode(), JSHeapBroker::kSerializing);
}
// Reads the instance type, either from serialized MapData or — for maps
// flagged for direct heap access — from the heap itself.
InstanceType HeapObjectData::GetMapInstanceType() const {
  ObjectData* map_data = map();
  if (!map_data->should_access_heap()) {
    return map_data->AsMap()->instance_type();
  }
  AllowHandleDereferenceIfNeeded allow_handle_dereference(kind());
  return Handle<Map>::cast(map_data->object())->instance_type();
}
namespace {

// Whether the "length" property of a (non-dictionary) JSArray map is
// read-only.
bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
  DCHECK(!jsarray_map->is_dictionary_map());
  Handle<Name> length_string = isolate->factory()->length_string();
  DescriptorArray descriptors = jsarray_map->instance_descriptors(kRelaxedLoad);
  // TODO(jkummerow): We could skip the search and hardcode number == 0.
  InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
  DCHECK(number.is_found());
  return descriptors.GetDetails(number).IsReadOnly();
}

// Guard clauses mirror the original &&-chain, evaluated in the same order.
bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
  if (map->instance_type() != JS_ARRAY_TYPE) return false;
  if (!IsFastElementsKind(map->elements_kind())) return false;
  if (!map->prototype().IsJSArray()) return false;
  if (!isolate->IsAnyInitialArrayPrototype(
          handle(JSArray::cast(map->prototype()), isolate))) {
    return false;
  }
  return Protectors::IsNoElementsIntact(isolate);
}

bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
  if (!SupportsFastArrayIteration(isolate, map)) return false;
  if (!map->is_extensible()) return false;
  if (map->is_dictionary_map()) return false;
  return !IsReadOnlyLengthDescriptor(isolate, map);
}

}  // namespace
// Eagerly snapshots the scalar Map state used by the compiler. Fields that
// only exist for certain map kinds (JSObject maps, primitive maps) fall back
// to neutral defaults when absent.
MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
    : HeapObjectData(broker, storage, object),
      instance_type_(object->instance_type()),
      instance_size_(object->instance_size()),
      bit_field_(object->bit_field()),
      bit_field2_(object->bit_field2()),
      bit_field3_(object->bit_field3()),
      // CanBeDeprecated() is only meaningful when there are own descriptors.
      can_be_deprecated_(object->NumberOfOwnDescriptors() > 0
                             ? object->CanBeDeprecated()
                             : false),
      can_transition_(object->CanTransition()),
      in_object_properties_start_in_words_(
          object->IsJSObjectMap() ? object->GetInObjectPropertiesStartInWords()
                                  : 0),
      in_object_properties_(
          object->IsJSObjectMap() ? object->GetInObjectProperties() : 0),
      constructor_function_index_(object->IsPrimitiveMap()
                                      ? object->GetConstructorFunctionIndex()
                                      : Map::kNoConstructorFunctionIndex),
      next_free_property_index_(object->NextFreePropertyIndex()),
      unused_property_fields_(object->UnusedPropertyFields()),
      supports_fast_array_iteration_(
          SupportsFastArrayIteration(broker->isolate(), object)),
      supports_fast_array_resize_(
          SupportsFastArrayResize(broker->isolate(), object)),
      is_abandoned_prototype_map_(object->is_abandoned_prototype_map()),
      elements_kind_generalizations_(broker->zone()) {}
// Snapshots cheap boolean facts about the function; the heavier references
// (context, shared info, feedback, code) are filled in lazily by Serialize().
JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
                               Handle<JSFunction> object)
    : JSObjectData(broker, storage, object),
      has_feedback_vector_(object->has_feedback_vector()),
      // initial_map / prototype only exist when the function has a
      // prototype slot at all.
      has_initial_map_(object->has_prototype_slot() &&
                       object->has_initial_map()),
      has_prototype_(object->has_prototype_slot() && object->has_prototype()),
      has_attached_optimized_code_(object->HasAttachedOptimizedCode()),
      PrototypeRequiresRuntimeLookup_(
          object->PrototypeRequiresRuntimeLookup()) {}
// Serializes the function's references (context, shared info, feedback,
// code, initial map, prototype) and recurses into the initial map where
// needed. Idempotent via serialized_.
void JSFunctionData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return;
  serialized_ = true;
  TraceScope tracer(broker, this, "JSFunctionData::Serialize");
  Handle<JSFunction> function = Handle<JSFunction>::cast(object());
  DCHECK_NULL(context_);
  DCHECK_NULL(native_context_);
  DCHECK_NULL(initial_map_);
  DCHECK_NULL(prototype_);
  DCHECK_NULL(shared_);
  DCHECK_NULL(feedback_cell_);
  DCHECK_NULL(feedback_vector_);
  DCHECK_NULL(code_);
  context_ = broker->GetOrCreateData(function->context());
  native_context_ = broker->GetOrCreateData(function->native_context());
  shared_ = broker->GetOrCreateData(function->shared());
  feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
  // Optional slots stay nullptr when the corresponding has_* flag (captured
  // at construction time) is false.
  feedback_vector_ = has_feedback_vector()
                         ? broker->GetOrCreateData(function->feedback_vector())
                         : nullptr;
  code_ = broker->GetOrCreateData(function->code());
  initial_map_ = has_initial_map()
                     ? broker->GetOrCreateData(function->initial_map())
                     : nullptr;
  prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype())
                               : nullptr;
  if (initial_map_ != nullptr) {
    initial_map_instance_size_with_min_slack_ =
        function->ComputeInstanceSizeWithMinSlack(broker->isolate());
  }
  if (initial_map_ != nullptr && !initial_map_->should_access_heap()) {
    if (initial_map_->AsMap()->instance_type() == JS_ARRAY_TYPE) {
      initial_map_->AsMap()->SerializeElementsKindGeneralizations(broker);
    }
    initial_map_->AsMap()->SerializeConstructor(broker);
    // TODO(neis): This is currently only needed for native_context's
    // object_function, as used by GetObjectCreateMap. If no further use sites
    // show up, we should move this into NativeContextData::Serialize.
    initial_map_->AsMap()->SerializePrototype(broker);
  }
}
// For a JSArray map, records the transition-target maps for every strictly
// more general fast elements kind. Idempotent via the serialized flag.
void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
  if (serialized_elements_kind_generalizations_) return;
  serialized_elements_kind_generalizations_ = true;
  TraceScope tracer(broker, this,
                    "MapData::SerializeElementsKindGeneralizations");
  DCHECK_EQ(instance_type(), JS_ARRAY_TYPE);
  MapRef self(broker, this);
  ElementsKind from_kind = self.elements_kind();
  DCHECK(elements_kind_generalizations_.empty());
  for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
    ElementsKind to_kind = static_cast<ElementsKind>(i);
    if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
      Handle<Map> target =
          Map::AsElementsKind(broker->isolate(), self.object(), to_kind);
      elements_kind_generalizations_.push_back(broker->GetOrCreateData(target));
    }
  }
}
// Serialized view of a DescriptorArray. Descriptors are copied lazily (see
// MapData::SerializeOwnDescriptor), keyed by descriptor index.
class DescriptorArrayData : public HeapObjectData {
 public:
  DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
                      Handle<DescriptorArray> object)
      : HeapObjectData(broker, storage, object), contents_(broker->zone()) {}
  ZoneMap<int, PropertyDescriptor>& contents() { return contents_; }
 private:
  // Descriptor index -> copied descriptor; sparsely populated.
  ZoneMap<int, PropertyDescriptor> contents_;
};
// Serialized view of a FeedbackCell; captures the cell's value eagerly.
class FeedbackCellData : public HeapObjectData {
 public:
  FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
                   Handle<FeedbackCell> object);
  ObjectData* value() const { return value_; }
 private:
  ObjectData* const value_;
};
// Eagerly serializes the cell's value object.
FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
                                   Handle<FeedbackCell> object)
    : HeapObjectData(broker, storage, object),
      value_(broker->GetOrCreateData(object->value())) {}
// Serialized view of a FeedbackVector. The invocation count is captured
// eagerly; the shared function info and closure feedback cells require an
// explicit Serialize() call.
class FeedbackVectorData : public HeapObjectData {
 public:
  FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<FeedbackVector> object);
  double invocation_count() const { return invocation_count_; }
  ObjectData* shared_function_info() {
    CHECK(serialized_);
    return shared_function_info_;
  }
  void Serialize(JSHeapBroker* broker);
  bool serialized() const { return serialized_; }
  // Returns nullptr (with a broker-missing trace) if not yet serialized.
  ObjectData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const;
 private:
  double const invocation_count_;
  bool serialized_ = false;
  ObjectData* shared_function_info_;
  ZoneVector<ObjectData*> closure_feedback_cell_array_;
};
// Captures only the invocation count; everything else is deferred to
// Serialize().
FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
                                       ObjectData** storage,
                                       Handle<FeedbackVector> object)
    : HeapObjectData(broker, storage, object),
      invocation_count_(object->invocation_count()),
      closure_feedback_cell_array_(broker->zone()) {}
// Returns the serialized closure feedback cell at `index`, or nullptr (with
// a broker-missing trace) when the vector has not been serialized yet.
ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
                                                       int index) const {
  CHECK_GE(index, 0);
  size_t cell_array_size = closure_feedback_cell_array_.size();
  if (!serialized_) {
    DCHECK_EQ(cell_array_size, 0);
    TRACE_BROKER_MISSING(broker,
                         " closure feedback cell array for vector " << this);
    return nullptr;
  }
  CHECK_LT(index, cell_array_size);
  return closure_feedback_cell_array_[index];
}
// Serializes the shared function info and copies all closure feedback cells
// into broker data. Idempotent via serialized_.
void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return;
  serialized_ = true;
  TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
  Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
  Handle<SharedFunctionInfo> sfi(vector->shared_function_info(),
                                 broker->isolate());
  shared_function_info_ = broker->GetOrCreateData(sfi);
  DCHECK(closure_feedback_cell_array_.empty());
  int length = vector->closure_feedback_cell_array().length();
  closure_feedback_cell_array_.reserve(length);
  for (int i = 0; i < length; ++i) {
    Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i);
    ObjectData* cell_data = broker->GetOrCreateData(cell);
    closure_feedback_cell_array_.push_back(cell_data);
  }
  TRACE(broker, "Copied " << length << " feedback cells");
}
// Serialized view of a FixedArrayBase; captures only the length.
class FixedArrayBaseData : public HeapObjectData {
 public:
  FixedArrayBaseData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<FixedArrayBase> object)
      : HeapObjectData(broker, storage, object), length_(object->length()) {}
  int length() const { return length_; }
 private:
  int const length_;
};
// Serialized view of a FixedArray; element contents are copied lazily via
// SerializeContents().
class FixedArrayData : public FixedArrayBaseData {
 public:
  FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
                 Handle<FixedArray> object);
  // Creates all elements of the fixed array.
  void SerializeContents(JSHeapBroker* broker);
  ObjectData* Get(int i) const;
 private:
  bool serialized_contents_ = false;
  ZoneVector<ObjectData*> contents_;
};
// Captures the data view's byte length eagerly.
JSDataViewData::JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
                               Handle<JSDataView> object)
    : JSObjectData(broker, storage, object),
      byte_length_(object->byte_length()) {}
// All state is filled in lazily by Serialize().
JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
                                         ObjectData** storage,
                                         Handle<JSBoundFunction> object)
    : JSObjectData(broker, storage, object) {}
// Serializes the bound target chain, bound arguments, and bound receiver.
// Returns false (and leaves this object unserialized) if the recursion
// through nested bound functions hits the stack limit.
bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return true;
  if (broker->StackHasOverflowed()) return false;
  TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
  Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
  // We don't immediately set {serialized_} in order to correctly handle the
  // case where a recursive call to this method reaches the stack limit.
  DCHECK_NULL(bound_target_function_);
  bound_target_function_ =
      broker->GetOrCreateData(function->bound_target_function());
  bool serialized_nested = true;
  if (!bound_target_function_->should_access_heap()) {
    if (bound_target_function_->IsJSBoundFunction()) {
      serialized_nested =
          bound_target_function_->AsJSBoundFunction()->Serialize(broker);
    } else if (bound_target_function_->IsJSFunction()) {
      bound_target_function_->AsJSFunction()->Serialize(broker);
    }
  }
  if (!serialized_nested) {
    // We couldn't serialize all nested bound functions due to stack
    // overflow. Give up.
    DCHECK(!serialized_);
    bound_target_function_ = nullptr;  // Reset to sync with serialized_.
    return false;
  }
  serialized_ = true;
  DCHECK_NULL(bound_arguments_);
  bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
  if (!bound_arguments_->should_access_heap()) {
    bound_arguments_->AsFixedArray()->SerializeContents(broker);
  }
  DCHECK_NULL(bound_this_);
  bound_this_ = broker->GetOrCreateData(function->bound_this());
  return true;
}
// Initializes the lazily-populated zone containers for fields, constant
// elements, and own properties.
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
                           Handle<JSObject> object)
    : JSReceiverData(broker, storage, object),
      inobject_fields_(broker->zone()),
      own_constant_elements_(broker->zone()),
      own_properties_(broker->zone()) {}
// Contents are copied lazily via SerializeContents().
FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
                               Handle<FixedArray> object)
    : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
// Copies every element of the fixed array into broker data. Idempotent via
// serialized_contents_.
void FixedArrayData::SerializeContents(JSHeapBroker* broker) {
  if (serialized_contents_) return;
  serialized_contents_ = true;
  TraceScope tracer(broker, this, "FixedArrayData::SerializeContents");
  Handle<FixedArray> array = Handle<FixedArray>::cast(object());
  CHECK_EQ(array->length(), length());
  CHECK(contents_.empty());
  int const n = length();
  contents_.reserve(static_cast<size_t>(n));
  for (int index = 0; index < n; ++index) {
    Handle<Object> element(array->get(index), broker->isolate());
    contents_.push_back(broker->GetOrCreateData(element));
  }
  TRACE(broker, "Copied " << contents_.size() << " elements");
}
// Serialized view of a FixedDoubleArray; element bits are copied lazily via
// SerializeContents().
class FixedDoubleArrayData : public FixedArrayBaseData {
 public:
  FixedDoubleArrayData(JSHeapBroker* broker, ObjectData** storage,
                       Handle<FixedDoubleArray> object);
  // Serializes all elements of the fixed array.
  void SerializeContents(JSHeapBroker* broker);
  Float64 Get(int i) const;
 private:
  bool serialized_contents_ = false;
  ZoneVector<Float64> contents_;
};
// Contents are copied lazily via SerializeContents().
FixedDoubleArrayData::FixedDoubleArrayData(JSHeapBroker* broker,
                                           ObjectData** storage,
                                           Handle<FixedDoubleArray> object)
    : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {
}
// Copies the raw bit representation of every element (preserving holes and
// NaN payloads) into the zone vector. Idempotent via serialized_contents_.
void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
  if (serialized_contents_) return;
  serialized_contents_ = true;
  TraceScope tracer(broker, this, "FixedDoubleArrayData::SerializeContents");
  Handle<FixedDoubleArray> self = Handle<FixedDoubleArray>::cast(object());
  CHECK_EQ(self->length(), length());
  CHECK(contents_.empty());
  contents_.reserve(static_cast<size_t>(length()));
  for (int i = 0; i < length(); i++) {
    contents_.push_back(Float64::FromBits(self->get_representation(i)));
  }
  TRACE(broker, "Copied " << contents_.size() << " elements");
}
// Serialized view of a BytecodeArray. Scalar facts (register/parameter
// counts, new.target register) are captured eagerly; the constant pool is
// copied on demand by SerializeForCompilation().
class BytecodeArrayData : public FixedArrayBaseData {
 public:
  int register_count() const { return register_count_; }
  int parameter_count() const { return parameter_count_; }
  interpreter::Register incoming_new_target_or_generator_register() const {
    return incoming_new_target_or_generator_register_;
  }
  // The accessors below require SerializeForCompilation() to have run.
  Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
    return constant_pool_[index]->object();
  }
  bool IsConstantAtIndexSmi(int index) const {
    return constant_pool_[index]->is_smi();
  }
  Smi GetConstantAtIndexAsSmi(int index) const {
    return *(Handle<Smi>::cast(constant_pool_[index]->object()));
  }
  // Copies the constant pool into broker data. Idempotent.
  void SerializeForCompilation(JSHeapBroker* broker) {
    if (is_serialized_for_compilation_) return;
    // Convenience cast: object() is already a canonical persistent handle.
    Handle<BytecodeArray> bytecodes = Handle<BytecodeArray>::cast(object());
    DCHECK(constant_pool_.empty());
    Handle<FixedArray> constant_pool(bytecodes->constant_pool(),
                                     broker->isolate());
    constant_pool_.reserve(constant_pool->length());
    for (int i = 0; i < constant_pool->length(); i++) {
      constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
    }
    is_serialized_for_compilation_ = true;
  }
  BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
                    Handle<BytecodeArray> object)
      : FixedArrayBaseData(broker, storage, object),
        register_count_(object->register_count()),
        parameter_count_(object->parameter_count()),
        incoming_new_target_or_generator_register_(
            object->incoming_new_target_or_generator_register()),
        constant_pool_(broker->zone()) {}
 private:
  int const register_count_;
  int const parameter_count_;
  interpreter::Register const incoming_new_target_or_generator_register_;
  bool is_serialized_for_compilation_ = false;
  ZoneVector<ObjectData*> constant_pool_;
};
// Serialized view of a JSArray; the length is captured by Serialize() and
// individual elements are cached on demand by GetOwnElement().
class JSArrayData : public JSObjectData {
 public:
  JSArrayData(JSHeapBroker* broker, ObjectData** storage,
              Handle<JSArray> object);
  void Serialize(JSHeapBroker* broker);
  ObjectData* length() const { return length_; }
  ObjectData* GetOwnElement(
      JSHeapBroker* broker, uint32_t index,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 private:
  bool serialized_ = false;
  ObjectData* length_ = nullptr;
  // Elements (indexed properties) that either
  // (1) are known to exist directly on the object, or
  // (2) are known not to (possibly they don't exist at all).
  // In case (2), the second pair component is nullptr.
  ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
};
// Length and elements are serialized lazily.
JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
                         Handle<JSArray> object)
    : JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
// Serializes the array's length. Idempotent via serialized_.
void JSArrayData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return;
  serialized_ = true;
  TraceScope tracer(broker, this, "JSArrayData::Serialize");
  Handle<JSArray> jsarray = Handle<JSArray>::cast(object());
  DCHECK_NULL(length_);
  length_ = broker->GetOrCreateData(jsarray->length());
}
// Returns what is known about the element at `index`: a previously cached
// answer if one exists (nullptr means "known absent"), otherwise — when the
// policy allows — a fresh lookup on the heap, which is cached for next time.
ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
                                       SerializationPolicy policy) {
  // Linear scan of the (small) cache of previously answered indices.
  for (auto const& entry : own_elements_) {
    if (entry.first == index) return entry.second;
  }
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
    return nullptr;
  }
  ObjectData* result = nullptr;
  base::Optional<ObjectRef> element =
      GetOwnElementFromHeap(broker, object(), index, false);
  if (element.has_value()) result = element->data();
  // Cache the result, including a negative ("no such element") answer.
  own_elements_.push_back({index, result});
  return result;
}
// Serialized view of a ScopeInfo. Scalar facts are captured eagerly; the
// outer-scope chain is serialized on demand.
class ScopeInfoData : public HeapObjectData {
 public:
  ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
                Handle<ScopeInfo> object);
  int ContextLength() const { return context_length_; }
  bool HasContextExtensionSlot() const { return has_context_extension_slot_; }
  bool HasOuterScopeInfo() const { return has_outer_scope_info_; }
  ObjectData* OuterScopeInfo() const { return outer_scope_info_; }
  void SerializeScopeInfoChain(JSHeapBroker* broker);
 private:
  int const context_length_;
  bool const has_context_extension_slot_;
  bool const has_outer_scope_info_;
  // Only serialized via SerializeScopeInfoChain.
  ObjectData* outer_scope_info_;
};
// This data class is only used when direct heap access is disabled.
ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
                             Handle<ScopeInfo> object)
    : HeapObjectData(broker, storage, object),
      context_length_(object->ContextLength()),
      has_context_extension_slot_(object->HasContextExtensionSlot()),
      has_outer_scope_info_(object->HasOuterScopeInfo()),
      outer_scope_info_(nullptr) {
  DCHECK(!FLAG_turbo_direct_heap_access);
}
// Serializes the chain of outer scope infos, recursing outward. The non-null
// outer_scope_info_ pointer doubles as the "already serialized" flag.
void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
  if (outer_scope_info_) return;
  if (!has_outer_scope_info_) return;
  outer_scope_info_ = broker->GetOrCreateData(
      Handle<ScopeInfo>::cast(object())->OuterScopeInfo());
  if (!outer_scope_info_->should_access_heap()) {
    outer_scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
  }
}
// Serialized view of a SharedFunctionInfo. Most fields (including the
// macro-expanded BROKER_SFI_FIELDS) are captured eagerly in the constructor;
// the function template info, scope info chain, and template objects are
// serialized on demand.
class SharedFunctionInfoData : public HeapObjectData {
 public:
  SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
                         Handle<SharedFunctionInfo> object);
  int builtin_id() const { return builtin_id_; }
  int context_header_size() const { return context_header_size_; }
  ObjectData* GetBytecodeArray() const { return GetBytecodeArray_; }
  SharedFunctionInfo::Inlineability GetInlineability() const {
    return inlineability_;
  }
  void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
  ObjectData* scope_info() const { return scope_info_; }
  void SerializeScopeInfoChain(JSHeapBroker* broker);
  ObjectData* function_template_info() const { return function_template_info_; }
  // Returns the cached template object for the feedback slot, or nullptr.
  ObjectData* GetTemplateObject(FeedbackSlot slot) const {
    auto lookup_it = template_objects_.find(slot.ToInt());
    if (lookup_it != template_objects_.cend()) {
      return lookup_it->second;
    }
    return nullptr;
  }
  // Each slot may only be set once (enforced by the CHECK).
  void SetTemplateObject(FeedbackSlot slot, ObjectData* object) {
    CHECK(
        template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
  }
#define DECL_ACCESSOR(type, name) \
  type name() const { return name##_; }
  BROKER_SFI_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
 private:
  int const builtin_id_;
  int const context_header_size_;
  ObjectData* const GetBytecodeArray_;
#define DECL_MEMBER(type, name) type const name##_;
  BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
  SharedFunctionInfo::Inlineability const inlineability_;
  ObjectData* function_template_info_;
  ZoneMap<int, ObjectData*> template_objects_;
  ObjectData* scope_info_;
};
// Eagerly snapshots the builtin id, bytecode array, and all BROKER_SFI_FIELDS
// members; lazy members start out nullptr.
SharedFunctionInfoData::SharedFunctionInfoData(
    JSHeapBroker* broker, ObjectData** storage,
    Handle<SharedFunctionInfo> object)
    : HeapObjectData(broker, storage, object),
      builtin_id_(object->HasBuiltinId() ? object->builtin_id()
                                         : Builtins::kNoBuiltinId),
      context_header_size_(object->scope_info().ContextHeaderLength()),
      GetBytecodeArray_(
          object->HasBytecodeArray()
              ? broker->GetOrCreateData(object->GetBytecodeArray())
              : nullptr)
#define INIT_MEMBER(type, name) , name##_(object->name())
          BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
      ,
      inlineability_(object->GetInlineability()),
      function_template_info_(nullptr),
      template_objects_(broker->zone()),
      scope_info_(nullptr) {
  // Cross-check the sentinel encodings against the macro-captured flags.
  DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
  DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
}
// Serializes the function-data slot. The non-null pointer doubles as the
// "already serialized" flag.
void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
    JSHeapBroker* broker) {
  if (function_template_info_) return;
  function_template_info_ = broker->GetOrCreateData(
      Handle<SharedFunctionInfo>::cast(object())->function_data(kAcquireLoad));
}
// Serializes this function's scope info and, transitively, its outer scope
// chain. The non-null pointer doubles as the "already serialized" flag.
void SharedFunctionInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
  if (scope_info_) return;
  scope_info_ = broker->GetOrCreateData(
      Handle<SharedFunctionInfo>::cast(object())->scope_info());
  if (!scope_info_->should_access_heap()) {
    scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
  }
}
// Serialized view of a SourceTextModule: regular imports/exports and the
// import.meta object, all populated by Serialize().
class SourceTextModuleData : public HeapObjectData {
 public:
  SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
                       Handle<SourceTextModule> object);
  void Serialize(JSHeapBroker* broker);
  // Returns nullptr (with a broker-missing trace) if not yet serialized.
  ObjectData* GetCell(JSHeapBroker* broker, int cell_index) const;
  ObjectData* GetImportMeta(JSHeapBroker* broker) const;
 private:
  bool serialized_ = false;
  ZoneVector<ObjectData*> imports_;
  ZoneVector<ObjectData*> exports_;
  ObjectData* import_meta_;
};
// All state is filled in by Serialize().
SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
                                           ObjectData** storage,
                                           Handle<SourceTextModule> object)
    : HeapObjectData(broker, storage, object),
      imports_(broker->zone()),
      exports_(broker->zone()),
      import_meta_(nullptr) {}
// Resolves a module cell index to the serialized import or export cell.
// Returns nullptr (with a broker-missing trace) when not yet serialized.
ObjectData* SourceTextModuleData::GetCell(JSHeapBroker* broker,
                                          int cell_index) const {
  if (!serialized_) {
    DCHECK(imports_.empty());
    TRACE_BROKER_MISSING(broker,
                         "module cell " << cell_index << " on " << this);
    return nullptr;
  }
  ObjectData* cell;
  switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
    case SourceTextModuleDescriptor::kImport:
      cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
      break;
    case SourceTextModuleDescriptor::kExport:
      cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
      break;
    case SourceTextModuleDescriptor::kInvalid:
      UNREACHABLE();
  }
  CHECK_NOT_NULL(cell);
  return cell;
}
// Requires Serialize() to have run.
ObjectData* SourceTextModuleData::GetImportMeta(JSHeapBroker* broker) const {
  CHECK(serialized_);
  return import_meta_;
}
// Copies all regular import cells, regular export cells, and the import.meta
// object into broker data. Idempotent via serialized_.
void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return;
  serialized_ = true;
  TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
  Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());
  // TODO(neis): We could be smarter and only serialize the cells we care about.
  // TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
  DCHECK(imports_.empty());
  Handle<FixedArray> imports(module->regular_imports(), broker->isolate());
  int const imports_length = imports->length();
  imports_.reserve(imports_length);
  for (int i = 0; i < imports_length; ++i) {
    imports_.push_back(broker->GetOrCreateData(imports->get(i)));
  }
  TRACE(broker, "Copied " << imports_.size() << " imports");
  DCHECK(exports_.empty());
  Handle<FixedArray> exports(module->regular_exports(), broker->isolate());
  int const exports_length = exports->length();
  exports_.reserve(exports_length);
  for (int i = 0; i < exports_length; ++i) {
    exports_.push_back(broker->GetOrCreateData(exports->get(i)));
  }
  TRACE(broker, "Copied " << exports_.size() << " exports");
  DCHECK_NULL(import_meta_);
  import_meta_ = broker->GetOrCreateData(module->import_meta());
  TRACE(broker, "Copied import_meta");
}
// Serialized view of a Cell; carries no extra state. Only used when direct
// heap access is disabled.
class CellData : public HeapObjectData {
 public:
  CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
      : HeapObjectData(broker, storage, object) {
    DCHECK(!FLAG_turbo_direct_heap_access);
  }
};
// Serialized view of the JSGlobalObject; caches property-cell lookups on
// demand via GetPropertyCell().
class JSGlobalObjectData : public JSObjectData {
 public:
  JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<JSGlobalObject> object);
  bool IsDetached() const { return is_detached_; }
  ObjectData* GetPropertyCell(
      JSHeapBroker* broker, ObjectData* name,
      SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
 private:
  bool const is_detached_;
  // Properties that either
  // (1) are known to exist as property cells on the global object, or
  // (2) are known not to (possibly they don't exist at all).
  // In case (2), the second pair component is nullptr.
  ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
};
// Captures the detached flag eagerly; property cells are cached lazily.
JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
                                       ObjectData** storage,
                                       Handle<JSGlobalObject> object)
    : JSObjectData(broker, storage, object),
      is_detached_(object->IsDetached()),
      properties_(broker->zone()) {}
// Serialized view of a JSGlobalProxy; carries no extra state.
class JSGlobalProxyData : public JSObjectData {
 public:
  JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
                    Handle<JSGlobalProxy> object);
};
JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
                                     Handle<JSGlobalProxy> object)
    : JSObjectData(broker, storage, object) {}
namespace {
// Looks up `name` as an own data property of the target native context's
// global object and returns its PropertyCell, or nullopt when the lookup
// does not resolve to a global-object data property.
base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
                                                        Handle<Name> name) {
  LookupIterator it(
      broker->isolate(),
      handle(broker->target_native_context().object()->global_object(),
             broker->isolate()),
      name, LookupIterator::OWN);
  it.TryLookupCachedProperty();
  if (it.state() == LookupIterator::DATA &&
      it.GetHolder<JSObject>()->IsJSGlobalObject()) {
    return PropertyCellRef(broker, it.GetPropertyCell());
  }
  return base::nullopt;
}
}  // namespace
// Returns the cached property cell for `name` (nullptr means "known not to
// exist"). When the policy allows, performs a heap lookup, serializes the
// found cell, and caches the answer — including the negative one.
ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
                                                ObjectData* name,
                                                SerializationPolicy policy) {
  CHECK_NOT_NULL(name);
  for (auto const& p : properties_) {
    if (p.first == name) return p.second;
  }
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_MISSING(broker, "knowledge about global property " << name);
    return nullptr;
  }
  ObjectData* result = nullptr;
  base::Optional<PropertyCellRef> cell =
      GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
  if (cell.has_value()) {
    result = cell->data();
    if (!result->should_access_heap()) {
      result->AsPropertyCell()->Serialize(broker);
    }
  }
  properties_.push_back({name, result});
  return result;
}
// Serialized view of a TemplateObjectDescription; carries no extra state.
// Only used when direct heap access is disabled.
class TemplateObjectDescriptionData : public HeapObjectData {
 public:
  TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
                                Handle<TemplateObjectDescription> object)
      : HeapObjectData(broker, storage, object) {
    DCHECK(!FLAG_turbo_direct_heap_access);
  }
};
// Serialized view of a Code object; captures only the inlined bytecode size.
class CodeData : public HeapObjectData {
 public:
  CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
      : HeapObjectData(broker, storage, object),
        inlined_bytecode_size_(object->inlined_bytecode_size()) {}
  unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
 private:
  unsigned const inlined_bytecode_size_;
};
// Defines ObjectData::Is<Name>() for every broker object type: answers via
// the heap when direct access is allowed, otherwise via the serialized map's
// instance type (Smis are never heap objects).
#define DEFINE_IS(Name)                                                 \
  bool ObjectData::Is##Name() const {                                   \
    if (should_access_heap()) {                                         \
      AllowHandleDereferenceIfNeeded allow_handle_dereference(kind());  \
      return object()->Is##Name();                                      \
    }                                                                   \
    if (is_smi()) return false;                                         \
    InstanceType instance_type =                                        \
        static_cast<const HeapObjectData*>(this)->GetMapInstanceType(); \
    return InstanceTypeChecker::Is##Name(instance_type);                \
  }
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
#undef DEFINE_IS
// Defines checked downcasts ObjectData::As<Name>() for serialized object
// types. The CHECKs guard both the type and the serialization kind.
#define DEFINE_AS(Name)                              \
  Name##Data* ObjectData::As##Name() {               \
    CHECK(Is##Name());                               \
    CHECK_EQ(kind_, kSerializedHeapObject);          \
    return static_cast<Name##Data*>(this);           \
  }
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
// removed.
// This macro defines the Asxxx methods for NeverSerialized objects, which
// should only be used with direct heap access off.
#define DEFINE_AS(Name)                              \
  Name##Data* ObjectData::As##Name() {               \
    DCHECK(!FLAG_turbo_direct_heap_access);          \
    CHECK(Is##Name());                               \
    CHECK_EQ(kind_, kSerializedHeapObject);          \
    return static_cast<Name##Data*>(this);           \
  }
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
// Returns the serialized in-object field at the given index; the index must
// already have been serialized.
const JSObjectField& JSObjectData::GetInobjectField(int property_index) const {
  CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
  return inobject_fields_[property_index];
}
// True once boilerplate serialization has ensured the elements backing store
// is empty or a tenured copy-on-write array.
bool JSObjectData::cow_or_empty_elements_tenured() const {
  return cow_or_empty_elements_tenured_;
}
// Serialized elements backing store; nullptr until SerializeElements() or
// boilerplate serialization has run.
ObjectData* JSObjectData::elements() const { return elements_; }
// Entry point for boilerplate serialization, bounded by the maximum fast
// literal nesting depth.
void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
  SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
}
// Serializes (only) the elements backing-store reference. Idempotent via
// serialized_elements_.
void JSObjectData::SerializeElements(JSHeapBroker* broker) {
  if (serialized_elements_) return;
  serialized_elements_ = true;
  TraceScope tracer(broker, this, "JSObjectData::SerializeElements");
  Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
  Handle<FixedArrayBase> elements_object(boilerplate->elements(),
                                         broker->isolate());
  DCHECK_NULL(elements_);
  elements_ = broker->GetOrCreateData(elements_object);
  DCHECK(elements_->IsFixedArrayBase());
}
// Serializes the map's constructor reference. Idempotent.
void MapData::SerializeConstructor(JSHeapBroker* broker) {
  if (serialized_constructor_) return;
  serialized_constructor_ = true;
  TraceScope tracer(broker, this, "MapData::SerializeConstructor");
  Handle<Map> map = Handle<Map>::cast(object());
  DCHECK(!map->IsContextMap());
  DCHECK_NULL(constructor_);
  constructor_ = broker->GetOrCreateData(map->GetConstructor());
}
// Serializes the map's transition-tree back pointer. Idempotent.
void MapData::SerializeBackPointer(JSHeapBroker* broker) {
  if (serialized_backpointer_) return;
  serialized_backpointer_ = true;
  TraceScope tracer(broker, this, "MapData::SerializeBackPointer");
  Handle<Map> map = Handle<Map>::cast(object());
  DCHECK_NULL(backpointer_);
  DCHECK(!map->IsContextMap());
  backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
}
// Serializes the map's prototype reference. Idempotent.
void MapData::SerializePrototype(JSHeapBroker* broker) {
  if (serialized_prototype_) return;
  serialized_prototype_ = true;
  TraceScope tracer(broker, this, "MapData::SerializePrototype");
  Handle<Map> map = Handle<Map>::cast(object());
  DCHECK_NULL(prototype_);
  prototype_ = broker->GetOrCreateData(map->prototype());
}
// Serializes every own descriptor of the map. Idempotent at this level;
// individual descriptors are additionally deduplicated by
// SerializeOwnDescriptor.
void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
  if (serialized_own_descriptors_) return;
  serialized_own_descriptors_ = true;
  TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors");
  Handle<Map> map = Handle<Map>::cast(object());
  for (InternalIndex i : map->IterateOwnDescriptors()) {
    SerializeOwnDescriptor(broker, i);
  }
}
// Returns the serialized strong descriptor value at the given index, or
// nullptr if that descriptor has not been serialized (or had no strong
// value).
ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const {
  auto data = instance_descriptors_->contents().find(descriptor_index.as_int());
  if (data == instance_descriptors_->contents().end()) return nullptr;
  return data->second.value;
}
// Copies one own descriptor (key, strong value, details, and — for field
// descriptors — field index, owner map, and field type) into the shared
// DescriptorArrayData, recursing into the field-owner map so it records the
// same descriptor. Deduplicated per descriptor index.
void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
                                     InternalIndex descriptor_index) {
  TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
  Handle<Map> map = Handle<Map>::cast(object());
  if (instance_descriptors_ == nullptr) {
    instance_descriptors_ =
        broker->GetOrCreateData(map->instance_descriptors(kRelaxedLoad))
            ->AsDescriptorArray();
  }
  ZoneMap<int, PropertyDescriptor>& contents =
      instance_descriptors()->contents();
  CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
  // Already copied (possibly via a map sharing this descriptor array).
  if (contents.find(descriptor_index.as_int()) != contents.end()) return;
  Isolate* const isolate = broker->isolate();
  auto descriptors =
      Handle<DescriptorArray>::cast(instance_descriptors_->object());
  CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
  PropertyDescriptor d;
  d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
  MaybeObject value = descriptors->GetValue(descriptor_index);
  HeapObject obj;
  // Weak values are deliberately not recorded; d.value stays null then.
  if (value.GetHeapObjectIfStrong(&obj)) {
    d.value = broker->GetOrCreateData(obj);
  }
  d.details = descriptors->GetDetails(descriptor_index);
  if (d.details.location() == kField) {
    d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
    d.field_owner =
        broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
    d.field_type =
        broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
    d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
  }
  contents[descriptor_index.as_int()] = d;
  if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
    // Recurse on the owner map.
    d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
  }
  TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
                                     << instance_descriptors_ << " ("
                                     << contents.size() << " total)");
}
// Serializes the root of this map's transition tree. Idempotent.
void MapData::SerializeRootMap(JSHeapBroker* broker) {
  if (serialized_root_map_) return;
  serialized_root_map_ = true;
  TraceScope tracer(broker, this, "MapData::SerializeRootMap");
  Handle<Map> map = Handle<Map>::cast(object());
  DCHECK_NULL(root_map_);
  root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
}
// nullptr until SerializeRootMap() has run.
ObjectData* MapData::FindRootMap() const { return root_map_; }
// Recursively serializes a literal boilerplate object up to {depth} levels
// deep: its elements, its in-object fields (recursing into nested JSObject
// values), its map's own descriptors, and - for arrays - the JSArray data.
// The caller is expected to have established IsInlinableFastLiteral, so only
// the depth is re-checked here. Idempotent via {serialized_as_boilerplate_}.
void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
                                                   int depth) {
  if (serialized_as_boilerplate_) return;
  serialized_as_boilerplate_ = true;
  TraceScope tracer(broker, this,
                    "JSObjectData::SerializeRecursiveAsBoilerplate");
  Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
  // We only serialize boilerplates that pass the IsInlinableFastLiteral
  // check, so we only do a check on the depth here.
  CHECK_GT(depth, 0);
  CHECK(!boilerplate->map().is_deprecated());
  // Serialize the elements.
  Isolate* const isolate = broker->isolate();
  Handle<FixedArrayBase> elements_object(boilerplate->elements(), isolate);
  // Boilerplates need special serialization - we need to make sure COW arrays
  // are tenured. Boilerplate objects should only be reachable from their
  // allocation site, so it is safe to assume that the elements have not been
  // serialized yet.
  bool const empty_or_cow =
      elements_object->length() == 0 ||
      elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
  if (empty_or_cow) {
    // We need to make sure copy-on-write elements are tenured.
    if (ObjectInYoungGeneration(*elements_object)) {
      elements_object = isolate->factory()->CopyAndTenureFixedCOWArray(
          Handle<FixedArray>::cast(elements_object));
      boilerplate->set_elements(*elements_object);
    }
    cow_or_empty_elements_tenured_ = true;
  }
  DCHECK_NULL(elements_);
  elements_ = broker->GetOrCreateData(elements_object);
  DCHECK(elements_->IsFixedArrayBase());
  if (empty_or_cow || elements_->should_access_heap()) {
    // No need to do anything here. Empty or copy-on-write elements
    // do not need to be serialized because we only need to store the elements
    // reference to the allocated object.
  } else if (boilerplate->HasSmiOrObjectElements()) {
    elements_->AsFixedArray()->SerializeContents(broker);
    Handle<FixedArray> fast_elements =
        Handle<FixedArray>::cast(elements_object);
    int length = elements_object->length();
    // Recurse into any JSObject element values (nested literals).
    for (int i = 0; i < length; i++) {
      Handle<Object> value(fast_elements->get(i), isolate);
      if (value->IsJSObject()) {
        ObjectData* value_data = broker->GetOrCreateData(value);
        if (!value_data->should_access_heap()) {
          value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
                                                                    depth - 1);
        }
      }
    }
  } else {
    CHECK(boilerplate->HasDoubleElements());
    CHECK_LE(elements_object->Size(), kMaxRegularHeapObjectSize);
    DCHECK_EQ(elements_->kind(), ObjectDataKind::kSerializedHeapObject);
    elements_->AsFixedDoubleArray()->SerializeContents(broker);
  }
  // TODO(turbofan): Do we want to support out-of-object properties?
  CHECK(boilerplate->HasFastProperties() &&
        boilerplate->property_array().length() == 0);
  CHECK_EQ(inobject_fields_.size(), 0u);
  // Check the in-object properties.
  Handle<DescriptorArray> descriptors(
      boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
  for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
    PropertyDetails details = descriptors->GetDetails(i);
    if (details.location() != kField) continue;
    DCHECK_EQ(kData, details.kind());
    FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
    // Make sure {field_index} agrees with {inobject_properties} on the index of
    // this field.
    DCHECK_EQ(field_index.property_index(),
              static_cast<int>(inobject_fields_.size()));
    if (boilerplate->IsUnboxedDoubleField(field_index)) {
      // Unboxed doubles are stored as raw bits, not as tagged values.
      uint64_t value_bits =
          boilerplate->RawFastDoublePropertyAsBitsAt(field_index);
      inobject_fields_.push_back(JSObjectField{value_bits});
    } else {
      Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
                           isolate);
      // In case of double fields we use a sentinel NaN value to mark
      // uninitialized fields. A boilerplate value with such a field may migrate
      // from its double to a tagged representation. If the double is unboxed,
      // the raw double is converted to a heap number, otherwise the (boxed)
      // double ceases to be mutable, and becomes a normal heap number. The
      // sentinel value carries no special meaning when it occurs in a heap
      // number, so we would like to recover the uninitialized value. We check
      // for the sentinel here, specifically, since migrations might have been
      // triggered as part of boilerplate serialization.
      if (!details.representation().IsDouble() && value->IsHeapNumber() &&
          HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
        value = isolate->factory()->uninitialized_value();
      }
      ObjectData* value_data = broker->GetOrCreateData(value);
      if (value_data->IsJSObject() && !value_data->should_access_heap()) {
        value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
                                                                  depth - 1);
      }
      inobject_fields_.push_back(JSObjectField{value_data});
    }
  }
  TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
  // Also serialize the map's own descriptors so the compiler can reason
  // about the boilerplate's shape without touching the heap.
  if (!map()->should_access_heap()) {
    map()->AsMap()->SerializeOwnDescriptors(broker);
  }
  if (IsJSArray()) AsJSArray()->Serialize(broker);
}
// Serializes the fields of a JSRegExp needed to materialize it as a regexp
// literal boilerplate: elements, properties/hash, data, source, flags, and
// lastIndex. Idempotent via {serialized_as_reg_exp_boilerplate_}.
void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) {
  if (serialized_as_reg_exp_boilerplate_) return;
  serialized_as_reg_exp_boilerplate_ = true;
  TraceScope tracer(broker, this, "JSRegExpData::SerializeAsRegExpBoilerplate");
  Handle<JSRegExp> boilerplate = Handle<JSRegExp>::cast(object());
  SerializeElements(broker);
  raw_properties_or_hash_ =
      broker->GetOrCreateData(boilerplate->raw_properties_or_hash());
  data_ = broker->GetOrCreateData(boilerplate->data());
  source_ = broker->GetOrCreateData(boilerplate->source());
  flags_ = broker->GetOrCreateData(boilerplate->flags());
  last_index_ = broker->GetOrCreateData(boilerplate->last_index());
}
// Identity comparison of two refs. Because the broker canonicalizes object
// data, pointer equality of {data_} is sufficient.
bool ObjectRef::equals(const ObjectRef& other) const {
#ifdef DEBUG
  // Track (for the refs analysis) that this ref was used, but only for its
  // identity rather than its serialized contents.
  if (broker()->mode() == JSHeapBroker::kSerialized &&
      data_->used_status == ObjectData::Usage::kUnused) {
    data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
  }
#endif  // DEBUG
  return data_ == other.data_;
}
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
// Walks up the context chain by at most {*depth} links, decrementing *depth
// for each link followed. On return, *depth holds the number of links that
// could not be followed (0 when the walk completed fully).
ContextRef ContextRef::previous(size_t* depth,
                                SerializationPolicy policy) const {
  DCHECK_NOT_NULL(depth);
  if (data_->should_access_heap()) {
    // Direct heap walk: follow previous() links while they are contexts.
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    Context current = *object();
    while (*depth != 0 && current.unchecked_previous().IsContext()) {
      current = Context::cast(current.unchecked_previous());
      (*depth)--;
    }
    return ContextRef(broker(), broker()->CanonicalPersistentHandle(current));
  }
  // Serialized walk: recurse one link at a time through the ContextData.
  if (*depth == 0) return *this;
  ObjectData* previous_data = data()->AsContext()->previous(broker(), policy);
  if (previous_data == nullptr || !previous_data->IsContext()) return *this;
  *depth = *depth - 1;
  return ContextRef(broker(), previous_data).previous(depth, policy);
}
// Reads context slot {index}. Returns nullopt when the slot has not been
// serialized (and {policy} does not allow serializing it now).
base::Optional<ObjectRef> ContextRef::get(int index,
                                          SerializationPolicy policy) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    Handle<Object> value(object()->get(index), broker()->isolate());
    return ObjectRef(broker(), value);
  }
  ObjectData* optional_slot =
      data()->AsContext()->GetSlot(broker(), index, policy);
  if (optional_slot != nullptr) {
    return ObjectRef(broker(), optional_slot);
  }
  return base::nullopt;
}
// Finds the enclosing module context and returns its module. Walks one
// context link at a time; CHECKs that each step actually made progress
// (depth reached 0), so a chain without a module context would fail a CHECK.
SourceTextModuleRef ContextRef::GetModule(SerializationPolicy policy) const {
  ContextRef current = *this;
  while (current.map().instance_type() != MODULE_CONTEXT_TYPE) {
    size_t depth = 1;
    current = current.previous(&depth, policy);
    CHECK_EQ(depth, 0);
  }
  return current.get(Context::EXTENSION_INDEX, policy)
      .value()
      .AsSourceTextModule();
}
// Constructs a broker in kDisabled mode. All zone-allocated caches are tied
// to {broker_zone}; serialization begins later via
// InitializeAndStartSerializing().
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
                           bool tracing_enabled, bool is_concurrent_inlining,
                           CodeKind code_kind)
    : isolate_(isolate),
      zone_(broker_zone),
      refs_(zone()->New<RefsMap>(kMinimalRefsBucketCount, AddressMatcher(),
                                 zone())),
      root_index_map_(isolate),
      array_and_object_prototypes_(zone()),
      tracing_enabled_(tracing_enabled),
      is_concurrent_inlining_(is_concurrent_inlining),
      code_kind_(code_kind),
      feedback_(zone()),
      bytecode_analyses_(zone()),
      property_access_infos_(zone()),
      minimorphic_property_access_infos_(zone()),
      typed_array_string_tags_(zone()),
      serialized_functions_(zone()) {
  // Note that this initialization of {refs_} with the minimal initial capacity
  // is redundant in the normal use case (concurrent compilation enabled,
  // standard objects to be serialized), as the map is going to be replaced
  // immediately with a larger-capacity one. It doesn't seem to affect the
  // performance in a noticeable way though.
  TRACE(this, "Constructing heap broker");
}
// A local isolate must have been detached before the broker dies.
JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
// Test-only setup: installs persistent handles and copies the given
// canonical-handles map into the broker.
void JSHeapBroker::SetPersistentAndCopyCanonicalHandlesForTesting(
    std::unique_ptr<PersistentHandles> persistent_handles,
    std::unique_ptr<CanonicalHandlesMap> canonical_handles) {
  set_persistent_handles(std::move(persistent_handles));
  CopyCanonicalHandlesForTesting(std::move(canonical_handles));
}
// Test-only: deep-copies {canonical_handles} entry by entry into a freshly
// allocated map owned by the broker.
void JSHeapBroker::CopyCanonicalHandlesForTesting(
    std::unique_ptr<CanonicalHandlesMap> canonical_handles) {
  DCHECK_NULL(canonical_handles_);
  canonical_handles_ = std::make_unique<CanonicalHandlesMap>(
      isolate_->heap(), ZoneAllocationPolicy(zone()));
  CanonicalHandlesMap::IteratableScope it_scope(canonical_handles.get());
  for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
    Address* entry = *it.entry();
    Object key = it.key();
    canonical_handles_->Insert(key, entry);
  }
}
// Builds the tracing prefix: "[<broker address>] " followed by two spaces
// per current indentation level.
std::string JSHeapBroker::Trace() const {
  std::ostringstream prefix;
  prefix << "[" << this << "] "
         << std::string(trace_indentation_ * 2, ' ');
  return prefix.str();
}
// Attaches {local_isolate} for concurrent compilation, transferring the
// canonical and persistent handles from {info} to the broker/local heap.
void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
                                      LocalIsolate* local_isolate) {
  set_canonical_handles(info->DetachCanonicalHandles());
  DCHECK_NULL(local_isolate_);
  local_isolate_ = local_isolate;
  DCHECK_NOT_NULL(local_isolate_);
  local_isolate_->heap()->AttachPersistentHandles(
      info->DetachPersistentHandles());
}
// Inverse of AttachLocalIsolate: hands the canonical and persistent handles
// back to {info} and drops the local isolate pointer.
void JSHeapBroker::DetachLocalIsolate(OptimizedCompilationInfo* info) {
  DCHECK_NULL(ph_);
  DCHECK_NOT_NULL(local_isolate_);
  std::unique_ptr<PersistentHandles> ph =
      local_isolate_->heap()->DetachPersistentHandles();
  local_isolate_ = nullptr;
  info->set_canonical_handles(DetachCanonicalHandles());
  info->set_persistent_handles(std::move(ph));
}
// Transitions the broker from kSerializing to kSerialized; after this, no
// new data may be serialized.
void JSHeapBroker::StopSerializing() {
  CHECK_EQ(mode_, kSerializing);
  TRACE(this, "Stopping serialization");
  mode_ = kSerialized;
}
#ifdef DEBUG
// Debug-only statistics: tallies every ref in {refs_} by usage status
// (unused / identity-only / data used), overall, for Smis, and per
// instance type, and reports the tallies via TRACE_BROKER_MEMORY.
void JSHeapBroker::PrintRefsAnalysis() const {
  // Usage counts
  size_t used_total = 0, unused_total = 0, identity_used_total = 0;
  for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
       ref = refs_->Next(ref)) {
    switch (ref->value->used_status) {
      case ObjectData::Usage::kUnused:
        ++unused_total;
        break;
      case ObjectData::Usage::kOnlyIdentityUsed:
        ++identity_used_total;
        break;
      case ObjectData::Usage::kDataUsed:
        ++used_total;
        break;
    }
  }
  // Ref types analysis
  TRACE_BROKER_MEMORY(
      this, "Refs: " << refs_->occupancy() << "; data used: " << used_total
                     << "; only identity used: " << identity_used_total
                     << "; unused: " << unused_total);
  // Second pass: break the same counts down by Smi vs. instance type.
  size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0;
  size_t used[LAST_TYPE + 1] = {0};
  size_t unused[LAST_TYPE + 1] = {0};
  size_t identity_used[LAST_TYPE + 1] = {0};
  for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
       ref = refs_->Next(ref)) {
    if (ref->value->is_smi()) {
      switch (ref->value->used_status) {
        case ObjectData::Usage::kUnused:
          ++unused_smis;
          break;
        case ObjectData::Usage::kOnlyIdentityUsed:
          ++identity_used_smis;
          break;
        case ObjectData::Usage::kDataUsed:
          ++used_smis;
          break;
      }
    } else {
      // The instance type source depends on whether the heap may be read
      // directly or only through the serialized data.
      InstanceType instance_type;
      if (ref->value->should_access_heap()) {
        instance_type = Handle<HeapObject>::cast(ref->value->object())
                            ->map()
                            .instance_type();
      } else {
        instance_type = ref->value->AsHeapObject()->GetMapInstanceType();
      }
      CHECK_LE(FIRST_TYPE, instance_type);
      CHECK_LE(instance_type, LAST_TYPE);
      switch (ref->value->used_status) {
        case ObjectData::Usage::kUnused:
          ++unused[instance_type];
          break;
        case ObjectData::Usage::kOnlyIdentityUsed:
          ++identity_used[instance_type];
          break;
        case ObjectData::Usage::kDataUsed:
          ++used[instance_type];
          break;
      }
    }
  }
  TRACE_BROKER_MEMORY(
      this, "Smis: " << used_smis + identity_used_smis + unused_smis
                     << "; data used: " << used_smis << "; only identity used: "
                     << identity_used_smis << "; unused: " << unused_smis);
  for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) {
    size_t total = used[i] + identity_used[i] + unused[i];
    if (total == 0) continue;
    TRACE_BROKER_MEMORY(
        this, InstanceType(i) << ": " << total << "; data used: " << used[i]
                              << "; only identity used: " << identity_used[i]
                              << "; unused: " << unused[i]);
  }
}
#endif  // DEBUG
// Final mode transition (kSerialized -> kRetired); in debug builds, dumps
// the ref-usage statistics collected during compilation.
void JSHeapBroker::Retire() {
  CHECK_EQ(mode_, kSerialized);
  TRACE(this, "Retiring");
  mode_ = kRetired;
#ifdef DEBUG
  PrintRefsAnalysis();
#endif  // DEBUG
}
// Records the native context being compiled against. May legitimately be
// called twice, as explained below.
void JSHeapBroker::SetTargetNativeContextRef(
    Handle<NativeContext> native_context) {
  // The MapData constructor uses {target_native_context_}. This creates a
  // benign cycle that we break by setting {target_native_context_} right before
  // starting to serialize (thus creating dummy data), and then again properly
  // right after.
  DCHECK((mode() == kDisabled && !target_native_context_.has_value()) ||
         (mode() == kSerializing &&
          target_native_context_->object().equals(native_context) &&
          target_native_context_->data_->kind() == kUnserializedHeapObject));
  target_native_context_ = NativeContextRef(this, native_context);
}
// Walks the isolate's list of native contexts and collects every initial
// Array.prototype and Object.prototype into
// {array_and_object_prototypes_}, for later IsArrayOrObjectPrototype checks
// without heap access.
void JSHeapBroker::CollectArrayAndObjectPrototypes() {
  DisallowHeapAllocation no_gc;
  CHECK_EQ(mode(), kSerializing);
  CHECK(array_and_object_prototypes_.empty());
  Object maybe_context = isolate()->heap()->native_contexts_list();
  while (!maybe_context.IsUndefined(isolate())) {
    Context context = Context::cast(maybe_context);
    Object array_prot = context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
    Object object_prot = context.get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
    array_and_object_prototypes_.emplace(JSObject::cast(array_prot), isolate());
    array_and_object_prototypes_.emplace(JSObject::cast(object_prot),
                                         isolate());
    maybe_context = context.next_context_link();
  }
  // There is always at least one native context.
  CHECK(!array_and_object_prototypes_.empty());
}
// Maps a typed-array elements kind to its @@toStringTag string (e.g.
// "Uint8Array"). The switch cases are generated from the TYPED_ARRAYS list.
StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
  DCHECK(IsTypedArrayElementsKind(kind));
  switch (kind) {
#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype)      \
  case ElementsKind::TYPE##_ELEMENTS:                        \
    return StringRef(this, isolate()->factory()->Type##Array_string());
    TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG)
#undef TYPED_ARRAY_STRING_TAG
    default:
      UNREACHABLE();
  }
}
// Decides whether the (shared, feedback) pair should be serialized for the
// given arguments: yes unless the cache is full or an entry with identical
// arguments already exists.
bool JSHeapBroker::ShouldBeSerializedForCompilation(
    const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback,
    const HintsVector& arguments) const {
  // A full cache means we decline new entries outright.
  if (serialized_functions_.size() >= kMaxSerializedFunctionsCacheSize) {
    TRACE_BROKER_MISSING(this,
                         "opportunity - serialized functions cache is full.");
    return false;
  }
  SerializedFunction function{shared, feedback};
  auto candidates = serialized_functions_.equal_range(function);
  // Serialize only if no cached entry was made with the same arguments.
  return std::none_of(candidates.first, candidates.second,
                      [&arguments](const auto& entry) {
                        return entry.second == arguments;
                      });
}
// Records that (shared, feedback) has been serialized for {arguments}.
void JSHeapBroker::SetSerializedForCompilation(
    const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback,
    const HintsVector& arguments) {
  SerializedFunction function{shared, feedback};
  serialized_functions_.insert({function, arguments});
  TRACE(this, "Set function " << shared << " with " << feedback
                              << " as serialized for compilation");
}
// Returns true iff (shared, feedback) has been serialized for any argument
// hints. When the broker is disabled, everything counts as serialized.
bool JSHeapBroker::IsSerializedForCompilation(
    const SharedFunctionInfoRef& shared,
    const FeedbackVectorRef& feedback) const {
  if (mode() == kDisabled) return true;
  SerializedFunction function = {shared, feedback};
  return serialized_functions_.find(function) != serialized_functions_.end();
}
// Returns true iff {object} is an initial Array.prototype or
// Object.prototype of some native context. Uses the precollected set when
// the broker is active, and queries the isolate directly when disabled.
bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
  if (mode() == kDisabled) {
    return isolate()->IsInAnyContext(*object.object(),
                                     Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
           isolate()->IsInAnyContext(*object.object(),
                                     Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
  }
  CHECK(!array_and_object_prototypes_.empty());
  return array_and_object_prototypes_.find(object.object()) !=
         array_and_object_prototypes_.end();
}
// Switches the broker into kSerializing mode and serializes the standard
// objects every compilation needs: the target native context, the
// array/object prototypes, the protector cells, the many-closures cell,
// and the CEntry stub.
//
// Changes vs. previous version: removed the dead store `refs_ = nullptr;`
// (it was immediately overwritten by the new RefsMap), and factored the
// nine identical protector-cell serializations into one local lambda.
void JSHeapBroker::InitializeAndStartSerializing(
    Handle<NativeContext> native_context) {
  TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing");
  CHECK_EQ(mode_, kDisabled);
  mode_ = kSerializing;
  // Throw away the dummy data that we created while disabled.
  refs_->Clear();
  refs_ =
      zone()->New<RefsMap>(kInitialRefsBucketCount, AddressMatcher(), zone());
  SetTargetNativeContextRef(native_context);
  target_native_context().Serialize();
  CollectArrayAndObjectPrototypes();
  Factory* const f = isolate()->factory();
  // Serializes one protector cell, unless direct heap access makes
  // serialization unnecessary.
  auto serialize_protector = [this](Handle<PropertyCell> cell) {
    ObjectData* data = GetOrCreateData(cell);
    if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
  };
  serialize_protector(f->array_buffer_detaching_protector());
  serialize_protector(f->array_constructor_protector());
  serialize_protector(f->array_iterator_protector());
  serialize_protector(f->array_species_protector());
  serialize_protector(f->no_elements_protector());
  serialize_protector(f->promise_hook_protector());
  serialize_protector(f->promise_species_protector());
  serialize_protector(f->promise_then_protector());
  serialize_protector(f->string_length_protector());
  GetOrCreateData(f->many_closures_cell());
  GetOrCreateData(
      CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
  TRACE(this, "Finished serializing standard objects");
}
// clang-format off
// Central ref-cache lookup: returns the ObjectData for {object}, creating
// and (depending on mode/flags) serializing it on first sight. The macro
// chains below generate one `else if` branch per broker-known type.
ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
  RefsMap::Entry* entry = refs_->LookupOrInsert(object.address());
  ObjectData* object_data = entry->value;
  if (object_data == nullptr) {
    ObjectData** data_storage = &(entry->value);
    // TODO(neis): Remove these Allow* once we serialize everything upfront.
    AllowHandleDereference handle_dereference;
    if (object->IsSmi()) {
      // Smis are stored as-is; no heap data to serialize.
      object_data = zone()->New<ObjectData>(this, data_storage, object, kSmi);
    } else if (IsReadOnlyHeapObject(*object)) {
      // Read-only objects are immutable and can always be read directly.
      object_data = zone()->New<ObjectData>(this, data_storage, object,
                                            kUnserializedReadOnlyHeapObject);
// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
// FLAG_turbo_direct_heap_access.
#define CREATE_DATA_FOR_DIRECT_READ(name)                         \
    } else if (object->Is##name()) {                              \
      if (FLAG_turbo_direct_heap_access) {                        \
        object_data = zone()->New<ObjectData>(                    \
            this, data_storage, object, kNeverSerializedHeapObject); \
      } else {                                                    \
        CHECK_EQ(mode(), kSerializing);                           \
        AllowHandleAllocation handle_allocation;                  \
        object_data = zone()->New<name##Data>(this, data_storage, \
                                              Handle<name>::cast(object)); \
      }
    HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
#undef CREATE_DATA_FOR_DIRECT_READ
#define CREATE_DATA_FOR_SERIALIZATION(name)                     \
    } else if (object->Is##name()) {                            \
      CHECK_EQ(mode(), kSerializing);                           \
      AllowHandleAllocation handle_allocation;                  \
      object_data = zone()->New<name##Data>(this, data_storage, \
                                            Handle<name>::cast(object));
    HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
#undef CREATE_DATA_FOR_SERIALIZATION
    } else {
      UNREACHABLE();
    }
    // At this point the entry pointer is not guaranteed to be valid as
    // the refs_ hash table could be resized by one of the constructors above.
    DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
  }
  return object_data;
}
// clang-format on
// Convenience overload: canonicalizes {object} into a persistent handle
// first, then defers to the Handle<Object> overload above.
ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
  return GetOrCreateData(CanonicalPersistentHandle(object));
}
// Generates ObjectRef::Is<Name>() / As<Name>() for every type in the two
// broker object lists.
#define DEFINE_IS_AND_AS(Name)                                    \
  bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
  Name##Ref ObjectRef::As##Name() const {                         \
    DCHECK(Is##Name());                                           \
    return Name##Ref(broker(), data());                           \
  }
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
// Smis are handled outside the macro: they are values, not heap refs.
bool ObjectRef::IsSmi() const { return data()->is_smi(); }
int ObjectRef::AsSmi() const {
  DCHECK(IsSmi());
  // Handle-dereference is always allowed for Handle<Smi>.
  return Handle<Smi>::cast(object())->value();
}
// Returns the map to use for Object.create(this), or nullopt if none is
// known/serialized.
base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
                                                      broker()->mode());
    Handle<Map> instance_map;
    if (Map::TryGetObjectCreateMap(broker()->isolate(), object())
            .ToHandle(&instance_map)) {
      return MapRef(broker(), instance_map);
    } else {
      return base::Optional<MapRef>();
    }
  }
  ObjectData* map_data = data()->AsJSObject()->object_create_map(broker());
  if (map_data == nullptr) return base::Optional<MapRef>();
  // The MapRef is built from the handle or the serialized data, depending
  // on whether the map data itself permits direct heap access.
  if (map_data->should_access_heap()) {
    return MapRef(broker(), map_data->object());
  }
  return MapRef(broker(), map_data->AsMap());
}
// Generates MapRef::Is<Type>Map() predicates from the instance-type
// checker list.
#define DEF_TESTER(Type, ...)                              \
  bool MapRef::Is##Type##Map() const {                     \
    return InstanceTypeChecker::Is##Type(instance_type()); \
  }
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
// Returns the variant of this map with the given elements kind, or nullopt
// if no such generalization has been serialized.
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
                                                      broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return MapRef(broker(),
                  Map::AsElementsKind(broker()->isolate(), object(), kind));
  }
  if (kind == elements_kind()) return *this;
  // Search the serialized elements-kind generalizations for a match.
  const ZoneVector<ObjectData*>& elements_kind_generalizations =
      data()->AsMap()->elements_kind_generalizations();
  for (auto data : elements_kind_generalizations) {
    MapRef map(broker(), data);
    if (map.elements_kind() == kind) return map;
  }
  return base::Optional<MapRef>();
}
// Forwards to MapData::SerializeForElementLoad; a no-op when the map can be
// read directly from the heap.
void MapRef::SerializeForElementLoad() {
  if (data()->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeForElementLoad(broker());
}
// Forwards to MapData::SerializeForElementStore; a no-op when the map can
// be read directly from the heap.
void MapRef::SerializeForElementStore() {
  if (data()->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeForElementStore(broker());
}
namespace {
// This helper function has two modes. If {prototype_maps} is nullptr, the
// prototype chain is serialized as necessary to determine the result.
// Otherwise, the heap is untouched and the encountered prototypes are pushed
// onto {prototype_maps}.
bool HasOnlyStablePrototypesWithFastElementsHelper(
    JSHeapBroker* broker, MapRef const& map,
    ZoneVector<MapRef>* prototype_maps) {
  for (MapRef prototype_map = map;;) {
    if (prototype_maps == nullptr) prototype_map.SerializePrototype();
    prototype_map = prototype_map.prototype().AsHeapObject().map();
    if (prototype_map.oddball_type() == OddballType::kNull) return true;
    // NOTE(review): the first condition reads {map.prototype()} - the
    // original map's prototype, invariant across iterations - rather than
    // the current {prototype_map}'s prototype. This mirrors
    // MapRef::HasOnlyStablePrototypesWithFastElements below; confirm the
    // asymmetry with the walked chain is intentional before changing it.
    if (!map.prototype().IsJSObject() || !prototype_map.is_stable() ||
        !IsFastElementsKind(prototype_map.elements_kind())) {
      return false;
    }
    if (prototype_maps != nullptr) prototype_maps->push_back(prototype_map);
  }
}
}  // namespace
// Element loads only need the prototype serialized. Idempotent.
void MapData::SerializeForElementLoad(JSHeapBroker* broker) {
  if (serialized_for_element_load_) return;
  serialized_for_element_load_ = true;
  TraceScope tracer(broker, this, "MapData::SerializeForElementLoad");
  SerializePrototype(broker);
}
// Element stores need the whole prototype chain walked/serialized (the
// helper is invoked purely for its serialization side effect). Idempotent.
void MapData::SerializeForElementStore(JSHeapBroker* broker) {
  if (serialized_for_element_store_) return;
  serialized_for_element_store_ = true;
  TraceScope tracer(broker, this, "MapData::SerializeForElementStore");
  HasOnlyStablePrototypesWithFastElementsHelper(broker, MapRef(broker, this),
                                                nullptr);
}
// Walks this map's prototype chain until null, checking that every
// prototype map is stable with fast elements. When {prototype_maps} is
// non-null, the encountered maps are additionally collected into it.
bool MapRef::HasOnlyStablePrototypesWithFastElements(
    ZoneVector<MapRef>* prototype_maps) {
  for (MapRef prototype_map = *this;;) {
    if (prototype_maps == nullptr) prototype_map.SerializePrototype();
    prototype_map = prototype_map.prototype().AsHeapObject().map();
    if (prototype_map.oddball_type() == OddballType::kNull) return true;
    // NOTE(review): {prototype()} here is this->prototype(), i.e. the
    // starting map's prototype on every iteration (same shape as the
    // anonymous-namespace helper above) - confirm intended.
    if (!prototype().IsJSObject() || !prototype_map.is_stable() ||
        !IsFastElementsKind(prototype_map.elements_kind())) {
      return false;
    }
    if (prototype_maps != nullptr) prototype_maps->push_back(prototype_map);
  }
}
// Whether arrays with this map can be iterated with the fast path; reads
// the heap directly or the serialized bit, depending on the data kind.
bool MapRef::supports_fast_array_iteration() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    return SupportsFastArrayIteration(broker()->isolate(), object());
  }
  return data()->AsMap()->supports_fast_array_iteration();
}
// Whether arrays with this map can be resized with the fast path.
bool MapRef::supports_fast_array_resize() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    return SupportsFastArrayResize(broker()->isolate(), object());
  }
  return data()->AsMap()->supports_fast_array_resize();
}
// Instance size of the function's initial map assuming maximal slack
// tracking shrinkage.
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
  }
  return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
}
// Classifies an oddball map by comparing it against the canonical oddball
// maps in the factory; non-oddball maps yield kNone.
OddballType MapRef::oddball_type() const {
  if (instance_type() != ODDBALL_TYPE) {
    return OddballType::kNone;
  }
  Factory* f = broker()->isolate()->factory();
  // Identity-compares this map against one of the factory's oddball maps.
  auto is_same_map = [&](Handle<Map> candidate) {
    return equals(MapRef(broker(), candidate));
  };
  if (is_same_map(f->undefined_map())) return OddballType::kUndefined;
  if (is_same_map(f->null_map())) return OddballType::kNull;
  if (is_same_map(f->boolean_map())) return OddballType::kBoolean;
  if (is_same_map(f->the_hole_map())) return OddballType::kHole;
  if (is_same_map(f->uninitialized_map())) return OddballType::kUninitialized;
  // Everything else must be one of the internal marker oddballs.
  DCHECK(is_same_map(f->termination_exception_map()) ||
         is_same_map(f->arguments_marker_map()) ||
         is_same_map(f->optimized_out_map()) ||
         is_same_map(f->stale_register_map()));
  return OddballType::kOther;
}
// Returns the closure feedback cell at {index}, from the heap or from the
// serialized FeedbackVectorData.
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return FeedbackCellRef(broker(), object()->GetClosureFeedbackCell(index));
  }
  return FeedbackCellRef(
      broker(),
      data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
}
// Reads an unboxed double property. Serialized data only covers in-object
// fields, hence the CHECK on the slow path.
double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->RawFastDoublePropertyAt(index);
  }
  JSObjectData* object_data = data()->AsJSObject();
  CHECK(index.is_inobject());
  return object_data->GetInobjectField(index.property_index()).AsDouble();
}
// Same as above, but returns the raw bit pattern (preserves NaN payloads).
uint64_t JSObjectRef::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->RawFastDoublePropertyAsBitsAt(index);
  }
  JSObjectData* object_data = data()->AsJSObject();
  CHECK(index.is_inobject());
  return object_data->GetInobjectField(index.property_index()).AsBitsOfDouble();
}
// Reads a tagged property value; in-object only when serialized.
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
                                   object()->RawFastPropertyAt(index)));
  }
  JSObjectData* object_data = data()->AsJSObject();
  CHECK(index.is_inobject());
  return ObjectRef(
      broker(),
      object_data->GetInobjectField(index.property_index()).AsObject());
}
// Whether this site's boilerplate qualifies for inlined fast-literal
// creation.
bool AllocationSiteRef::IsFastLiteral() const {
  if (data_->should_access_heap()) {
    CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
    AllowHeapAllocationIfNeeded allow_heap_allocation(
        data()->kind(), broker()->mode());  // For TryMigrateInstance.
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return IsInlinableFastLiteral(
        handle(object()->boilerplate(), broker()->isolate()));
  }
  return data()->AsAllocationSite()->IsFastLiteral();
}
// Serialization forwarder; no-op when the site can be read from the heap.
void AllocationSiteRef::SerializeBoilerplate() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsAllocationSite()->SerializeBoilerplate(broker());
}
// Serialization forwarder; no-op when the object can be read from the heap.
void JSObjectRef::SerializeElements() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsJSObject()->SerializeElements(broker());
}
// Guarantees this object's elements live in old space, copying-and-tenuring
// a young COW array if necessary. On the serialized path, tenuring must
// already have happened during boilerplate serialization (CHECKed).
void JSObjectRef::EnsureElementsTenured() {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
                                                      broker()->mode());
    Handle<FixedArrayBase> object_elements = elements().object();
    if (ObjectInYoungGeneration(*object_elements)) {
      // If we would like to pretenure a fixed cow array, we must ensure that
      // the array is already in old space, otherwise we'll create too many
      // old-to-new-space pointers (overflowing the store buffer).
      object_elements =
          broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
              Handle<FixedArray>::cast(object_elements));
      object()->set_elements(*object_elements);
    }
    return;
  }
  CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured());
}
// Returns the FieldIndex of the field at {descriptor_index}; on the
// serialized path the descriptor must already be in {contents} (at()
// throws/aborts otherwise).
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return FieldIndex::ForDescriptor(*object(), descriptor_index);
  }
  DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
  return descriptors->contents().at(descriptor_index.as_int()).field_index;
}
// Byte offset of in-object property {i}, derived from the serialized
// properties start when heap access is not allowed.
int MapRef::GetInObjectPropertyOffset(int i) const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->GetInObjectPropertyOffset(i);
  }
  return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
}
PropertyDetails MapRef::GetPropertyDetails(
InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
return object()
->instance_descriptors(kRelaxedLoad)
.GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index.as_int()).details;
}
NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
broker()->mode());
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
return NameRef(broker(), broker()->CanonicalPersistentHandle(
object()
->instance_descriptors(kRelaxedLoad)
.GetKey(descriptor_index)));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return NameRef(broker(),
descriptors->contents().at(descriptor_index.as_int()).key);
}
bool MapRef::IsFixedCowArrayMap() const {
Handle<Map> fixed_cow_array_map =
ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map_handle();
return equals(MapRef(broker(), fixed_cow_array_map));
}
// A map is "primitive" when its instance type falls in the primitive heap
// object range.
bool MapRef::IsPrimitiveMap() const {
  return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
}
// Returns the map that introduced the field at |descriptor_index|, via the
// heap Map walk or the serialized field_owner entry.
MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    Handle<Map> owner(
        object()->FindFieldOwner(broker()->isolate(), descriptor_index),
        broker()->isolate());
    return MapRef(broker(), owner);
  }
  DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
  return MapRef(
      broker(),
      descriptors->contents().at(descriptor_index.as_int()).field_owner);
}
// FieldType of the property at |descriptor_index|, from the (relaxed-loaded)
// instance descriptors on the heap path or serialized data otherwise.
ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    Handle<FieldType> field_type(object()
                                     ->instance_descriptors(kRelaxedLoad)
                                     .GetFieldType(descriptor_index),
                                 broker()->isolate());
    return ObjectRef(broker(), field_type);
  }
  DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
  return ObjectRef(
      broker(),
      descriptors->contents().at(descriptor_index.as_int()).field_type);
}
// Whether the field at |descriptor_index| is stored as an unboxed double.
bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->IsUnboxedDoubleField(
        FieldIndex::ForDescriptor(*object(), descriptor_index));
  }
  DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
  return descriptors->contents()
      .at(descriptor_index.as_int())
      .is_unboxed_double_field;
}
// First character of the string (heap read of index 0, or the serialized
// first_char).
uint16_t StringRef::GetFirstChar() {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->Get(0);
  }
  return data()->AsString()->first_char();
}
// Numeric value of the string, if any, computed via StringToDouble on the
// heap path or taken from the serialized to_number.
base::Optional<double> StringRef::ToNumber() {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    return StringToDouble(object());
  }
  return data()->AsString()->to_number();
}
// Length of the boilerplate's constant elements array.
int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->constant_elements().length();
  }
  return data()->AsArrayBoilerplateDescription()->constants_elements_length();
}
// Element |i| of the fixed array, wrapped in a canonical persistent handle on
// the heap path or taken from the serialized contents.
ObjectRef FixedArrayRef::get(int i) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return ObjectRef(broker(),
                     broker()->CanonicalPersistentHandle(object()->get(i)));
  }
  return ObjectRef(broker(), data()->AsFixedArray()->Get(i));
}
// Element |i| as a raw Float64 bit pattern (preserves holes/NaN payloads),
// from the heap representation or the serialized contents.
Float64 FixedDoubleArrayRef::get(int i) const {
  if (!data_->should_access_heap()) {
    return data()->AsFixedDoubleArray()->Get(i);
  }
  AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                          broker()->mode());
  return Float64::FromBits(object()->get_representation(i));
}
// Raw bytecode byte at |index|; always reads through the heap object.
uint8_t BytecodeArrayRef::get(int index) const { return object()->get(index); }
// Address of the first bytecode; always reads through the heap object.
Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
  return object()->GetFirstBytecodeAddress();
}
// Constant-pool entry at |index|, as a canonical persistent handle on the
// heap path or from the serialized constant pool.
Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return broker()->CanonicalPersistentHandle(
        object()->constant_pool().get(index));
  }
  return data()->AsBytecodeArray()->GetConstantAtIndex(index,
                                                       broker()->isolate());
}
// Whether the constant-pool entry at |index| is a Smi.
bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->constant_pool().get(index).IsSmi();
  }
  return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
}
// Constant-pool entry at |index| cast to a Smi (callers must ensure it is
// one; see IsConstantAtIndexSmi).
Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return Smi::cast(object()->constant_pool().get(index));
  }
  return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
}
// Serializes the bytecode array's data for compilation; no-op when heap
// access is allowed.
void BytecodeArrayRef::SerializeForCompilation() {
  if (data_->should_access_heap()) return;
  data()->AsBytecodeArray()->SerializeForCompilation(broker());
}
// Source-position table, always read from the heap via a canonical handle.
Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
  return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
}
// Start address of the handler table's data; always reads the heap object.
Address BytecodeArrayRef::handler_table_address() const {
  return reinterpret_cast<Address>(
      object()->handler_table().GetDataStartAddress());
}
// Size (length) of the handler table; always reads the heap object.
int BytecodeArrayRef::handler_table_size() const {
  return object()->handler_table().length();
}
// Early-return helper: when direct heap access is allowed, return
// object()->name() (a non-Ref value). Otherwise falls through to the
// caller-supplied serialized-data path.
#define IF_ACCESS_FROM_HEAP_C(name) \
  if (data_->should_access_heap()) { \
    AllowHandleAllocationIfNeeded handle_allocation(data_->kind(), \
                                                    broker()->mode()); \
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data_->kind(), \
                                                            broker()->mode()); \
    return object()->name(); \
  }
// Like IF_ACCESS_FROM_HEAP_C but wraps the heap value into a result##Ref via
// a canonical persistent handle.
#define IF_ACCESS_FROM_HEAP(result, name) \
  if (data_->should_access_heap()) { \
    AllowHandleAllocationIfNeeded handle_allocation(data_->kind(), \
                                                    broker()->mode()); \
    AllowHandleDereferenceIfNeeded handle_dereference(data_->kind(), \
                                                      broker()->mode()); \
    return result##Ref(broker(), \
                       broker()->CanonicalPersistentHandle(object()->name())); \
  }
// Macros for definining a const getter that, depending on the data kind,
// either looks into the heap or into the serialized data.
#define BIMODAL_ACCESSOR(holder, result, name) \
  result##Ref holder##Ref::name() const { \
    IF_ACCESS_FROM_HEAP(result, name); \
    return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
  }
// Like above except that the result type is not an XYZRef.
#define BIMODAL_ACCESSOR_C(holder, result, name) \
  result holder##Ref::name() const { \
    IF_ACCESS_FROM_HEAP_C(name); \
    return ObjectRef::data()->As##holder()->name(); \
  }
// Like above but for BitFields.
#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
  typename BitField::FieldType holder##Ref::name() const { \
    IF_ACCESS_FROM_HEAP_C(name); \
    return BitField::decode(ObjectRef::data()->As##holder()->field()); \
  }
// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
// kSerialized only for methods that we identified to be safe.
#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
    AllowHandleAllocationIfNeeded handle_allocation( \
        data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
    AllowHandleDereferenceIfNeeded allow_handle_dereference( \
        data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
    return result##Ref(broker(), \
                       broker()->CanonicalPersistentHandle(object()->name())); \
  }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
    AllowHandleAllocationIfNeeded handle_allocation( \
        data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
    AllowHandleDereferenceIfNeeded allow_handle_dereference( \
        data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
    return object()->name(); \
  }
// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is because
// we identified the method to be safe to use direct heap access, but the
// holder##Data class still needs to be serialized.
#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
  result##Ref holder##Ref::name() const { \
    IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
    return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
  }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
  result holder##Ref::name() const { \
    IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
    return ObjectRef::data()->As##holder()->name(); \
  }
// Instantiations of the bimodal accessors defined above: each line expands to
// a holder##Ref getter that reads from the heap when allowed, and from the
// serialized holder##Data otherwise.
BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64)
BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
BIMODAL_ACCESSOR_C(BytecodeArray, int, parameter_count)
BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register,
                   incoming_new_target_or_generator_register)
BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
BIMODAL_ACCESSOR(HeapObject, Map, map)
BIMODAL_ACCESSOR_C(HeapNumber, double, value)
BIMODAL_ACCESSOR(JSArray, Object, length)
BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function)
BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
BIMODAL_ACCESSOR_C(JSFunction, bool, HasAttachedOptimizedCode)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR(JSFunction, Code, code)
BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
// Bit-field accessors decode a single flag/field out of the Map's packed
// bit_field/bit_field2/bit_field3 words.
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::Bits2::ElementsKindBits)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map,
                   Map::Bits3::IsDictionaryMapBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::Bits3::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
                   Map::Bits3::NumberOfOwnDescriptorsBits)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
                   Map::Bits3::IsMigrationTargetBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::Bits3::IsExtensibleBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot,
                   Map::Bits1::HasPrototypeSlotBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
                   Map::Bits1::IsAccessCheckNeededBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_callable, Map::Bits1::IsCallableBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_indexed_interceptor,
                   Map::Bits1::HasIndexedInterceptorBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_constructor, Map::Bits1::IsConstructorBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_undetectable,
                   Map::Bits1::IsUndetectableBit)
BIMODAL_ACCESSOR_C(Map, int, instance_size)
BIMODAL_ACCESSOR_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR(Map, HeapObject, prototype)
BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
BIMODAL_ACCESSOR(Map, Object, GetConstructor)
BIMODAL_ACCESSOR(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
BIMODAL_ACCESSOR_C(Code, unsigned, inlined_bytecode_size)
// One bimodal accessor per broker-tracked native-context field.
#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
  BIMODAL_ACCESSOR(NativeContext, type, name)
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
#undef DEF_NATIVE_CONTEXT_ACCESSOR
BIMODAL_ACCESSOR_C(ObjectBoilerplateDescription, int, size)
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
// The template's call code, if present. The serialized path may have no call
// code, in which case nullopt is returned.
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
  if (data_->should_access_heap()) {
    return CallHandlerInfoRef(broker(), broker()->CanonicalPersistentHandle(
                                            object()->call_code(kAcquireLoad)));
  }
  ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
  if (!call_code) return base::nullopt;
  return CallHandlerInfoRef(broker(), call_code);
}
// Whether the template's signature slot holds undefined.
bool FunctionTemplateInfoRef::is_signature_undefined() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    return object()->signature().IsUndefined(broker()->isolate());
  }
  return data()->AsFunctionTemplateInfo()->is_signature_undefined();
}
// Whether the template has call code. On the heap path this is derived from
// CallOptimization's simple-api-call analysis.
bool FunctionTemplateInfoRef::has_call_code() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    CallOptimization call_optimization(broker()->isolate(), object());
    return call_optimization.is_simple_api_call();
  }
  return data()->AsFunctionTemplateInfo()->has_call_code();
}
BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
// Looks up the expected API-call holder for |receiver_map|. The heap path
// queries CallOptimization directly; the serialized path consults (and, when
// the policy allows, populates) the per-template known_receivers cache.
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
    MapRef receiver_map, SerializationPolicy policy) {
  const HolderLookupResult not_found;
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    CallOptimization call_optimization(broker()->isolate(), object());
    Handle<Map> receiver_map_ref(receiver_map.object());
    // Non-JSReceiver maps, and access-checked maps the template does not
    // accept, never have a holder.
    if (!receiver_map_ref->IsJSReceiverMap() ||
        (receiver_map_ref->is_access_check_needed() &&
         !object()->accept_any_receiver())) {
      return not_found;
    }
    HolderLookupResult result;
    Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
        receiver_map_ref, &result.lookup);
    switch (result.lookup) {
      case CallOptimization::kHolderFound:
        result.holder = JSObjectRef(broker(), holder);
        break;
      default:
        DCHECK_EQ(result.holder, base::nullopt);
        break;
    }
    return result;
  }
  FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
  // Fast path: the answer for this receiver map was computed before.
  KnownReceiversMap::iterator lookup_it =
      fti_data->known_receivers().find(receiver_map.data());
  if (lookup_it != fti_data->known_receivers().cend()) {
    return lookup_it->second;
  }
  // Under kAssumeSerialized we must not compute new results; report a miss.
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_BROKER_MISSING(broker(),
                         "holder for receiver with map " << receiver_map);
    return not_found;
  }
  // Mirror of the heap-path receiver filter, cached as not_found.
  if (!receiver_map.IsJSReceiverMap() ||
      (receiver_map.is_access_check_needed() && !accept_any_receiver())) {
    fti_data->known_receivers().insert({receiver_map.data(), not_found});
    return not_found;
  }
  HolderLookupResult result;
  CallOptimization call_optimization(broker()->isolate(), object());
  Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
      receiver_map.object(), &result.lookup);
  switch (result.lookup) {
    case CallOptimization::kHolderFound: {
      result.holder = JSObjectRef(broker(), holder);
      fti_data->known_receivers().insert({receiver_map.data(), result});
      break;
    }
    default: {
      DCHECK_EQ(result.holder, base::nullopt);
      fti_data->known_receivers().insert({receiver_map.data(), result});
    }
  }
  return result;
}
// Further bimodal-accessor instantiations for CallHandlerInfo, ScopeInfo,
// SharedFunctionInfo, String and FeedbackCell.
BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
BIMODAL_ACCESSOR_C(ScopeInfo, int, ContextLength)
BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
BIMODAL_ACCESSOR(SharedFunctionInfo, BytecodeArray, GetBytecodeArray)
// SFI fields use the WITH_FLAG variant: direct heap access is forced when
// FLAG_turbo_direct_heap_access is set, even in kSerialized mode.
#define DEF_SFI_ACCESSOR(type, name) \
  BIMODAL_ACCESSOR_WITH_FLAG_C(SharedFunctionInfo, type, name)
BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
#undef DEF_SFI_ACCESSOR
BIMODAL_ACCESSOR_C(SharedFunctionInfo, SharedFunctionInfo::Inlineability,
                   GetInlineability)
BIMODAL_ACCESSOR_C(String, int, length)
BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
// Strong (non-weak) descriptor value at |descriptor_index|, or nullopt when
// the slot is weak/cleared (heap path) or unserialized (serialized path).
base::Optional<ObjectRef> MapRef::GetStrongValue(
    InternalIndex descriptor_index) const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    MaybeObject value =
        object()->instance_descriptors(kRelaxedLoad).GetValue(descriptor_index);
    HeapObject object;
    if (value.GetHeapObjectIfStrong(&object)) {
      return ObjectRef(broker(), broker()->CanonicalPersistentHandle((object)));
    }
    return base::nullopt;
  }
  ObjectData* value = data()->AsMap()->GetStrongValue(descriptor_index);
  if (!value) {
    return base::nullopt;
  }
  return ObjectRef(broker(), value);
}
// Serializes this map's root map; only legal while the broker is serializing.
void MapRef::SerializeRootMap() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeRootMap(broker());
}
// Root of this map's transition tree, or nullopt when the serialized root map
// is missing (traced as a broker miss).
base::Optional<MapRef> MapRef::FindRootMap() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return MapRef(broker(), broker()->CanonicalPersistentHandle(
                                object()->FindRootMap(broker()->isolate())));
  }
  ObjectData* map_data = data()->AsMap()->FindRootMap();
  if (map_data != nullptr) {
    return MapRef(broker(), map_data);
  }
  TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
  return base::nullopt;
}
// Backing-store data pointer of the typed array (heap read or serialized
// copy).
void* JSTypedArrayRef::data_ptr() const {
  if (!data_->should_access_heap()) {
    return data()->AsJSTypedArray()->data_ptr();
  }
  AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                          broker()->mode());
  return object()->DataPtr();
}
// Whether in-object slack tracking is still running (construction counter in
// bit_field3 has not reached kNoSlackTracking).
bool MapRef::IsInobjectSlackTrackingInProgress() const {
  IF_ACCESS_FROM_HEAP_C(IsInobjectSlackTrackingInProgress);
  return Map::Bits3::ConstructionCounterBits::decode(
             data()->AsMap()->bit_field3()) != Map::kNoSlackTracking;
}
// Constructor function index; only valid for primitive maps (checked on the
// serialized path).
int MapRef::constructor_function_index() const {
  IF_ACCESS_FROM_HEAP_C(GetConstructorFunctionIndex);
  CHECK(IsPrimitiveMap());
  return data()->AsMap()->constructor_function_index();
}
// Stability is the negation of the IsUnstable bit in bit_field3.
bool MapRef::is_stable() const {
  IF_ACCESS_FROM_HEAP_C(is_stable);
  return !Map::Bits3::IsUnstableBit::decode(data()->AsMap()->bit_field3());
}
// Whether this map can be deprecated; requires at least one own descriptor.
bool MapRef::CanBeDeprecated() const {
  IF_ACCESS_FROM_HEAP_C(CanBeDeprecated);
  CHECK_GT(NumberOfOwnDescriptors(), 0);
  return data()->AsMap()->can_be_deprecated();
}
// Whether this map can have transitions.
bool MapRef::CanTransition() const {
  IF_ACCESS_FROM_HEAP_C(CanTransition);
  return data()->AsMap()->can_transition();
}
// Word offset at which in-object properties begin.
int MapRef::GetInObjectPropertiesStartInWords() const {
  IF_ACCESS_FROM_HEAP_C(GetInObjectPropertiesStartInWords);
  return data()->AsMap()->in_object_properties_start_in_words();
}
// Number of in-object properties.
int MapRef::GetInObjectProperties() const {
  IF_ACCESS_FROM_HEAP_C(GetInObjectProperties);
  return data()->AsMap()->in_object_properties();
}
// Serializes the chain of outer scope infos; only legal while serializing.
void ScopeInfoRef::SerializeScopeInfoChain() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsScopeInfo()->SerializeScopeInfoChain(broker());
}
// Whether the string is an external string.
bool StringRef::IsExternalString() const {
  IF_ACCESS_FROM_HEAP_C(IsExternalString);
  return data()->AsString()->is_external_string();
}
// Raw C callback address stored in the call handler info.
Address CallHandlerInfoRef::callback() const {
  if (data_->should_access_heap()) {
    return v8::ToCData<Address>(object()->callback());
  }
  return HeapObjectRef::data()->AsCallHandlerInfo()->callback();
}
// Address of the template's fast-API C function.
Address FunctionTemplateInfoRef::c_function() const {
  if (data_->should_access_heap()) {
    return v8::ToCData<Address>(object()->GetCFunction());
  }
  return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_function();
}
// Signature descriptor of the template's fast-API C function.
const CFunctionInfo* FunctionTemplateInfoRef::c_signature() const {
  if (data_->should_access_heap()) {
    return v8::ToCData<CFunctionInfo*>(object()->GetCSignature());
  }
  return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_signature();
}
// Whether the string is a sequential string.
bool StringRef::IsSeqString() const {
  IF_ACCESS_FROM_HEAP_C(IsSeqString);
  return data()->AsString()->is_seq_string();
}
// ScopeInfo of the native context (heap handle or serialized data).
ScopeInfoRef NativeContextRef::scope_info() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return ScopeInfoRef(
        broker(), broker()->CanonicalPersistentHandle(object()->scope_info()));
  }
  return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
}
// SharedFunctionInfo associated with this feedback vector.
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return SharedFunctionInfoRef(
        broker(),
        broker()->CanonicalPersistentHandle(object()->shared_function_info()));
  }
  return SharedFunctionInfoRef(
      broker(), data()->AsFeedbackVector()->shared_function_info());
}
// Function map stored at context slot |index|; |index| must lie in the
// [FIRST_FUNCTION_MAP_INDEX, LAST_FUNCTION_MAP_INDEX] range. The serialized
// path indexes into the function_maps cache, which is offset by the first
// function-map slot.
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
  DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
  DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
  if (data_->should_access_heap()) {
    return get(index).value().AsMap();
  }
  return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
                              index - Context::FIRST_FUNCTION_MAP_INDEX));
}
// Initial JSArray map for |kind|. Only the six standard JSArray elements
// kinds are valid; any other kind is unreachable.
MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
  if (kind == PACKED_SMI_ELEMENTS) return js_array_packed_smi_elements_map();
  if (kind == HOLEY_SMI_ELEMENTS) return js_array_holey_smi_elements_map();
  if (kind == PACKED_DOUBLE_ELEMENTS) {
    return js_array_packed_double_elements_map();
  }
  if (kind == HOLEY_DOUBLE_ELEMENTS) {
    return js_array_holey_double_elements_map();
  }
  if (kind == PACKED_ELEMENTS) return js_array_packed_elements_map();
  if (kind == HOLEY_ELEMENTS) return js_array_holey_elements_map();
  UNREACHABLE();
}
// Constructor function for a primitive |map|, keyed by the map's constructor
// function index; nullopt when the map records no constructor.
base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
    const MapRef& map) const {
  CHECK(map.IsPrimitiveMap());
  switch (map.constructor_function_index()) {
    case Map::kNoConstructorFunctionIndex:
      return base::nullopt;
    case Context::BIGINT_FUNCTION_INDEX:
      return bigint_function();
    case Context::BOOLEAN_FUNCTION_INDEX:
      return boolean_function();
    case Context::NUMBER_FUNCTION_INDEX:
      return number_function();
    case Context::STRING_FUNCTION_INDEX:
      return string_function();
    case Context::SYMBOL_FUNCTION_INDEX:
      return symbol_function();
    default:
      UNREACHABLE();
  }
}
bool ObjectRef::IsNullOrUndefined() const {
if (IsSmi()) return false;
OddballType type = AsHeapObject().map().oddball_type();
return type == OddballType::kNull || type == OddballType::kUndefined;
}
// True iff this ref is the hole oddball.
bool ObjectRef::IsTheHole() const {
  if (!IsHeapObject()) return false;
  return AsHeapObject().map().oddball_type() == OddballType::kHole;
}
// ToBoolean of this object: delegated to Object::BooleanValue on the heap
// path; Smis test against zero, other objects use the serialized value.
bool ObjectRef::BooleanValue() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return object()->BooleanValue(broker()->isolate());
  }
  return IsSmi() ? (AsSmi() != 0) : data()->AsHeapObject()->boolean_value();
}
// Numeric value of an oddball ref: true -> 1, false -> 0, undefined -> NaN,
// null -> 0; anything else yields Nothing.
Maybe<double> ObjectRef::OddballToNumber() const {
  switch (AsHeapObject().map().oddball_type()) {
    case OddballType::kBoolean: {
      ObjectRef true_ref(broker(),
                         broker()->isolate()->factory()->true_value());
      return Just(this->equals(true_ref) ? 1.0 : 0.0);
    }
    case OddballType::kUndefined:
      return Just(std::numeric_limits<double>::quiet_NaN());
    case OddballType::kNull:
      return Just(0.0);
    default:
      return Nothing<double>();
  }
}
// Constant own element at |index| for JSObjects and Strings; nullopt for
// other receivers, for unserialized elements, or when the element is absent.
base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
    uint32_t index, SerializationPolicy policy) const {
  if (!(IsJSObject() || IsString())) return base::nullopt;
  if (data_->should_access_heap()) {
    // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optmization for
    // concurrent inlining when we have the infrastructure to safely do so.
    if (broker()->is_concurrent_inlining() && IsString()) return base::nullopt;
    CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
    return GetOwnElementFromHeap(broker(), object(), index, true);
  }
  ObjectData* element = nullptr;
  if (IsJSObject()) {
    element =
        data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
  } else if (IsString()) {
    element = data()->AsString()->GetCharAsString(broker(), index, policy);
  }
  if (element == nullptr) return base::nullopt;
  return ObjectRef(broker(), element);
}
// Own data property at |index| with the given representation; nullopt when
// the property is not available (e.g. not serialized under the policy).
base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty(
    Representation field_representation, FieldIndex index,
    SerializationPolicy policy) const {
  if (data_->should_access_heap()) {
    return GetOwnDataPropertyFromHeap(broker(),
                                      Handle<JSObject>::cast(object()),
                                      field_representation, index);
  }
  ObjectData* property = data()->AsJSObject()->GetOwnDataProperty(
      broker(), field_representation, index, policy);
  if (property == nullptr) return base::nullopt;
  return ObjectRef(broker(), property);
}
// Element |index| of the array's copy-on-write backing store; nullopt when
// the elements are not COW or not available under the serialization policy.
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
    uint32_t index, SerializationPolicy policy) const {
  if (data_->should_access_heap()) {
    if (!object()->elements().IsCowArray()) return base::nullopt;
    return GetOwnElementFromHeap(broker(), object(), index, false);
  }
  if (policy == SerializationPolicy::kSerializeIfNeeded) {
    data()->AsJSObject()->SerializeElements(broker());
  } else if (!data()->AsJSObject()->serialized_elements()) {
    TRACE(broker(), "'elements' on " << this);
    return base::nullopt;
  }
  // Only COW-backed arrays qualify (their elements map is the fixed COW map).
  if (!elements().map().IsFixedCowArrayMap()) return base::nullopt;
  ObjectData* element =
      data()->AsJSArray()->GetOwnElement(broker(), index, policy);
  if (element == nullptr) return base::nullopt;
  return ObjectRef(broker(), element);
}
// Cell at |cell_index| of this module; allows direct heap access also under
// FLAG_turbo_direct_heap_access. Returns nullopt on a serialized-data miss.
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(
        data()->kind(), broker()->mode(), FLAG_turbo_direct_heap_access);
    AllowHandleDereferenceIfNeeded allow_handle_dereference(
        data()->kind(), broker()->mode(), FLAG_turbo_direct_heap_access);
    return CellRef(broker(), broker()->CanonicalPersistentHandle(
                                 object()->GetCell(cell_index)));
  }
  ObjectData* cell =
      data()->AsSourceTextModule()->GetCell(broker(), cell_index);
  if (cell == nullptr) return base::nullopt;
  return CellRef(broker(), cell);
}
// The module's import.meta object (heap handle or serialized copy).
ObjectRef SourceTextModuleRef::import_meta() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return ObjectRef(
        broker(), broker()->CanonicalPersistentHandle(object()->import_meta()));
  }
  return ObjectRef(broker(),
                   data()->AsSourceTextModule()->GetImportMeta(broker()));
}
// Creates a ref for |object|, looking up or creating the backing ObjectData
// according to the broker's current mode. Fails a CHECK when the object is
// unknown to the broker.
ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
                     bool check_type)
    : broker_(broker) {
  switch (broker->mode()) {
    // We may have to create data in JSHeapBroker::kSerialized as well since we
    // read the data from read only heap objects directly instead of serializing
    // them.
    case JSHeapBroker::kSerialized:
    case JSHeapBroker::kSerializing:
      data_ = broker->GetOrCreateData(object);
      break;
    case JSHeapBroker::kDisabled: {
      RefsMap::Entry* entry = broker->refs_->LookupOrInsert(object.address());
      ObjectData** storage = &(entry->value);
      if (*storage == nullptr) {
        AllowHandleDereferenceIfNeeded allow_handle_dereference(
            kUnserializedHeapObject, broker->mode());
        entry->value = broker->zone()->New<ObjectData>(
            broker, storage, object,
            object->IsSmi() ? kSmi : kUnserializedHeapObject);
      }
      data_ = *storage;
      break;
    }
    case JSHeapBroker::kRetired:
      UNREACHABLE();
  }
  if (!data_) {  // TODO(mslekova): Remove once we're on the background thread.
    // Fix: this branch is only taken when |data_| is null, so the original
    // data_->kind() here was a guaranteed null-pointer dereference. Use the
    // unserialized kind directly to permit printing the object for debugging.
    AllowHandleDereferenceIfNeeded allow_handle_dereference(
        kUnserializedHeapObject, broker->mode());
    object->Print();
  }
  CHECK_WITH_MSG(data_ != nullptr, "Object is not known to the heap broker");
}
namespace {
// Classifies |map| into an OddballType by comparing against the read-only
// roots' oddball maps; non-oddball maps yield kNone.
OddballType GetOddballType(Isolate* isolate, Map map) {
  if (map.instance_type() != ODDBALL_TYPE) {
    return OddballType::kNone;
  }
  ReadOnlyRoots roots(isolate);
  if (map == roots.undefined_map()) {
    return OddballType::kUndefined;
  }
  if (map == roots.null_map()) {
    return OddballType::kNull;
  }
  if (map == roots.boolean_map()) {
    return OddballType::kBoolean;
  }
  if (map == roots.the_hole_map()) {
    return OddballType::kHole;
  }
  if (map == roots.uninitialized_map()) {
    return OddballType::kUninitialized;
  }
  // All remaining oddballs (termination exception, arguments marker,
  // optimized-out, stale register) fold into kOther.
  DCHECK(map == roots.termination_exception_map() ||
         map == roots.arguments_marker_map() ||
         map == roots.optimized_out_map() || map == roots.stale_register_map());
  return OddballType::kOther;
}
}  // namespace
// Builds a HeapObjectType (instance type + undetectable/callable flags +
// oddball classification) from the heap map or the serialized map data.
HeapObjectType HeapObjectRef::GetHeapObjectType() const {
  if (data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    Map map = Handle<HeapObject>::cast(object())->map();
    HeapObjectType::Flags flags(0);
    if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
    if (map.is_callable()) flags |= HeapObjectType::kCallable;
    return HeapObjectType(map.instance_type(), flags,
                          GetOddballType(broker()->isolate(), map));
  }
  HeapObjectType::Flags flags(0);
  if (map().is_undetectable()) flags |= HeapObjectType::kUndetectable;
  if (map().is_callable()) flags |= HeapObjectType::kCallable;
  return HeapObjectType(map().instance_type(), flags, map().oddball_type());
}
// The allocation site's boilerplate object, or nullopt when the serialized
// site has none.
base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return JSObjectRef(
        broker(), broker()->CanonicalPersistentHandle(object()->boilerplate()));
  }
  ObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
  if (boilerplate) {
    return JSObjectRef(broker(), boilerplate);
  } else {
    return base::nullopt;
  }
}
// Elements kind, read via the object's map ref.
ElementsKind JSObjectRef::GetElementsKind() const {
  return map().elements_kind();
}
// The object's elements backing store (heap handle or serialized data).
FixedArrayBaseRef JSObjectRef::elements() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return FixedArrayBaseRef(
        broker(), broker()->CanonicalPersistentHandle(object()->elements()));
  }
  return FixedArrayBaseRef(broker(), data()->AsJSObject()->elements());
}
// Length of the backing store (heap or serialized).
int FixedArrayBaseRef::length() const {
  IF_ACCESS_FROM_HEAP_C(length);
  return data()->AsFixedArrayBase()->length();
}
// Serialized element at |i|; bounds- and null-checked.
ObjectData* FixedArrayData::Get(int i) const {
  CHECK_LT(i, static_cast<int>(contents_.size()));
  CHECK_NOT_NULL(contents_[i]);
  return contents_[i];
}
// Serialized double element at |i|; bounds-checked.
Float64 FixedDoubleArrayData::Get(int i) const {
  CHECK_LT(i, static_cast<int>(contents_.size()));
  return contents_[i];
}
// SFI behind this feedback cell, available only when the cell's value is a
// feedback vector that has been serialized.
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
    const {
  if (value().IsFeedbackVector()) {
    FeedbackVectorRef vector = value().AsFeedbackVector();
    if (vector.serialized()) {
      return value().AsFeedbackVector().shared_function_info();
    }
  }
  return base::nullopt;
}
// Serializes the feedback vector; no-op when heap access is allowed.
void FeedbackVectorRef::Serialize() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsFeedbackVector()->Serialize(broker());
}
// Heap-accessible vectors count as serialized.
bool FeedbackVectorRef::serialized() const {
  if (data_->should_access_heap()) return true;
  return data()->AsFeedbackVector()->serialized();
}
bool NameRef::IsUniqueName() const {
  // Must match Name::IsUniqueName.
  return IsInternalizedString() || IsSymbol();
}
// JSRegExp field accessors. Each reads the field from the heap when direct
// access is allowed (via IF_ACCESS_FROM_HEAP) and otherwise from the
// serialized JSRegExp data. The qualified ObjectRef::data() call
// disambiguates the inherited data() accessor from the data *field* getter
// being defined here.
ObjectRef JSRegExpRef::data() const {
  IF_ACCESS_FROM_HEAP(Object, data);
  return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->data());
}
ObjectRef JSRegExpRef::flags() const {
  IF_ACCESS_FROM_HEAP(Object, flags);
  return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->flags());
}
ObjectRef JSRegExpRef::last_index() const {
  IF_ACCESS_FROM_HEAP(Object, last_index);
  return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->last_index());
}
ObjectRef JSRegExpRef::raw_properties_or_hash() const {
  IF_ACCESS_FROM_HEAP(Object, raw_properties_or_hash);
  return ObjectRef(broker(),
                   ObjectRef::data()->AsJSRegExp()->raw_properties_or_hash());
}
ObjectRef JSRegExpRef::source() const {
  IF_ACCESS_FROM_HEAP(Object, source);
  return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
}
// Serializes this regexp as a boilerplate for regexp-literal lowering.
// No-op for direct-heap-access refs; otherwise requires serializing mode.
void JSRegExpRef::SerializeAsRegExpBoilerplate() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker());
}
// Returns the underlying heap object handle. In debug builds, records that
// only the object's identity (not its serialized data) has been used so far,
// which feeds the broker's data-usage diagnostics.
Handle<Object> ObjectRef::object() const {
#ifdef DEBUG
  if (broker()->mode() == JSHeapBroker::kSerialized &&
      data_->used_status == ObjectData::Usage::kUnused) {
    data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
  }
#endif  // DEBUG
  return data_->object();
}
// Generates the typed object() getter for each Ref type in the broker's
// object lists. The debug variant mirrors ObjectRef::object() above by
// tracking identity-only usage of the data. (Comments are kept outside the
// macro bodies: a // comment inside a line-continued macro would splice the
// following lines into the comment.)
#ifdef DEBUG
#define DEF_OBJECT_GETTER(T)                                                 \
  Handle<T> T##Ref::object() const {                                         \
    if (broker()->mode() == JSHeapBroker::kSerialized &&                     \
        data_->used_status == ObjectData::Usage::kUnused) {                  \
      data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;             \
    }                                                                        \
    return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
  }
#else
#define DEF_OBJECT_GETTER(T)                                                 \
  Handle<T> T##Ref::object() const {                                         \
    return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
  }
#endif  // DEBUG
// Instantiate the getter for every serialized and never-serialized Ref type.
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
// Accessor for the broker this ref belongs to.
JSHeapBroker* ObjectRef::broker() const { return broker_; }
// Returns the ref's ObjectData after checking that its kind is consistent
// with the broker's current mode: no serialized data while the broker is
// disabled, and no unserialized heap objects once serialization has started.
ObjectData* ObjectRef::data() const {
  switch (broker()->mode()) {
    case JSHeapBroker::kDisabled:
      CHECK_NE(data_->kind(), kSerializedHeapObject);
      return data_;
    case JSHeapBroker::kSerializing:
      CHECK_NE(data_->kind(), kUnserializedHeapObject);
      return data_;
    case JSHeapBroker::kSerialized:
#ifdef DEBUG
      // Record that the serialized data (not just identity) was consumed.
      data_->used_status = ObjectData::Usage::kDataUsed;
#endif  // DEBUG
      CHECK_NE(data_->kind(), kUnserializedHeapObject);
      return data_;
    case JSHeapBroker::kRetired:
      UNREACHABLE();
  }
}
// Returns a NoChange reduction while tracing which call site was missing
// broker data, to aid debugging of incomplete serialization.
Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
                                       const char* function, int line) {
  TRACE_MISSING(broker, "data in function " << function << " at line " << line);
  return AdvancedReducer::NoChange();
}
// Serialized representation of a NativeContext; function_maps_ is filled
// lazily by Serialize() below.
NativeContextData::NativeContextData(JSHeapBroker* broker, ObjectData** storage,
                                     Handle<NativeContext> object)
    : ContextData(broker, storage, object), function_maps_(broker->zone()) {}
// Serializes the native context's well-known fields (idempotent). For each
// field: create/fetch its ObjectData, then eagerly serialize JSFunctions and
// the constructors of non-context maps.
void NativeContextData::Serialize(JSHeapBroker* broker) {
  if (serialized_) return;
  serialized_ = true;
  TraceScope tracer(broker, this, "NativeContextData::Serialize");
  Handle<NativeContext> context = Handle<NativeContext>::cast(object());
#define SERIALIZE_MEMBER(type, name)                                       \
  DCHECK_NULL(name##_);                                                    \
  name##_ = broker->GetOrCreateData(context->name());                      \
  if (!name##_->should_access_heap()) {                                    \
    if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker); \
    if (name##_->IsMap() &&                                                \
        !InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
      name##_->AsMap()->SerializeConstructor(broker);                      \
    }                                                                      \
  }
  BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
  // Optional fields may not exist yet while the bootstrapper is still
  // setting up the context, so skip them in that case.
  if (!broker->isolate()->bootstrapper()->IsActive()) {
    BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
  }
#undef SERIALIZE_MEMBER
  // The bound-function maps additionally need their prototypes serialized.
  if (!bound_function_with_constructor_map_->should_access_heap()) {
    bound_function_with_constructor_map_->AsMap()->SerializePrototype(broker);
  }
  if (!bound_function_without_constructor_map_->should_access_heap()) {
    bound_function_without_constructor_map_->AsMap()->SerializePrototype(
        broker);
  }
  // Snapshot the contiguous range of function maps from the context.
  DCHECK(function_maps_.empty());
  int const first = Context::FIRST_FUNCTION_MAP_INDEX;
  int const last = Context::LAST_FUNCTION_MAP_INDEX;
  function_maps_.reserve(last + 1 - first);
  for (int i = first; i <= last; ++i) {
    function_maps_.push_back(broker->GetOrCreateData(context->get(i)));
  }
  scope_info_ = broker->GetOrCreateData(context->scope_info());
}
// Serializes the function's broker data; no-op for direct-heap-access refs,
// otherwise only legal in serializing mode.
void JSFunctionRef::Serialize() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsJSFunction()->Serialize(broker());
}
// Direct-heap-access refs are always considered serialized.
bool JSBoundFunctionRef::serialized() const {
  if (data_->should_access_heap()) return true;
  return data()->AsJSBoundFunction()->serialized();
}
bool JSFunctionRef::serialized() const {
  if (data_->should_access_heap()) return true;
  return data()->AsJSFunction()->serialized();
}
// Returns the cached template object for a tagged template call site,
// creating (and, under kSerializeIfNeeded, serializing) it if necessary.
// Lookup order: processed feedback, then direct heap access, then the
// broker's serialized per-slot cache.
JSArrayRef SharedFunctionInfoRef::GetTemplateObject(
    TemplateObjectDescriptionRef description, FeedbackSource const& source,
    SerializationPolicy policy) {
  // First, see if we have processed feedback from the vector, respecting
  // the serialization policy.
  ProcessedFeedback const& feedback =
      policy == SerializationPolicy::kSerializeIfNeeded
          ? broker()->ProcessFeedbackForTemplateObject(source)
          : broker()->GetFeedbackForTemplateObject(source);
  if (!feedback.IsInsufficient()) {
    return feedback.AsTemplateObject().value();
  }
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    Handle<JSArray> template_object =
        TemplateObjectDescription::GetTemplateObject(
            isolate(), broker()->target_native_context().object(),
            description.object(), object(), source.slot.ToInt());
    return JSArrayRef(broker(), template_object);
  }
  // Serialized path: consult the per-slot cache first.
  ObjectData* array =
      data()->AsSharedFunctionInfo()->GetTemplateObject(source.slot);
  if (array != nullptr) return JSArrayRef(broker(), array);
  // Cache miss: only allowed to materialize when serialization is permitted.
  CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  Handle<JSArray> template_object =
      TemplateObjectDescription::GetTemplateObject(
          broker()->isolate(), broker()->target_native_context().object(),
          description.object(), object(), source.slot.ToInt());
  array = broker()->GetOrCreateData(template_object);
  data()->AsSharedFunctionInfo()->SetTemplateObject(source.slot, array);
  return JSArrayRef(broker(), array);
}
// Serializes the SFI's function template info; no-op for direct heap access.
void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
}
// Serializes the SFI's scope-info chain; no-op for direct heap access.
void SharedFunctionInfoRef::SerializeScopeInfoChain() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsSharedFunctionInfo()->SerializeScopeInfoChain(broker());
}
// Returns the SFI's FunctionTemplateInfo, or nullopt when the function is
// not an API function (heap path) or the info was not serialized.
base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
  if (data_->should_access_heap()) {
    // For API functions the function_data field holds the template info.
    if (object()->IsApiFunction()) {
      return FunctionTemplateInfoRef(
          broker(), broker()->CanonicalPersistentHandle(
                        object()->function_data(kAcquireLoad)));
    }
    return base::nullopt;
  }
  ObjectData* function_template_info =
      data()->AsSharedFunctionInfo()->function_template_info();
  if (!function_template_info) return base::nullopt;
  return FunctionTemplateInfoRef(broker(), function_template_info);
}
// Size of the context header for this function's scope, via the scope info.
int SharedFunctionInfoRef::context_header_size() const {
  IF_ACCESS_FROM_HEAP_C(scope_info().ContextHeaderLength);
  return data()->AsSharedFunctionInfo()->context_header_size();
}
// Returns the SFI's scope info, from the heap (canonical persistent handle)
// or from serialized data.
ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
  if (data_->should_access_heap()) {
    AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
                                                          broker()->mode());
    AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
                                                            broker()->mode());
    return ScopeInfoRef(
        broker(), broker()->CanonicalPersistentHandle(object()->scope_info()));
  }
  return ScopeInfoRef(broker(), data()->AsSharedFunctionInfo()->scope_info());
}
// The Serialize* methods below all follow the same pattern: a no-op when the
// ref accesses the heap directly, otherwise they require the broker to be in
// serializing mode and delegate to the corresponding *Data object.
void JSObjectRef::SerializeObjectCreateMap() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsJSObject()->SerializeObjectCreateMap(broker());
}
void MapRef::SerializeOwnDescriptors() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeOwnDescriptors(broker());
}
void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
}
// Whether the given own descriptor has been serialized. Direct-heap-access
// refs are always considered serialized; otherwise look the index up in the
// serialized descriptor-array contents.
bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
  CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
  if (data_->should_access_heap()) return true;
  DescriptorArrayData* desc_array_data =
      data()->AsMap()->instance_descriptors();
  if (!desc_array_data) return false;
  return desc_array_data->contents().find(descriptor_index.as_int()) !=
         desc_array_data->contents().end();
}
void MapRef::SerializeBackPointer() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeBackPointer(broker());
}
void MapRef::SerializePrototype() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializePrototype(broker());
}
bool MapRef::serialized_prototype() const {
  CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
  if (data_->should_access_heap()) return true;
  return data()->AsMap()->serialized_prototype();
}
// Same serialization pattern as above for the remaining Ref types: no-op
// with direct heap access, otherwise requires serializing mode.
void SourceTextModuleRef::Serialize() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsSourceTextModule()->Serialize(broker());
}
void NativeContextRef::Serialize() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsNativeContext()->Serialize(broker());
}
void JSTypedArrayRef::Serialize() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsJSTypedArray()->Serialize(broker());
}
// NOTE: unlike the other serialized() accessors, this one has no
// should_access_heap() fast path; it requires a non-disabled broker.
bool JSTypedArrayRef::serialized() const {
  CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
  return data()->AsJSTypedArray()->serialized();
}
// Returns whether serialization succeeded (always true on the heap path).
bool JSBoundFunctionRef::Serialize() {
  if (data_->should_access_heap()) return true;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  return data()->AsJSBoundFunction()->Serialize(broker());
}
void PropertyCellRef::Serialize() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsPropertyCell()->Serialize(broker());
}
void FunctionTemplateInfoRef::SerializeCallCode() {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
// Looks up the property cell for `name` on the global object: directly from
// the heap when permitted, otherwise from the serialized global-object data
// (respecting the serialization policy). Returns nullopt when absent.
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
    NameRef const& name, SerializationPolicy policy) const {
  if (data_->should_access_heap()) {
    return GetPropertyCellFromHeap(broker(), name.object());
  }
  ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
      broker(), name.data(), policy);
  if (property_cell_data == nullptr) return base::nullopt;
  return PropertyCellRef(broker(), property_cell_data);
}
// Whether element accesses on objects with this map may be lowered to
// inline (non-IC) code: the map must be a JSObject map without access
// checks or indexed interceptors, and its elements kind must be either a
// fast kind or a typed-array kind other than the BigInt-based ones.
bool CanInlineElementAccess(MapRef const& map) {
  if (!map.IsJSObjectMap() || map.is_access_check_needed() ||
      map.has_indexed_interceptor()) {
    return false;
  }
  ElementsKind const kind = map.elements_kind();
  if (IsFastElementsKind(kind)) return true;
  bool const is_bigint_kind =
      kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS;
  return IsTypedArrayElementsKind(kind) && !is_bigint_kind;
}
// Base class carrying the feedback kind and the originating slot kind.
ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
    : kind_(kind), slot_kind_(slot_kind) {}
KeyedAccessMode ElementAccessFeedback::keyed_mode() const {
  return keyed_mode_;
}
ZoneVector<ElementAccessFeedback::TransitionGroup> const&
ElementAccessFeedback::transition_groups() const {
  return transition_groups_;
}
// Produces new feedback restricted to the given inferred maps. Each
// transition group stores its target map at index 0 and source maps after
// it; sources are kept only if inferred, and the target is kept if it is
// inferred itself or if at least two sources survive (so the transition is
// still meaningful). Returns empty feedback when no maps were inferred.
ElementAccessFeedback const& ElementAccessFeedback::Refine(
    ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const {
  ElementAccessFeedback& refined_feedback =
      *zone->New<ElementAccessFeedback>(zone, keyed_mode(), slot_kind());
  if (inferred_maps.empty()) return refined_feedback;
  ZoneUnorderedSet<Handle<Map>, Handle<Map>::hash, Handle<Map>::equal_to>
      inferred(zone);
  inferred.insert(inferred_maps.begin(), inferred_maps.end());
  for (auto const& group : transition_groups()) {
    DCHECK(!group.empty());
    TransitionGroup new_group(zone);
    // Copy over the source maps (indices >= 1) that were inferred.
    for (size_t i = 1; i < group.size(); ++i) {
      Handle<Map> source = group[i];
      if (inferred.find(source) != inferred.end()) {
        new_group.push_back(source);
      }
    }
    Handle<Map> target = group.front();
    bool const keep_target =
        inferred.find(target) != inferred.end() || new_group.size() > 1;
    if (keep_target) {
      new_group.push_back(target);
      // The target must be at the front, the order of sources doesn't matter.
      std::swap(new_group[0], new_group[new_group.size() - 1]);
    }
    if (!new_group.empty()) {
      DCHECK(new_group.size() == 1 || new_group.front().equals(target));
      refined_feedback.transition_groups_.push_back(std::move(new_group));
    }
  }
  return refined_feedback;
}
// Marker feedback for slots with no usable information.
InsufficientFeedback::InsufficientFeedback(FeedbackSlotKind slot_kind)
    : ProcessedFeedback(kInsufficient, slot_kind) {}
// Global access backed by a property cell on the global object.
GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
                                           FeedbackSlotKind slot_kind)
    : ProcessedFeedback(kGlobalAccess, slot_kind),
      cell_or_context_(cell),
      index_and_immutable_(0 /* doesn't matter */) {
  DCHECK(IsGlobalICKind(slot_kind));
}
// Megamorphic global access: no cell or context recorded.
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
    : ProcessedFeedback(kGlobalAccess, slot_kind),
      index_and_immutable_(0 /* doesn't matter */) {
  DCHECK(IsGlobalICKind(slot_kind));
}
// Global access backed by a script context slot; slot index and immutability
// are packed into one word using the FeedbackNexus bit fields.
GlobalAccessFeedback::GlobalAccessFeedback(ContextRef script_context,
                                           int slot_index, bool immutable,
                                           FeedbackSlotKind slot_kind)
    : ProcessedFeedback(kGlobalAccess, slot_kind),
      cell_or_context_(script_context),
      index_and_immutable_(FeedbackNexus::SlotIndexBits::encode(slot_index) |
                           FeedbackNexus::ImmutabilityBit::encode(immutable)) {
  DCHECK_EQ(this->slot_index(), slot_index);
  DCHECK_EQ(this->immutable(), immutable);
  DCHECK(IsGlobalICKind(slot_kind));
}
// The three mutually exclusive states correspond to the three constructors
// above: megamorphic (no value), property cell, or script context slot.
bool GlobalAccessFeedback::IsMegamorphic() const {
  return !cell_or_context_.has_value();
}
bool GlobalAccessFeedback::IsPropertyCell() const {
  return cell_or_context_.has_value() && cell_or_context_->IsPropertyCell();
}
bool GlobalAccessFeedback::IsScriptContextSlot() const {
  return cell_or_context_.has_value() && cell_or_context_->IsContext();
}
PropertyCellRef GlobalAccessFeedback::property_cell() const {
  CHECK(IsPropertyCell());
  return cell_or_context_->AsPropertyCell();
}
ContextRef GlobalAccessFeedback::script_context() const {
  CHECK(IsScriptContextSlot());
  return cell_or_context_->AsContext();
}
// Slot index and immutability are only meaningful for script context slots.
int GlobalAccessFeedback::slot_index() const {
  DCHECK(IsScriptContextSlot());
  return FeedbackNexus::SlotIndexBits::decode(index_and_immutable_);
}
bool GlobalAccessFeedback::immutable() const {
  DCHECK(IsScriptContextSlot());
  return FeedbackNexus::ImmutabilityBit::decode(index_and_immutable_);
}
// Returns a constant the access may be folded to: the cell's value for a
// property-cell access, or the slot's value for an immutable script-context
// slot. Mutable slots and megamorphic accesses yield nothing.
base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
  if (IsPropertyCell()) return property_cell().value();
  if (IsScriptContextSlot() && immutable()) {
    return script_context().get(slot_index());
  }
  return base::nullopt;
}
// Derives the keyed access mode (load/has/store/store-in-literal plus the
// corresponding load or store mode) from the feedback slot kind. Any other
// slot kind is a caller error.
KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
  FeedbackSlotKind kind = nexus.kind();
  if (IsKeyedLoadICKind(kind)) {
    return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode());
  }
  if (IsKeyedHasICKind(kind)) {
    return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode());
  }
  if (IsKeyedStoreICKind(kind)) {
    return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode());
  }
  if (IsStoreInArrayLiteralICKind(kind) ||
      IsStoreDataPropertyInLiteralKind(kind)) {
    return KeyedAccessMode(AccessMode::kStoreInLiteral,
                           nexus.GetKeyedAccessStoreMode());
  }
  UNREACHABLE();
}
AccessMode KeyedAccessMode::access_mode() const { return access_mode_; }
// kLoad and kHas share the load machinery; kStore and kStoreInLiteral share
// the store machinery.
bool KeyedAccessMode::IsLoad() const {
  return access_mode_ == AccessMode::kLoad || access_mode_ == AccessMode::kHas;
}
bool KeyedAccessMode::IsStore() const {
  return access_mode_ == AccessMode::kStore ||
         access_mode_ == AccessMode::kStoreInLiteral;
}
// load_store_mode_ is a union; the CHECKs guard against reading the wrong
// member.
KeyedAccessLoadMode KeyedAccessMode::load_mode() const {
  CHECK(IsLoad());
  return load_store_mode_.load_mode;
}
KeyedAccessStoreMode KeyedAccessMode::store_mode() const {
  CHECK(IsStore());
  return load_store_mode_.store_mode;
}
KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessLoadMode load_mode)
    : load_mode(load_mode) {}
KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessStoreMode store_mode)
    : store_mode(store_mode) {}
// The constructors verify that the access mode matches the mode union member
// being initialized.
KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
                                 KeyedAccessLoadMode load_mode)
    : access_mode_(access_mode), load_store_mode_(load_mode) {
  CHECK(!IsStore());
  CHECK(IsLoad());
}
KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
                                 KeyedAccessStoreMode store_mode)
    : access_mode_(access_mode), load_store_mode_(store_mode) {
  CHECK(!IsLoad());
  CHECK(IsStore());
}
// Feedback for keyed element accesses; transition groups are filled in by
// the feedback processing code.
ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
                                             KeyedAccessMode const& keyed_mode,
                                             FeedbackSlotKind slot_kind)
    : ProcessedFeedback(kElementAccess, slot_kind),
      keyed_mode_(keyed_mode),
      transition_groups_(zone) {
  DCHECK(IsKeyedLoadICKind(slot_kind) || IsKeyedHasICKind(slot_kind) ||
         IsStoreDataPropertyInLiteralKind(slot_kind) ||
         IsKeyedStoreICKind(slot_kind) ||
         IsStoreInArrayLiteralICKind(slot_kind));
}
// True when every map in every transition group is a string map.
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
  for (auto const& group : transition_groups()) {
    for (Handle<Map> map : group) {
      if (!MapRef(broker, map).IsStringMap()) return false;
    }
  }
  return true;
}
// Feedback for minimorphic property loads (all maps share one Smi handler);
// only produced for load ICs.
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
    NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
    ZoneVector<Handle<Map>> const& maps, bool has_migration_target_maps)
    : ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
      name_(name),
      handler_(handler),
      maps_(maps),
      has_migration_target_maps_(has_migration_target_maps) {
  DCHECK(IsLoadICKind(slot_kind));
}
// Feedback for a named property access with a known name and receiver maps.
NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
                                         ZoneVector<Handle<Map>> const& maps,
                                         FeedbackSlotKind slot_kind)
    : ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) {
  DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) ||
         IsStoreOwnICKind(slot_kind) || IsKeyedLoadICKind(slot_kind) ||
         IsKeyedHasICKind(slot_kind) || IsKeyedStoreICKind(slot_kind) ||
         IsStoreInArrayLiteralICKind(slot_kind) ||
         IsStoreDataPropertyInLiteralKind(slot_kind));
}
// Records processed feedback for a source; each source may be set at most
// once (the CHECK on insertion.second enforces this).
void JSHeapBroker::SetFeedback(FeedbackSource const& source,
                               ProcessedFeedback const* feedback) {
  CHECK(source.IsValid());
  auto insertion = feedback_.insert({source, feedback});
  CHECK(insertion.second);
}
bool JSHeapBroker::HasFeedback(FeedbackSource const& source) const {
  DCHECK(source.IsValid());
  return feedback_.find(source) != feedback_.end();
}
// Returns previously recorded feedback; CHECK-fails when absent.
ProcessedFeedback const& JSHeapBroker::GetFeedback(
    FeedbackSource const& source) const {
  DCHECK(source.IsValid());
  auto it = feedback_.find(source);
  CHECK_NE(it, feedback_.end());
  return *it->second;
}
// Returns the slot kind: from already-processed feedback under concurrent
// inlining (no heap access), otherwise by reading the nexus directly.
FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
    FeedbackSource const& source) const {
  if (is_concurrent_inlining_) {
    ProcessedFeedback const& processed = GetFeedback(source);
    return processed.slot_kind();
  }
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  return nexus.kind();
}
// Same split: processed feedback under concurrent inlining, live nexus
// otherwise.
bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
  return is_concurrent_inlining_ ? GetFeedback(source).IsInsufficient()
                                 : FeedbackNexus(source.vector, source.slot,
                                                 feedback_nexus_config())
                                       .IsUninitialized();
}
namespace {
// Remove unupdatable and abandoned prototype maps in-place.
// Note: TryUpdate is used only as a filter here; the *original* handle (*in)
// is kept, not the updated map.
void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
  auto in = maps->begin();
  auto out = in;
  auto end = maps->end();
  for (; in != end; ++in) {
    Handle<Map> map = *in;
    if (Map::TryUpdate(isolate, map).ToHandle(&map) &&
        !map->is_abandoned_prototype_map()) {
      DCHECK(!map->is_deprecated());
      *out = *in;
      ++out;
    }
  }
  // Remove everything between the last valid map and the end of the vector.
  maps->erase(out, end);
}
// For turboprop's dynamic-map-check loads, returns the single Smi field-load
// handler shared by all maps, or an empty handle when the feedback does not
// qualify (non-load, non-Smi/non-field handlers, or differing handlers).
MaybeObjectHandle TryGetMinimorphicHandler(
    std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind,
    Handle<NativeContext> native_context, bool is_turboprop) {
  if (!is_turboprop || !FLAG_turboprop_dynamic_map_checks ||
      !IsLoadICKind(kind)) {
    return MaybeObjectHandle();
  }
  // Don't use dynamic map checks when loading properties from Array.prototype.
  // Using dynamic map checks prevents constant folding and hence does not
  // inline the array builtins. We only care about monomorphic cases here. For
  // polymorphic loads currently we don't inline the builtins even without
  // dynamic map checks.
  if (maps_and_handlers.size() == 1 &&
      *maps_and_handlers[0].first ==
          native_context->initial_array_prototype().map()) {
    return MaybeObjectHandle();
  }
  MaybeObjectHandle initial_handler;
  for (MapAndHandler map_and_handler : maps_and_handlers) {
    auto map = map_and_handler.first;
    MaybeObjectHandle handler = map_and_handler.second;
    if (handler.is_null()) return MaybeObjectHandle();
    DCHECK(!handler->IsCleared());
    // TODO(mythria): extend this to DataHandlers too
    if (!handler.object()->IsSmi()) return MaybeObjectHandle();
    if (LoadHandler::GetHandlerKind(handler.object()->ToSmi()) !=
        LoadHandler::Kind::kField) {
      return MaybeObjectHandle();
    }
    CHECK(!map->IsJSGlobalProxyMap());
    if (initial_handler.is_null()) {
      initial_handler = handler;
    } else if (!handler.is_identical_to(initial_handler)) {
      // All maps must share one handler for the minimorphic fast path.
      return MaybeObjectHandle();
    }
  }
  return initial_handler;
}
// True when any of the maps is marked as a migration target.
bool HasMigrationTargets(const MapHandles& maps) {
  for (Handle<Map> map : maps) {
    if (map->is_migration_target()) return true;
  }
  return false;
}
}  // namespace
// Whether feedback from this nexus may be consumed at all.
bool JSHeapBroker::CanUseFeedback(const FeedbackNexus& nexus) const {
  // TODO(jgruber,v8:8888): Currently, nci code does not use any
  // feedback. This restriction will be relaxed in the future.
  return !is_native_context_independent() && !nexus.IsUninitialized();
}
// Zone-allocates an InsufficientFeedback marker for the given slot kind.
const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback(
    FeedbackSlotKind kind) const {
  return *zone()->New<InsufficientFeedback>(kind);
}
// Reads raw IC feedback for a property access and classifies it as
// minimorphic, named, element, or insufficient feedback.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
    FeedbackSource const& source, AccessMode mode,
    base::Optional<NameRef> static_name) {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  FeedbackSlotKind kind = nexus.kind();
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(kind);
  std::vector<MapAndHandler> maps_and_handlers;
  nexus.ExtractMapsAndFeedback(&maps_and_handlers);
  MapHandles maps;
  for (auto const& entry : maps_and_handlers) {
    maps.push_back(entry.first);
  }
  // Prefer the statically known name (e.g. from the bytecode); otherwise try
  // to extract one from the feedback itself.
  base::Optional<NameRef> name =
      static_name.has_value() ? static_name : GetNameFeedback(nexus);
  // Minimorphic fast path: all maps share one Smi field-load handler.
  MaybeObjectHandle handler = TryGetMinimorphicHandler(
      maps_and_handlers, kind, target_native_context().object(),
      is_turboprop());
  if (!handler.is_null()) {
    return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
        *name, kind, handler.object(),
        ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()),
        HasMigrationTargets(maps));
  }
  FilterRelevantReceiverMaps(isolate(), &maps);
  // If no maps were found for a non-megamorphic access, then our maps died
  // and we should soft-deopt.
  if (maps.empty() && nexus.ic_state() != MEGAMORPHIC) {
    return NewInsufficientFeedback(kind);
  }
  if (name.has_value()) {
    // We rely on this invariant in JSGenericLowering.
    DCHECK_IMPLIES(maps.empty(), nexus.ic_state() == MEGAMORPHIC);
    return *zone()->New<NamedAccessFeedback>(
        *name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind);
  } else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) {
    return ProcessFeedbackMapsForElementAccess(
        maps, KeyedAccessMode::FromNexus(nexus), kind);
  } else {
    // No actionable feedback.
    DCHECK(maps.empty());
    DCHECK_EQ(nexus.ic_state(), MEGAMORPHIC);
    // TODO(neis): Using ElementAccessFeedback here is kind of an abuse.
    return *zone()->New<ElementAccessFeedback>(
        zone(), KeyedAccessMode::FromNexus(nexus), kind);
  }
}
// Reads feedback for a global load/store. Monomorphic feedback is either a
// Smi encoding a script-context slot or a property cell on the global object.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
    FeedbackSource const& source) {
  FeedbackNexus nexus(source.vector, source.slot);
  DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof ||
         nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
         nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
         nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict);
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) {
    // Non-monomorphic or cleared feedback: record a megamorphic access.
    return *zone()->New<GlobalAccessFeedback>(nexus.kind());
  }
  Handle<Object> feedback_value(nexus.GetFeedback()->GetHeapObjectOrSmi(),
                                isolate());
  if (feedback_value->IsSmi()) {
    // The wanted name belongs to a script-scope variable and the feedback
    // tells us where to find its value.
    int number = feedback_value->Number();
    int const script_context_index =
        FeedbackNexus::ContextIndexBits::decode(number);
    int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number);
    bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number);
    Handle<Context> context = ScriptContextTable::GetContext(
        isolate(), target_native_context().script_context_table().object(),
        script_context_index);
    {
      // The slot must already be initialized (not the hole).
      ObjectRef contents(this,
                         handle(context->get(context_slot_index), isolate()));
      CHECK(!contents.equals(
          ObjectRef(this, isolate()->factory()->the_hole_value())));
    }
    ContextRef context_ref(this, context);
    if (immutable) {
      // Eagerly serialize the slot value so it can be constant-folded later.
      context_ref.get(context_slot_index,
                      SerializationPolicy::kSerializeIfNeeded);
    }
    return *zone()->New<GlobalAccessFeedback>(context_ref, context_slot_index,
                                              immutable, nexus.kind());
  }
  CHECK(feedback_value->IsPropertyCell());
  // The wanted name belongs (or did belong) to a property on the global
  // object and the feedback is the cell holding its value.
  PropertyCellRef cell(this, Handle<PropertyCell>::cast(feedback_value));
  cell.Serialize();
  return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind());
}
// The three readers below share a shape: bail out with insufficient feedback
// when the nexus is unusable, otherwise wrap the extracted hint.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
    FeedbackSource const& source) const {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
  DCHECK_NE(hint, BinaryOperationHint::kNone);  // Not uninitialized.
  return *zone()->New<BinaryOperationFeedback>(hint, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
    FeedbackSource const& source) const {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  CompareOperationHint hint = nexus.GetCompareOperationFeedback();
  DCHECK_NE(hint, CompareOperationHint::kNone);  // Not uninitialized.
  return *zone()->New<CompareOperationFeedback>(hint, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
    FeedbackSource const& source) const {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  ForInHint hint = nexus.GetForInFeedback();
  DCHECK_NE(hint, ForInHint::kNone);  // Not uninitialized.
  return *zone()->New<ForInFeedback>(hint, nexus.kind());
}
// Reads instanceof feedback; the constructor is optional since the feedback
// may not record one.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
    FeedbackSource const& source) {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  base::Optional<JSObjectRef> optional_constructor;
  {
    MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback();
    Handle<JSObject> constructor;
    if (maybe_constructor.ToHandle(&constructor)) {
      optional_constructor = JSObjectRef(this, constructor);
    }
  }
  return *zone()->New<InstanceOfFeedback>(optional_constructor, nexus.kind());
}
// Reads the allocation site recorded for an array/object literal and
// serializes its boilerplate when the literal qualifies as fast.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
    FeedbackSource const& source) {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  HeapObject object;
  if (!nexus.GetFeedback()->GetHeapObject(&object)) {
    return NewInsufficientFeedback(nexus.kind());
  }
  AllocationSiteRef site(this, handle(object, isolate()));
  if (site.IsFastLiteral()) {
    site.SerializeBoilerplate();
  }
  return *zone()->New<LiteralFeedback>(site, nexus.kind());
}
// Reads the JSRegExp recorded for a regexp literal and serializes it as a
// boilerplate.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
    FeedbackSource const& source) {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  HeapObject object;
  if (!nexus.GetFeedback()->GetHeapObject(&object)) {
    return NewInsufficientFeedback(nexus.kind());
  }
  JSRegExpRef regexp(this, handle(object, isolate()));
  regexp.SerializeAsRegExpBoilerplate();
  return *zone()->New<RegExpLiteralFeedback>(regexp, nexus.kind());
}
// Reads the cached template object (a JSArray) for a tagged template site.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
    FeedbackSource const& source) {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  HeapObject object;
  if (!nexus.GetFeedback()->GetHeapObject(&object)) {
    return NewInsufficientFeedback(nexus.kind());
  }
  JSArrayRef array(this, handle(object, isolate()));
  return *zone()->New<TemplateObjectFeedback>(array, nexus.kind());
}
// Reads call-site feedback: the optional call target plus the site's call
// frequency and speculation mode.
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
    FeedbackSource const& source) {
  FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
  if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
  base::Optional<HeapObjectRef> target_ref;
  {
    MaybeObject maybe_target = nexus.GetFeedback();
    HeapObject target_object;
    if (maybe_target->GetHeapObject(&target_object)) {
      target_ref = HeapObjectRef(this, handle(target_object, isolate()));
    }
  }
  float frequency = nexus.ComputeCallFrequency();
  SpeculationMode mode = nexus.GetSpeculationMode();
  return *zone()->New<CallFeedback>(target_ref, frequency, mode, nexus.kind());
}
// Returns the binary-operation hint for `source`, or kNone when the
// collected feedback is insufficient. Under concurrent inlining the
// feedback must already have been processed (during serialization).
BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
    FeedbackSource const& source) {
  ProcessedFeedback const& processed =
      is_concurrent_inlining_ ? GetFeedback(source)
                              : ProcessFeedbackForBinaryOperation(source);
  if (processed.IsInsufficient()) return BinaryOperationHint::kNone;
  return processed.AsBinaryOperation().value();
}
// Returns the compare-operation hint for `source`, or kNone when the
// collected feedback is insufficient.
CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation(
    FeedbackSource const& source) {
  ProcessedFeedback const& processed =
      is_concurrent_inlining_ ? GetFeedback(source)
                              : ProcessFeedbackForCompareOperation(source);
  if (processed.IsInsufficient()) return CompareOperationHint::kNone;
  return processed.AsCompareOperation().value();
}
// Returns the for-in hint for `source`, or kNone when the collected
// feedback is insufficient.
ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
  ProcessedFeedback const& processed =
      is_concurrent_inlining_ ? GetFeedback(source)
                              : ProcessFeedbackForForIn(source);
  if (processed.IsInsufficient()) return ForInHint::kNone;
  return processed.AsForIn().value();
}
// The dispatchers below select between the pre-populated feedback cache
// (concurrent inlining) and on-demand processing (main-thread compilation).
ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
    FeedbackSource const& source, AccessMode mode,
    base::Optional<NameRef> static_name) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForPropertyAccess(source, mode, static_name);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
    FeedbackSource const& source) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForInstanceOf(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
    FeedbackSource const& source) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForCall(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
    FeedbackSource const& source) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForGlobalAccess(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
    FeedbackSource const& source) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForArrayOrObjectLiteral(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
    FeedbackSource const& source) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForRegExpLiteral(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
    FeedbackSource const& source) {
  if (is_concurrent_inlining_) return GetFeedback(source);
  return ProcessFeedbackForTemplateObject(source);
}
// Each ProcessFeedbackForX helper memoizes: a cache hit short-circuits,
// otherwise the feedback is read from the heap and stored for later lookups.
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result =
      ReadFeedbackForArrayOrObjectLiteral(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForRegExpLiteral(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForTemplateObject(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForBinaryOperation(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForBinaryOperation(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCompareOperation(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForCompareOperation(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForForIn(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess(
    FeedbackSource const& source, AccessMode mode,
    base::Optional<NameRef> static_name) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result =
      ReadFeedbackForPropertyAccess(source, mode, static_name);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForInstanceOf(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForCall(source);
  SetFeedback(source, &result);
  return result;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);
  ProcessedFeedback const& result = ReadFeedbackForGlobalAccess(source);
  SetFeedback(source, &result);
  return result;
}
// Groups the receiver maps of an element access by elements-kind transition
// target: each group's head is a transition target (or a stable/ungrouped
// map) and its tail the maps that transition into it.
ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
    MapHandles const& maps, KeyedAccessMode const& keyed_mode,
    FeedbackSlotKind slot_kind) {
  DCHECK(!maps.empty());
  // Collect possible transition targets.
  MapHandles possible_transition_targets;
  possible_transition_targets.reserve(maps.size());
  for (Handle<Map> map : maps) {
    MapRef map_ref(this, map);
    map_ref.SerializeRootMap();
    // Only inlinable fast-elements maps that are not already at the initial
    // fast elements kind can serve as transition targets.
    if (CanInlineElementAccess(map_ref) &&
        IsFastElementsKind(map->elements_kind()) &&
        GetInitialFastElementsKind() != map->elements_kind()) {
      possible_transition_targets.push_back(map);
    }
  }
  using TransitionGroup = ElementAccessFeedback::TransitionGroup;
  ZoneUnorderedMap<Handle<Map>, TransitionGroup, Handle<Map>::hash,
                   Handle<Map>::equal_to>
      transition_groups(zone());
  // Separate the actual receiver maps and the possible transition sources.
  for (Handle<Map> map : maps) {
    // Don't generate elements kind transitions from stable maps.
    Map transition_target = map->is_stable()
                                ? Map()
                                : map->FindElementsKindTransitionedMap(
                                      isolate(), possible_transition_targets);
    if (transition_target.is_null()) {
      // No transition: the map forms (or joins) a singleton group of itself.
      TransitionGroup group(1, map, zone());
      transition_groups.insert({map, group});
    } else {
      // Append the source map to its target's group, creating the group
      // (with the target as head) if this is the first source seen for it.
      Handle<Map> target(transition_target, isolate());
      TransitionGroup new_group(1, target, zone());
      TransitionGroup& actual_group =
          transition_groups.insert({target, new_group}).first->second;
      actual_group.push_back(map);
    }
  }
  ElementAccessFeedback* result =
      zone()->New<ElementAccessFeedback>(zone(), keyed_mode, slot_kind);
  for (auto entry : transition_groups) {
    result->AddGroup(std::move(entry.second));
  }
  CHECK(!result->transition_groups().empty());
  return *result;
}
// Appends a non-empty transition group to this feedback object.
// Under slow DCHECKs, also verifies that each map in the new group occurs
// exactly once across all stored groups ("a source is not a target").
void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
  CHECK(!group.empty());
  transition_groups_.push_back(std::move(group));
#ifdef ENABLE_SLOW_DCHECKS
  // Check that each of the group's maps occurs exactly once in the whole
  // feedback. This implies that "a source is not a target".
  // BUG FIX: the previous code iterated `group` here, but `group` was
  // moved-from just above, so the loop saw a (likely empty) vector and the
  // check was vacuous. Iterate the group as actually stored instead.
  for (Handle<Map> map : transition_groups_.back()) {
    int count = 0;
    for (TransitionGroup const& some_group : transition_groups()) {
      count += std::count_if(
          some_group.begin(), some_group.end(),
          [&](Handle<Map> some_map) { return some_map.equals(map); });
    }
    CHECK_EQ(count, 1);
  }
#endif
}
// Streams a human-readable description of `ref`. Printing the underlying
// heap object requires dereferencing its handle, which is only done when it
// is provably safe for the current thread/broker mode.
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
  if (!FLAG_concurrent_recompilation) {
    // We cannot be in a background thread so it's safe to read the heap.
    AllowHandleDereference allow_handle_dereference;
    return os << ref.data() << " {" << ref.object() << "}";
  } else if (ref.data_->should_access_heap()) {
    AllowHandleDereferenceIfNeeded allow_handle_dereference(
        ref.data()->kind(), ref.broker()->mode());
    return os << ref.data() << " {" << ref.object() << "}";
  } else {
    // Concurrent mode without heap access: print only the broker-side data.
    return os << ref.data();
  }
}
// Returns the Name recorded in the nexus, or nullopt when none was recorded.
base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
    FeedbackNexus const& nexus) {
  Name raw_name = nexus.GetName();
  if (raw_name.is_null()) return base::nullopt;
  return NameRef(this, handle(raw_name, isolate()));
}
// Returns (and under concurrent inlining caches) the PropertyAccessInfo for
// accessing `name` on `map`. With kAssumeSerialized policy a cache miss is a
// broker-missing event and yields an Invalid() info instead of computing.
PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
    MapRef map, NameRef name, AccessMode access_mode,
    CompilationDependencies* dependencies, SerializationPolicy policy) {
  PropertyAccessTarget target({map, name, access_mode});
  auto it = property_access_infos_.find(target);
  if (it != property_access_infos_.end()) return it->second;
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_BROKER_MISSING(this, "PropertyAccessInfo for "
                                   << access_mode << " of property " << name
                                   << " on map " << map);
    return PropertyAccessInfo::Invalid(zone());
  }
  // Computing an access info may register compilation dependencies.
  CHECK_NOT_NULL(dependencies);
  AccessInfoFactory factory(this, dependencies, zone());
  PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(
      map.object(), name.object(), access_mode);
  if (is_concurrent_inlining_) {
    // Only the serializing (main-thread) phase may populate the cache.
    CHECK_EQ(mode(), kSerializing);
    TRACE(this, "Storing PropertyAccessInfo for "
                    << access_mode << " of property " << name << " on map "
                    << map);
    property_access_infos_.insert({target, access_info});
  }
  return access_info;
}
// Minimorphic-load variant of the above: keyed by feedback source rather
// than (map, name, mode), and never registers dependencies.
MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
    MinimorphicLoadPropertyAccessFeedback const& feedback,
    FeedbackSource const& source, SerializationPolicy policy) {
  auto it = minimorphic_property_access_infos_.find(source);
  if (it != minimorphic_property_access_infos_.end()) return it->second;
  if (policy == SerializationPolicy::kAssumeSerialized) {
    TRACE_BROKER_MISSING(this, "MinimorphicLoadPropertyAccessInfo for slot "
                                   << source.index() << "  "
                                   << ObjectRef(this, source.vector));
    return MinimorphicLoadPropertyAccessInfo::Invalid();
  }
  AccessInfoFactory factory(this, nullptr, zone());
  MinimorphicLoadPropertyAccessInfo access_info =
      factory.ComputePropertyAccessInfo(feedback);
  if (is_concurrent_inlining_) {
    TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
                    << source.index() << "  "
                    << ObjectRef(this, source.vector));
    minimorphic_property_access_infos_.insert({source, access_info});
  }
  return access_info;
}
// Checked downcasts from ProcessedFeedback to its concrete subclasses. Each
// accessor CHECKs the dynamic kind tag before the static_cast, so a kind
// mismatch fails fast instead of yielding a misinterpreted object.
BinaryOperationFeedback const& ProcessedFeedback::AsBinaryOperation() const {
  CHECK_EQ(kBinaryOperation, kind());
  return *static_cast<BinaryOperationFeedback const*>(this);
}
CallFeedback const& ProcessedFeedback::AsCall() const {
  CHECK_EQ(kCall, kind());
  return *static_cast<CallFeedback const*>(this);
}
CompareOperationFeedback const& ProcessedFeedback::AsCompareOperation() const {
  CHECK_EQ(kCompareOperation, kind());
  return *static_cast<CompareOperationFeedback const*>(this);
}
ElementAccessFeedback const& ProcessedFeedback::AsElementAccess() const {
  CHECK_EQ(kElementAccess, kind());
  return *static_cast<ElementAccessFeedback const*>(this);
}
ForInFeedback const& ProcessedFeedback::AsForIn() const {
  CHECK_EQ(kForIn, kind());
  return *static_cast<ForInFeedback const*>(this);
}
GlobalAccessFeedback const& ProcessedFeedback::AsGlobalAccess() const {
  CHECK_EQ(kGlobalAccess, kind());
  return *static_cast<GlobalAccessFeedback const*>(this);
}
InstanceOfFeedback const& ProcessedFeedback::AsInstanceOf() const {
  CHECK_EQ(kInstanceOf, kind());
  return *static_cast<InstanceOfFeedback const*>(this);
}
NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
  CHECK_EQ(kNamedAccess, kind());
  return *static_cast<NamedAccessFeedback const*>(this);
}
MinimorphicLoadPropertyAccessFeedback const&
ProcessedFeedback::AsMinimorphicPropertyAccess() const {
  CHECK_EQ(kMinimorphicPropertyAccess, kind());
  return *static_cast<MinimorphicLoadPropertyAccessFeedback const*>(this);
}
LiteralFeedback const& ProcessedFeedback::AsLiteral() const {
  CHECK_EQ(kLiteral, kind());
  return *static_cast<LiteralFeedback const*>(this);
}
RegExpLiteralFeedback const& ProcessedFeedback::AsRegExpLiteral() const {
  CHECK_EQ(kRegExpLiteral, kind());
  return *static_cast<RegExpLiteralFeedback const*>(this);
}
TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const {
  CHECK_EQ(kTemplateObject, kind());
  return *static_cast<TemplateObjectFeedback const*>(this);
}
// Returns the (memoized) bytecode analysis for `bytecode_array`, computing
// and caching it when the policy permits. At most one analysis is stored per
// bytecode array; see the invariant comments below for when reuse is legal.
BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
    Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
    bool analyze_liveness, SerializationPolicy policy) {
  ObjectData* bytecode_array_data = GetOrCreateData(bytecode_array);
  CHECK_NOT_NULL(bytecode_array_data);
  auto it = bytecode_analyses_.find(bytecode_array_data);
  if (it != bytecode_analyses_.end()) {
    // Bytecode analysis can be run for OSR or for non-OSR. In the rare case
    // where we optimize for OSR and consider the top-level function itself for
    // inlining (because of recursion), we need both the OSR and the non-OSR
    // analysis. Fortunately, the only difference between the two lies in
    // whether the OSR entry offset gets computed (from the OSR bailout id).
    // Hence it's okay to reuse the OSR-version when asked for the non-OSR
    // version, such that we need to store at most one analysis result per
    // bytecode array.
    CHECK_IMPLIES(osr_bailout_id != it->second->osr_bailout_id(),
                  osr_bailout_id.IsNone());
    CHECK_EQ(analyze_liveness, it->second->liveness_analyzed());
    return *it->second;
  }
  // Cache miss: only legal when we're allowed to serialize on demand.
  CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
  BytecodeAnalysis* analysis = zone()->New<BytecodeAnalysis>(
      bytecode_array, zone(), osr_bailout_id, analyze_liveness);
  DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id);
  bytecode_analyses_[bytecode_array_data] = analysis;
  return *analysis;
}
// Checks stack overflow against the appropriate isolate: the local (background
// thread) isolate when present, otherwise the main isolate (in which case we
// must actually be on the main thread).
bool JSHeapBroker::StackHasOverflowed() const {
  DCHECK_IMPLIES(local_isolate_ == nullptr,
                 ThreadId::Current() == isolate_->thread_id());
  return (local_isolate_ != nullptr)
             ? StackLimitCheck::HasOverflowed(local_isolate_)
             : StackLimitCheck(isolate_).HasOverflowed();
}
// OffHeapBytecodeArray: a thin adapter that exposes a BytecodeArrayRef
// through the abstract bytecode-array interface; every accessor delegates to
// the underlying ref. It is read-only: set() is unreachable.
OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
    : array_(bytecode_array) {}
int OffHeapBytecodeArray::length() const { return array_.length(); }
int OffHeapBytecodeArray::parameter_count() const {
  return array_.parameter_count();
}
uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
  return array_.GetFirstBytecodeAddress();
}
Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
    int index, Isolate* isolate) const {
  return array_.GetConstantAtIndex(index);
}
bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
  return array_.IsConstantAtIndexSmi(index);
}
Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
  return array_.GetConstantAtIndexAsSmi(index);
}
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
#undef TRACE
#undef TRACE_MISSING
} // namespace compiler
} // namespace internal
} // namespace v8
|
extern "C" {
#include "csuperglue.h"
}
#include "sg/superglue.hpp"
#ifdef SG_LOGGING
#include "sg/option/instr_trace.hpp"
#endif
#include "sg/platform/gettime.hpp"
#include <cassert>
#include <map>
#include <vector>
#include <stdarg.h>
// SuperGlue configuration for the C wrapper: execution can start paused
// (sg_init_paused / sg_execute); with SG_LOGGING, tasks carry names and the
// trace instrumentation is enabled.
struct Options : public DefaultOptions<Options> {
    typedef Enable PauseExecution;
#ifdef SG_LOGGING
    typedef Enable TaskName;
    typedef Trace<Options> Instrumentation;
#endif
};
// Global runtime instance: created by sg_init()/sg_init_paused(), destroyed
// by sg_destroy(). All sg_* entry points assume it is non-null.
SuperGlue<Options> *superglue;
// Common base for tasks created through the C API: stores the C callback
// and its argument pointer and invokes the callback when scheduled.
class CTaskBase : public Task<Options> {
protected:
    sg_task_function function; // C callback invoked by run().
    void *args;                // Argument pointer handed to the callback.
public:
    CTaskBase(sg_task_function function_, void *args_)
    : function(function_), args(args_)
    {}
    virtual ~CTaskBase() {}
    void run() {
        function(args);
    }
};
#ifdef SG_LOGGING
// CTask owns a private copy of the caller's argument block (argsize bytes);
// this logging build also records a task name for the trace output.
class CTask : public CTaskBase {
protected:
    std::string name;
public:
    CTask(sg_task_function function, void *args_, size_t argsize, const char *name_)
    : CTaskBase(function, new char[argsize]), name(name_ == NULL ? "" : name_) {
        // Copy the arguments so the caller's buffer may be freed immediately.
        memcpy(args, args_, argsize);
    }
    virtual ~CTask() { delete [] (char *) args; }
    std::string get_name() { return name; }
};
// CInplaceTask borrows the caller's argument pointer without copying; the
// caller must keep it alive until the task has run.
class CInplaceTask : public CTaskBase {
protected:
    std::string name;
public:
    CInplaceTask(sg_task_function function, void *args, const char *name_)
    : CTaskBase(function, args), name(name_ == NULL ? "" : name_) {}
    std::string get_name() { return name; }
};
#else // SG_LOGGING
// Non-logging variants: identical ownership semantics, no name storage.
class CTask : public CTaskBase {
public:
    CTask(sg_task_function function, void *args_, size_t argsize, const char *)
    : CTaskBase(function, new char[argsize]) {
        memcpy(args, args_, argsize);
    }
    virtual ~CTask() { delete [] (char *) args; }
};
class CInplaceTask : public CTaskBase {
public:
    CInplaceTask(sg_task_function function, void *args, const char *)
    : CTaskBase(function, args) {}
};
#endif // SG_LOGGING
// Allocates a task that takes a private copy of the argument block
// (argsize bytes). The returned handle is owned by the scheduler once
// submitted.
extern "C" sg_task_t sg_create_task(sg_task_function function, void *args, size_t argsize, const char *name) {
    return (sg_task_t) new CTask(function, args, argsize, name);
}
// Allocates a task that borrows the caller's argument pointer (no copy);
// the caller must keep the pointer valid until the task runs.
extern "C" sg_task_t sg_create_inplace_task(sg_task_function function, void *args, const char *name) {
    return (sg_task_t) new CInplaceTask(function, args, name);
}
// Registers a data dependency of a task on a handle.
// BUG FIX: an sg_task_t may refer to either a CTask or a CInplaceTask (see
// sg_create_inplace_task), so the previous cast to CTask* was an invalid
// downcast for inplace tasks (undefined behavior). Cast to the shared base
// CTaskBase, which provides register_access via Task<Options>.
extern "C" void sg_register_access(sg_task_t task_, enum sg_access_type type, sg_handle_t handle_) {
    CTaskBase *task = (CTaskBase *) task_;
    Handle<Options> *handle = (Handle<Options> *) handle_;
    // sg_access_type values are 1-based; ReadWriteAdd::Type is 0-based.
    task->register_access((ReadWriteAdd::Type) (type-1), *handle);
}
// Hands a previously created task to the scheduler, transferring ownership.
// Same fix as above: cast to the common base, not CTask.
extern "C" void sg_submit_task(sg_task_t task_) {
    CTaskBase *task = (CTaskBase *) task_;
    superglue->submit(task);
}
// Creates a copying task, registers its dependencies, and submits it in one
// call. The variadic tail is a 0-terminated sequence of
// (sg_access_type, sg_handle_t) pairs.
extern "C" void sg_submit(sg_task_function function, void *args, size_t argsize, const char *name, ...) {
    CTask *task = new CTask(function, args, argsize, name);
    va_list deps;
    va_start(deps, name);
    int type;
    while ((type = va_arg(deps, int)) != 0) {
        Handle<Options> *handle = va_arg(deps, Handle<Options> *);
        // sg_access_type is 1-based; ReadWriteAdd::Type is 0-based.
        task->register_access((ReadWriteAdd::Type) (type-1), *handle);
    }
    va_end(deps);
    superglue->submit(task);
}
// Same as sg_submit but the task borrows the argument pointer instead of
// copying it; the caller keeps the pointer alive until the task runs.
extern "C" void sg_submit_inplace(sg_task_function function, void *args, const char *name, ...) {
    CInplaceTask *task = new CInplaceTask(function, args, name);
    va_list deps;
    va_start(deps, name);
    int type;
    while ((type = va_arg(deps, int)) != 0) {
        Handle<Options> *handle = va_arg(deps, Handle<Options> *);
        task->register_access((ReadWriteAdd::Type) (type-1), *handle);
    }
    va_end(deps);
    superglue->submit(task);
}
// Blocks until every task that accesses `handle` has completed.
extern "C" void sg_wait(sg_handle_t handle) {
    superglue->wait(*(Handle<Options> *) handle);
}
// Blocks until all submitted tasks have completed.
extern "C" void sg_barrier() {
    superglue->barrier();
}
// Allocates `num` fresh data handles; release with sg_destroy_handles().
extern "C" sg_handle_t *sg_create_handles(int num) {
    sg_handle_t *mem = new sg_handle_t[num];
    for (int i = 0; i < num; ++i)
        mem[i] = (sg_handle_t) new Handle<Options>();
    return mem;
}
// Frees handles previously returned by sg_create_handles().
extern "C" void sg_destroy_handles(sg_handle_t *handles, int num) {
    for (int i = 0; i < num; ++i)
        delete (Handle<Options> *) handles[i];
    delete [] handles;
}
// Creates the global runtime and starts executing tasks immediately.
extern "C" void sg_init() {
    superglue = new SuperGlue<Options>();
    superglue->start_executing();
}
// Creates the global runtime in the paused state; call sg_execute() to run.
extern "C" void sg_init_paused() {
    superglue = new SuperGlue<Options>();
}
// Resumes execution after sg_init_paused().
extern "C" void sg_execute() {
    superglue->start_executing();
}
// Tears down the global runtime created by sg_init()/sg_init_paused().
extern "C" void sg_destroy() {
    delete superglue;
}
// Dumps the instrumentation trace to `filename` (no-op without SG_LOGGING).
extern "C" void sg_write_log(const char *filename) {
#ifdef SG_LOGGING
    Options::Instrumentation::dump(filename);
#endif
}
// Returns the platform timestamp used by the trace log.
extern "C" unsigned long long sg_get_time() {
    return Time::getTime();
}
// Records a named interval in the trace log (no-op without SG_LOGGING).
extern "C" void sg_log(const char *name, unsigned long long start, unsigned long long stop) {
#ifdef SG_LOGGING
    Log<Options>::log(name, start, stop);
#endif
}
|
/***
*
* Copyright (c) 1996-2001, Valve LLC. All rights reserved.
*
* This product contains software technology licensed from Id
* Software, Inc. ("Id Technology"). Id Technology (c) 1996 Id Software, Inc.
* All Rights Reserved.
*
* Use, distribution, and modification of this source code and/or resulting
* object code is restricted to non-commercial enhancements to products from
* Valve LLC. All other use, distribution, or modification is prohibited
* without written permission from Valve LLC.
*
****/
#include <utility>
#include "extdll.h"
#include "util.h"
#include "cbase.h"
#include "gamerules.h"
#include "CRopeSample.h"
#include "CRopeSegment.h"
#include "CRope.h"
// Creak sounds played at random while the rope is swinging (see Creak());
// precached in CRope::Precache() via PRECACHE_SOUND_ARRAY.
static const char* const g_pszCreakSounds[] =
{
	"items/rope1.wav",
	"items/rope2.wav",
	"items/rope3.wav"
};
// Save/restore table. Note that the simulation scratch buffers (m_TempSys,
// m_pSprings) and m_bSpringsInitialized are deliberately absent: they are
// rebuilt in Restore()/Think() rather than persisted.
TYPEDESCRIPTION	CRope::m_SaveData[] =
{
	DEFINE_FIELD( CRope, m_uiSegments, FIELD_INTEGER ),
	DEFINE_FIELD( CRope, m_bToggle, FIELD_BOOLEAN ),
	DEFINE_FIELD( CRope, m_bInitialDeltaTime, FIELD_BOOLEAN ),
	DEFINE_FIELD( CRope, m_flLastTime, FIELD_TIME ),
	DEFINE_FIELD( CRope, m_vecLastEndPos, FIELD_POSITION_VECTOR ),
	DEFINE_FIELD( CRope, m_vecGravity, FIELD_VECTOR ),
	DEFINE_FIELD( CRope, m_flHookConstant, FIELD_FLOAT ),
	DEFINE_FIELD( CRope, m_flSpringDampning, FIELD_FLOAT ),
	DEFINE_FIELD( CRope, m_uiNumSamples, FIELD_INTEGER ),
	DEFINE_FIELD( CRope, m_SpringCnt, FIELD_INTEGER ),
	DEFINE_FIELD( CRope, m_bObjectAttached, FIELD_BOOLEAN ),
	DEFINE_FIELD( CRope, m_uiAttachedObjectsSegment, FIELD_INTEGER ),
	DEFINE_FIELD( CRope, m_flDetachTime, FIELD_TIME ),
	DEFINE_ARRAY( CRope, seg, FIELD_CLASSPTR, CRope::MAX_SEGMENTS ),
	DEFINE_ARRAY( CRope, altseg, FIELD_CLASSPTR, CRope::MAX_SEGMENTS ),
	DEFINE_ARRAY( CRope, m_CurrentSys, FIELD_CLASSPTR, CRope::MAX_SAMPLES ),
	DEFINE_ARRAY( CRope, m_TargetSys, FIELD_CLASSPTR, CRope::MAX_SAMPLES ),
	DEFINE_FIELD( CRope, m_bDisallowPlayerAttachment, FIELD_BOOLEAN ),
	DEFINE_FIELD( CRope, m_iszBodyModel, FIELD_STRING ),
	DEFINE_FIELD( CRope, m_iszEndingModel, FIELD_STRING ),
	DEFINE_FIELD( CRope, m_flAttachedObjectsOffset, FIELD_FLOAT ),
	DEFINE_FIELD( CRope, m_bMakeSound, FIELD_BOOLEAN ),
};
LINK_ENTITY_TO_CLASS( env_rope, CRope );
CRope::CRope()
{
	// Default both segment models; the "bodymodel"/"endingmodel" keyvalues
	// can override these in KeyValue().
	m_iszBodyModel = MAKE_STRING( "models/rope16.mdl" );
	m_iszEndingModel = MAKE_STRING( "models/rope16.mdl" );
}
CRope::~CRope()
{
	// Free the RK4 scratch buffers allocated in InitializeRopeSim()/Restore()
	// and the spring array from InitializeSprings().
	// NOTE(review): this assumes m_TempSys and m_pSprings are null-initialized
	// in the class declaration; if an entity is destroyed before they are
	// allocated, these deletes would otherwise hit uninitialized pointers —
	// confirm in the header.
	for( size_t uiIndex = 0; uiIndex < MAX_TEMP_SAMPLES; ++uiIndex )
	{
		delete[] m_TempSys[ uiIndex ];
		m_TempSys[ uiIndex ] = nullptr;
	}
	delete[] m_pSprings;
}
void CRope::KeyValue( KeyValueData* pkvd )
{
	// Parse rope-specific spawn keyvalues; anything unrecognized is handed
	// to the base entity class.
	if( FStrEq( pkvd->szKeyName, "segments" ) )
	{
		m_uiSegments = strtoul( pkvd->szValue, nullptr, 10 );
		// Clamp to the largest supported segment count.
		if( m_uiSegments >= MAX_SEGMENTS )
			m_uiSegments = MAX_SEGMENTS - 1;
		pkvd->fHandled = true;
	}
	else if( FStrEq( pkvd->szKeyName, "bodymodel" ) )
	{
		m_iszBodyModel = ALLOC_STRING( pkvd->szValue );
		pkvd->fHandled = true;
	}
	else if( FStrEq( pkvd->szKeyName, "endingmodel" ) )
	{
		m_iszEndingModel = ALLOC_STRING( pkvd->szValue );
		pkvd->fHandled = true;
	}
	else if( FStrEq( pkvd->szKeyName, "disable" ) )
	{
		// Nonzero disables player attachment to this rope.
		m_bDisallowPlayerAttachment = strtol( pkvd->szValue, nullptr, 10 ) != 0;
		pkvd->fHandled = true;
	}
	else
	{
		BaseClass::KeyValue( pkvd );
	}
}
// Precaches the helper entities spawned per segment/sample and the creak
// sound set used by Creak().
void CRope::Precache()
{
	BaseClass::Precache();
	UTIL_PrecacheOther( "rope_segment" );
	UTIL_PrecacheOther( "rope_sample" );
	PRECACHE_SOUND_ARRAY( g_pszCreakSounds );
}
// Creates the sample and segment entities that make up the rope, positions
// them hanging along the (normalized) gravity direction, and kicks off
// simulation thinking.
// NOTE(review): the code below indexes seg[m_uiSegments - 2] and assumes at
// least one interior segment, i.e. m_uiSegments >= 2; smaller values would
// underflow the size_t indices — confirm map data guarantees this.
void CRope::Spawn()
{
	m_bMakeSound = true;
	Precache();
	m_bSpringsInitialized = false;
	// Default gravity: straight down, magnitude 50.
	m_vecGravity.x = m_vecGravity.y = 0;
	m_vecGravity.z = -50;
	m_bObjectAttached = false;
	pev->flags |= FL_ALWAYSTHINK;
	// One sample per segment joint, plus the free end.
	m_uiNumSamples = m_uiSegments + 1;
	for( size_t uiSample = 0; uiSample < m_uiNumSamples; ++uiSample )
	{
		m_CurrentSys[ uiSample ] = CRopeSample::CreateSample();
		m_CurrentSys[ uiSample ]->SetMasterRope( this );
	}
	// Null out the unused tail of the fixed-size sample array.
	memset( m_CurrentSys + m_uiNumSamples, 0, sizeof( CRopeSample* ) * ( MAX_SAMPLES - m_uiNumSamples ) );
	{
		// Two segment entity sets (seg/altseg) are flip-flopped each Think to
		// hide interpolation artifacts; both anchors sit at the rope origin.
		CRopeSegment* pSegment = seg[ 0 ] = CRopeSegment::CreateSegment( m_CurrentSys[ 0 ], GetBodyModel() );
		UTIL_SetOrigin( pSegment->pev, pev->origin );
		pSegment = altseg[ 0 ] = CRopeSegment::CreateSegment( m_CurrentSys[ 0 ], GetBodyModel() );
		UTIL_SetOrigin( pSegment->pev, pev->origin );
	}
	Vector origin;
	Vector angles;
	const Vector vecGravity = m_vecGravity.Normalize();
	if( m_uiSegments > 2 )
	{
		//CRopeSample** ppCurrentSys = m_CurrentSys;
		// Interior segments: each is placed one attachment-length further
		// along the gravity direction from its predecessor.
		for( size_t uiSeg = 1; uiSeg < m_uiSegments - 1; ++uiSeg )
		{
			CRopeSample* pSegSample = m_CurrentSys[ uiSeg ];
			seg[ uiSeg ] = CRopeSegment::CreateSegment( pSegSample, GetBodyModel() );
			altseg[ uiSeg ] = CRopeSegment::CreateSegment( pSegSample, GetBodyModel() );
			CRopeSegment* pCurrent = seg[ uiSeg - 1 ];
			pCurrent->GetAttachment( 0, origin, angles );
			Vector vecPos = origin - pCurrent->pev->origin;
			const float flLength = vecPos.Length();
			origin = flLength * vecGravity + pCurrent->pev->origin;
			UTIL_SetOrigin( seg[ uiSeg ]->pev, origin );
			UTIL_SetOrigin( altseg[ uiSeg ]->pev, origin );
		}
	}
	// Final segment uses the ending model (e.g. a different rope tip).
	CRopeSample* pSegSample = m_CurrentSys[ m_uiSegments - 1 ];
	seg[ m_uiSegments - 1 ] = CRopeSegment::CreateSegment( pSegSample, GetEndingModel() );
	altseg[ m_uiSegments - 1 ] = CRopeSegment::CreateSegment( pSegSample, GetEndingModel() );
	CRopeSegment* pCurrent = seg[ m_uiSegments - 2 ];
	pCurrent->GetAttachment( 0, origin, angles );
	Vector vecPos = origin - pCurrent->pev->origin;
	const float flLength = vecPos.Length();
	origin = flLength * vecGravity + pCurrent->pev->origin;
	UTIL_SetOrigin(seg[ m_uiSegments - 1 ]->pev, origin );
	UTIL_SetOrigin(altseg[ m_uiSegments - 1 ]->pev, origin );
	// Null out unused tails of the fixed-size segment arrays.
	memset( seg + m_uiSegments, 0, sizeof( CRopeSegment* ) * ( MAX_SEGMENTS - m_uiSegments ) );
	memset( altseg + m_uiSegments, 0, sizeof( CRopeSegment* ) * ( MAX_SEGMENTS - m_uiSegments ) );
	memset( m_TempSys, 0, sizeof( m_TempSys ) );
	m_SpringCnt = 0;
	m_bInitialDeltaTime = true;
	// Spring stiffness and damping used when InitializeSprings() runs.
	m_flHookConstant = 2500;
	m_flSpringDampning = 0.1;
	InitializeRopeSim();
	pev->nextthink = gpGlobals->time + 0.01;
// Per-frame update: lazily builds the spring network, advances the physics,
// swaps which segment entity set is visible, and occasionally creaks.
void CRope::Think()
{
	if( !m_bSpringsInitialized )
	{
		// Deferred to the first Think so restored saves also rebuild springs.
		InitializeSprings( m_uiSegments );
	}
	// Alternate seg/altseg each frame; the hidden set is repositioned.
	m_bToggle = !m_bToggle;
	RunSimOnSamples();
	CRopeSegment** ppPrimarySegs;
	CRopeSegment** ppHiddenSegs;
	if( m_bToggle )
	{
		ppPrimarySegs = altseg;
		ppHiddenSegs = seg;
	}
	else
	{
		ppPrimarySegs = seg;
		ppHiddenSegs = altseg;
	}
	SetRopeSegments( m_uiSegments, ppPrimarySegs, ppHiddenSegs );
	if( ShouldCreak() )
	{
		Creak();
	}
	pev->nextthink = gpGlobals->time + 0.001;
}
// Ropes ignore touch events; attachment is handled elsewhere.
void CRope::Touch( CBaseEntity* pOther )
{
	//Nothing.
}
// Persists rope state via the m_SaveData table.
int CRope::Save( CSave& save )
{
	if( !BaseClass::Save( save ) )
		return false;
	return save.WriteFields( "CRope", this, m_SaveData, ARRAYSIZE( m_SaveData ) );
}
// Restores rope state and re-creates the transient simulation buffers that
// are not persisted.
int CRope::Restore( CRestore& restore )
{
	if( !BaseClass::Restore( restore ) )
	{
		return false;
	}
	auto status = restore.ReadFields( "CRope", this, m_SaveData, ARRAYSIZE( m_SaveData ) );
	// Reallocate the RK4 scratch buffers sized by the restored sample count.
	// NOTE(review): unlike InitializeRopeSim(), the new buffers are not
	// zeroed here; the first simulation step appears to overwrite them, but
	// confirm no field is read before being written.
	for( size_t uiIndex = 0; uiIndex < MAX_TEMP_SAMPLES; ++uiIndex )
	{
		m_TempSys[ uiIndex ] = new RopeSampleData[ m_uiNumSamples ];
	}
	// Force spring rebuild and a zero-length first timestep on next Think.
	m_bSpringsInitialized = false;
	m_bInitialDeltaTime = true;
	return status;
}
// Builds the simulation state: target sample set, RK4 scratch buffers, and
// initial per-sample mass/position data derived from the spawned segments.
void CRope::InitializeRopeSim()
{
	// Drop any previous scratch buffers (e.g. on respawn).
	for( size_t uiIndex = 0; uiIndex < MAX_TEMP_SAMPLES; ++uiIndex )
	{
		delete[] m_TempSys[ uiIndex ];
		m_TempSys[ uiIndex ] = nullptr;
	}
	// Secondary sample set used as the integration target buffer.
	for( size_t uiSample = 0; uiSample < m_uiNumSamples; ++uiSample )
	{
		m_TargetSys[ uiSample ] = CRopeSample::CreateSample();
		m_TargetSys[ uiSample ]->SetMasterRope( this );
	}
	memset( m_TargetSys + m_uiNumSamples, 0, sizeof( CRopeSample* ) * ( MAX_SAMPLES - m_uiNumSamples ) );
	// Zeroed scratch arrays, one per RK4 stage.
	for( size_t uiIndex = 0; uiIndex < MAX_TEMP_SAMPLES; ++uiIndex )
	{
		m_TempSys[ uiIndex ] = new RopeSampleData[ m_uiNumSamples ];
		memset( m_TempSys[ uiIndex ], 0, sizeof( RopeSampleData ) * m_uiNumSamples );
	}
	// Seed each sample from its segment's spawn position; unit mass
	// (stored as reciprocal) and no initial motion or forces.
	for( size_t uiSeg = 0; uiSeg < m_uiSegments; ++uiSeg )
	{
		CRopeSegment* pSegment = seg[ uiSeg ];
		CRopeSample* pSample = pSegment->GetSample();
		auto& data = pSample->GetData();
		data.mPosition = pSegment->pev->origin;
		data.mVelocity = g_vecZero;
		data.mForce = g_vecZero;
		data.mMassReciprocal = 1;
		data.mApplyExternalForce = false;
		data.mExternalForce = g_vecZero;
		pSegment->SetDefaultMass( data.mMassReciprocal );
	}
	{
		//Zero out the anchored segment's mass so it stays in place.
		auto pSample = m_CurrentSys[ 0 ];
		pSample->GetData().mMassReciprocal = 0;
	}
	// The free end sample hangs one attachment-length past the last segment
	// along the gravity direction, and is heavier (reciprocal 0.2 == mass 5).
	CRopeSegment* pSegment = seg[ m_uiSegments - 1 ];
	Vector vecOrigin, vecAngles;
	pSegment->GetAttachment( 0, vecOrigin, vecAngles );
	Vector vecDistance = vecOrigin - pSegment->pev->origin;
	const float flLength = vecDistance.Length();
	const Vector vecGravity = m_vecGravity.Normalize();
	vecOrigin = vecGravity * flLength + pSegment->pev->origin;
	CRopeSample* pSample = m_CurrentSys[ m_uiNumSamples - 1 ];
	auto& data = pSample->GetData();
	data.mPosition = vecOrigin;
	m_vecLastEndPos = vecOrigin;
	data.mVelocity = g_vecZero;
	data.mForce = g_vecZero;
	data.mMassReciprocal = 0.2;
	data.mApplyExternalForce = false;
	// The top few segments (up to 4) cannot be grabbed by the player.
	size_t uiNumSegs = 4;
	if( m_uiSegments <= 4 )
		uiNumSegs = m_uiSegments;
	for( size_t uiIndex = 0; uiIndex < uiNumSegs; ++uiIndex )
	{
		seg[ uiIndex ]->SetCanBeGrabbed( false );
		altseg[ uiIndex ]->SetCanBeGrabbed( false );
	}
}
// Builds one spring per adjacent sample pair; rest lengths come from the
// spawned segments' attachment spacing (0 for the final, segment-less pair).
// NOTE(review): m_pSprings is assigned without freeing a previous array, so
// re-initialization after Restore() appears to leak the old springs —
// confirm whether the header null-inits m_pSprings and whether a delete[]
// belongs here.
void CRope::InitializeSprings( const size_t uiNumSprings )
{
	m_SpringCnt = uiNumSprings;
	m_pSprings = new Spring[ uiNumSprings ];
	if( uiNumSprings > 0 )
	{
		Vector vecOrigin, vecAngles;
		for( size_t uiIndex = 0; uiIndex < m_SpringCnt; ++uiIndex )
		{
			// Spring i connects sample i to sample i+1.
			Spring& spring = m_pSprings[ uiIndex ];
			spring.p1 = uiIndex;
			spring.p2 = uiIndex + 1;
			if( uiIndex < m_uiSegments )
			{
				CRopeSegment* pSegment = seg[ uiIndex ];
				pSegment->GetAttachment( 0, vecOrigin, vecAngles );
				spring.restLength = ( pSegment->pev->origin - vecOrigin ).Length();
			}
			else
				spring.restLength = 0;
			spring.hookConstant = m_flHookConstant;
			spring.springDampning = m_flSpringDampning;
		}
	}
	m_bSpringsInitialized = true;
}
// Advances the rope physics with fixed RK4 substeps until the simulation
// clock catches up to game time, ping-ponging between the current and
// target sample buffers each substep.
void CRope::RunSimOnSamples()
{
	float flDeltaTime = 0.025;
	if( m_bInitialDeltaTime )
	{
		// First step after spawn/restore: integrate with dt = 0 so state is
		// initialized without introducing a large catch-up jump.
		m_bInitialDeltaTime = false;
		m_flLastTime = gpGlobals->time;
		flDeltaTime = 0;
	}
	size_t uiIndex = 0;
	CRopeSample** ppSampleSource = m_CurrentSys;
	CRopeSample** ppSampleTarget = m_TargetSys;
	while( true )
	{
		++uiIndex;
		ComputeForces( ppSampleSource );
		RK4Integrate( flDeltaTime, ppSampleSource, ppSampleTarget );
		// Each substep accounts for 7ms of simulated time.
		m_flLastTime += 0.007;
		if( gpGlobals->time <= m_flLastTime )
		{
			// Caught up: only stop after an odd substep count so the final
			// results land back in m_CurrentSys after the swaps below.
			if( ( uiIndex % 2 ) != 0 )
				break;
		}
		std::swap( ppSampleSource, ppSampleTarget );
	}
	m_flLastTime = gpGlobals->time;
}
// Accumulates forces on a raw sample-data array: per-sample environment
// forces first, then pairwise spring forces.
void CRope::ComputeForces( RopeSampleData* pSystem )
{
	for( size_t uiSample = 0; uiSample < m_uiNumSamples; ++uiSample )
	{
		ComputeSampleForce( pSystem[ uiSample ] );
	}
	for( size_t uiSpring = 0; uiSpring < m_SpringCnt; ++uiSpring )
	{
		Spring& spring = m_pSprings[ uiSpring ];
		ComputeSpringForce( pSystem[ spring.p1 ], pSystem[ spring.p2 ], spring );
	}
}
// Same as above, but operating through the sample-entity indirection.
void CRope::ComputeForces( CRopeSample** ppSystem )
{
	for( size_t uiSample = 0; uiSample < m_uiNumSamples; ++uiSample )
	{
		ComputeSampleForce( ppSystem[ uiSample ]->GetData() );
	}
	for( size_t uiSpring = 0; uiSpring < m_SpringCnt; ++uiSpring )
	{
		Spring& spring = m_pSprings[ uiSpring ];
		ComputeSpringForce( ppSystem[ spring.p1 ]->GetData(), ppSystem[ spring.p2 ]->GetData(), spring );
	}
}
// Resets and accumulates the per-sample environment forces: gravity,
// one-shot external impulses, and velocity damping.
void CRope::ComputeSampleForce( RopeSampleData& data )
{
	// Forces are rebuilt from scratch each substep.
	data.mForce = g_vecZero;
	// Gravity scales by mass; a reciprocal of 0 marks a pinned sample.
	if( data.mMassReciprocal != 0.0 )
	{
		data.mForce = data.mForce + ( m_vecGravity / data.mMassReciprocal );
	}
	// One-shot externally applied force (e.g. a player tugging the rope).
	if( data.mApplyExternalForce )
	{
		data.mForce = data.mForce + data.mExternalForce;
		data.mExternalForce = g_vecZero;
		data.mApplyExternalForce = false;
	}
	// Light drag while moving with gravity, strong damping against it.
	const bool bMovingWithGravity = DotProduct( m_vecGravity, data.mVelocity ) >= 0;
	if( bMovingWithGravity )
	{
		data.mForce = data.mForce + data.mVelocity * -0.04;
	}
	else
	{
		data.mForce = data.mForce - data.mVelocity;
	}
}
// Applies the damped-spring (Hooke + damping) force between two connected
// samples, adding equal and opposite contributions to each.
void CRope::ComputeSpringForce( RopeSampleData& first, RopeSampleData& second, const Spring& spring )
{
	Vector vecDist = first.mPosition - second.mPosition;
	const double flDistance = vecDist.Length();
	// BUG FIX: coincident samples previously caused a division by zero
	// below (flSpringFactor divides by flDistance) and normalization of a
	// zero-length vector, injecting NaNs into the whole simulation. With
	// zero separation the spring direction is undefined, so apply no force.
	if( flDistance <= 0.0 )
		return;
	const double flForce = ( flDistance - spring.restLength ) * spring.hookConstant;
	// Damping term: relative velocity projected onto the spring axis.
	const double flNewRelativeDist = DotProduct( first.mVelocity - second.mVelocity, vecDist ) * spring.springDampning;
	vecDist = vecDist.Normalize();
	const double flSpringFactor = -( flNewRelativeDist / flDistance + flForce );
	const Vector vecForce = flSpringFactor * vecDist;
	// Newton's third law: equal and opposite.
	first.mForce = first.mForce + vecForce;
	second.mForce = second.mForce - vecForce;
}
// Advances the sample system by one 4th-order Runge-Kutta step.
// ppSampleSource holds the current state; ppSampleTarget receives the
// integrated state. m_TempSys[0] is reused as the scratch evaluation state
// and m_TempSys[1..4] hold the four derivative estimates (k1..k4).
void CRope::RK4Integrate( const float flDeltaTime, CRopeSample** ppSampleSource, CRopeSample** ppSampleTarget )
{
    // Sub-step sizes: half steps for the inner evaluations, a full step last.
    const float flDeltas[ MAX_TEMP_SAMPLES - 1 ] =
    {
        flDeltaTime * 0.5f,
        flDeltaTime * 0.5f,
        flDeltaTime * 0.5f,
        flDeltaTime
    };

    // First estimate (k1): derivatives taken directly from the source state.
    {
        RopeSampleData* pTemp1 = m_TempSys[ 0 ];
        RopeSampleData* pTemp2 = m_TempSys[ 1 ];

        for( size_t uiIndex = 0; uiIndex < m_uiNumSamples; ++uiIndex, ++pTemp1, ++pTemp2 )
        {
            const auto& data = ppSampleSource[ uiIndex ]->GetData();

            // Velocity/position deltas for this sub-step.
            pTemp2->mForce = data.mMassReciprocal * data.mForce * flDeltas[ 0 ];
            pTemp2->mVelocity = data.mVelocity * flDeltas[ 0 ];

            // Scratch state advanced by this estimate, used for the next one.
            pTemp1->mMassReciprocal = data.mMassReciprocal;
            pTemp1->mVelocity = data.mVelocity + pTemp2->mForce;
            pTemp1->mPosition = data.mPosition + pTemp2->mVelocity;
        }

        ComputeForces( m_TempSys[ 0 ] );
    }

    // Middle estimates (k2, k3): derivatives at the state advanced by the
    // previous estimate over a half step.
    for( size_t uiStep = 2; uiStep < MAX_TEMP_SAMPLES - 1; ++uiStep )
    {
        RopeSampleData* pTemp1 = m_TempSys[ 0 ];
        RopeSampleData* pTemp2 = m_TempSys[ uiStep ];

        for( size_t uiIndex = 0; uiIndex < m_uiNumSamples; ++uiIndex, ++pTemp1, ++pTemp2 )
        {
            const auto& data = ppSampleSource[ uiIndex ]->GetData();

            pTemp2->mForce = data.mMassReciprocal * pTemp1->mForce * flDeltas[ uiStep - 1 ];
            pTemp2->mVelocity = pTemp1->mVelocity * flDeltas[ uiStep - 1 ];

            pTemp1->mMassReciprocal = data.mMassReciprocal;
            pTemp1->mVelocity = data.mVelocity + pTemp2->mForce;
            pTemp1->mPosition = data.mPosition + pTemp2->mVelocity;
        }

        ComputeForces( m_TempSys[ 0 ] );
    }

    // Final estimate (k4) over the full step. No force recomputation needed:
    // only the deltas are accumulated below.
    {
        RopeSampleData* pTemp1 = m_TempSys[ 0 ];
        RopeSampleData* pTemp2 = m_TempSys[ 4 ];

        for( size_t uiIndex = 0; uiIndex < m_uiNumSamples; ++uiIndex, ++pTemp1, ++pTemp2 )
        {
            const auto& data = ppSampleSource[ uiIndex ]->GetData();

            pTemp2->mForce = data.mMassReciprocal * pTemp1->mForce * flDeltas[ 3 ];
            pTemp2->mVelocity = pTemp1->mVelocity * flDeltas[ 3 ];
        }
    }

    // Combine the four estimates with the classic RK4 weights (1,2,2,1)/6.
    RopeSampleData* pTemp1 = m_TempSys[ 1 ];
    RopeSampleData* pTemp2 = m_TempSys[ 2 ];
    RopeSampleData* pTemp3 = m_TempSys[ 3 ];
    RopeSampleData* pTemp4 = m_TempSys[ 4 ];

    for( size_t uiIndex = 0; uiIndex < m_uiNumSamples; ++uiIndex, ++pTemp1, ++pTemp2, ++pTemp3, ++pTemp4 )
    {
        auto pSource = ppSampleSource[ uiIndex ];
        auto pTarget = ppSampleTarget[ uiIndex ];

        const Vector vecPosChange = 1.0f / 6.0f * ( pTemp1->mVelocity + ( pTemp2->mVelocity + pTemp3->mVelocity ) * 2 + pTemp4->mVelocity );
        const Vector vecVelChange = 1.0f / 6.0f * ( pTemp1->mForce + ( pTemp2->mForce + pTemp3->mForce ) * 2 + pTemp4->mForce );

        // Deltas were pre-scaled by the sub-step sizes above, so flDeltaTime
        // is not applied again here.
        pTarget->GetData().mPosition = pSource->GetData().mPosition + ( vecPosChange );//* flDeltaTime );
        pTarget->GetData().mVelocity = pSource->GetData().mVelocity + ( vecVelChange );//* flDeltaTime );
    }
}
//TODO move to common header - Solokiller
// Reference axes used by GetAlignmentAngles to derive pitch and roll.
static const Vector DOWN( 0, 0, -1 );
static const Vector RIGHT( 0, 1, 0 );
// Computes the pitch/roll angles (degrees) that align a segment model with
// the line running from vecTop to vecBottom, storing the result in vecOut.
void GetAlignmentAngles( const Vector& vecTop, const Vector& vecBottom, Vector& vecOut )
{
    Vector vecDist = vecBottom - vecTop;

    Vector vecResult = vecDist.Normalize();

    // Roll: angle between the rope direction and the world Y axis.
    const float flRoll = acos( DotProduct( vecResult, RIGHT ) ) * ( 180.0 / M_PI );

    vecOut.z = -flRoll;

    // Pitch: angle between the rope direction, projected onto the XZ plane,
    // and straight down; signed by which side of vertical the rope leans.
    vecDist.y = 0;

    vecResult = vecDist.Normalize();

    const float flPitch = acos( DotProduct( vecResult, DOWN ) ) * ( 180.0 / M_PI );

    vecOut.x = ( vecResult.x >= 0.0 ) ? flPitch : -flPitch;
    vecOut.y = 0;
}
// Offsets every component of vec by +0.05.
// NOTE(review): despite the name there is no truncation here -
// ( vec * 10 + 0.5 ) / 10 algebraically reduces to vec + 0.05 per component;
// presumably a floor/integer cast was intended at some point - confirm
// against the original engine behavior before "fixing".
void TruncateEpsilon( Vector& vec )
{
    vec = ( ( vec * 10.0 ) + Vector( 0.5, 0.5, 0.5 ) ) / 10.0;
}
// Traces each rope segment against the world and positions the visible
// segment entities accordingly. Segments that contact geometry are parked
// off the surface and their samples are pushed away by queueing an external
// force. Also aligns the segment models along the rope and tracks the free
// end position in m_vecLastEndPos.
void CRope::TraceModels( CRopeSegment** ppPrimarySegs, CRopeSegment** ppHiddenSegs )
{
    if( m_uiSegments > 1 )
    {
        Vector vecAngles;

        // Align the first segment with the line to the second sample.
        GetAlignmentAngles(
            m_CurrentSys[ 0 ]->GetData().mPosition,
            m_CurrentSys[ 1 ]->GetData().mPosition,
            vecAngles );

        ( *ppPrimarySegs )->pev->angles = vecAngles;
    }

    TraceResult tr;

    if( m_bObjectAttached )
    {
        for( size_t uiSeg = 1; uiSeg < m_uiSegments; ++uiSeg )
        {
            CRopeSample* pSample = m_CurrentSys[ uiSeg ];

            // Trace from the hidden segment towards the simulated sample,
            // overshooting so contacts near the sample are still caught.
            Vector vecDist = pSample->GetData().mPosition - ppHiddenSegs[ uiSeg ]->pev->origin;

            vecDist = vecDist.Normalize();

            // Longer overshoot for segments near the attached object.
            const float flTraceDist = ( uiSeg - m_uiAttachedObjectsSegment + 2 ) < 5 ? 50 : 10;

            const Vector vecTraceDist = vecDist * flTraceDist;

            const Vector vecEnd = pSample->GetData().mPosition + vecTraceDist;

            UTIL_TraceLine( ppHiddenSegs[ uiSeg ]->pev->origin, vecEnd, ignore_monsters, edict(), &tr );

            if( tr.flFraction == 1.0 && tr.fAllSolid )
            {
                break;
            }

            if( tr.flFraction != 1.0 || tr.fStartSolid || !tr.fInOpen )
            {
                // Hit something: park the visible segment just short of the
                // impact and shove the sample away from the surface.
                Vector vecOrigin = tr.vecEndPos - vecTraceDist;

                TruncateEpsilon( vecOrigin );

                UTIL_SetOrigin( ppPrimarySegs[ uiSeg ]->pev, vecOrigin );

                Vector vecNormal = tr.vecPlaneNormal.Normalize() * 20000.0;

                auto& data = ppPrimarySegs[ uiSeg ]->GetSample()->GetData();

                data.mApplyExternalForce = true;

                data.mExternalForce = vecNormal;

                data.mVelocity = g_vecZero;
            }
            else
            {
                // Clear path: follow the simulated sample position.
                Vector vecOrigin = pSample->GetData().mPosition;

                TruncateEpsilon( vecOrigin );

                UTIL_SetOrigin( ppPrimarySegs[ uiSeg ]->pev, vecOrigin );
            }
        }
    }
    else
    {
        for( size_t uiSeg = 1; uiSeg < m_uiSegments; ++uiSeg )
        {
            UTIL_TraceLine(
                ppHiddenSegs[ uiSeg ]->pev->origin,
                m_CurrentSys[ uiSeg ]->GetData().mPosition,
                ignore_monsters, edict(), &tr );

            if( tr.flFraction == 1.0 )
            {
                // Clear path: follow the simulated sample position.
                Vector vecOrigin = m_CurrentSys[ uiSeg ]->GetData().mPosition;

                TruncateEpsilon( vecOrigin );

                UTIL_SetOrigin( ppPrimarySegs[ uiSeg ]->pev, vecOrigin );
            }
            else
            {
                // Blocked: park the segment off the surface and push the
                // sample away from it.
                //CBaseEntity* pEnt = GET_PRIVATE( tr.pHit );

                const Vector vecNormal = tr.vecPlaneNormal.Normalize();

                Vector vecOrigin = tr.vecEndPos + vecNormal * 10.0;

                TruncateEpsilon( vecOrigin );

                UTIL_SetOrigin( ppPrimarySegs[ uiSeg ]->pev, vecOrigin );

                ppPrimarySegs[ uiSeg ]->GetSample()->GetData().mApplyExternalForce = true;

                ppPrimarySegs[ uiSeg ]->GetSample()->GetData().mExternalForce = vecNormal * 40000.0;
            }
        }
    }

    // Align every segment model with the line to the next segment.
    Vector vecAngles;

    for( size_t uiSeg = 1; uiSeg < m_uiSegments; ++uiSeg )
    {
        auto pSegment = ppPrimarySegs[ uiSeg - 1 ];
        auto pSegment2 = ppPrimarySegs[ uiSeg ];

        GetAlignmentAngles( pSegment->pev->origin, pSegment2->pev->origin, vecAngles );

        pSegment->pev->angles = vecAngles;
    }

    if( m_uiSegments > 1 )
    {
        // Track the free end of the rope, keeping it out of solid geometry.
        auto pSample = m_CurrentSys[ m_uiNumSamples - 1 ];

        UTIL_TraceLine( m_vecLastEndPos, pSample->GetData().mPosition, ignore_monsters, edict(), &tr );

        if( tr.flFraction == 1.0 )
        {
            m_vecLastEndPos = pSample->GetData().mPosition;
        }
        else
        {
            m_vecLastEndPos = tr.vecEndPos;

            pSample->GetData().mApplyExternalForce = true;

            pSample->GetData().mExternalForce = tr.vecPlaneNormal.Normalize() * 40000.0;
        }

        // Align the last segment with the tracked end position.
        auto pSegment = ppPrimarySegs[ m_uiNumSamples - 2 ];

        GetAlignmentAngles( pSegment->pev->origin, m_vecLastEndPos, vecAngles );

        pSegment->pev->angles = vecAngles;
    }
}
// Positions the rope segment entities along the simulated samples and keeps
// one of the two segment sets visible and solid while hiding the other.
void CRope::SetRopeSegments( const size_t uiNumSegments,
                             CRopeSegment** ppPrimarySegs, CRopeSegment** ppHiddenSegs )
{
    if( uiNumSegments > 0 )
    {
        TraceModels( ppPrimarySegs, ppHiddenSegs );

        CRopeSegment** ppVisible = ppPrimarySegs;
        CRopeSegment** ppActualHidden = ppHiddenSegs;

        //In multiplayer, the constant toggling of visible segments makes them completely invisible.
        //So always make the seg segments visible. - Solokiller
        if( m_bToggle && g_pGameRules->IsMultiplayer() )
        {
            std::swap( ppVisible, ppActualHidden );
        }

        ppVisible[ 0 ]->pev->solid = SOLID_TRIGGER;
        //TODO: maybe only set/unset the nodraw flag
        ppVisible[ 0 ]->pev->effects = 0;

        ppActualHidden[ 0 ]->pev->solid = SOLID_NOT;
        ppActualHidden[ 0 ]->pev->effects = EF_NODRAW;

        for( size_t uiIndex = 1; uiIndex < uiNumSegments; ++uiIndex )
        {
            CRopeSegment* pPrim = ppVisible[ uiIndex ];
            CRopeSegment* pHidden = ppActualHidden[ uiIndex ];

            pPrim->pev->solid = SOLID_TRIGGER;
            pPrim->pev->effects = 0;

            pHidden->pev->solid = SOLID_NOT;
            pHidden->pev->effects = EF_NODRAW;

            // Keep the hidden segment tracking the visible one's position.
            Vector vecOrigin = pPrim->pev->origin;

            //vecOrigin.x += 10.0;
            //vecOrigin.y += 10.0;

            UTIL_SetOrigin( pHidden->pev, vecOrigin );
        }
    }
}
// Moves the attached object up the rope by up to 128 units per second,
// stepping across segment boundaries as needed. Climbing stops once the
// object is within the topmost few segments. Always returns true (the
// object cannot fall off the top).
bool CRope::MoveUp( const float flDeltaTime )
{
    if( m_uiAttachedObjectsSegment > 4 )
    {
        float flDistance = flDeltaTime * 128.0;

        Vector vecOrigin, vecAngles;

        while( true )
        {
            float flOldDist = flDistance;

            flDistance = 0;

            if( flOldDist <= 0 )
                break;

            if( m_uiAttachedObjectsSegment <= 3 )
                break;

            if( flOldDist > m_flAttachedObjectsOffset )
            {
                // Consumed the remainder of this segment; step up to the
                // previous segment and restart the offset at its length.
                flDistance = flOldDist - m_flAttachedObjectsOffset;

                --m_uiAttachedObjectsSegment;

                float flNewOffset = 0;

                if( m_uiAttachedObjectsSegment < m_uiSegments )
                {
                    auto pSegment = seg[ m_uiAttachedObjectsSegment ];

                    pSegment->GetAttachment( 0, vecOrigin, vecAngles );

                    flNewOffset = ( pSegment->pev->origin - vecOrigin ).Length();
                }

                m_flAttachedObjectsOffset = flNewOffset;
            }
            else
            {
                // Still inside the current segment; just move along it.
                m_flAttachedObjectsOffset -= flOldDist;
            }
        }
    }

    return true;
}
// Moves the attached object down the rope by up to 128 units per second,
// stepping across segment boundaries as needed. Returns false when nothing
// is attached or the object slides off the end of the rope.
bool CRope::MoveDown( const float flDeltaTime )
{
    if( !m_bObjectAttached )
        return false;

    float flDistance = flDeltaTime * 128.0;

    Vector vecOrigin, vecAngles;

    CRopeSegment* pSegment;

    bool bOnRope = true;

    bool bDoIteration = true;

    while( bDoIteration )
    {
        bDoIteration = false;
        if( flDistance > 0.0 )
        {
            float flNewDist = flDistance;

            float flSegLength = 0.0;

            while( bOnRope )
            {
                // Length of the current segment (stays 0 if out of range).
                if( m_uiAttachedObjectsSegment < m_uiSegments )
                {
                    pSegment = seg[ m_uiAttachedObjectsSegment ];

                    pSegment->GetAttachment( 0, vecOrigin, vecAngles );

                    flSegLength = ( pSegment->pev->origin - vecOrigin ).Length();
                }

                // Distance remaining within the current segment.
                const float flOffset = flSegLength - m_flAttachedObjectsOffset;

                if( flNewDist <= flOffset )
                {
                    // Remaining travel fits inside this segment.
                    m_flAttachedObjectsOffset += flNewDist;

                    flDistance = 0;

                    bDoIteration = true;

                    break;
                }

                // Step onto the next segment; passing the last one means the
                // object has run off the rope.
                if( m_uiAttachedObjectsSegment + 1 == m_uiSegments )
                    bOnRope = false;
                else
                    ++m_uiAttachedObjectsSegment;

                flNewDist -= flOffset;
                flSegLength = 0;

                m_flAttachedObjectsOffset = 0;

                if( flNewDist <= 0 )
                    break;
            }
        }
    }

    return bOnRope;
}
// Velocity of the sample carrying the attached object, or the zero vector
// when nothing is attached.
Vector CRope::GetAttachedObjectsVelocity() const
{
    if( m_bObjectAttached )
    {
        return seg[ m_uiAttachedObjectsSegment ]->GetSample()->GetData().mVelocity;
    }

    return g_vecZero;
}
// Scales a player-applied force and applies it to the segment the player is
// attached to. Ropes shorter than 26 segments receive a proportionally
// smaller force.
void CRope::ApplyForceFromPlayer( const Vector& vecForce )
{
    if( !m_bObjectAttached )
        return;

    float flForce = 20000.0;

    if( m_uiSegments < 26 )
        flForce *= ( m_uiSegments / 26.0 );

    ApplyForceToSegment( vecForce * flForce, m_uiAttachedObjectsSegment );
}
// Applies an external force to the given segment. An index equal to the
// segment count addresses the rope's final sample, which has no segment of
// its own (there is one more sample than there are segments).
void CRope::ApplyForceToSegment( const Vector& vecForce, const size_t uiSegment )
{
    if( uiSegment < m_uiSegments )
    {
        seg[ uiSegment ]->ApplyExternalForce( vecForce );
    }
    else if( uiSegment == m_uiSegments )
    {
        //Apply force to the last sample.

        auto& data = m_CurrentSys[ uiSegment - 1 ]->GetData();

        data.mExternalForce = data.mExternalForce + vecForce;

        data.mApplyExternalForce = true;
    }
}
// Marks the given segment as carrying the attached object and resets the
// detach timer and along-segment offset.
void CRope::AttachObjectToSegment( CRopeSegment* pSegment )
{
    m_bObjectAttached = true;
    m_flDetachTime = 0;
    m_flAttachedObjectsOffset = 0;

    SetAttachedObjectsSegment( pSegment );
}
// Releases the attached object and remembers when the detach happened so
// that re-attachment can be rate limited.
void CRope::DetachObject()
{
    m_flDetachTime = gpGlobals->time;
    m_bObjectAttached = false;
}
// Whether a player may currently grab this rope: nothing else is attached,
// attachment is not disallowed, and at least two seconds have passed since
// the last detach.
bool CRope::IsAcceptingAttachment() const
{
    const bool bCooldownOver = ( gpGlobals->time - m_flDetachTime ) > 2.0;

    return bCooldownOver && !m_bObjectAttached && !m_bDisallowPlayerAttachment;
}
// Randomly decides whether the rope should emit a creak: only possible while
// an object is attached, sound is enabled, and the carrying sample is moving
// fast enough. Roughly a 1-in-5 chance per eligible query.
bool CRope::ShouldCreak() const
{
    if( !m_bObjectAttached || !m_bMakeSound )
        return false;

    CRopeSample* pSample = seg[ m_uiAttachedObjectsSegment ]->GetSample();

    return pSample->GetData().mVelocity.Length() > 20.0 && RANDOM_LONG( 1, 5 ) == 1;
}
// Plays a randomly chosen creak sound on the rope entity's body channel.
void CRope::Creak()
{
    EMIT_SOUND( edict(), CHAN_BODY,
                g_pszCreakSounds[ RANDOM_LONG( 0, ARRAYSIZE( g_pszCreakSounds ) - 1 ) ],
                VOL_NORM, ATTN_NORM );
}
// Length of the given segment, measured from its entity origin to its first
// attachment point. Returns 0 for an out-of-range index.
float CRope::GetSegmentLength( size_t uiSegmentIndex ) const
{
    if( uiSegmentIndex >= m_uiSegments )
        return 0;

    Vector vecOrigin, vecAngles;

    auto pSegment = seg[ uiSegmentIndex ];
    pSegment->GetAttachment( 0, vecOrigin, vecAngles );

    return ( pSegment->pev->origin - vecOrigin ).Length();
}
float CRope::GetRopeLength() const
{
float flLength = 0;
Vector vecOrigin, vecAngles;
for( size_t uiIndex = 0; uiIndex < m_uiSegments; ++uiIndex )
{
auto pSegment = seg[ uiIndex ];
pSegment->GetAttachment( 0, vecOrigin, vecAngles );
flLength += ( pSegment->pev->origin - vecOrigin ).Length();
}
return flLength;
}
// Position of the first (topmost) sample of the rope.
Vector CRope::GetRopeOrigin() const
{
    return m_CurrentSys[ 0 ]->GetData().mPosition;
}
// Whether the given index refers to an existing segment.
bool CRope::IsValidSegmentIndex( const size_t uiSegment ) const
{
    return uiSegment < m_uiSegments;
}
// Position of the sample at the given segment index, or the zero vector for
// an invalid index.
Vector CRope::GetSegmentOrigin( const size_t uiSegment ) const
{
    return IsValidSegmentIndex( uiSegment )
        ? m_CurrentSys[ uiSegment ]->GetData().mPosition
        : g_vecZero;
}
// World position of the first attachment point of the given segment, using
// whichever segment set (seg or altseg) is currently active. Returns the
// zero vector for an invalid index.
Vector CRope::GetSegmentAttachmentPoint( const size_t uiSegment ) const
{
    if( !IsValidSegmentIndex( uiSegment ) )
        return g_vecZero;

    Vector vecOrigin, vecAngles;

    auto pSegment = m_bToggle ? altseg[ uiSegment ] : seg[ uiSegment ];
    pSegment->GetAttachment( 0, vecOrigin, vecAngles );

    return vecOrigin;
}
// Finds the index of the given segment in either segment set and remembers
// it as the segment carrying the attached object. The index is left
// untouched if the segment does not belong to this rope.
void CRope::SetAttachedObjectsSegment( CRopeSegment* pSegment )
{
    for( size_t uiSeg = 0; uiSeg < m_uiSegments; ++uiSeg )
    {
        const bool bMatch = ( seg[ uiSeg ] == pSegment ) || ( altseg[ uiSeg ] == pSegment );

        if( bMatch )
        {
            m_uiAttachedObjectsSegment = uiSeg;
            return;
        }
    }
}
// Normalized direction from the given segment's sample to the next sample
// down the rope, or the zero vector for an invalid index.
Vector CRope::GetSegmentDirFromOrigin( const size_t uiSegmentIndex ) const
{
    if( uiSegmentIndex >= m_uiSegments )
        return g_vecZero;

    //There is one more sample than there are segments, so this is fine.
    const Vector vecResult =
        m_CurrentSys[ uiSegmentIndex + 1 ]->GetData().mPosition -
        m_CurrentSys[ uiSegmentIndex ]->GetData().mPosition;

    return vecResult.Normalize();
}
// World position of the attached object: its segment's sample position plus
// the object's offset along the segment direction. Returns the zero vector
// when nothing is attached.
Vector CRope::GetAttachedObjectsPosition() const
{
    if( !m_bObjectAttached )
        return g_vecZero;

    Vector vecResult;

    if( m_uiAttachedObjectsSegment < m_uiSegments )
        vecResult = m_CurrentSys[ m_uiAttachedObjectsSegment ]->GetData().mPosition;

    // Offset down the rope by how far along the segment the object has moved.
    vecResult = vecResult +
        ( m_flAttachedObjectsOffset * GetSegmentDirFromOrigin( m_uiAttachedObjectsSegment ) );

    return vecResult;
}
|
#include "channel_auth.hpp"
#include <ipmid/api.h>
#include <user_channel/channel_layer.hpp>
#include <user_channel/user_layer.hpp>
namespace command
{

// Handles the IPMI Get Channel Authentication Capabilities command over
// RMCP+. Validates the request length, channel number and requested
// privilege level, then returns a mostly canned capability response since
// there is no per-channel user/auth management here.
std::vector<uint8_t>
    GetChannelCapabilities(const std::vector<uint8_t>& inPayload,
                           const message::Handler& handler)
{
    auto request =
        reinterpret_cast<const GetChannelCapabilitiesReq*>(inPayload.data());
    if (inPayload.size() != sizeof(*request))
    {
        std::vector<uint8_t> errorPayload{IPMI_CC_REQ_DATA_LEN_INVALID};
        return errorPayload;
    }

    // Only the low nibble of the requested channel number is meaningful.
    constexpr unsigned int channelMask = 0x0f;
    uint8_t chNum = ipmi::convertCurrentChannelNum(
        request->channelNumber & channelMask, getInterfaceIndex());

    // Reject invalid channels, channels without session support, and
    // invalid requested privilege levels.
    if (!ipmi::isValidChannel(chNum) ||
        (ipmi::EChannelSessSupported::none ==
         ipmi::getChannelSessionSupport(chNum)) ||
        !ipmi::isValidPrivLimit(request->reqMaxPrivLevel))
    {
        std::vector<uint8_t> errorPayload{IPMI_CC_INVALID_FIELD_REQUEST};
        return errorPayload;
    }

    std::vector<uint8_t> outPayload(sizeof(GetChannelCapabilitiesResp));
    auto response =
        reinterpret_cast<GetChannelCapabilitiesResp*>(outPayload.data());

    // A canned response, since there is no user and channel management.
    response->completionCode = IPMI_CC_OK;
    response->channelNumber = chNum;
    response->ipmiVersion = 1; // IPMI v2.0 extended capabilities available.
    response->reserved1 = 0;
    response->oem = 0;
    response->straightKey = 0;
    response->reserved2 = 0;
    response->md5 = 0;
    response->md2 = 0;
    response->reserved3 = 0;
    response->KGStatus = 0;       // KG is set to default
    response->perMessageAuth = 0; // Per-message Authentication is enabled
    response->userAuth = 0;       // User Level Authentication is enabled

    // Only the non-null-usernames bit depends on the live user database.
    uint8_t maxChUsers = 0;
    uint8_t enabledUsers = 0;
    uint8_t fixedUsers = 0;
    ipmi::ipmiUserGetAllCounts(maxChUsers, enabledUsers, fixedUsers);

    response->nonNullUsers = enabledUsers > 0 ? 1 : 0; // Non-null usernames
    response->nullUsers = 0;      // Null usernames disabled
    response->anonymousLogin = 0; // Anonymous Login disabled
    response->reserved4 = 0;
    response->extCapabilities = 0x2; // Channel supports IPMI v2.0 connections
    response->oemID[0] = 0;
    response->oemID[1] = 0;
    response->oemID[2] = 0;
    response->oemAuxillary = 0;
    return outPayload;
}

} // namespace command
|
/*
MANGO Multimedia Development Platform
Copyright (C) 2012-2017 Twilight Finland 3D Oy Ltd. All rights reserved.
*/
#pragma once
#include <atomic>
#include "configure.hpp"
namespace mango
{
/* WARNING!
Atomic locks are implemented as busy loops which consume significant
amounts of CPU time if the locks are congested and held for a long
period of time.
*/
// ----------------------------------------------------------------------------
// SpinLock
// ----------------------------------------------------------------------------
// Busy-wait mutual exclusion lock built on std::atomic_flag.
class SpinLock
{
private:
    std::atomic_flag m_locked = ATOMIC_FLAG_INIT;

public:
    // Attempts to acquire the lock without blocking.
    // Returns true when the lock was acquired, false when it is already held.
    bool tryLock()
    {
        // test_and_set() returns the PREVIOUS state of the flag, so the lock
        // was acquired only when that previous state was clear (false).
        // (The old code returned the raw result, i.e. true on FAILURE.)
        return !m_locked.test_and_set(std::memory_order_acquire);
    }

    // Busy-waits until the lock is acquired.
    void lock()
    {
        while (m_locked.test_and_set(std::memory_order_acquire)) {
        }
    }

    // Releases the lock.
    void unlock()
    {
        m_locked.clear(std::memory_order_release);
    }
};
// Scope guard that acquires a SpinLock on construction and releases it on
// destruction. lock()/unlock() may also be called manually; both are
// idempotent thanks to the m_locked flag.
class SpinLockGuard
{
private:
    SpinLock& m_spinlock;
    bool m_locked { false };

public:
    SpinLockGuard(SpinLock& spinlock)
        : m_spinlock(spinlock)
    {
        lock();
    }

    ~SpinLockGuard()
    {
        unlock();
    }

    // Acquires the underlying lock unless this guard already holds it.
    void lock()
    {
        if (m_locked) {
            return;
        }

        m_locked = true;
        m_spinlock.lock();
    }

    // Releases the underlying lock if this guard holds it.
    void unlock()
    {
        if (!m_locked) {
            return;
        }

        m_locked = false;
        m_spinlock.unlock();
    }
};
// ----------------------------------------------------------------------------
// ReadWriteSpinLock
// ----------------------------------------------------------------------------
// Busy-wait reader/writer lock built on SpinLock. The base lock provides
// writer exclusivity; m_read_count tracks active readers. Writers take the
// base lock and then spin until the reader count drains; readers hold the
// base lock only briefly while registering themselves.
// NOTE(review): the try* methods assume SpinLock::tryLock() returns true on
// successful acquisition - confirm that contract holds.
class ReadWriteSpinLock : protected SpinLock
{
private:
    std::atomic<int> m_read_count { 0 };

public:
    // Attempts to take the write lock without blocking on the base lock;
    // on success, busy-waits until all readers have finished.
    bool tryWriteLock()
    {
        bool status = tryLock();
        if (status) {
            // acquired exclusive access - flush all readers
            while (m_read_count > 0) {
            }
        }

        return status;
    }

    // Blocks until exclusive (writer) access is held and no readers remain.
    void writeLock()
    {
        // acquire exclusive access
        lock();

        // flush all readers
        while (m_read_count > 0) {
        }
    }

    // Releases writer access.
    void writeUnlock()
    {
        // release exclusivity
        unlock();
    }

    // Attempts to register as a reader without blocking; the base lock is
    // held only momentarily while the reader count is incremented.
    bool tryReadLock()
    {
        bool status = tryLock();
        if (status) {
            // gained temporary exclusivity - add one reader
            m_read_count.fetch_add(1, std::memory_order_acquire);
            unlock();
        }

        return status;
    }

    // Blocks until the reader can register itself.
    void readLock()
    {
        // gain temporary exclusivity to add one reader
        lock();

        m_read_count.fetch_add(1, std::memory_order_acquire);
        unlock();
    }

    // Deregisters a reader; requires no exclusivity.
    void readUnlock()
    {
        // reader can be released at any time w/o exclusivity
        m_read_count.fetch_sub(1, std::memory_order_release);
    }
};
// Scope guard that holds the writer side of a ReadWriteSpinLock for its
// lifetime. lock()/unlock() may also be called manually; both are
// idempotent thanks to the m_locked flag.
class WriteSpinLockGuard
{
private:
    ReadWriteSpinLock& m_rwlock;
    bool m_locked { false };

public:
    WriteSpinLockGuard(ReadWriteSpinLock& rwlock)
        : m_rwlock(rwlock)
    {
        lock();
    }

    ~WriteSpinLockGuard()
    {
        unlock();
    }

    // Acquires the write lock unless this guard already holds it.
    void lock()
    {
        if (m_locked) {
            return;
        }

        m_locked = true;
        m_rwlock.writeLock();
    }

    // Releases the write lock if this guard holds it.
    void unlock()
    {
        if (!m_locked) {
            return;
        }

        m_locked = false;
        m_rwlock.writeUnlock();
    }
};
// Scope guard that holds the reader side of a ReadWriteSpinLock for its
// lifetime. lock()/unlock() may also be called manually; both are
// idempotent thanks to the m_locked flag.
class ReadSpinLockGuard
{
private:
    ReadWriteSpinLock& m_rwlock;
    bool m_locked { false };

public:
    ReadSpinLockGuard(ReadWriteSpinLock& rwlock)
        : m_rwlock(rwlock)
    {
        lock();
    }

    ~ReadSpinLockGuard()
    {
        unlock();
    }

    // Registers as a reader unless this guard is already registered.
    void lock()
    {
        if (m_locked) {
            return;
        }

        m_locked = true;
        m_rwlock.readLock();
    }

    // Deregisters the reader if this guard is registered.
    void unlock()
    {
        if (!m_locked) {
            return;
        }

        m_locked = false;
        m_rwlock.readUnlock();
    }
};
} // namespace mango
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MultiDataProvider.h"
#include <algorithm>
#include "paddle/utils/Logging.h"
#include "paddle/utils/Util.h"
namespace paddle {
// NOTE(review): file-scope `using namespace std;` is discouraged even in a
// .cpp file; prefer explicit std:: qualification.
using namespace std;
// Composite provider that multiplexes several sub data providers; batches
// are later drawn from each in proportion to its configured data_ratio.
// Requires at least one sub config flagged is_main_data. Async loading is
// force-disabled on sub providers since the composite drives them
// synchronously.
MultiDataProvider::MultiDataProvider(const DataConfig& config,
                                     const ModelConfig& modelConfig,
                                     bool useGpu)
    : DataProvider(config, useGpu) {
  bool atLeastOneMainDataFlag = false;
  totalDataRatio_ = 0;
  LOG(INFO) << "MultiDataProvider: sub data provider size: "
            << config.sub_data_configs_size();
  LOG(INFO) << "MultiDataProvider: for_test: " << config.for_test();
  isTestMode_ = config.for_test();
  // Sum the data ratios and verify that a main provider exists.
  for (int i = 0; i < config.sub_data_configs_size(); i++) {
    LOG(INFO) << "dataRatio of sub(" << i
              << ") is: " << config.sub_data_configs(i).data_ratio();
    totalDataRatio_ += config.sub_data_configs(i).data_ratio();
    if (config.sub_data_configs(i).is_main_data()) {
      LOG(INFO) << "main data is [" << i << "]";
      atLeastOneMainDataFlag = true;
    }
  }
  CHECK(atLeastOneMainDataFlag) << "all sub dataproviders in MultiData do not"
                                << " have is_main_data flag";
  LOG(INFO) << "totalDataRatio_=" << totalDataRatio_;
  DataConfig subConfig;
  int subDataProviderCount = config.sub_data_configs_size();
  if (isTestMode()) {
    LOG(INFO) << "construct MultiDataProvider in test mode";
  } else {
    LOG(INFO) << "construct MultiDataProvider in train mode";
  }
  subDataProviders_.resize(subDataProviderCount);
  // Instantiate each sub provider, disabling async loading if requested.
  for (int i = 0; i < subDataProviderCount; i++) {
    subConfig = config.sub_data_configs(i);
    if (subConfig.async_load_data()) {
      LOG(INFO) << "can not use async_load_data in sub dataprovider of "
                   "MultiDataProvider";
      subConfig.set_async_load_data(false);
    }
    subDataProviders_[i] = std::unique_ptr<DataProvider>(
        DataProvider::create(subConfig, modelConfig, useGpu_));
  }
}
void MultiDataProvider::reset() {
for (auto& elem : subDataProviders_) {
elem->reset();
}
DataProvider::reset();
}
void MultiDataProvider::shuffle() {
for (auto& elem : subDataProviders_) {
elem->shuffle();
}
}
// Builds one batch by drawing from every sub provider in proportion to its
// data_ratio. Returns the total batch size, or 0 when the main provider is
// exhausted in train mode. Exhausted non-main providers are reset and
// re-read in train mode; in test mode an empty argument is appended instead.
int64_t MultiDataProvider::getNextBatchInternal(int64_t size,
                                                DataBatch* batch) {
  batch->clear();
  for (size_t i = 0; i < subDataProviders_.size(); ++i) {
    // calc size according to data ratio
    int64_t subSize =
        (int64_t)(1.0 * size * config_.sub_data_configs(i).data_ratio() /
                  totalDataRatio_);
    DataBatch subBatch;
    int64_t realSize =
        subDataProviders_[i]->getNextBatchInternal(subSize, &subBatch);
    if (realSize == 0) {
      // current subDataProvider has no data
      if (!isTestMode()) {
        // in train mode
        if (config_.sub_data_configs(i).is_main_data()) {
          // is main data provider. then return 0
          batch->clear();
          return 0;
        } else {
          // not main data provider, reset current subDataProvider and try again
          subDataProviders_[i]->reset();
          subBatch.clear();
          realSize =
              subDataProviders_[i]->getNextBatchInternal(subSize, &subBatch);
          CHECK_GT(realSize, 0);
        }
      } else {
        // in test mode, make an empty argument
        Argument emptyArgu;
        std::vector<Argument> argus;
        argus.push_back(emptyArgu);
        batch->appendArguments(argus, 0, -1);
        continue;
      }
    }
    // Tag the appended stream with the sub provider index.
    batch->appendArguments(subBatch.getStreams(), subBatch.getSize(), i);
  }
  return batch->getSize();
}
// Registers this provider with the data-provider factory as type "multi".
REGISTER_DATA_PROVIDER_EX(multi, MultiDataProvider);
}  // namespace paddle
|
#pragma once
// This file is generated from the Game's Reflection data
#include <cstdint>
#include <RED4ext/Common.hpp>
#include <RED4ext/Scripting/Natives/Generated/ink/CallbackBase.hpp>
namespace RED4ext
{
namespace ink {

// Generated reflection stub for the game's inkStepperChangedCallback type.
// Layout-only: the struct exists so native code can address instances of
// the correct size; the fields past CallbackBase are not yet mapped.
struct StepperChangedCallback : ink::CallbackBase
{
    static constexpr const char* NAME = "inkStepperChangedCallback";
    static constexpr const char* ALIAS = NAME;

    uint8_t unk28[0x38 - 0x28]; // 28 - unmapped bytes up to the full 0x38 size
};
RED4EXT_ASSERT_SIZE(StepperChangedCallback, 0x38);

} // namespace ink
} // namespace RED4ext
|
#include "undoedits.h"
#include "nodeproperties.h"
#include "propertyeditor.h"
#include "zodiacgraph/plug.h"
#include "zodiacgraph/plugedge.h"
#include <QDebug>
// Undo command for edits to a node's name or description text field.
// isDescription selects which field; nameChangeFunc is the NodeCtrl setter
// invoked to persist the text on undo/redo.
TextEditCommand::TextEditCommand(bool isDescription, const QString &oldText, const QString &newText, NodeCtrl* node, void (NodeCtrl::*nameChangeFunc)(const QString &), Collapsible *collapsible,
                 QUndoCommand *parent)
    : QUndoCommand(parent)
    ,m_NewText(newText)
    ,m_OldText(oldText)
    ,m_isDescription(isDescription)
    ,m_pNameChangeFunc(nameChangeFunc)
    ,m_Node(node)
{
    if(collapsible)
    {
        m_Collapsible = collapsible;
        m_PropEdit = collapsible->getParent();
    }
    else
    {
        // Previously these members were left uninitialized when no
        // collapsible was supplied, yet undo()/redo() dereference m_PropEdit
        // unconditionally; make the failure deterministic instead of UB.
        m_Collapsible = nullptr;
        m_PropEdit = nullptr;
    }
}
// Reverts the text edit: restores the old text in the relevant UI field and
// writes it back to the node via the stored setter.
void TextEditCommand::undo()
{
    //used to avoid incorrect memory access when a node is closed, this re-opens it
    m_Node->setSelected(true);
    m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());

    //ensure the correct field is selected
    if(m_isDescription)
        m_TextItem = m_Collapsible->getNodeProperties()->getDescriptionEdit();
    else
    {
        m_TextItem = m_Collapsible->getNodeProperties()->getNameEdit();

        //update name in the property editor if necessary
        m_Collapsible->updateTitle(m_OldText);
    }

    m_TextItem->setText(m_OldText);
    (m_Node->*m_pNameChangeFunc)(m_OldText);
}
// Applies the text edit: stores the new text in the relevant UI field and
// writes it back to the node via the stored setter.
void TextEditCommand::redo()
{
    //used to avoid incorrect memory access when a node is closed, this re-opens it
    m_Node->setSelected(true);
    m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());

    //ensure the correct field is selected
    if(m_isDescription)
        m_TextItem = m_Collapsible->getNodeProperties()->getDescriptionEdit();
    else
    {
        m_TextItem = m_Collapsible->getNodeProperties()->getNameEdit();

        //update name in the property editor if necessary
        // NOTE(review): this passes m_OldText while the field below receives
        // m_NewText - looks inconsistent with undo(); confirm whether the
        // title should be updated to m_NewText here.
        m_Collapsible->updateTitle(m_OldText);
    }

    //store new text in the field (only has an effect on actual redo) and store in node
    m_TextItem->setText(m_NewText);
    (m_Node->*m_pNameChangeFunc)(m_NewText);
}
// Merges successive edits to the same text field into one undo step.
// NOTE(review): the static_cast is unchecked - Qt only offers commands whose
// id() matches, so this assumes id() is overridden consistently; confirm.
bool TextEditCommand::mergeWith(const QUndoCommand *command)
{
    const TextEditCommand *textEditCommand = static_cast<const TextEditCommand *>(command);

    QLineEdit *item = textEditCommand->m_TextItem;

    // Refuse the merge when the other edit targets a different line edit.
    if (m_TextItem != item)
        return false;

    m_NewText = item->text();
    return true;
}
///
// Undo command for edits to a single command parameter field.
// cmdKey/paramKey identify the command instance and the parameter within it;
// paramChangeFunc is the NodeCtrl setter used to persist the value; type
// selects which command block (unlock/fail/unlocked) the parameter lives in.
ParamEditCommand::ParamEditCommand(const QString &newText, const QString &oldText, const QUuid &cmdKey, const QString &paramKey, NodeCtrl* node, void (NodeCtrl::*paramChangeFunc)(const QUuid &, const QString &, const QString &),
                 Collapsible *collapsible, CommandBlockTypes type, QUndoCommand *parent)
    : QUndoCommand(parent)
    ,m_NewText(newText)
    ,m_OldText(oldText)
    ,m_pParamChangeFunc(paramChangeFunc)
    ,m_Node(node)
    ,m_cmdKey(cmdKey)
    ,m_paramKey(paramKey)
    ,m_Collapsible(collapsible)
    ,m_PropEdit(collapsible->getParent())
    ,m_type(type)
{
}
void ParamEditCommand::undo()
{
//used to avoid incorrect memory access when a node is closed, this re-opens it
m_Node->setSelected(true);
m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());
std::vector<std::pair<QLabel*,QLineEdit*>> paramsList;
switch(m_type)
{
case CMD_UNLOCK:
paramsList = m_Collapsible->getNodeProperties()->getOnUnlockCmds()[m_cmdKey]->getParams();
for(std::vector<std::pair<QLabel*,QLineEdit*>>::iterator paramIt = paramsList.begin(); paramIt != paramsList.end(); ++paramIt)
{
//make sure label text matches parameter key
QLabel *label = std::get<0>((*paramIt));
if(label->text() == m_paramKey)
{
m_TextItem = std::get<1>((*paramIt));
break;
}
}
break;
case CMD_FAIL:
paramsList = m_Collapsible->getNodeProperties()->getOnFailCmds()[m_cmdKey]->getParams();
for(std::vector<std::pair<QLabel*,QLineEdit*>>::iterator paramIt = paramsList.begin(); paramIt != paramsList.end(); ++paramIt)
{
//make sure label text matches parameter key
QLabel *label = std::get<0>((*paramIt));
if(label->text() == m_paramKey)
{
m_TextItem = std::get<1>((*paramIt));
break;
}
}
break;
case CMD_UNLOCKED:
paramsList = m_Collapsible->getNodeProperties()->getOnUnlockedCmds()[m_cmdKey]->getParams();
for(std::vector<std::pair<QLabel*,QLineEdit*>>::iterator paramIt = paramsList.begin(); paramIt != paramsList.end(); ++paramIt)
{
//make sure label text matches parameter key
QLabel *label = std::get<0>((*paramIt));
if(label->text() == m_paramKey)
{
m_TextItem = std::get<1>((*paramIt));
break;
}
}
break;
}
//revert text in field and use command and parameter keys to store the parameter value in the node using function pointer
m_TextItem->setText(m_OldText);
(m_Node->*m_pParamChangeFunc)(m_cmdKey, m_paramKey, m_OldText);
}
void ParamEditCommand::redo()
{
//used to avoid incorrect memory access when a node is closed, this re-opens it
m_Node->setSelected(true);
m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());
std::vector<std::pair<QLabel*,QLineEdit*>> paramsList;
switch(m_type)
{
case CMD_UNLOCK:
paramsList = m_Collapsible->getNodeProperties()->getOnUnlockCmds()[m_cmdKey]->getParams();
for(std::vector<std::pair<QLabel*,QLineEdit*>>::iterator paramIt = paramsList.begin(); paramIt != paramsList.end(); ++paramIt)
{
//make sure label text matches parameter key
QLabel *label = std::get<0>((*paramIt));
if(label->text() == m_paramKey)
{
m_TextItem = std::get<1>((*paramIt));
break;
}
}
break;
case CMD_FAIL:
paramsList = m_Collapsible->getNodeProperties()->getOnFailCmds()[m_cmdKey]->getParams();
for(std::vector<std::pair<QLabel*,QLineEdit*>>::iterator paramIt = paramsList.begin(); paramIt != paramsList.end(); ++paramIt)
{
//make sure label text matches parameter key
QLabel *label = std::get<0>((*paramIt));
if(label->text() == m_paramKey)
{
m_TextItem = std::get<1>((*paramIt));
break;
}
}
break;
case CMD_UNLOCKED:
paramsList = m_Collapsible->getNodeProperties()->getOnUnlockedCmds()[m_cmdKey]->getParams();
for(std::vector<std::pair<QLabel*,QLineEdit*>>::iterator paramIt = paramsList.begin(); paramIt != paramsList.end(); ++paramIt)
{
//make sure label text matches parameter key
QLabel *label = std::get<0>((*paramIt));
if(label->text() == m_paramKey)
{
m_TextItem = std::get<1>((*paramIt));
break;
}
}
break;
}
//store new text in the field (only has an effect on actual redo) and
//use command and parameter keys to store the parameter value in the node using function pointer
m_TextItem->setText(m_NewText);
(m_Node->*m_pParamChangeFunc)(m_cmdKey, m_paramKey, m_NewText);
}
// Merges successive edits to the same parameter field into one undo step.
// NOTE(review): the static_cast is unchecked - Qt only offers commands whose
// id() matches, so this assumes id() is overridden consistently; confirm.
bool ParamEditCommand::mergeWith(const QUndoCommand *command)
{
    const ParamEditCommand *paramEditCommand = static_cast<const ParamEditCommand *>(command);

    QLineEdit *item = paramEditCommand->m_TextItem;

    // Refuse the merge when the other edit targets a different line edit.
    if (m_TextItem != item)
        return false;

    m_NewText = item->text();
    return true;
}
///
// Undo command for changing which command a command row represents.
// Stores the old and new command value/label plus the function pointers
// needed to delete/re-add the command in the node and to rebuild its
// parameter widgets. The old command's parameters are captured here; the
// new command's parameters are captured during undo().
CommandEditCommand::CommandEditCommand(QComboBox *cmdItem, const QString &oldValue, const QString &oldText, const QString &newValue, const QString &newText, NodeCtrl* node, void (NodeCtrl::*cmdDeleteFunc)(const QUuid&),
                 void (NodeCtrl::*CmdAddFunc)(const QUuid&, const QString&, const QString&), void (NodeCtrl::*ParamAddFunc)(const QUuid&, const QString&, const QString&), NodeProperties *nodeProperties, void (NodeProperties::*deleteParams) (CommandRow *cmd),
                 void (NodeProperties::*addParams) (CommandBlockTypes, CommandRow*, const QUuid&),
                 CommandRow *cmd, CommandBlockTypes type, QHash<QUuid, zodiac::NodeCommand> (NodeCtrl::*getCmdTable)(), QUuid uniqueIdentifier, Collapsible *collapsible, QUndoCommand *parent)
    : QUndoCommand(parent)
    //store command key and label value
    ,m_CmdItem(cmdItem)
    ,m_OldValue(oldValue)
    ,m_NewValue(newValue)
    ,m_OldText(oldText)
    ,m_NewText(newText)
    ,m_uniqueIdentifier(uniqueIdentifier)
    ,m_Type(type)
    ,m_Node(node)
    ,m_pCmd(cmd)
    ,m_pNodeProperties(nodeProperties)
    ,m_Collapsible(collapsible)
    ,m_PropEdit(collapsible->getParent())
    ,m_pCmdDeleteFunc(cmdDeleteFunc)
    ,m_pCmdAddFunc(CmdAddFunc)
    ,m_pParamAddFunc(ParamAddFunc)
    ,m_pAddParams(addParams)
    ,m_pDeleteParams(deleteParams)
    ,m_pGetCmdTable(getCmdTable)
{
    //store old parameters, needed for undo but should not be done every redo;
    m_OldParameters = (m_Node->*m_pGetCmdTable)()[m_OldValue].parameters;
}
void CommandEditCommand::undo()
{
//used to avoid incorrect memory access when a node is closed, this re-opens it
m_Node->setSelected(true);
m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());
m_pNodeProperties = m_Collapsible->getNodeProperties();
m_pCmd = m_pNodeProperties->getOnUnlockCmds()[m_uniqueIdentifier];
m_CmdItem = m_pCmd->getCommandBox();
//changing the value of the combo box triggers a new command, set bool to avoid
m_pCmd->SetUndo(true);
//set the new value and save the parameters of the previous command
m_CmdItem->setCurrentText(m_OldText);
m_NewParameters = (m_Node->*m_pGetCmdTable)()[m_uniqueIdentifier].parameters;
//delete the previous command and add the new one
(m_Node->*m_pCmdDeleteFunc)(m_NewValue);
(m_Node->*m_pCmdAddFunc)(m_uniqueIdentifier, m_OldValue, m_OldText);
//delete the old parameter fields from the layout, load the saved ones and add them to the layout
(m_pNodeProperties->*m_pDeleteParams)(m_pCmd);
for (QHash<QString, QString>::iterator i = m_OldParameters.begin(); i != m_OldParameters.end(); ++i)
(m_Node->*m_pParamAddFunc)(m_OldValue, i.key(), i.value());
(m_pNodeProperties->*m_pAddParams)(m_Type, m_pCmd, m_uniqueIdentifier);
//set the name in the command
m_pCmd->SetName(m_OldValue);
}
// Applies the command-type change: installs the new command, its label text
// and its saved parameter fields.
void CommandEditCommand::redo()
{
    //used to avoid incorrect memory access when a node is closed, this re-opens it
    m_Node->setSelected(true);
    m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());
    m_pNodeProperties = m_Collapsible->getNodeProperties();

    //look the row up in the table that matches the block type
    switch (m_Type)
    {
        case CMD_UNLOCK:
            m_pCmd = m_pNodeProperties->getOnUnlockCmds()[m_uniqueIdentifier];
            break;
        case CMD_FAIL:
            m_pCmd = m_pNodeProperties->getOnFailCmds()[m_uniqueIdentifier];
            break;
        case CMD_UNLOCKED:
            m_pCmd = m_pNodeProperties->getOnUnlockedCmds()[m_uniqueIdentifier];
            break;
    }

    m_CmdItem = m_pCmd->getCommandBox();

    //only update the name if it is different (only on actual redo)
    if(m_NewText != m_CmdItem->currentText())
    {
        m_pCmd->SetUndo(true); //as above, stop triggering command change function
        m_CmdItem->setCurrentText(m_NewText);
    }

    //save old parameters for undo
    m_OldParameters = (m_Node->*m_pGetCmdTable)()[m_uniqueIdentifier].parameters;

    //delete the previous command and add the new one
    (m_Node->*m_pCmdDeleteFunc)(m_OldValue);
    (m_Node->*m_pCmdAddFunc)(m_uniqueIdentifier, m_NewValue, m_NewText);

    //delete the old parameter fields from the layout, load the saved ones and add them to the layout
    (m_pNodeProperties->*m_pDeleteParams)(m_pCmd);

    for (QHash<QString, QString>::iterator i = m_NewParameters.begin(); i != m_NewParameters.end(); ++i)
        (m_Node->*m_pParamAddFunc)(m_NewValue, i.key(), i.value());

    (m_pNodeProperties->*m_pAddParams)(m_Type, m_pCmd, m_uniqueIdentifier);

    //set the name in the command
    m_pCmd->SetName(m_NewValue);
}
bool CommandEditCommand::mergeWith(const QUndoCommand *command)
{
const CommandEditCommand *cmdEditCommand = static_cast<const CommandEditCommand *>(command);
QComboBox *item = cmdEditCommand->m_CmdItem;
if (m_CmdItem != item)
return false;
m_NewValue = item->itemData(item->currentIndex()).toString();
return true;
}
///
// Undo-command that adds a new command row to one of a node's command blocks.
// Stores the grid/table/type plus the member-function pointer used to create
// the row, so redo() can perform the insertion and undo() can remove it.
CommandAddCommand:: CommandAddCommand(QGridLayout *grid, QHash<QUuid, CommandRow*> *commandRow, CommandBlockTypes type, CommandRow* (NodeProperties::*addCommand) (QGridLayout*, QHash<QUuid, CommandRow*>&, CommandBlockTypes, const QUuid&, zodiac::NodeCommand*),
    NodeProperties *nodeProperties, Collapsible *collapsible, NodeCtrl *node, QUndoCommand *parent)
    : QUndoCommand(parent)
    ,m_pGrid(grid)
    ,m_pCommandRow(commandRow)
    ,m_pNodeProperties(nodeProperties)
    ,m_pAddCommand(addCommand)
    ,m_type(type)
    ,m_pCmd(nullptr)
    ,m_Collapsible(collapsible)
    ,m_PropEdit(collapsible->getParent())
    ,m_Node(node)
{
}
// Removes the command row that redo() created, looking it up again by its
// unique id in case the properties panel has been rebuilt since.
void CommandAddCommand::undo()
{
    //used to avoid incorrect memory access when a node is closed, this re-opens it
    m_Node->setSelected(true);
    m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());
    m_pNodeProperties = m_Collapsible->getNodeProperties();
    // locate the row by id in the table matching the block type; contains()
    // guards against the row having been removed already
    switch(m_type)
    {
    case CMD_UNLOCK:
        if(m_pNodeProperties->getOnUnlockCmds().contains(m_uniqueIdentifier))
            m_pCmd = m_pNodeProperties->getOnUnlockCmds()[m_uniqueIdentifier];
        break;
    case CMD_FAIL:
        if(m_pNodeProperties->getOnFailCmds().contains(m_uniqueIdentifier))
            m_pCmd = m_pNodeProperties->getOnFailCmds()[m_uniqueIdentifier];
        break;
    case CMD_UNLOCKED:
        if(m_pNodeProperties->getOnUnlockedCmds().contains(m_uniqueIdentifier))
            m_pCmd = m_pNodeProperties->getOnUnlockedCmds()[m_uniqueIdentifier];
        break;
    }
    if(m_pCmd != nullptr)
    {
        m_pCmd->removeCommand();
        m_pCmd = nullptr; // row is gone; avoid dangling pointer on later redo
    }
}
void CommandAddCommand::redo()
{
//used to avoid incorrect memory access when a node is closed, this re-opens it
m_Node->setSelected(true);
m_Collapsible = m_PropEdit->getCollapsible(m_Node->getNodeHandle());
m_pNodeProperties = m_Collapsible->getNodeProperties();
switch(m_type)
{
case CMD_UNLOCK:
m_pGrid = m_pNodeProperties->getOnUnlockGrid();
break;
case CMD_FAIL:
m_pGrid = m_pNodeProperties->getOnFailGrid();
break;
case CMD_UNLOCKED:
m_pGrid = m_pNodeProperties->getOnUnlockedGrid();
break;
}
m_pCmd = (m_pNodeProperties->*m_pAddCommand)(m_pGrid, *m_pCommandRow, m_type, {00000000-0000-0000-0000-000000000000}, nullptr);
m_uniqueIdentifier = m_pCmd->getId();
}
///
// Undo-command that deletes an existing command row. Captures everything
// needed to recreate the row (id, value, text, add/param function pointers)
// so undo() can fully restore it, including its parameters.
CommandDeleteCommand::CommandDeleteCommand(QHash<QUuid, CommandRow*> *commandRow, CommandBlockTypes type, CommandRow* (NodeProperties::*addCommand) (QGridLayout*, QHash<QUuid, CommandRow*>&, CommandBlockTypes, const QUuid&, zodiac::NodeCommand*),
    NodeProperties *nodeProperties, CommandRow *cmd, const QString &value, const QString &text, void (CommandRow::*deleteParams)(), NodeCtrl *node, void (NodeCtrl::*cmdAddFunc) (const QUuid &, const QString&, const QString&),
    void (NodeCtrl::*paramAddFunc) (const QUuid&, const QString&, const QString&), QHash<QUuid, zodiac::NodeCommand> (NodeCtrl::*getCmdTable)(), QUuid uniqueIdentifier, Collapsible *collapsible, QUndoCommand *parent)
    : QUndoCommand(parent)
    ,m_pCommandRow(commandRow)
    ,m_pNodeProperties(nodeProperties)
    ,m_pAddCommand(addCommand)
    ,m_type(type)
    ,m_pCmd(cmd)
    ,m_CommandValue(value)
    ,m_CommandText(text)
    ,m_uniqueIdentifier(uniqueIdentifier)
    ,m_pDeleteParams(deleteParams)
    ,m_pNode(node)
    ,m_pCmdAddFunc(cmdAddFunc)
    ,m_pParamAddFunc(paramAddFunc)
    ,m_pGetCmdTable(getCmdTable)
    ,m_Collapsible(collapsible)
    ,m_PropEdit(collapsible->getParent())
{
}
// Recreates the deleted command row: re-adds the command to the node, restores
// its saved parameters and rebuilds the UI row in the appropriate grid.
void CommandDeleteCommand::undo()
{
    //used to avoid incorrect memory access when a node is closed, this re-opens it
    m_pNode->setSelected(true);
    m_Collapsible = m_PropEdit->getCollapsible(m_pNode->getNodeHandle());
    m_pNodeProperties = m_Collapsible->getNodeProperties();
    //add new command
    (m_pNode->*m_pCmdAddFunc)(m_uniqueIdentifier, m_CommandValue, m_CommandText);
    //delete the old parameter fields from the layout, load the saved ones and add them to the layout
    // NOTE(review): the deleteParams call below is intentionally(?) disabled —
    // confirm stale parameter widgets cannot survive an undo.
    //(m_pCmd->*m_pDeleteParams)();
    for (QHash<QString, QString>::iterator i = m_SavedParameters.begin(); i != m_SavedParameters.end(); ++i)
        (m_pNode->*m_pParamAddFunc)(m_CommandValue, i.key(), i.value());
    // select grid and row table matching the command block type
    switch(m_type)
    {
    case CMD_UNLOCK:
        m_pGrid = m_pNodeProperties->getOnUnlockGrid();
        m_pCommandRow = &m_pNodeProperties->getOnUnlockCmds();
        break;
    case CMD_FAIL:
        m_pGrid = m_pNodeProperties->getOnFailGrid();
        m_pCommandRow = &m_pNodeProperties->getOnFailCmds();
        break;
    case CMD_UNLOCKED:
        m_pGrid = m_pNodeProperties->getOnUnlockedGrid();
        m_pCommandRow = &m_pNodeProperties->getOnUnlockedCmds();
        break;
    }
    m_pCmd = (m_pNodeProperties->*m_pAddCommand)(m_pGrid, *m_pCommandRow, m_type, m_uniqueIdentifier, &(m_pNode->*m_pGetCmdTable)()[m_CommandValue]);
}
// Deletes the command row, saving its parameters first so undo() can rebuild it.
void CommandDeleteCommand::redo()
{
    //used to avoid incorrect memory access when a node is closed, this re-opens it
    m_pNode->setSelected(true);
    m_Collapsible = m_PropEdit->getCollapsible(m_pNode->getNodeHandle());
    m_pNodeProperties = m_Collapsible->getNodeProperties();
    // re-resolve the row pointer by id; the panel may have been rebuilt
    switch(m_type)
    {
    case CMD_UNLOCK:
        m_pCmd = m_pNodeProperties->getOnUnlockCmds()[m_uniqueIdentifier];
        break;
    case CMD_FAIL:
        m_pCmd = m_pNodeProperties->getOnFailCmds()[m_uniqueIdentifier];
        break;
    case CMD_UNLOCKED:
        m_pCmd = m_pNodeProperties->getOnUnlockedCmds()[m_uniqueIdentifier];
        break;
    }
    //save parameters
    m_SavedParameters = (m_pNode->*m_pGetCmdTable)()[m_CommandValue].parameters;
    m_pCmd->removeCommand();
}
///
// Undo-command for removing a link between two plugs. Captures the edge's
// current base color up-front so undo() can recreate the link exactly.
NodeRemoveLink::NodeRemoveLink(zodiac::Plug *outgoingPlug, zodiac::Plug *incomingPlug, PlugRow* row, void (PlugRow::*removePlugConnection)(QPair<QLabel*, QPushButton*> &), QPair<QLabel*, QPushButton*> uiElements, QUndoCommand *parent)
    : QUndoCommand(parent)
    , m_outgoingPlug(outgoingPlug)
    , m_incomingPlug(incomingPlug)
    , m_pRow(row)
    , m_pRemovePlugConnection(removePlugConnection)
    , m_uiElements(uiElements)
    , m_edgeColor(QColor("#cc5d4e")) //default as precaution
{
    // Find the edge joining the two plugs and remember its color for undo.
    const QSet<zodiac::PlugEdge*> outgoingEdges = outgoingPlug->getEdges();
    for (zodiac::PlugEdge *candidate : outgoingEdges)
    {
        if (candidate->getEndPlug() == incomingPlug)
        {
            m_edgeColor = candidate->getBaseColor();
            break;
        }
    }
}
// Re-creates the removed link with its original edge color.
void NodeRemoveLink::undo()
{
    m_outgoingPlug.connectPlug(m_incomingPlug, m_edgeColor);
}
// Severs the link; if a UI row was supplied, also removes its label/button pair.
void NodeRemoveLink::redo()
{
    m_outgoingPlug.disconnectPlug(m_incomingPlug);
    if(m_pRow)
        (m_pRow->*m_pRemovePlugConnection)(m_uiElements);
}
///
// Undo-command for creating a single link between two plugs with a given color.
NodeAddLink::NodeAddLink(zodiac::PlugHandle &outgoingPlug, zodiac::PlugHandle &incomingPlug, QColor color, QUndoCommand *parent)
    : QUndoCommand(parent)
    , m_outgoingPlug(outgoingPlug)
    , m_incomingPlug(incomingPlug)
    , m_edgeColor(color)
{
}
// Removes the link that redo() created.
void NodeAddLink::undo()
{
    m_outgoingPlug.disconnectPlug(m_incomingPlug);
}
// Creates the link with the stored edge color.
void NodeAddLink::redo()
{
    m_outgoingPlug.connectPlug(m_incomingPlug, m_edgeColor);
}
///
// Undo-command that links one outgoing plug to a whole list of incoming plugs.
NodeAddLinks::NodeAddLinks(zodiac::PlugHandle &outgoingPlug, QList<zodiac::PlugHandle> &incomingPlugs, QColor color, QUndoCommand *parent)
    : QUndoCommand(parent)
    , m_outgoingPlug(outgoingPlug)
    , m_incomingPlugs(incomingPlugs)
    , m_edgeColor(color)
{
}
void NodeAddLinks::undo()
{
for(QList<zodiac::PlugHandle>::iterator plugIt = m_incomingPlugs.begin(); plugIt != m_incomingPlugs.end(); ++plugIt)
m_outgoingPlug.disconnectPlug((*plugIt));
}
void NodeAddLinks::redo()
{
for(QList<zodiac::PlugHandle>::iterator plugIt = m_incomingPlugs.begin(); plugIt != m_incomingPlugs.end(); ++plugIt)
m_outgoingPlug.connectPlug((*plugIt), m_edgeColor);
}
|
/*!
@file
Forward declares `boost::hana::less`.
@copyright Louis Dionne 2013-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_FWD_LESS_HPP
#define BOOST_HANA_FWD_LESS_HPP
#include <boost/hana/config.hpp>
#include <boost/hana/core/when.hpp>
#include <boost/hana/detail/nested_than_fwd.hpp>
BOOST_HANA_NAMESPACE_BEGIN
//! Returns a `Logical` representing whether `x` is less than `y`.
//! @ingroup group-Orderable
//!
//!
//! Signature
//! ---------
//! Given a Logical `Bool` and two Orderables `A` and `B` with a common
//! embedding, the signature is
//! @f$ \mathrm{less} : A \times B \to Bool @f$.
//!
//! @param x, y
//! Two objects to compare.
//!
//!
//! Example
//! -------
//! @include example/less.cpp
#ifdef BOOST_HANA_DOXYGEN_INVOKED
constexpr auto less = [](auto&& x, auto&& y) {
return tag-dispatched;
};
#else
// Primary tag-dispatch template; unspecialized uses fall through to the
// when<true>-guarded default defined elsewhere.
template <typename T, typename U, typename = void>
struct less_impl : less_impl<T, U, when<true>> { };
// Function object type for less; nested_than supplies the `.than(x)` helper.
struct less_t : detail::nested_than<less_t> {
    template <typename X, typename Y>
    constexpr auto operator()(X&& x, Y&& y) const;
};
// The global function object users invoke as hana::less(x, y).
BOOST_HANA_INLINE_VARIABLE constexpr less_t less{};
#endif
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_FWD_LESS_HPP
|
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Part of Injectable Generic Camera System
// Copyright(c) 2017, Frans Bouma
// All rights reserved.
// https://github.com/FransBouma/InjectableGenericCameraSystem
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met :
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and / or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "CameraManipulator.h"
#include "GameConstants.h"
#include "InterceptorHelper.h"
#include "Globals.h"
#include "OverlayConsole.h"
using namespace DirectX;
using namespace std;
// Addresses of in-game structures, written by the interception code at
// runtime; C linkage so the assembly hooks can reference them by name.
extern "C" {
    LPBYTE g_cameraStructAddress = nullptr;
    LPBYTE g_cameraCutsceneStructAddress = nullptr;
    LPBYTE g_timestopStructAddress = nullptr;
    LPBYTE g_gamespeedStructAddress = nullptr;
    LPBYTE g_dofStructAddress = nullptr;
    LPBYTE g_todStructAddress = nullptr;
    LPBYTE g_weatherStructAddress = nullptr;
}
namespace IGCS::GameSpecific::CameraManipulator
{
    // Game state cached when the free camera is enabled, restored on disable.
    static float _originalQuaternion[4];
    static float _originalCutsceneQuaternion[4];
    static float _originalCoords[3];
    static float _originalCutsceneCoords[3];
    static float _originalFov;
    static float _originalCutsceneFov;
    static int _originalToD=12;
    static int _originalWeatherA = 0;
    static float _originalWeatherIntensity=1.0f;
// Copies current DoF / time-of-day / weather values from game memory into
// the tool's settings so the UI reflects the live game state. Each section
// is skipped until its struct address has been resolved by the interceptor.
void getSettingsFromGameState()
{
    Settings& currentSettings = Globals::instance().settings();
    if (nullptr != g_dofStructAddress)
    {
        float* apertureInMemory = reinterpret_cast<float*>(g_dofStructAddress + DOF_APERTURE_IN_STRUCT_OFFSET);
        currentSettings.dofAperture = *apertureInMemory;
        float* focusDistanceInMemory = reinterpret_cast<float*>(g_dofStructAddress + DOF_FOCUS_DISTANCE_IN_STRUCT_OFFSET);
        currentSettings.dofDistance = *focusDistanceInMemory;
        float* focalLengthInMemory = reinterpret_cast<float*>(g_dofStructAddress + DOF_FOCAL_LENGTH_IN_STRUCT_OFFSET);
        currentSettings.dofFocalLength = *focalLengthInMemory;
    }
    if (nullptr != g_todStructAddress)
    {
        // time of day is stored as seconds; split into hours and minutes
        int* todInMemory = reinterpret_cast<int*>(g_todStructAddress + TOD_IN_STRUCT_OFFSET);
        int currentTodInSeconds = (*todInMemory);
        currentSettings.todHour = currentTodInSeconds / 3600;
        currentSettings.todMinute = (currentTodInSeconds - (currentSettings.todHour * 3600)) / 60;
    }
    if (nullptr != g_weatherStructAddress)
    {
        int* weatherInMemory = reinterpret_cast<int*>(g_weatherStructAddress + WEATHER_DIRECT_IN_STRUCT_OFFSET);
        currentSettings.weatherA = weatherInMemory[0];
        currentSettings.weatherB = weatherInMemory[2]; // 3 values, 0 and 2 are 2 weathers you can blend. 1 is transition effect
        float* weatherIntensityInMemory = reinterpret_cast<float*>(g_weatherStructAddress + WEATHER_INTENSITY_IN_STRUCT_OFFSET);
        currentSettings.weatherIntensity = *weatherIntensityInMemory;
    }
}
// Writes the tool's settings back into game memory. DoF values are always
// written; time-of-day and weather are only written while the free camera
// is enabled (g_cameraEnabled).
void applySettingsToGameState()
{
    Settings& currentSettings = Globals::instance().settings();
    if (nullptr != g_dofStructAddress)
    {
        float* apertureInMemory = reinterpret_cast<float*>(g_dofStructAddress + DOF_APERTURE_IN_STRUCT_OFFSET);
        *apertureInMemory = currentSettings.dofAperture;
        float* focusDistanceInMemory = reinterpret_cast<float*>(g_dofStructAddress + DOF_FOCUS_DISTANCE_IN_STRUCT_OFFSET);
        *focusDistanceInMemory = currentSettings.dofDistance;
        float* focalLengthInMemory = reinterpret_cast<float*>(g_dofStructAddress + DOF_FOCAL_LENGTH_IN_STRUCT_OFFSET);
        *focalLengthInMemory = currentSettings.dofFocalLength;
    }
    if (g_cameraEnabled)
    {
        if (nullptr != g_todStructAddress)
        {
            // convert hour/minute back to the game's seconds representation
            int todInSeconds = ((currentSettings.todHour % 24) * 3600) + ((currentSettings.todMinute % 60) * 60);
            int* todInMemory = reinterpret_cast<int*>(g_todStructAddress + TOD_IN_STRUCT_OFFSET);
            *todInMemory = todInSeconds;
        }
        writeWeatherValue(currentSettings.weatherA, currentSettings.weatherB, currentSettings.weatherIntensity);
    }
}
// newValue: 1 == time should be frozen, 0 == normal gameplay
// returns true if the game was stopped by this call, false if the game was
// either already stopped or the state didn't change.
bool setTimeStopValue(byte newValue)
{
    if (nullptr == g_timestopStructAddress)
    {
        return false;
    }
    byte* timestopFlag = g_timestopStructAddress + TIMESTOP_IN_STRUCT_OFFSET;
    // only report "stopped by this call" on a 0 -> 1 transition
    const bool stoppedNow = (*timestopFlag == (byte)0) && (newValue == (byte)1);
    *timestopFlag = newValue;
    return stoppedNow;
}
// Resets the FOV to the one it got when we enabled the camera
// Applies to both the gameplay and the cutscene camera, when resolved.
void resetFoV()
{
    float* fovInMemory = nullptr;
    if (isCameraFound())
    {
        fovInMemory = reinterpret_cast<float*>(g_cameraStructAddress + FOV_IN_STRUCT_OFFSET);
        *fovInMemory = _originalFov;
    }
    if (isCutsceneCameraFound())
    {
        fovInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + FOV_CUTSCENE_IN_STRUCT_OFFSET);
        *fovInMemory = _originalCutsceneFov;
    }
}
// changes the FoV with the specified amount
void changeFoV(float amount)
{
    // fov is in focal length, so the higher the number the more zoomed in, hence the negation of the value.
    float* fovInMemory = nullptr;
    if (isCameraFound())
    {
        fovInMemory = reinterpret_cast<float*>(g_cameraStructAddress + FOV_IN_STRUCT_OFFSET);
        *fovInMemory -= amount;
    }
    if (isCutsceneCameraFound())
    {
        fovInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + FOV_CUTSCENE_IN_STRUCT_OFFSET);
        *fovInMemory -= amount;
    }
}
// Reads the gameplay camera position from game memory.
// NOTE(review): unlike other accessors, this does not null-check
// g_cameraStructAddress — callers must ensure the camera was found.
XMFLOAT3 getCurrentCameraCoords()
{
    float* coordsInMemory = reinterpret_cast<float*>(g_cameraStructAddress + COORDS_IN_STRUCT_OFFSET);
    return XMFLOAT3(coordsInMemory[0], coordsInMemory[1], coordsInMemory[2]);
}
// Reads the cutscene camera position from game memory.
// NOTE(review): no null check on g_cameraCutsceneStructAddress — callers
// must ensure the cutscene camera struct was found.
XMFLOAT3 getCurrentCutsceneCameraCoords()
{
    float* coordsInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + COORDS_CUTSCENE_IN_STRUCT_OFFSET);
    return XMFLOAT3(coordsInMemory[0], coordsInMemory[1], coordsInMemory[2]);
}
// newLookQuaternion: newly calculated quaternion of camera view space. Can be used to construct a 4x4 matrix if the game uses a matrix instead of a quaternion
// newCoords are the new coordinates for the camera in worldspace.
// Writes to the gameplay or the cutscene camera struct depending on
// cutsceneCamera; silently returns if the target struct is not resolved yet.
void writeNewCameraValuesToGameData(XMFLOAT3 newCoords, XMVECTOR newLookQuaternion, bool cutsceneCamera)
{
    float* coordsInMemory = nullptr;
    float* quaternionInMemory = nullptr;
    if (cutsceneCamera)
    {
        if (nullptr == g_cameraCutsceneStructAddress)
        {
            return;
        }
        coordsInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + COORDS_CUTSCENE_IN_STRUCT_OFFSET);
        quaternionInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + QUATERNION_CUTSCENE_IN_STRUCT_OFFSET);
    }
    else
    {
        if (nullptr == g_cameraStructAddress)
        {
            return;
        }
        coordsInMemory = reinterpret_cast<float*>(g_cameraStructAddress + COORDS_IN_STRUCT_OFFSET);
        quaternionInMemory = reinterpret_cast<float*>(g_cameraStructAddress + QUATERNION_IN_STRUCT_OFFSET);
    }
    // defensive: both pointers were just assigned, but bail out if null anyway
    if (nullptr==coordsInMemory)
    {
        return;
    }
    if (nullptr == quaternionInMemory)
    {
        return;
    }
    coordsInMemory[0] = newCoords.x;
    coordsInMemory[1] = newCoords.y;
    coordsInMemory[2] = newCoords.z;
    // unpack the quaternion into x/y/z/w floats as the game stores them
    XMFLOAT4 qAsFloat4;
    XMStoreFloat4(&qAsFloat4, newLookQuaternion);
    quaternionInMemory[0] = qAsFloat4.x;
    quaternionInMemory[1] = qAsFloat4.y;
    quaternionInMemory[2] = qAsFloat4.z;
    quaternionInMemory[3] = qAsFloat4.w;
}
// True once the interceptor has resolved the gameplay camera struct address.
bool isCameraFound()
{
    return g_cameraStructAddress != nullptr;
}
// True once the interceptor has resolved the cutscene camera struct address.
bool isCutsceneCameraFound()
{
    return g_cameraCutsceneStructAddress != nullptr;
}
// Logs the resolved camera struct address to the overlay console (debug aid).
void displayCameraStructAddress()
{
    OverlayConsole::instance().logDebug("Camera struct address: %p", (void*)g_cameraStructAddress);
}
// should restore the camera values in the camera structures to the cached values. This assures the free camera is always enabled at the original camera location.
// Restores coords, quaternion and FOV for both cameras, plus time-of-day
// and weather, from the values cached by cacheOriginalValuesBeforeCameraEnable().
void restoreOriginalValuesAfterCameraDisable()
{
    float* coordsInMemory = nullptr;
    float* quaternionInMemory = nullptr;
    float* fovInMemory = nullptr;
    if (isCameraFound())
    {
        coordsInMemory = reinterpret_cast<float*>(g_cameraStructAddress + COORDS_IN_STRUCT_OFFSET);
        memcpy(coordsInMemory, _originalCoords, 3 * sizeof(float));
        quaternionInMemory = reinterpret_cast<float*>(g_cameraStructAddress + QUATERNION_IN_STRUCT_OFFSET);
        memcpy(quaternionInMemory, _originalQuaternion, 4 * sizeof(float));
        fovInMemory = reinterpret_cast<float*>(g_cameraStructAddress + FOV_IN_STRUCT_OFFSET);
        *fovInMemory = _originalFov;
    }
    if (isCutsceneCameraFound())
    {
        coordsInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + COORDS_CUTSCENE_IN_STRUCT_OFFSET);
        memcpy(coordsInMemory, _originalCutsceneCoords, 3 * sizeof(float));
        quaternionInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + QUATERNION_CUTSCENE_IN_STRUCT_OFFSET);
        memcpy(quaternionInMemory, _originalCutsceneQuaternion, 4 * sizeof(float));
        fovInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + FOV_CUTSCENE_IN_STRUCT_OFFSET);
        *fovInMemory = _originalCutsceneFov;
    }
    if (nullptr != g_todStructAddress)
    {
        int* todInMemory = reinterpret_cast<int*>(g_todStructAddress + TOD_IN_STRUCT_OFFSET);
        *todInMemory = _originalToD;
    }
    // restore weathers all with weather A
    writeWeatherValue(_originalWeatherA, _originalWeatherA, _originalWeatherIntensity);
    // in theory we should reset the cutscene camera pointer to null as it could be the next time the user enables it, it might be somewhere else and the interception
    // hasn't ran yet. This is such an edge case that we leave it for now, as otherwise the camera can't be enabled twice when the game is paused.
}
// Caches coords, quaternion and FOV for both cameras, plus time-of-day and
// weather, so restoreOriginalValuesAfterCameraDisable() can put them back.
void cacheOriginalValuesBeforeCameraEnable()
{
    float* coordsInMemory = nullptr;
    float* quaternionInMemory = nullptr;
    float* fovInMemory = nullptr;
    if (isCameraFound())
    {
        coordsInMemory = reinterpret_cast<float*>(g_cameraStructAddress + COORDS_IN_STRUCT_OFFSET);
        memcpy(_originalCoords, coordsInMemory, 3 * sizeof(float));
        quaternionInMemory = reinterpret_cast<float*>(g_cameraStructAddress + QUATERNION_IN_STRUCT_OFFSET);
        memcpy(_originalQuaternion, quaternionInMemory, 4 * sizeof(float));
        fovInMemory = reinterpret_cast<float*>(g_cameraStructAddress + FOV_IN_STRUCT_OFFSET);
        _originalFov = *fovInMemory;
    }
    if (isCutsceneCameraFound())
    {
        coordsInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + COORDS_CUTSCENE_IN_STRUCT_OFFSET);
        memcpy(_originalCutsceneCoords, coordsInMemory, 3 * sizeof(float));
        quaternionInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + QUATERNION_CUTSCENE_IN_STRUCT_OFFSET);
        memcpy(_originalCutsceneQuaternion, quaternionInMemory, 4 * sizeof(float));
        fovInMemory = reinterpret_cast<float*>(g_cameraCutsceneStructAddress + FOV_CUTSCENE_IN_STRUCT_OFFSET);
        _originalCutsceneFov = *fovInMemory;
    }
    if (nullptr != g_todStructAddress)
    {
        int* todInMemory = reinterpret_cast<int*>(g_todStructAddress + TOD_IN_STRUCT_OFFSET);
        _originalToD = *todInMemory;
    }
    if (nullptr != g_weatherStructAddress)
    {
        int* weatherInMemory = reinterpret_cast<int*>(g_weatherStructAddress + WEATHER_DIRECT_IN_STRUCT_OFFSET);
        _originalWeatherA = weatherInMemory[0];
        float* weatherIntensityInMemory = reinterpret_cast<float*>(g_weatherStructAddress + WEATHER_INTENSITY_IN_STRUCT_OFFSET);
        _originalWeatherIntensity = *weatherIntensityInMemory;
    }
}
// Writes the two blendable weather slots ([0] and [2]) plus the transition
// slot ([1]) and the blend intensity. Values are wrapped modulo 5.
void writeWeatherValue(int newWeatherValueA, int newWeatherValueB, float newWeatherIntensity)
{
    if (nullptr != g_weatherStructAddress)
    {
        int* weatherInMemory = reinterpret_cast<int*>(g_weatherStructAddress + WEATHER_DIRECT_IN_STRUCT_OFFSET);
        weatherInMemory[0] = (newWeatherValueA % 5);
        weatherInMemory[2] = (newWeatherValueB % 5);
        // NOTE(review): slot [1] (transition) is written with value B as well —
        // confirm this is intended rather than a copy of the [2] assignment.
        weatherInMemory[1] = (newWeatherValueB % 5); // load the intermediate weather last
        float* weatherIntensityInMemory = reinterpret_cast<float*>(g_weatherStructAddress + WEATHER_INTENSITY_IN_STRUCT_OFFSET);
        *weatherIntensityInMemory = newWeatherIntensity < 0.0f ? 1.0f : newWeatherIntensity;
    }
}
}
|
/*
* Copyright (C) 2005-2019 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef otbNearestPointDisplacementFieldGenerator_hxx
#define otbNearestPointDisplacementFieldGenerator_hxx
#include "otbNearestPointDisplacementFieldGenerator.h"
#include "itkImageRegionIteratorWithIndex.h"
#include "otbMacro.h"
namespace otb
{
/** Main computation method */
template <class TPointSet, class TDisplacementField>
void
NearestPointDisplacementFieldGenerator<TPointSet, TDisplacementField>
::GenerateData(void)
{
DisplacementFieldPointerType outputPtr = this->GetOutput();
PixelType defaultValue(2);
defaultValue.Fill(this->GetDefaultValue());
outputPtr->Allocate();
outputPtr->FillBuffer(defaultValue);
typedef itk::ImageRegionIteratorWithIndex<DisplacementFieldType> IteratorType;
IteratorType it(outputPtr, outputPtr->GetRequestedRegion());
for (it.GoToBegin(); !it.IsAtEnd(); ++it)
{
IndexVectorType indexVector = this->GenerateNearestValidPointsPointSet(it.GetIndex(), 1);
PixelType p(2);
if (indexVector.size() >= 1)
{
p[0] = static_cast<ValueType>(this->GetPointSet()->GetPointData()->GetElement(indexVector[0])[1]);
p[1] = static_cast<ValueType>(this->GetPointSet()->GetPointData()->GetElement(indexVector[0])[2]);
}
else
{
p = defaultValue;
}
it.Set(p);
}
}
/**
* PrintSelf Method
*/
// Standard ITK PrintSelf: no members of this subclass to print, so only
// delegate to the superclass.
template <class TPointSet, class TDisplacementField>
void
NearestPointDisplacementFieldGenerator<TPointSet, TDisplacementField>
::PrintSelf(std::ostream& os, itk::Indent indent) const
{
  Superclass::PrintSelf(os, indent);
}
} // End namespace otb
#endif
|
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/vyigrat-config.h"
#endif
#include <cstddef>
#if defined(HAVE_SYS_SELECT_H)
#include <sys/select.h>
#endif
// Declare memcpy with C linkage so the call below resolves to the libc symbol.
extern "C" void* memcpy(void* a, const void* b, size_t c);
// Thin wrapper around memcpy; the extra call layer keeps the compiler from
// optimizing the call away (see the rationale in the test comments below).
void* memcpy_int(void* a, const void* b, size_t c)
{
    return memcpy(a, b, c);
}
namespace
{
// trigger: Use the memcpy_int wrapper which calls our internal memcpy.
// A direct call to memcpy may be optimized away by the compiler.
// test: Fill an array with a sequence of integers. memcpy to a new empty array.
// Verify that the arrays are equal. Use an odd size to decrease the odds of
// the call being optimized away.
// Fills a T-element array with 0..T-1, copies it through memcpy_int into a
// zero-initialised array and verifies the copy element by element.
template <unsigned int T>
bool sanity_test_memcpy()
{
    unsigned int source[T];
    unsigned int target[T] = {};
    for (unsigned int idx = 0; idx != T; ++idx)
        source[idx] = idx;
    memcpy_int(target, source, sizeof(source));
    unsigned int idx = 0;
    while (idx != T) {
        if (target[idx] != idx)
            return false;
        ++idx;
    }
    return true;
}
#if defined(HAVE_SYS_SELECT_H)
// trigger: Call FD_SET to trigger __fdelt_chk. FORTIFY_SOURCE must be defined
// as >0 and optimizations must be set to at least -O2.
// test: Add a file descriptor to an empty fd_set. Verify that it has been
// correctly added.
bool sanity_test_fdelt()
{
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(0, &fds); // descriptor 0 (stdin); exercises the fortified path
    return FD_ISSET(0, &fds);
}
#endif
} // anon namespace
// Runs all glibc runtime sanity checks; returns true only if every check passes.
bool glibc_sanity_test()
{
#if defined(HAVE_SYS_SELECT_H)
    if (!sanity_test_fdelt())
        return false;
#endif
    // 1025: odd size chosen to reduce the chance the memcpy is optimized away
    return sanity_test_memcpy<1025>();
}
|
//Problem Statement
// We are given that the string "abc" is valid.
//
// From any valid string V, we may split V into two pieces X and Y such that X + Y
// (X concatenated with Y) is equal to V. (X or Y may be empty.)
// Then, X + "abc" + Y is also valid.
//
// If for example S = "abc", then examples of valid strings are: "abc", "aabcbc", "abcabc", "abcabcababcc".
// Examples of invalid strings are: "abccba", "ab", "cababc", "bac".
//
// Return true if and only if the given string S is valid.
#include <bits/stdc++.h>
using namespace std;
// Decides whether S can be produced by repeatedly inserting "abc" into a
// valid string. Uses a string as a stack of partially matched prefixes.
// Characters other than 'a'/'b'/'c' are ignored, matching the original
// switch's fall-through behaviour.
class Solution {
public:
    bool isValid(std::string S) {
        std::string progress; // back() holds the most recent partial match
        for (char ch : S) {
            if (ch == 'a') {
                progress.push_back('a');
            } else if (ch == 'b') {
                // 'b' is only legal directly after an unmatched 'a'
                if (progress.empty() || progress.back() != 'a')
                    return false;
                progress.back() = 'b';
            } else if (ch == 'c') {
                // 'c' completes an "ab" prefix into a full "abc"
                if (progress.empty() || progress.back() != 'b')
                    return false;
                progress.pop_back();
            }
        }
        // valid only if every started "abc" was completed
        return progress.empty();
    }
};
int main(){
string S = "abcabcababcc";
Solution sol;
cout<<sol.isValid(S)<<endl;
return 0;
}
|
//===--- UUID.h - UUID generation -------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This is an interface over the standard OSF uuid library that gives UUIDs
// sane m_value semantics and operators.
//
//===----------------------------------------------------------------------===//
// This source file is part of the polarphp.org open source project
//
// Copyright (c) 2017 - 2019 polarphp software foundation
// Copyright (c) 2017 - 2019 zzu_softboy <zzu_softboy@163.com>
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://polarphp.org/LICENSE.txt for license information
// See https://polarphp.org/CONTRIBUTORS.txt for the list of polarphp project authors
//
// Created by polarboy on 2019/02/13.
#include "polarphp/basic/Uuid.h"
// WIN32 doesn't natively support <uuid/uuid.h>. Instead, we use Win32 APIs.
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <objbase.h>
#include <string>
#include <algorithm>
#else
#include <uuid/uuid.h>
#endif
namespace polar {
// Constructs a random (version 4 style) UUID using the platform generator.
UUID::UUID(FromRandom_t) {
#if defined(_WIN32)
  ::UUID uuid;
  ::CoCreateGuid(&uuid);
  memcpy(Value, &uuid, Size);
#else
  uuid_generate_random(Value);
#endif
}
// Constructs a time-based UUID. On Windows there is no separate time-based
// API here, so CoCreateGuid is used for both FromRandom_t and FromTime_t.
UUID::UUID(FromTime_t) {
#if defined(_WIN32)
  ::UUID uuid;
  ::CoCreateGuid(&uuid);
  memcpy(Value, &uuid, Size);
#else
  uuid_generate_time(Value);
#endif
}
// Default constructor: produces the nil (all-zero) UUID.
UUID::UUID() {
#if defined(_WIN32)
  // Build the nil GUID directly. The previous code initialized `uuid` by
  // reading the still-uninitialized Value buffer — a pointless copy of
  // indeterminate bytes that UuidCreateNil immediately overwrote.
  ::UUID uuid;
  UuidCreateNil(&uuid);
  memcpy(Value, &uuid, Size);
#else
  uuid_clear(Value);
#endif
}
// Parses the canonical textual UUID form; returns None when the string is
// not a valid UUID.
Optional<UUID> UUID::fromString(const char *s) {
#if defined(_WIN32)
  // RPC API takes a non-const unsigned char string, hence the double cast.
  RPC_CSTR t = const_cast<RPC_CSTR>(reinterpret_cast<const unsigned char*>(s));
  ::UUID uuid;
  RPC_STATUS status = UuidFromStringA(t, &uuid);
  if (status == RPC_S_INVALID_STRING_UUID) {
    return None;
  }
  UUID result = UUID();
  memcpy(result.Value, &uuid, Size);
  return result;
#else
  UUID result;
  // uuid_parse returns non-zero on malformed input
  if (uuid_parse(s, result.Value))
    return None;
  return result;
#endif
}
// Renders the UUID as an upper-case canonical string into `out`
// (without a trailing null terminator).
void UUID::toString(llvm::SmallVectorImpl<char> &out) const {
  out.resize(UUID::StringBufferSize);
#if defined(_WIN32)
  ::UUID uuid;
  memcpy(&uuid, Value, Size);
  RPC_CSTR str;
  UuidToStringA(&uuid, &str);
  char* signedStr = reinterpret_cast<char*>(str);
  memcpy(out.data(), signedStr, StringBufferSize);
  // UuidToStringA allocates the string; release it with the matching RPC
  // free to avoid leaking one allocation per call.
  RpcStringFreeA(&str);
  // RPC produces lower-case hex; convert to match uuid_unparse_upper below
  std::transform(std::begin(out), std::end(out), std::begin(out), toupper);
#else
  uuid_unparse_upper(Value, out.data());
#endif
  // Pop off the null terminator.
  assert(out.back() == '\0' && "did not null-terminate?!");
  out.pop_back();
}
// Three-way comparison (<0, 0, >0), delegating to the platform UUID library.
int UUID::compare(UUID y) const {
#if defined(_WIN32)
  RPC_STATUS s; // status out-param required by UuidCompare; value unused here
  ::UUID uuid1;
  memcpy(&uuid1, Value, Size);
  ::UUID uuid2;
  memcpy(&uuid2, y.Value, Size);
  return UuidCompare(&uuid1, &uuid2, &s);
#else
  return uuid_compare(Value, y.Value);
#endif
}
// Streams the UUID's upper-case string form into the given raw_ostream.
raw_ostream &operator<<(raw_ostream &os, UUID uuid)
{
    SmallString<UUID::StringBufferSize> buf;
    uuid.toString(buf);
    os << buf;
    return os;
}
} // polar
|
/**
* @author Moe_Sakiya sakiya@tun.moe
* @date 2018-03-14 13:37:02
*
*/
#include <iostream>
#include <string>
#include <algorithm>
#include <set>
#include <map>
#include <vector>
#include <stack>
#include <queue>
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <cmath>
using namespace std;
// For each test case reads two integers and prints twice the larger value
// plus the smaller one.
int main(void) {
    std::ios::sync_with_stdio(false);
    std::cin.tie(NULL);
    int caseCount = 0;
    std::cin >> caseCount;
    while (caseCount--) {
        int first = 0, second = 0;
        std::cin >> first >> second;
        // a >= b  ->  a*2 + b, otherwise b*2 + a: i.e. 2*max + min
        std::cout << std::max(first, second) * 2 + std::min(first, second) << std::endl;
    }
    return 0;
}
|
/*
License: BSD
https://raw.githubusercontent.com/samiamlabs/dyno/master/LICENCE
*/
#include <ros/ros.h>
#include <rosgraph_msgs/Clock.h>
#include <controller_manager/controller_manager.h>
#include "forklift.h"
// Entry point of the Unity/ROS hardware-interface bridge: runs the control
// loop at the robot's fixed period, publishes a simulated /clock, and sleeps
// away whatever time the cycle did not consume.
int main(int argc, char **argv)
{
    ros::init(argc, argv, "forklift_unity_interface");
    ros::NodeHandle nh;
    // This should be set in launch files
    // as well
    nh.setParam("/use_sim_time", true);
    Forklift robot;
    ROS_WARN_STREAM("period: " << robot.getPeriod().toSec());
    controller_manager::ControllerManager cm(&robot, nh);
    ros::Publisher clock_publisher = nh.advertise<rosgraph_msgs::Clock>("/clock", 1);
    // spinner services ROS callbacks while the loop below runs
    ros::AsyncSpinner spinner(1);
    spinner.start();
    boost::chrono::system_clock::time_point begin = boost::chrono::system_clock::now();
    boost::chrono::system_clock::time_point end = boost::chrono::system_clock::now();
    // simulated clock, advanced by dt each cycle
    ros::Time internal_time(0);
    const ros::Duration dt = robot.getPeriod();
    double elapsed_secs = 0;
    while(ros::ok())
    {
        begin = boost::chrono::system_clock::now();
        // read -> update controllers -> write: standard ros_control cycle
        robot.read();
        cm.update(internal_time, dt);
        robot.write();
        end = boost::chrono::system_clock::now();
        elapsed_secs = boost::chrono::duration_cast<boost::chrono::duration<double> >((end - begin)).count();
        if (dt.toSec() - elapsed_secs < 0.0)
        {
            // cycle overran its period: warn (throttled) instead of sleeping
            ROS_WARN_STREAM_THROTTLE(
                0.1, "Control cycle is taking to much time, elapsed: " << elapsed_secs);
        }
        else
        {
            // ROS_DEBUG_STREAM_THROTTLE(1.0, "Control cycle is, elapsed: " << elapsed_secs);
            usleep((dt.toSec() - elapsed_secs) * 1e6);
        }
        // publish the advanced simulated time for /use_sim_time consumers
        rosgraph_msgs::Clock clock;
        clock.clock = ros::Time(internal_time);
        clock_publisher.publish(clock);
        internal_time += dt;
    }
    spinner.stop();
    return 0;
}
|
/*BEGIN_LEGAL
Intel Open Source License
Copyright (c) 2002-2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
the Intel Corporation nor the names of its contributors may be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
END_LEGAL */
#include "ialarm.H"
#include "alarm_manager.H"
#include "parse_control.H"
#include "control_chain.H"
#include "alarms.H"
#include <iostream>
using namespace CONTROLLER;
// Set of every first instruction pointer observed at thread start; used by
// TraceAddress to special-case alarms placed on a thread's very first IP.
set<ADDRINT> IALARM::_thread_first_ip;
// Per-thread first IP, consumed (zeroed) by CheckFirstIp after it matches.
ADDRINT IALARM::_threads_first_ip_vec[PIN_MAX_THREADS];
// Constructs an alarm that fires after <count> qualifying events on thread
// <tid> (or on any thread when tid == ALL_THREADS).
// @param tid        thread the alarm watches, or ALL_THREADS
// @param count      number of events before the alarm fires
// @param need_ctxt  whether Fire() must receive a full CONTEXT
// @param manager    owning alarm manager that receives Fire()/LateFire()
IALARM::IALARM(UINT32 tid ,UINT64 count,BOOL need_ctxt,
               ALARM_MANAGER* manager){
    _tid = tid;
    _target_count._count = count;
    _need_context = need_ctxt;
    _alarm_manager = manager;
    memset(_thread_count,0,sizeof(_thread_count));
    memset(_armed,0,sizeof(_armed));
    _activate_late_handler = FALSE;
    PIN_InitLock(&_lock);
    // Register the process-wide thread-start callback exactly once.
    // BUG FIX: the guard flag was never set to TRUE, so every IALARM
    // instance registered an additional (redundant) ThreadStart callback.
    static BOOL thread_callback_added = FALSE;
    if (!thread_callback_added) {
        thread_callback_added = TRUE;
        PIN_CALLBACK thread_start = PIN_AddThreadStartFunction(ThreadStart, this);
        // other tools working with the controller might do some initialization in their
        // thread start callback.
        // need to make sure we are been call AFTER all thread start callbacks were called.
        CALLBACK_SetExecutionPriority(thread_start, CALL_ORDER_LAST);
    }
}
// Inserts the fast "If" analysis routine (Count) at <point> of <ins>.
// Count increments the per-thread event counter by <ninst> and returns
// non-zero when the alarm's target count has been reached, which triggers
// the paired "Then" call (Fire).
VOID IALARM::InsertIfCall_Count(IALARM* alarm, INS ins, UINT32 ninst, IPOINT point){
    INS_InsertIfCall(ins, point,
                     AFUNPTR(Count),
                     IARG_FAST_ANALYSIS_CALL,
                     IARG_CALL_ORDER, alarm->GetInstrumentOrder(),
                     IARG_ADDRINT, alarm,
                     IARG_THREAD_ID,
                     IARG_UINT32, ninst,
                     IARG_END);
}
// Inserts the "Then" analysis routine (Fire) at <point> of <ins>.
// The CONTEXT argument is expensive to materialize, so it is only requested
// when the alarm was created with need_ctxt; otherwise a null placeholder
// is passed in its position so Fire's signature stays uniform.
VOID IALARM::InsertThenCall_Fire(IALARM* alarm, INS ins, IPOINT point){
    if (alarm->_need_context){
        INS_InsertThenCall(ins, point,
                           AFUNPTR(Fire),
                           IARG_CALL_ORDER, alarm->GetInstrumentOrder(),
                           IARG_ADDRINT, alarm,
                           IARG_CONTEXT,
                           IARG_INST_PTR,
                           IARG_THREAD_ID,
                           IARG_END);
    }
    else{
        INS_InsertThenCall(ins, point,
                           AFUNPTR(Fire),
                           IARG_CALL_ORDER, alarm->GetInstrumentOrder(),
                           IARG_ADDRINT, alarm,
                           IARG_ADDRINT, static_cast<ADDRINT>(0), // pass a null as context,
                           IARG_INST_PTR,
                           IARG_THREAD_ID,
                           IARG_END);
    }
}
// Insert late file instrumentation
VOID IALARM::Insert_LateInstrumentation(IALARM* alarm, INS ins){
// Check if late handler is set
if (!alarm->_alarm_manager->HasLateHandler())
return;
// Determine ipoint
IPOINT ipoint = IPOINT_AFTER;
if (INS_IsInterrupt(ins) || INS_IsSyscall(ins))
{
// We don't want the region of interest (in tracing)
// to include these instructions. Since they close
// the trace we can't take their next instruction,
// therefore don't deliver the late handler at all.
return;
}
if (INS_IsBranchOrCall(ins))
{
ipoint = IPOINT_TAKEN_BRANCH;
}
// Add if-then analysis routines
INS_InsertIfCall(ins, ipoint,
AFUNPTR(ActivateLate),
IARG_FAST_ANALYSIS_CALL,
IARG_CALL_ORDER, alarm->GetLateInstrumentOrder(),
IARG_ADDRINT, alarm,
IARG_THREAD_ID,
IARG_END);
INS_InsertThenCall(ins, ipoint,
AFUNPTR(LateFire),
IARG_CALL_ORDER, alarm->GetLateInstrumentOrder(),
IARG_ADDRINT, alarm,
IARG_CONST_CONTEXT,
IARG_INST_PTR,
IARG_THREAD_ID,
IARG_END);
}
// Fast "If" routine: accumulates <ninst> events for this thread when the
// alarm is armed and the thread matches, and reports whether the target
// count has been reached. Branch-free on purpose (inlinable by Pin).
ADDRINT PIN_FAST_ANALYSIS_CALL IALARM::Count(IALARM* ialarm,
                                             UINT32 tid,
                                             UINT32 ninst){
    const UINT32 is_armed = ialarm->_armed[tid];
    const UINT32 tid_matches =
        (ialarm->_tid == tid) | (ialarm->_tid == ALL_THREADS);
    // Zero when disarmed or on the wrong thread: the addition below then
    // becomes a no-op without branching.
    const UINT32 counting = is_armed & tid_matches;
    ialarm->_thread_count[tid]._count += ninst*(counting);
    return counting &
        (ialarm->_thread_count[tid]._count >= ialarm->_target_count._count);
}
//we want to generate the context only when we really need it.
//that is way most of the code is in the If instrumentation.
//even if the If instrumentation is be not inlined.
// "Then" routine: delivers the alarm event to the alarm manager once the
// paired Count()/CheckTarget()/CheckFirstIp() routine reported a hit.
// @param ctxt  full register context, or null when need_ctxt was false
VOID IALARM::Fire(IALARM* ialarm, CONTEXT* ctxt, VOID * ip, UINT32 tid){
    // Check if flags was not already modified by another thread
    // in interactive controller
    if (ialarm->_alarm_manager->GetAlarmTypeFromManager() == ALARM_TYPE_INTERACTIVE)
    {
        // NOTE(review): static_cast assumes interactive managers only ever
        // attach ALARM_INTERACTIVE instances — confirm against alarm setup.
        ALARM_INTERACTIVE* interactive_alarm = static_cast<ALARM_INTERACTIVE*>(ialarm);
        if (!interactive_alarm->GetListener()->CheckClearSignal())
            return;
    }
    // Check if we need to activate late handler
    // We should not activate it whenever this is precondition event
    if (ialarm->_alarm_manager->HasLateHandler() && ialarm->_alarm_manager->GetEventType() != EVENT_PRECOND)
        ialarm->_activate_late_handler = TRUE;
    ialarm->_alarm_manager->Fire(ctxt, ip, tid);
}
// Late fire event
// Late "If" routine: fires the late handler only after Fire() armed the
// flag, and only on the alarm's thread (or any thread for ALL_THREADS).
ADDRINT PIN_FAST_ANALYSIS_CALL IALARM::ActivateLate(IALARM* ialarm, UINT32 tid){
    const BOOL tid_matches =
        (ialarm->_tid == tid) | (ialarm->_tid == ALL_THREADS);
    return ialarm->_activate_late_handler & tid_matches;
}
// Late "Then" routine: consumes the late-handler flag under the lock so
// exactly one thread delivers the late event, then forwards it to the
// alarm manager outside the lock.
VOID IALARM::LateFire(IALARM* ialarm, CONTEXT* ctxt, VOID * ip, UINT32 tid) {
    BOOL should_fire = FALSE;
    PIN_GetLock(&ialarm->_lock,0);
    if (ialarm->_activate_late_handler)
    {
        ialarm->_activate_late_handler = FALSE;
        should_fire = TRUE;
    }
    PIN_ReleaseLock(&ialarm->_lock);
    if (should_fire)
    {
        ialarm->_alarm_manager->LateFire(ctxt, ip, tid);
    }
}
// Arms the alarm for every thread.
// Note: memset writes byte value 1, so each UINT32 slot becomes
// 0x01010101 — still "non-zero", which is all the bitwise tests in
// Count()/CheckTarget() require.
VOID IALARM::Arm(){
    PIN_GetLock(&_lock,0);
    memset(_armed,1,sizeof(_armed));
    PIN_ReleaseLock(&_lock);
}
// Disarms the alarm and resets the event counter for a single thread.
// NOTE(review): unlike Disarm()/Arm() this does not take _lock —
// presumably safe because only <tid> itself touches its slots; verify.
VOID IALARM::Disarm(THREADID tid){
    _armed[tid] = 0;
    _thread_count[tid]._count = 0;
}
// Disarms the alarm for all threads and clears all per-thread counters.
VOID IALARM::Disarm(){
    PIN_GetLock(&_lock,0);
    memset(_armed,0,sizeof(_armed));
    memset(_thread_count,0,sizeof(_thread_count));
    PIN_ReleaseLock(&_lock);
}
// Call-order priority for the main (Count/Fire) instrumentation,
// delegated to the owning alarm manager.
UINT32 IALARM::GetInstrumentOrder(){
    return _alarm_manager->GetInsOrder();
}
// Call-order priority for the late (ActivateLate/LateFire) instrumentation,
// delegated to the owning alarm manager.
UINT32 IALARM::GetLateInstrumentOrder(){
    return _alarm_manager->GetLateInsOrder();
}
// Trace-instrumentation callback for address-based alarms.
// For every BBL it instruments three ways of "arriving at" the alarm
// address: (1) a direct branch/call whose target is the address,
// (2) an indirect branch/call (checked at runtime via CheckTarget),
// (3) fall-through into the address from the preceding instruction.
// It also special-cases the rare situation where the alarm address is a
// thread's very first IP (no predecessor instruction exists).
VOID IALARM::TraceAddress(TRACE trace, VOID* v)
{
    IALARM* ialarm = static_cast<IALARM*>(v);
    ADDRINT trace_addr = TRACE_Address(trace);
    UINT32 trace_size = TRACE_Size(trace);
    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl))
    {
        // Check Target
        // Get the last instruction in the BBL
        INS ins = BBL_InsTail(bbl);
        // Handle direct branches or calls
        if ( INS_IsDirectBranchOrCall(ins) )
        {
            // Get the target and compare it to the address we need
            ADDRINT target = INS_DirectBranchOrCallTargetAddress(ins);
            if (target == ialarm->_address)
            {
                InsertIfCall_Count(ialarm, ins, 1, IPOINT_TAKEN_BRANCH);
                InsertThenCall_Fire(ialarm, ins, IPOINT_TAKEN_BRANCH);
                // Add late handler instrumentation if needed
                Insert_LateInstrumentation(ialarm,ins);
            }
        }
        // Handle indirect branches or calls
        else if ( INS_IsIndirectBranchOrCall(ins))
        {
            InsertIfCall_Target(ialarm, ins);
            InsertThenCall_Fire(ialarm, ins, IPOINT_TAKEN_BRANCH);
            // Add late handler instrumentation if needed
            Insert_LateInstrumentation(ialarm,ins);
        }
        // If the address is not inside the trace then no need to check the
        // instructions in the BBL
        // NOTE(review): the range test uses trace_addr+trace_size inclusively
        // (">" not ">="), presumably so that the address immediately after the
        // trace is still caught by the INS_NextAddress fall-through check
        // below — confirm.
        if (ialarm->_address < trace_addr || ialarm->_address > trace_addr+trace_size)
            continue;
        // Handle all other instructions in the BBL which may be before
        // The instruction we are looking for
        // (this inner 'ins' intentionally shadows the BBL-tail 'ins' above)
        for (INS ins = BBL_InsHead(bbl); INS_Valid(ins); ins = INS_Next(ins))
        {
            // Compare the address of instructions itself
            // This is to handle rare scenario of first ip in the thread
            if (INS_Address(ins) == ialarm->_address &&
                _thread_first_ip.find(ialarm->_address) != _thread_first_ip.end())
            {
                IPOINT ipoint = IPOINT_AFTER;
                if (INS_IsBranchOrCall(ins))
                {
                    ipoint = IPOINT_TAKEN_BRANCH;
                }
                InsertIfCall_FirstIp(ialarm, ins, ipoint);
                InsertThenCall_Fire(ialarm, ins, ipoint);
                // Add late handler instrumentation if needed
                Insert_LateInstrumentation(ialarm,ins);
            }
            // Only relevant for instructions will fall through path
            // NOTE(review): this 'return' abandons the remaining BBLs of the
            // trace, not just this BBL — looks intentional for Pin's default
            // (one-exit) traces, but verify a 'break' wasn't meant here.
            if (!INS_HasFallThrough(ins))
                return;
            // Compare the address of the next instruction to check if we
            // encountered the address of the alarm
            if (INS_NextAddress(ins) == ialarm->_address)
            {
                InsertIfCall_Count(ialarm, ins, 1, IPOINT_AFTER);
                InsertThenCall_Fire(ialarm, ins, IPOINT_AFTER);
                // Add late handler instrumentation if needed
                Insert_LateInstrumentation(ialarm,ins);
            }
        }
    }
}
// Instrumentation of indirect branch checking
VOID IALARM::InsertIfCall_Target(IALARM* alarm, INS ins){
INS_InsertIfCall(ins, IPOINT_TAKEN_BRANCH,
AFUNPTR(CheckTarget),
IARG_FAST_ANALYSIS_CALL,
IARG_CALL_ORDER, alarm->GetInstrumentOrder(),
IARG_PTR, alarm,
IARG_THREAD_ID,
IARG_BRANCH_TARGET_ADDR,
IARG_END);
}
// Instrumentation of first ip address checking
VOID IALARM::InsertIfCall_FirstIp(IALARM* alarm, INS ins, IPOINT point){
INS_InsertIfCall(ins, point,
AFUNPTR(CheckFirstIp),
IARG_FAST_ANALYSIS_CALL,
IARG_CALL_ORDER, alarm->GetInstrumentOrder(),
IARG_PTR, alarm,
IARG_THREAD_ID,
IARG_INST_PTR,
IARG_END);
}
// Check if we have reached the target we need
ADDRINT PIN_FAST_ANALYSIS_CALL IALARM::CheckTarget(IALARM* ialarm,
THREADID tid,
ADDRINT branch_target) {
UINT32 armed = ialarm->_armed[tid];
UINT32 correct_tid = (ialarm->_tid == tid) | (ialarm->_tid == ALL_THREADS);
UINT32 should_count = armed & correct_tid & (ialarm->_address == branch_target);
// Increment counter if needed
ialarm->_thread_count[tid]._count += should_count;
return should_count & (ialarm->_thread_count[tid]._count >= ialarm->_target_count._count);
}
// Check if we have reached the target we need
ADDRINT PIN_FAST_ANALYSIS_CALL IALARM::CheckFirstIp(IALARM* ialarm,
THREADID tid,
ADDRINT addr) {
UINT32 armed = ialarm->_armed[tid];
UINT32 correct_tid = (ialarm->_tid == tid) | (ialarm->_tid == ALL_THREADS);
UINT32 should_count = armed & correct_tid & (addr == _threads_first_ip_vec[tid]);
// Reset the vector value of this thread so that next time
// we will not count it and comparison to first ip will not return true.
_threads_first_ip_vec[tid] = 0;
// Increment counter if needed
ialarm->_thread_count[tid]._count += should_count;
return should_count & (ialarm->_thread_count[tid]._count >= ialarm->_target_count._count);
}
// Thread start callback
VOID IALARM::ThreadStart(THREADID tid, CONTEXT *ctxt, INT32 flags, VOID *v){
ADDRINT first_ip = PIN_GetContextReg(ctxt, REG_INST_PTR);
_thread_first_ip.insert(first_ip);
_threads_first_ip_vec[tid] = first_ip;
// this IP might be already instrumented, so we need to reset
// the instrumentations on it's BB.
PIN_RemoveInstrumentationInRange(first_ip, first_ip+15);
}
|
#include "SDLTypingHandler.hpp"
#include <SDL/SDL.h>
#include <string>
using namespace std;
// Processes one SDL event, appending printable ASCII characters to the
// accumulated input string and handling backspace.
void SDLTypingHandler::update(SDL_Event event)
{
    // SDL_EnableUNICODE(-1) queries the current state without changing it.
    // Remember whether unicode translation was already on globally so
    // clear() can restore the previous state; enable it if it was off.
    if(SDL_EnableUNICODE(-1)) unicode_enabled_globally = true;
    else {
        unicode_enabled_globally = false;
        SDL_EnableUNICODE(1);
    }
    if(event.type == SDL_KEYDOWN){
        Uint16 key = event.key.keysym.unicode;
        // Accept space and the printable ASCII range '!'..'~'.
        if(key == (Uint16)' ') inputted_string+=(char)key;
        else if(key>=(Uint16)'!'&& key<=(Uint16)'~') inputted_string+=(char)key;
        // Backspace removes the last accumulated character (if any).
        if(event.key.keysym.sym == SDLK_BACKSPACE&&inputted_string.length() != 0){
            inputted_string.erase(inputted_string.length() - 1);
        }
    }
}
// Returns a copy of the text accumulated so far.
std::string SDLTypingHandler::return_string()
{
    return inputted_string;
}
// Returns the accumulated text as a C string. The pointer stays valid only
// until the next update()/clear() mutates inputted_string.
const char* SDLTypingHandler::return_c_string()
{
    return inputted_string.c_str();
}
// Discards the accumulated text and, if unicode translation was off before
// update() enabled it, turns it back off.
void SDLTypingHandler::clear()
{
    if(!unicode_enabled_globally) SDL_EnableUNICODE(0);
    inputted_string.clear();
}
|
#include "opentick.h"
#include "logger.h"
namespace opentrade {
// Adapter that routes the opentick client library's log output into this
// application's logging macros.
struct OpenTickLogger : public opentick::Logger {
  void Info(const std::string& msg) noexcept override { LOG_INFO(msg); }
  void Error(const std::string& msg) noexcept override { LOG_ERROR(msg); }
};
// Parses an opentick connection URL of the form "host[:port][@dbname]"
// (port defaults to 1116, dbname to "opentrade") and opens an
// auto-reconnecting connection. LOG_FATAL aborts on a malformed URL.
void OpenTick::Initialize(const std::string& url) {
  auto strs1 = Split(url, "@");
  if (strs1.size() < 1) {
    LOG_FATAL("Invalid opentick url '" << url << "'");
  }
  std::string dbname = "opentrade";
  if (strs1.size() > 1) dbname = strs1[1];
  auto strs2 = Split(strs1[0], ":");
  if (strs2.size() < 1) {
    LOG_FATAL("Invalid opentick url '" << url << "'");
  }
  auto port = 1116;
  // BUG FIX: "host:port" splits into exactly two parts, so the previous
  // test `strs2.size() > 2` never matched and an explicit port was
  // silently ignored in favor of the default.
  if (strs2.size() > 1) port = atol(strs2[1].c_str());
  auto host = strs2[0];
  conn_ = opentick::Connection::Create(host, port, dbname);
  conn_->SetLogger(std::make_shared<OpenTickLogger>());
  conn_->SetAutoReconnect(3);
  conn_->Start();
}
// Queries OHLCV bars for one security in [start_time, end_time).
// Asynchronous when <callback> is supplied (returns an empty ResultSet and
// delivers data/errors via the callback); otherwise blocks on the future
// and returns the rows directly. Returns empty when not connected.
opentick::ResultSet OpenTick::Request(Security::IdType sec, int interval,
                                      time_t start_time, time_t end_time,
                                      const std::string& tbl,
                                      opentick::Callback callback) {
  if (!conn_ || !conn_->IsConnected()) {
    if (callback) callback({}, "OpenTick not connected");
    return {};
  }
  try {
    auto fut = conn_->ExecuteAsync(
        "select time, open, high, low, close, volume from " + tbl +
            " where sec=? and interval=? and time>=? and time<?",
        opentick::Args{sec, interval, start_time, end_time}, callback);
    if (!callback) return fut->Get();
  } catch (std::exception& e) {
    // Report execution failures through the callback when we have one;
    // the synchronous path falls through to the empty return below.
    if (callback) callback({}, e.what());
  }
  return {};
}
} // namespace opentrade
|
// Copyright Aleksey Gurtovoy 2001-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Id$
// $Date$
// $Revision$
#include <boost/mpl/joint_view.hpp>
#include <boost/mpl/range_c.hpp>
#include <boost/mpl/list.hpp>
#include <boost/mpl/equal.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/aux_/test.hpp>
// Verifies that a joint_view over [0,10) and [10,15) is element-wise equal
// to the single range [0,15) and reports the combined size.
MPL_TEST_CASE()
{
    typedef joint_view<
          range_c<int,0,10>
        , range_c<int,10,15>
        > numbers;
    typedef range_c<int,0,15> answer;
    MPL_ASSERT(( equal<numbers,answer> ));
    MPL_ASSERT_RELATION( size<numbers>::value, ==, 15 );
}
// Compile-time helper asserting that <View> is an empty sequence:
// zero size, begin == end, and all involved types instantiate cleanly.
template< typename View > struct test_is_empty
{
    typedef typename begin<View>::type first_;
    typedef typename end<View>::type last_;
    MPL_ASSERT_RELATION( size<View>::value, ==, 0 );
    MPL_ASSERT(( is_same< first_,last_> ));
    MPL_ASSERT_INSTANTIATION( View );
    MPL_ASSERT_INSTANTIATION( first_ );
    MPL_ASSERT_INSTANTIATION( last_ );
};
// Checks that joining empty sequences (including a nested empty
// joint_view) always yields an empty view.
MPL_TEST_CASE()
{
    test_is_empty< joint_view< list0<>,list0<> > >();
    test_is_empty< joint_view< list<>,list0<> > >();
    test_is_empty< joint_view< list<>,list<> > >();
    test_is_empty< joint_view< list<>, joint_view< list0<>,list0<> > > >();
}
|
// Copyright (c) 2018 The Emircoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <util/threadnames.h>
#include <test/setup_common.h>
#include <thread>
#include <vector>
#include <set>
#include <mutex>
#if defined(HAVE_CONFIG_H)
#include <config/emircoin-config.h>
#endif
#include <boost/test/unit_test.hpp>
BOOST_FIXTURE_TEST_SUITE(util_threadnames_tests, BasicTestingSetup)
// Common prefix for all thread names created by the tests below.
const std::string TEST_THREAD_NAME_BASE = "test_thread.";
/**
* Run a bunch of threads to all call util::ThreadRename.
*
* @return the set of name each thread has after attempted renaming.
*/
/**
 * Run a bunch of threads that each call util::ThreadRename with a unique
 * suffix of TEST_THREAD_NAME_BASE.
 *
 * @return the set of names the threads report after attempted renaming.
 */
std::set<std::string> RenameEnMasse(int num_threads)
{
    std::vector<std::thread> workers;
    std::set<std::string> observed_names;
    std::mutex names_mutex;

    auto rename_and_record = [&](int idx) {
        util::ThreadRename(TEST_THREAD_NAME_BASE + std::to_string(idx));
        // Serialize insertions into the shared result set.
        std::lock_guard<std::mutex> guard(names_mutex);
        observed_names.insert(util::ThreadGetInternalName());
    };

    for (int idx = 0; idx < num_threads; ++idx) {
        workers.emplace_back(rename_and_record, idx);
    }
    for (std::thread& worker : workers) worker.join();
    return observed_names;
}
/**
* Rename a bunch of threads with the same basename (expect_multiple=true), ensuring suffixes are
* applied properly.
*/
// Renames 100 threads concurrently and checks that every expected
// "test_thread.<n>" name was observed exactly once.
BOOST_AUTO_TEST_CASE(util_threadnames_test_rename_threaded)
{
    // The main thread was never renamed, so its internal name is empty.
    BOOST_CHECK_EQUAL(util::ThreadGetInternalName(), "");
#if !defined(HAVE_THREAD_LOCAL)
    // This test doesn't apply to platforms where we don't have thread_local.
    return;
#endif
    std::set<std::string> names = RenameEnMasse(100);
    BOOST_CHECK_EQUAL(names.size(), 100);
    // Names "test_thread.[n]" should exist for n = [0, 99]
    for (int i = 0; i < 100; ++i) {
        BOOST_CHECK(names.find(TEST_THREAD_NAME_BASE + std::to_string(i)) != names.end());
    }
}
BOOST_AUTO_TEST_SUITE_END()
|
/**
* Copyright 2004-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "SamplingProfiler.h"
#include "TimerManager.h"
#include <abort_with_reason.h>
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <setjmp.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <chrono>
#include <random>
#include <string>
#include <fb/log.h>
#include <fbjni/fbjni.h>
#include <profilo/ExternalApi.h>
#include <profilo/profiler/ExternalTracer.h>
#include <profilo/profiler/JavaBaseTracer.h>
#include <profilo/profiler/Retcode.h>
#include <profilo/LogEntry.h>
#include <profilo/TraceProviders.h>
#include <profilo/util/common.h>
using namespace facebook::jni;
namespace facebook {
namespace profilo {
namespace profiler {
// Returns the process-wide profiler singleton (Meyers singleton).
SamplingProfiler& SamplingProfiler::getInstance() {
  //
  // Despite the fact that this is accessed from a signal handler (this routine
  // is not async-signal safe due to the initialization lock for this variable),
  // this is safe. The first access will always be before the first access from
  // a signal context, so the variable is guaranteed to be initialized by then.
  //
  static SamplingProfiler profiler;
  return profiler;
}
// SIGSEGV/SIGBUS handler installed while profiling: if the crash happened
// during stack unwinding in our SIGPROF handler, longjmp back out of the
// unwinder instead of letting the process die; otherwise chain to the
// previously installed fault handler.
void SamplingProfiler::FaultHandler(
    int signum,
    siginfo_t* siginfo,
    void* ucontext) {
  auto scope = SignalHandler::EnterHandler(signum);
  if (!scope.IsEnabled()) {
    scope.CallPreviousHandler(signum, siginfo, ucontext);
    return;
  }
  ProfileState& state = ((SamplingProfiler*)scope.GetData())->state_;
  uint64_t tid = threadID();
  // Slots owned by this thread mid-unwind carry (tid << 16) | BUSY_WITH_METADATA.
  uint64_t targetBusyState = (tid << 16) | StackSlotState::BUSY_WITH_METADATA;
  // Find the most recent slot occupied by this thread.
  // This allows us to handle crashes during nested unwinding from
  // the most inner one out.
  int64_t max_time = -1;
  int max_idx = -1;
  for (int i = 0; i < MAX_STACKS_COUNT; i++) {
    auto& slot = state.stacks[i];
    if (slot.state.load() == targetBusyState && slot.time > max_time) {
      max_time = slot.time;
      max_idx = i;
    }
  }
  if (max_idx >= 0) {
    // Jump back into UnwindStackHandler's sigsetjmp with value 1.
    state.errSigCrashes.fetch_add(1);
    scope.siglongjmp(state.stacks[max_idx].sig_jmp_buf, 1);
  } else {
    // Fault did not originate in our unwinder; let the original handler run.
    scope.CallPreviousHandler(signum, siginfo, ucontext);
  }
}
// Bumps the filled-slot counter and wakes the logger thread once per
// FLUSH_STACKS_COUNT filled slots.
void SamplingProfiler::maybeSignalReader() {
  // fetch_add returns the previous value, so +1 yields the new count.
  const uint32_t newSlotCount = state_.fullSlotsCounter.fetch_add(1) + 1;
  if (newSlotCount % FLUSH_STACKS_COUNT != 0) {
    return;
  }
  if (sem_post(&state_.slotsCounterSem) != 0) {
    abort(); // Something went wrong
  }
}
// Finds the next FREE slot and atomically sets its state to BUSY, so that
// the acquiring thread can safely write to it, and returns the index via
// <outSlot>. Returns true if a FREE slot was found, false otherwise.
bool getSlotIndex(ProfileState& state_, uint64_t tid, uint32_t& outSlot) {
auto slotIndex = state_.currentSlot.fetch_add(1);
for (int i = 0; i < MAX_STACKS_COUNT; i++) {
auto nextSlotIndex = (slotIndex + i) % MAX_STACKS_COUNT;
auto& slot = state_.stacks[nextSlotIndex];
uint64_t expected = StackSlotState::FREE;
uint64_t targetBusyState = (tid << 16) | StackSlotState::BUSY;
if (slot.state.compare_exchange_strong(expected, targetBusyState)) {
outSlot = nextSlotIndex;
slot.time = monotonicTime();
memset(&slot.sig_jmp_buf, 0, sizeof(slot.sig_jmp_buf));
expected = targetBusyState;
targetBusyState = (tid << 16) | StackSlotState::BUSY_WITH_METADATA;
if (!slot.state.compare_exchange_strong(expected, targetBusyState)) {
abortWithReason(
"Invariant violation - BUSY to BUSY_WITH_METADATA failed");
}
return true;
}
}
// We didn't find an empty slot, so bump our counter
state_.errSlotMisses.fetch_add(1);
return false;
}
// SIGPROF handler: for each currently-enabled tracer, grabs a stack slot,
// collects a stack trace into it (guarded by sigsetjmp so faults during
// unwinding are recoverable via FaultHandler), and publishes the slot's
// result state for the logger thread to flush.
void SamplingProfiler::UnwindStackHandler(
    int signum,
    siginfo_t* siginfo,
    void* ucontext) {
  auto scope = SignalHandler::EnterHandler(signum);
  if (!scope.IsEnabled()) {
    return;
  }
  SamplingProfiler& profiler = *(SamplingProfiler*)scope.GetData();
  ProfileState& state = profiler.state_;
  uint64_t tid = threadID();
  // State value our slots hold while we own them.
  uint64_t busyState = (tid << 16) | StackSlotState::BUSY_WITH_METADATA;
  for (const auto& tracerEntry : state.tracersMap) {
    auto tracerType = tracerEntry.first;
    if (!(tracerType & state.currentTracers)) {
      continue;
    }
    // The external tracer is frequently disabled, so fail fast here
    // if that is the case
    if (ExternalTracer::isExternalTracer(tracerType)) {
      if (!static_cast<ExternalTracer*>(tracerEntry.second.get())
               ->isEnabled()) {
        continue;
      }
    }
    uint32_t slotIndex;
    bool slot_found = getSlotIndex(state, tid, slotIndex);
    if (!slot_found) {
      // We're out of slots, no tracer is likely to succeed.
      break;
    }
    auto& slot = state.stacks[slotIndex];
    // Can finally occupy the slot
    // sigsetjmp returns 0 on the direct call; a non-zero return means
    // FaultHandler longjmp'ed back here after a crash during unwinding.
    if (sigsetjmp(slot.sig_jmp_buf, 1) == 0) {
      memset(slot.method_names, 0, sizeof(slot.method_names));
      memset(slot.class_descriptors, 0, sizeof(slot.class_descriptors));
      uint8_t ret{StackSlotState::FREE};
      if (JavaBaseTracer::isJavaTracer(tracerType)) {
        ret = reinterpret_cast<JavaBaseTracer*>(tracerEntry.second.get())
                  ->collectJavaStack(
                      (ucontext_t*)ucontext,
                      slot.frames,
                      slot.method_names,
                      slot.class_descriptors,
                      slot.depth,
                      MAX_STACK_DEPTH);
      } else {
        ret = tracerEntry.second->collectStack(
            (ucontext_t*)ucontext, slot.frames, slot.depth, MAX_STACK_DEPTH);
      }
      slot.profilerType = tracerType;
      if (StackCollectionRetcode::STACK_OVERFLOW == ret) {
        state.errStackOverflows.fetch_add(1);
      }
      // Ignore TRACER_DISABLED errors for now and free the slot.
      // TODO T42938550
      if (StackCollectionRetcode::TRACER_DISABLED == ret) {
        if (!slot.state.compare_exchange_strong(
                busyState, StackSlotState::FREE)) {
          abortWithReason(
              "Invariant violation - BUSY_WITH_METADATA to FREE failed");
        }
        continue;
      }
      // Publish the collection retcode in the slot state's low 16 bits.
      auto nextSlotState = (tid << 16) | ret;
      // In case if a Tracer class handles collection on it's own the slot is
      // freed after the signal is processed.
      if (ret == StackCollectionRetcode::IGNORE) {
        nextSlotState = StackSlotState::FREE;
      }
      if (!slot.state.compare_exchange_strong(busyState, nextSlotState)) {
        // Slot was overwritten by another thread.
        // This is an ordering violation, so abort.
        abortWithReason(
            "Invariant violation - BUSY_WITH_METADATA to return code failed");
      }
      if (nextSlotState != StackSlotState::FREE) {
        profiler.maybeSignalReader();
      }
      continue;
    } else {
      // We came from the longjmp in sigcatch_handler.
      // Something must have crashed.
      // Log the error information and bail out
      slot.time = monotonicTime();
      slot.profilerType = tracerType;
      if (!slot.state.compare_exchange_strong(
              busyState,
              (tid << 16) | StackCollectionRetcode::SIGNAL_INTERRUPT)) {
        abortWithReason(
            "Invariant violation - BUSY_WITH_METADATA to SIGNAL_INTERRUPT failed");
      }
      break;
    }
  }
}
// Installs and enables the profiling signal handlers. The fault handlers
// (SIGBUS/SIGSEGV) are enabled before SIGPROF so that the unwinder is
// never running without its crash safety net.
void SamplingProfiler::registerSignalHandlers() {
  //
  // Register a handler for SIGPROF.
  //
  // Also, register a handler for SIGSEGV and SIGBUS, so that we can safely
  // jump away in the case of a crash in our SIGPROF handler.
  //
  signal_handlers_.sigprof =
      &SignalHandler::Initialize(SIGPROF, UnwindStackHandler);
  signal_handlers_.sigsegv = &SignalHandler::Initialize(SIGSEGV, FaultHandler);
  signal_handlers_.sigbus = &SignalHandler::Initialize(SIGBUS, FaultHandler);
  // SetData before Enable so a handler never observes a null data pointer.
  signal_handlers_.sigbus->SetData(this);
  signal_handlers_.sigbus->Enable();
  signal_handlers_.sigsegv->SetData(this);
  signal_handlers_.sigsegv->Enable();
  signal_handlers_.sigprof->SetData(this);
  signal_handlers_.sigprof->Enable();
}
// Disables (but never unregisters) the profiling signal handlers, waiting
// for in-flight handlers to drain; see the case analysis below.
void SamplingProfiler::unregisterSignalHandlers() {
  // There are multiple cases we need to worry about:
  // a) currently executing profiling handlers
  // b) pending profiling signals
  // c) currently executing fault handlers
  // d) pending fault signals
  //
  // Observe that fault handlers return to the profiling handler and
  // are conceptually nested within them.
  // PROF_ENTER
  //   FAULT_ENTER
  //   FAULT_LONGJMP
  // PROF_EXIT
  //
  // By waiting for all profiling handlers to finish (which Disable
  // does internally), we solve a), c), and d) (pending fault signals during a
  // profiling signal means we won't exit the corresponding profiling handler
  // until we've handled the fault).
  //
  // We solve b) by never unregistering our signal handler.
  // Once registered, we will bail out on the HandlerScope::IsEnabled check and
  // all will be well on the normal path.
  //
  signal_handlers_.sigprof->Disable();
  signal_handlers_.sigbus->Disable();
  signal_handlers_.sigsegv->Disable();
}
// Logger-thread routine: walks all stack slots, writes every completed
// collection (or its error retcode) to the trace logger, emits frame-name
// symbolication entries for framework Java frames (deduplicated via
// <loggedFramesSet>), and returns each processed slot to FREE.
void SamplingProfiler::flushStackTraces(
    std::unordered_set<uint64_t>& loggedFramesSet) {
  // NOTE(review): processedCount is accumulated but never read — presumably
  // leftover from removed diagnostics.
  int processedCount = 0;
  auto& logger = *state_.logger;
  for (size_t i = 0; i < MAX_STACKS_COUNT; i++) {
    auto& slot = state_.stacks[i];
    // Low 16 bits: slot/result state. High bits: owning thread id.
    uint64_t slotStateCombo = slot.state.load();
    uint16_t slotState = slotStateCombo & 0xffff;
    if (slotState == StackSlotState::FREE ||
        slotState == StackSlotState::BUSY ||
        slotState == StackSlotState::BUSY_WITH_METADATA) {
      continue;
    }
    // Ignore remains from a previous trace
    if (slot.time > state_.profileStartTime) {
      auto& tracer = state_.tracersMap[slot.profilerType];
      auto tid = slotStateCombo >> 16;
      if (StackCollectionRetcode::SUCCESS == slotState) {
        tracer->flushStack(logger, slot.frames, slot.depth, tid, slot.time);
      } else {
        StackCollectionEntryConverter::logRetcode(
            logger, slotState, tid, slot.time, slot.profilerType);
      }
      if (JavaBaseTracer::isJavaTracer(slot.profilerType)) {
        // (this inner 'i' intentionally shadows the slot index above)
        for (int i = 0; i < slot.depth; i++) {
          // A reset request drops the dedup cache so framework names are
          // re-emitted for the new trace.
          bool expectedResetState = true;
          if (state_.resetFrameworkSymbols.compare_exchange_strong(
                  expectedResetState, false)) {
            loggedFramesSet.clear();
          }
          if (loggedFramesSet.find(slot.frames[i]) == loggedFramesSet.end() &&
              JavaBaseTracer::isFramework(slot.class_descriptors[i])) {
            // Emit a frame-name entry plus its string payload keyed by the
            // entry id.
            StandardEntry entry{};
            entry.tid = tid;
            entry.timestamp = slot.time;
            entry.type = EntryType::JAVA_FRAME_NAME;
            entry.extra = slot.frames[i];
            int32_t id = logger.write(std::move(entry));
            std::string full_name{slot.class_descriptors[i]};
            full_name += slot.method_names[i];
            logger.writeBytes(
                EntryType::STRING_VALUE,
                id,
                (const uint8_t*)full_name.c_str(),
                full_name.length());
          }
          // Mark the frame as "logged" or "visited" so that we don't do a
          // string comparison for it next time, regardless of whether it was
          // a framework frame or not
          loggedFramesSet.insert(slot.frames[i]);
        }
      }
    }
    uint64_t expected = slotStateCombo;
    // Release the slot
    if (!slot.state.compare_exchange_strong(expected, StackSlotState::FREE)) {
      // Slot was re-used in the middle of the processing by another thread.
      // Aborting.
      abort();
    }
    processedCount++;
  }
}
// Writes a TRACE_ANNOTATION entry carrying one profiling error counter.
// Zero counters carry no information, so they are skipped entirely.
void logProfilingErrAnnotation(
    MultiBufferLogger& logger,
    int32_t key,
    uint16_t value) {
  if (value == 0) {
    return;
  }
  StandardEntry entry{};
  entry.id = 0;
  entry.type = EntryType::TRACE_ANNOTATION;
  entry.timestamp = monotonicTime();
  entry.tid = threadID();
  entry.callid = key;
  entry.matchid = 0;
  entry.extra = value;
  logger.write(std::move(entry));
}
/**
* Initializes the profiler. Registers handler for custom defined SIGPROF
* symbol which will collect traces and inits thread/process ids
*/
/**
 * Initializes the profiler. Registers handler for custom defined SIGPROF
 * symbol which will collect traces and inits thread/process ids
 *
 * @param logger            destination for all trace entries
 * @param available_tracers bitmask of tracer types this build supports
 * @param tracers           tracer implementations keyed by type bit
 * @return false when the slot-counter semaphore cannot be created
 */
bool SamplingProfiler::initialize(
    MultiBufferLogger& logger,
    int32_t available_tracers,
    std::unordered_map<int32_t, std::shared_ptr<BaseTracer>> tracers) {
  state_.processId = getpid();
  state_.logger = &logger;
  state_.availableTracers = available_tracers;
  state_.tracersMap = std::move(tracers);
  state_.timerManager.reset();
  // Init semaphore for stacks flush to the Ring Buffer
  int res = sem_init(&state_.slotsCounterSem, 0, 0);
  if (res != 0) {
    FBLOGV("Can not init slotsCounterSem semaphore: %s", strerror(errno));
    errno = 0;
    return false;
  }
  return true;
}
/**
* Called via JNI from CPUProfiler
*
* Must only be called if SamplingProfiler::startProfiling() returns true.
*
* Waits in a loop for semaphore wakeup and then flushes the current profiling
* stacks.
*/
/**
 * Called via JNI from CPUProfiler
 *
 * Must only be called if SamplingProfiler::startProfiling() returns true.
 *
 * Waits in a loop for semaphore wakeup and then flushes the current profiling
 * stacks. Exits when stopProfiling() sets isLoggerLoopDone and posts the
 * semaphore; EINTR wakeups are retried.
 */
void SamplingProfiler::loggerLoop() {
  FBLOGV("Logger thread %d is going into the loop...", threadID());
  int res = 0;
  // Frame dedup cache shared across flushes within this profiling session.
  std::unordered_set<uint64_t> loggedFramesSet{};
  do {
    res = sem_wait(&state_.slotsCounterSem);
    if (res == 0) {
      flushStackTraces(loggedFramesSet);
    }
  } while (!state_.isLoggerLoopDone && (res == 0 || errno == EINTR));
  FBLOGV("Logger thread is shutting down...");
}
// Creates and starts the TimerManager that delivers SIGPROF samples, using
// the rate/interval/mode previously recorded in state_. The whitelist is
// only consulted in wall-clock mode.
bool SamplingProfiler::startProfilingTimers() {
  FBLOGI("Starting profiling timers w/sample rate %d", state_.samplingRateMs);
  state_.timerManager.reset(new TimerManager(
      state_.threadDetectIntervalMs,
      state_.samplingRateMs,
      state_.wallClockModeEnabled,
      state_.wallClockModeEnabled ? state_.whitelist : nullptr));
  state_.timerManager->start();
  return true;
}
// Stops and destroys the sampling TimerManager. Always returns true.
bool SamplingProfiler::stopProfilingTimers() {
  state_.timerManager->stop();
  state_.timerManager.reset();
  return true;
}
// Begins a profiling session: installs signal handlers, records session
// parameters, starts the requested tracers and the sampling timers.
// @return false when none of the requested tracers is available
// @throws std::logic_error if a session is already running
bool SamplingProfiler::startProfiling(
    int requested_tracers,
    int sampling_rate_ms,
    int thread_detect_interval_ms,
    bool wall_clock_mode_enabled) {
  if (state_.isProfiling) {
    throw std::logic_error("startProfiling called while already profiling");
  }
  state_.isProfiling = true;
  FBLOGV("Start profiling");
  registerSignalHandlers();
  state_.profileStartTime = monotonicTime();
  state_.currentTracers = state_.availableTracers & requested_tracers;
  if (state_.currentTracers == 0) {
    // NOTE(review): this early return leaves isProfiling == true and the
    // signal handlers enabled — confirm callers treat false as fatal and
    // never retry without a stopProfiling().
    return false;
  }
  // Clamp overly aggressive thread-detection intervals.
  constexpr auto kMinThreadDetectIntervalMs = 7; // TODO_YM T63620953
  if (thread_detect_interval_ms < kMinThreadDetectIntervalMs) {
    thread_detect_interval_ms = kMinThreadDetectIntervalMs;
  }
  state_.samplingRateMs = sampling_rate_ms;
  state_.wallClockModeEnabled = wall_clock_mode_enabled;
  state_.threadDetectIntervalMs = thread_detect_interval_ms;
  state_.isLoggerLoopDone = false;
  for (const auto& tracerEntry : state_.tracersMap) {
    if (tracerEntry.first & state_.currentTracers) {
      tracerEntry.second->startTracing();
    }
  }
  return startProfilingTimers();
}
/**
* Stop the profiler. Write collected stack traces to profilo
* The value to write will be a 64 bit <method_id, dex_number>.
* Unfortunately, DvmDex or DvmHeader doesn't contain a unique dex number that
* we could reuse. Until this is possibly written custom by redex, we'll use
* checksum for the dex identification which should collide rare.
*/
/**
 * Stop the profiler. Write collected stack traces to profilo
 * The value to write will be a 64 bit <method_id, dex_number>.
 * Unfortunately, DvmDex or DvmHeader doesn't contain a unique dex number that
 * we could reuse. Until this is possibly written custom by redex, we'll use
 * checksum for the dex identification which should collide rare.
 *
 * @throws std::logic_error if no session is running
 */
void SamplingProfiler::stopProfiling() {
  if (!state_.isProfiling) {
    throw std::logic_error("stopProfiling called while not profiling");
  }
  FBLOGV("Stopping profiling");
  if (!stopProfilingTimers()) {
    abort();
  }
  // Wake the logger thread so it can observe the done flag and exit.
  state_.isLoggerLoopDone.store(true);
  int res = sem_post(&state_.slotsCounterSem);
  if (res != 0) {
    FBLOGV("Can not execute sem_post for logger thread");
    errno = 0;
  }
  // Logging errors
  logProfilingErrAnnotation(
      *state_.logger,
      QuickLogConstants::PROF_ERR_SIG_CRASHES,
      state_.errSigCrashes);
  logProfilingErrAnnotation(
      *state_.logger,
      QuickLogConstants::PROF_ERR_SLOT_MISSES,
      state_.errSlotMisses);
  logProfilingErrAnnotation(
      *state_.logger,
      QuickLogConstants::PROF_ERR_STACK_OVERFLOWS,
      state_.errStackOverflows);
  FBLOGV(
      "Stack overflows = %d, Sig crashes = %d, Slot misses = %d",
      state_.errStackOverflows.load(),
      state_.errSigCrashes.load(),
      state_.errSlotMisses.load());
  // Reset session counters for the next run.
  state_.currentSlot = 0;
  state_.errSigCrashes = 0;
  state_.errSlotMisses = 0;
  state_.errStackOverflows = 0;
  for (const auto& tracerEntry : state_.tracersMap) {
    if (tracerEntry.first & state_.currentTracers) {
      tracerEntry.second->stopTracing();
    }
  }
  unregisterSignalHandlers();
  state_.isProfiling = false;
}
// Mark a thread id as eligible for sampling; guarded by the whitelist mutex.
void SamplingProfiler::addToWhitelist(int targetThread) {
  std::lock_guard<std::mutex> guard(state_.whitelist->whitelistedThreadsMtx);
  state_.whitelist->whitelistedThreads.insert(
      static_cast<int32_t>(targetThread));
}
// Remove a thread id from the sampling whitelist; guarded by the whitelist mutex.
void SamplingProfiler::removeFromWhitelist(int targetThread) {
  std::lock_guard<std::mutex> guard(state_.whitelist->whitelistedThreadsMtx);
  state_.whitelist->whitelistedThreads.erase(targetThread);
}
// Ask the logger loop to drop its cached frame symbols on its next pass.
void SamplingProfiler::resetFrameworkNamesSet() {
  // atomic assignment is a seq_cst store, same as .store(true)
  state_.resetFrameworkSymbols = true;
}
} // namespace profiler
} // namespace profilo
} // namespace facebook
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Yiffcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "ui_interface.h"
#include "init.h"
#include "yiffcoinrpc.h"
#include <string>
// Console fallback for the GUI message box: prefixes the message with a
// translated caption for the well-known style codes and echoes it to both
// stdout and stderr. Always reports "not handled interactively" (false).
static bool noui_ThreadSafeMessageBox(const std::string& message, const std::string& caption, unsigned int style)
{
    std::string strCaption;
    // Check for usage of predefined caption
    if (style == CClientUIInterface::MSG_ERROR)
        strCaption += _("Error");
    else if (style == CClientUIInterface::MSG_WARNING)
        strCaption += _("Warning");
    else if (style == CClientUIInterface::MSG_INFORMATION)
        strCaption += _("Information");
    else
        strCaption += caption; // Use supplied caption (can be empty)
    printf("%s: %s\n", strCaption.c_str(), message.c_str());
    fprintf(stderr, "%s: %s\n", strCaption.c_str(), message.c_str());
    return false;
}
// Headless stand-in for the fee-confirmation dialog: unconditionally
// approves the fee (returns true) without prompting.
static bool noui_ThreadSafeAskFee(int64 /*nFeeRequired*/)
{
    return true;
}
// Headless init-progress handler: echo the startup status message to stdout.
static void noui_InitMessage(const std::string &message)
{
    printf("init message: %s\n", message.c_str());
}
// Route the daemon's UI signals to the console (no-GUI) handlers defined in
// this file; called when running without a graphical interface.
void noui_connect()
{
    // Connect yiffcoind signal handlers
    uiInterface.ThreadSafeMessageBox.connect(noui_ThreadSafeMessageBox);
    uiInterface.ThreadSafeAskFee.connect(noui_ThreadSafeAskFee);
    uiInterface.InitMessage.connect(noui_InitMessage);
}
|
#include "GraphReader.h"
#include "Vertex.h"
#include "PreferenceList.h"
#include "Utils.h"
#include <cstdlib>
#include <iostream>
#include <stdexcept>
/// Lexer class defined here
// Construct a lexer over `in`, priming the lookahead with a space so the
// first next_token() call reads from the stream. Fails fast if the stream
// is already unusable.
Lexer::Lexer(std::istream& in)
    : ch_(' '), lineno_(0), in_(in)
{
    if (!in_) {
        throw ReaderException("error reading input stream.");
    }
}
// Pull the next character from the stream, bumping the line counter on
// every newline consumed.
void Lexer::read_character() {
    ch_ = in_.get();
    lineno_ += (ch_ == '\n') ? 1 : 0;
}
// Current line number; starts at 0 and is incremented each time a '\n' is
// consumed by read_character().
int Lexer::line_number() {
    return lineno_;
}
// Produce the next token from the stream. Whitespace and '#' line comments
// are skipped (in any interleaving); keywords are distinguished from plain
// strings after scanning an alphanumeric-plus-'+' lexeme.
Token Lexer::next_token() {
    // Skip interleaved runs of whitespace and comments; a comment extends
    // to the end of its line (or to EOF).
    for (;;) {
        if (isspace(ch_)) {
            read_character();
        } else if (ch_ == '#') {
            while (ch_ != '\n' && ch_ != EOF) {
                read_character();
            }
        } else {
            break;
        }
    }
    if (ch_ == EOF) {
        return TOK_EOF;
    }
    // Single-character punctuation tokens.
    switch (ch_) {
        case ':': read_character(); return TOK_COLON;
        case '@': read_character(); return TOK_AT;
        case ',': read_character(); return TOK_COMMA;
        case ';': read_character(); return TOK_SEMICOLON;
        case '(': read_character(); return TOK_LEFT_BRACE;
        case ')': read_character(); return TOK_RIGHT_BRACE;
        default: break;
    }
    // A directive keyword or a plain string.
    if (isalnum(ch_)) {
        lexeme_.clear();
        do {
            lexeme_.push_back(static_cast<char>(ch_));
            read_character();
        } while (isalnum(ch_) || ch_ == '+');
        if (lexeme_ == "End") return TOK_END;
        if (lexeme_ == "PartitionA") return TOK_PARTITION_A;
        if (lexeme_ == "PartitionB") return TOK_PARTITION_B;
        if (lexeme_ == "PreferenceListsA") return TOK_PREF_LISTS_A;
        if (lexeme_ == "PreferenceListsB") return TOK_PREF_LISTS_B;
        return TOK_STRING;
    }
    // Unrecognized character: expose it via the lexeme and flag an error.
    lexeme_.clear();
    lexeme_.push_back(static_cast<char>(ch_));
    read_character();
    return TOK_ERROR;
}
// Text of the most recently scanned string/keyword/error token.
std::string const& Lexer::get_lexeme() const {
    return lexeme_;
}
/// GraphReader class defined here
// Build a reader over `in` and prime the one-token lookahead.
GraphReader::GraphReader(std::istream& in)
    : lexer_(std::make_unique<Lexer>(in)) {
    consume(); // read first token
}
// Advance the parser's one-token lookahead.
void GraphReader::consume() {
    curtok_ = lexer_->next_token();
}
// Consume the current token if it is `expected`; otherwise abort parsing
// with a descriptive exception carrying line and token information.
void GraphReader::match(Token expected) {
    if (curtok_ == expected) {
        consume(); // skip the token
        return;
    }
    throw ReaderException(error_message("invalid data in file", curtok_, {expected}));
}
// Build a parse-error message of the form
//   "<line>: <prefix>, got: '<tok>', expected one of: {'<t1>', '<t2>', ...}".
// Returns a pointer into a static buffer: the string is only valid until the
// next call, and this function is not thread-safe.
const char* GraphReader::error_message(const char* prefix, Token got, const std::vector<Token>& expected) {
    static char buf[1024];
    size_t pos = 0;
    // Advance the write cursor after a partial snprintf. snprintf returns the
    // length the piece *would* have had, so clamp to the last writable byte;
    // every later call then receives the true remaining capacity.
    // (BUGFIX: the original passed `sizeof buf` as the size of every partial
    // write at `buf + ret`, which could write past the end of `buf`, and let
    // the cursor run out of bounds on truncation.)
    auto advance = [&pos](int written) {
        if (written > 0) {
            pos += static_cast<size_t>(written);
            if (pos >= sizeof buf) {
                pos = sizeof buf - 1; // output truncated
            }
        }
    };
    advance(snprintf(buf + pos, sizeof buf - pos, "%d: %s", lexer_->line_number(), prefix));
    advance(snprintf(buf + pos, sizeof buf - pos, ", got: '%s', expected one of: {", token_to_string(got)));
    for (std::vector<Token>::size_type i = 0; i < expected.size(); ++i) {
        // token description
        advance(snprintf(buf + pos, sizeof buf - pos, "'%s'", token_to_string(expected[i])));
        // separator between entries; closing brace after the last one
        advance(snprintf(buf + pos, sizeof buf - pos, "%s", (i == expected.size() - 1) ? "}" : ", "));
    }
    return buf;
}
/// Partitions are of the format:
/// @Partition (A|B)
///     a, b, c ;
/// @End
/// Each vertex may carry an optional quota spec: "(upper)" or "(lower, upper)".
/// Diagnosed problems set error_occurred but parsing continues, so a single
/// pass reports as many errors as possible.
void GraphReader::read_partition(BipartiteGraph::ContainerType& vmap) {
    // read the vertices in the partition; the list is terminated by ';'
    while (curtok_ != TOK_SEMICOLON) {
        std::string v = lexer_->get_lexeme();
        // duplicate vertex id: report, but keep parsing
        if (vmap.find(v) != vmap.end()) {
            error_occurred = true;
            std::cout << "Line " << lexer_->line_number() << ": Duplicate vertex : " << v << "\n";
        }
        // defaults when no quota spec is given: no lower bound, one partner
        int lower_quota = 0, upper_quota = 1;
        match(TOK_STRING);
        // does this vertex specify quota(s)
        // (upper) or (lower, upper)
        if (curtok_ == TOK_LEFT_BRACE) {
            // eat '('
            match(TOK_LEFT_BRACE);
            // quota must be given as a positive integer or 0
            if (!is_number(lexer_->get_lexeme())) {
                error_occurred = true;
                std::cout << "Line " << lexer_->line_number() << ": Expected number for quota for vertex : " << v << "\n";
            }
            // read the upper quota
            upper_quota = to_integer(lexer_->get_lexeme());
            match(TOK_STRING);
            // a second number means the first one was actually the lower quota
            if (curtok_ == TOK_COMMA) {
                match(TOK_COMMA);
                // the quota read first was the lower quota
                lower_quota = upper_quota;
                // NOTE(review): unlike the single-quota branch above, this path
                // does not set error_occurred on a malformed number — presumably
                // an oversight; confirm before relying on error_occurred here.
                if (!is_number(lexer_->get_lexeme())) {
                    std::cout << "Line " << lexer_->line_number() << ": Expected number for quota for vertex : " << v << "\n";
                }
                upper_quota = to_integer(lexer_->get_lexeme());
                match(TOK_STRING);
            }
            // eat ')'
            match(TOK_RIGHT_BRACE);
        }
        // quotas must be consistent: lower <= upper
        if (lower_quota > upper_quota) {
            error_occurred = true;
            std::cout << "Line " << lexer_->line_number() << ": Lower quota cannot greater than Upper quota for vertex " << v << "\n";
        }
        // add this vertex with the required quotas
        vmap.emplace(v, std::make_shared<Vertex>(v, lower_quota, upper_quota));
        // vertices are comma-separated; anything else before ';' is an error
        if (curtok_ != TOK_SEMICOLON) {
            match(TOK_COMMA);
        }
    }
    // list should be delimited by a semicolon
    match(TOK_SEMICOLON);
    // end of this directive: "@End"
    match(TOK_AT);
    match(TOK_END);
}
/// @PreferenceLists (A|B)
/// preference lists
/// @End
/// preference lists for a vertex are given in this format
///     v: a, b, c ;
/// We read the preference lists for vertices in partition A and look up the
/// partners on the other side in partition B. Diagnosed problems set
/// error_occurred and skip just enough input to keep parsing.
void GraphReader::read_preference_lists(BipartiteGraph::ContainerType& A, BipartiteGraph::ContainerType& B) {
    // read the lists until the closing "@End" directive
    while (curtok_ != TOK_AT) {
        // read the vertex for which the preference list is given
        std::string a = lexer_->get_lexeme();
        // unknown vertex: report, then skip this whole list (up to the next
        // ';') so parsing can resume with the following vertex
        if (A.find(a) == A.end()) {
            error_occurred = true;
            std::cout << "Line " << lexer_->line_number() << ": Vertex not found: " << a << "\n";
            match(TOK_STRING);
            match(TOK_COLON);
            while (curtok_ != TOK_SEMICOLON && curtok_ != TOK_EOF) {
                consume();
            }
            // premature EOF: nothing more to parse
            if (curtok_ == TOK_EOF) return;
            match(TOK_SEMICOLON);
            continue;
        }
        match(TOK_STRING);
        match(TOK_COLON); // skip the colon
        // read and store the preference list
        PreferenceList& pref_list = A[a]->get_preference_list();
        while (curtok_ != TOK_SEMICOLON) {
            std::string b = lexer_->get_lexeme();
            match(TOK_STRING);
            // unknown partner vertex: report and skip just this entry
            if (B.find(b) == B.end()) {
                error_occurred = true;
                std::cout << "Line " << lexer_->line_number() << ": Vertex not found: " << b << "\n";
                if (curtok_ != TOK_SEMICOLON) {
                    match(TOK_COMMA);
                }
                continue;
            }
            // duplicate entry in this preference list (reported, but the
            // entry is still appended below)
            if (pref_list.find(B[b]) != pref_list.cend()) {
                error_occurred = true;
                std::cout << "Line " << lexer_->line_number() << ": Vertex " << b << " is inserted multiple times in " << a << "'s preference list\n";
            }
            pref_list.emplace_back(B[b]);
            // entries are comma-separated
            if (curtok_ != TOK_SEMICOLON) {
                match(TOK_COMMA);
            }
        }
        // preference list should be delimited by a semicolon
        match(TOK_SEMICOLON);
    }
    // directive should be properly terminated with "@End"
    match(TOK_AT);
    match(TOK_END);
}
// Dispatch on the partition directive: PartitionA fills A, PartitionB fills B.
// Any other token is a parse error.
void GraphReader::handle_partition(BipartiteGraph::ContainerType& A, BipartiteGraph::ContainerType& B) {
    switch (curtok_) {
        case TOK_PARTITION_A:
            match(TOK_PARTITION_A);
            read_partition(A);
            break;
        case TOK_PARTITION_B:
            match(TOK_PARTITION_B);
            read_partition(B);
            break;
        default:
            throw ReaderException(error_message("", curtok_, {TOK_PARTITION_A, TOK_PARTITION_B}));
    }
}
// Dispatch on the preference-lists directive; lists for side A are resolved
// against partition B and vice versa.
void GraphReader::handle_preference_lists(BipartiteGraph::ContainerType& A, BipartiteGraph::ContainerType& B) {
    switch (curtok_) {
        case TOK_PREF_LISTS_A:
            match(TOK_PREF_LISTS_A);
            read_preference_lists(A, B);
            break;
        case TOK_PREF_LISTS_B:
            match(TOK_PREF_LISTS_B);
            read_preference_lists(B, A);
            break;
        default:
            throw ReaderException(error_message("", curtok_, {TOK_PREF_LISTS_A, TOK_PREF_LISTS_B}));
    }
}
std::shared_ptr<BipartiteGraph> GraphReader::read_graph() {
BipartiteGraph::ContainerType A, B;
// read the partitions
match(TOK_AT);
Token partition = curtok_;
handle_partition(A, B);
match(TOK_AT);
// shouldn't have partition listed twice
if (curtok_ != partition) {
handle_partition(A, B);
} else {
throw ReaderException(error_message("duplicate partition listing", curtok_,
{(partition == TOK_PARTITION_A) ? TOK_PARTITION_B : TOK_PARTITION_A}));
}
// read the preference lists
match(TOK_AT);
Token pref_lists = curtok_;
handle_preference_lists(A, B);
match(TOK_AT);
// shouldn't have preference lists twice
if (curtok_ != pref_lists) {
handle_preference_lists(A, B);
} else {
throw ReaderException(error_message("duplicate preference listing", curtok_,
{(pref_lists == TOK_PREF_LISTS_A) ? TOK_PREF_LISTS_B : TOK_PREF_LISTS_A}));
}
if (error_occurred) {
return NULL;
}
return std::make_shared<BipartiteGraph>(A, B);
}
|
/*
* Copyright 2019 Xilinx, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/xf_headers.hpp"
#include "xcl2.hpp"
#include "xf_gaussian_diff_config.h"
// Host-side driver for the Gaussian-difference kernel: loads a grayscale
// image, runs the "gaussiandiference" FPGA kernel via OpenCL, and writes the
// result to output_hls.png.
int main(int argc, char** argv) {
    if (argc != 2) {
        fprintf(stderr, "Usage: %s <INPUT IMAGE PATH 1>\n", argv[0]);
        return EXIT_FAILURE;
    }
    cv::Mat out_img, ocv_ref, in_gray, diff;
    // Reading in the image (flag 0 forces single-channel grayscale):
    in_gray = cv::imread(argv[1], 0);
    if (!in_gray.data) {
        // BUGFIX: dropped the stray trailing space after '\n' in the message
        fprintf(stderr, "ERROR: Cannot open image %s\n", argv[1]);
        return EXIT_FAILURE;
    }
    // Create memory for output images (same geometry and depth as the input):
    ocv_ref.create(in_gray.rows, in_gray.cols, in_gray.depth());
    out_img.create(in_gray.rows, in_gray.cols, in_gray.depth());
// Sigma paired with the compile-time Gaussian window size:
#if FILTER_WIDTH == 3
    float sigma = 0.5f;
#endif
#if FILTER_WIDTH == 7
    float sigma = 1.16666f;
#endif
#if FILTER_WIDTH == 5
    float sigma = 0.8333f;
#endif
    // OpenCL section:
    size_t image_in_size_bytes = in_gray.rows * in_gray.cols * sizeof(unsigned char);
    size_t image_out_size_bytes = image_in_size_bytes;
    cl_int err;
    std::cout << "INFO: Running OpenCL section." << std::endl;
    int rows = in_gray.rows;
    int cols = in_gray.cols;
    // Get the device:
    std::vector<cl::Device> devices = xcl::get_xil_devices();
    cl::Device device = devices[0];
    // Context, command queue and device name:
    OCL_CHECK(err, cl::Context context(device, NULL, NULL, NULL, &err));
    OCL_CHECK(err, cl::CommandQueue queue(context, device, CL_QUEUE_PROFILING_ENABLE, &err));
    OCL_CHECK(err, std::string device_name = device.getInfo<CL_DEVICE_NAME>(&err));
    std::cout << "INFO: Device found - " << device_name << std::endl;
    // Load binary (removed the unused `fileBufSize` local):
    std::string binaryFile = xcl::find_binary_file(device_name, "krnl_gaussiandifference");
    cl::Program::Binaries bins = xcl::import_binary_file(binaryFile);
    devices.resize(1);
    OCL_CHECK(err, cl::Program program(context, devices, bins, NULL, &err));
    // Create a kernel:
    OCL_CHECK(err, cl::Kernel kernel(program, "gaussiandiference", &err));
    // Allocate the buffers:
    OCL_CHECK(err, cl::Buffer buffer_inImage(context, CL_MEM_READ_ONLY, image_in_size_bytes, NULL, &err));
    OCL_CHECK(err, cl::Buffer buffer_outImage(context, CL_MEM_WRITE_ONLY, image_out_size_bytes, NULL, &err));
    // Set kernel arguments:
    OCL_CHECK(err, err = kernel.setArg(0, buffer_inImage));
    OCL_CHECK(err, err = kernel.setArg(1, sigma));
    OCL_CHECK(err, err = kernel.setArg(2, buffer_outImage));
    OCL_CHECK(err, err = kernel.setArg(3, rows));
    OCL_CHECK(err, err = kernel.setArg(4, cols));
    // Initialize the buffers:
    cl::Event event;
    OCL_CHECK(err, queue.enqueueWriteBuffer(buffer_inImage, // buffer on the FPGA
                                            CL_TRUE,             // blocking call
                                            0,                   // buffer offset in bytes
                                            image_in_size_bytes, // Size in bytes
                                            in_gray.data,        // Pointer to the data to copy
                                            nullptr, &event));
    // Execute the kernel:
    OCL_CHECK(err, err = queue.enqueueTask(kernel));
    // Copy result from device global memory to host local memory.
    // BUGFIX: now wrapped in OCL_CHECK for consistency with every other
    // OpenCL call above (the return status was silently discarded before).
    OCL_CHECK(err, queue.enqueueReadBuffer(buffer_outImage, // This buffers data will be read
                                           CL_TRUE,         // blocking call
                                           0,               // offset
                                           image_out_size_bytes,
                                           out_img.data, // Data will be stored here
                                           nullptr, &event));
    // Clean up:
    queue.finish();
    // Write the output of kernel:
    cv::imwrite("output_hls.png", out_img);
    std::cout << "Test Passed " << std::endl;
    return 0;
}
|
#include <simpleble_c/simpleble.h>
#include <cstdlib>
// Release a heap allocation handed out by the SimpleBLE C API.
// Mirrors free(): a null handle is a harmless no-op.
void simpleble_free(void* handle) {
    std::free(handle);
}
|
// Copyright (c) 2012-2016 The Bitcoin Core developers
// Copyright (c) 2017 The Sucrecoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <boost/test/unit_test.hpp>
#include "cuckoocache.h"
#include "script/sigcache.h"
#include "test/test_sucrecoin.h"
#include "random.h"
#include <thread>
/** Test Suite for CuckooCache
*
* 1) All tests should have a deterministic result (using insecure rand
* with deterministic seeds)
* 2) Some test methods are templated to allow for easier testing
* against new versions / comparing
* 3) Results should be treated as a regression test, i.e., did the behavior
* change significantly from what was expected. This can be OK, depending on
* the nature of the change, but requires updating the tests to reflect the new
* expected behavior. For example improving the hit rate may cause some tests
* using BOOST_CHECK_CLOSE to fail.
*
*/
// Deterministic RNG shared by the whole suite; each test case re-seeds it
// with FastRandomContext(true) so results are reproducible.
FastRandomContext local_rand_ctx(true);
BOOST_AUTO_TEST_SUITE(cuckoocache_tests);
/** insecure_GetRandHash fills in a uint256 from local_rand_ctx
 */
void insecure_GetRandHash(uint256& t)
{
    // A uint256 is eight 32-bit words; fill each from the deterministic RNG
    // in the same order the original pointer-bumping loop used.
    uint32_t* words = (uint32_t*)t.begin();
    for (int i = 0; i < 8; ++i) {
        words[i] = local_rand_ctx.rand32();
    }
}
/* Test that no values not inserted into the cache are read out of it.
 *
 * There are no repeats in the first 200000 insecure_GetRandHash calls
 */
BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
{
    // Re-seed so the hash sequence below is deterministic for this test.
    local_rand_ctx = FastRandomContext(true);
    CuckooCache::cache<uint256, SignatureCacheHasher> cc{};
    size_t megabytes = 4;
    cc.setup_bytes(megabytes << 20);
    uint256 hash;
    // Insert the first 100k random hashes...
    for (int i = 0; i < 100000; ++i) {
        insecure_GetRandHash(hash);
        cc.insert(hash);
    }
    // ...then probe with the next 100k, none of which were inserted.
    for (int i = 0; i < 100000; ++i) {
        insecure_GetRandHash(hash);
        BOOST_CHECK(!cc.contains(hash, false));
    }
};
/** This helper returns the hit rate when megabytes*load worth of entries are
 * inserted into a megabytes sized cache
 */
template <typename Cache>
double test_cache(size_t megabytes, double load)
{
    // Deterministic seed so the measured hit rate is reproducible.
    local_rand_ctx = FastRandomContext(true);
    std::vector<uint256> hashes;
    Cache set{};
    size_t bytes = megabytes * (1 << 20);
    set.setup_bytes(bytes);
    // Number of insertions needed to reach the requested load factor.
    uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
    hashes.resize(n_insert);
    // Fill each 256-bit hash with eight random 32-bit words.
    for (uint32_t i = 0; i < n_insert; ++i) {
        uint32_t* ptr = (uint32_t*)hashes[i].begin();
        for (uint8_t j = 0; j < 8; ++j)
            *(ptr++) = local_rand_ctx.rand32();
    }
    /** We make a copy of the hashes because future optimizations of the
     * cuckoocache may overwrite the inserted element, so the test is
     * "future proofed".
     */
    std::vector<uint256> hashes_insert_copy = hashes;
    /** Do the insert */
    for (uint256& h : hashes_insert_copy)
        set.insert(h);
    /** Count the hits */
    uint32_t count = 0;
    for (uint256& h : hashes)
        count += set.contains(h, false);
    // Fraction of inserted entries still found in the cache.
    double hit_rate = ((double)count) / ((double)n_insert);
    return hit_rate;
}
/** The normalized hit rate for a given load.
 *
 * The semantics are a little confusing, so please see the below
 * explanation.
 *
 * Examples:
 *
 * 1) at load 0.5, we expect a perfect hit rate, so we multiply by
 * 1.0
 * 2) at load 2.0, we expect to see half the entries, so a perfect hit rate
 * would be 0.5. Therefore, if we see a hit rate of 0.4, 0.4*2.0 = 0.8 is the
 * normalized hit rate.
 *
 * This is basically the right semantics, but has a bit of a glitch depending on
 * how you measure around load 1.0 as after load 1.0 your normalized hit rate
 * becomes effectively perfect, ignoring freshness.
 */
double normalize_hit_rate(double hits, double load)
{
    // Below load 1.0 the raw hit rate is already normalized; above it,
    // scale by the load (equivalent to hits * max(load, 1.0)).
    if (load < 1.0) {
        return hits;
    }
    return hits * load;
}
/** Check the hit rate on loads ranging from 0.1 to 2.0 */
BOOST_AUTO_TEST_CASE(cuckoocache_hit_rate_ok)
{
    /** Arbitrarily selected Hit Rate threshold that happens to work for this test
     * as a lower bound on performance.
     */
    double HitRateThresh = 0.98;
    size_t megabytes = 4;
    // Doubling each step yields loads 0.1, 0.2, 0.4, 0.8, 1.6.
    for (double load = 0.1; load < 2; load *= 2) {
        double hits = test_cache<CuckooCache::cache<uint256, SignatureCacheHasher>>(megabytes, load);
        // The load-normalized hit rate must clear the threshold at every load.
        BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh);
    }
}
/** This helper checks that erased elements are preferentially inserted onto and
 * that the hit rate of "fresher" keys is reasonable*/
template <typename Cache>
void test_cache_erase(size_t megabytes)
{
    double load = 1;
    // Deterministic seed so results are reproducible.
    local_rand_ctx = FastRandomContext(true);
    std::vector<uint256> hashes;
    Cache set{};
    size_t bytes = megabytes * (1 << 20);
    set.setup_bytes(bytes);
    uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
    hashes.resize(n_insert);
    // Fill each 256-bit hash with eight random 32-bit words.
    for (uint32_t i = 0; i < n_insert; ++i) {
        uint32_t* ptr = (uint32_t*)hashes[i].begin();
        for (uint8_t j = 0; j < 8; ++j)
            *(ptr++) = local_rand_ctx.rand32();
    }
    /** We make a copy of the hashes because future optimizations of the
     * cuckoocache may overwrite the inserted element, so the test is
     * "future proofed".
     */
    std::vector<uint256> hashes_insert_copy = hashes;
    /** Insert the first half */
    for (uint32_t i = 0; i < (n_insert / 2); ++i)
        set.insert(hashes_insert_copy[i]);
    /** Erase the first quarter (contains with second arg true marks the entry) */
    for (uint32_t i = 0; i < (n_insert / 4); ++i)
        set.contains(hashes[i], true);
    /** Insert the second half */
    for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
        set.insert(hashes_insert_copy[i]);
    /** elements that we marked erased but that are still there */
    size_t count_erased_but_contained = 0;
    /** elements that we did not erase but are older */
    size_t count_stale = 0;
    /** elements that were most recently inserted */
    size_t count_fresh = 0;
    for (uint32_t i = 0; i < (n_insert / 4); ++i)
        count_erased_but_contained += set.contains(hashes[i], false);
    for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
        count_stale += set.contains(hashes[i], false);
    for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
        count_fresh += set.contains(hashes[i], false);
    // Hit rates per category, normalized by the size of each slice.
    double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
    double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
    double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);
    // Check that our hit_rate_fresh is perfect
    BOOST_CHECK_EQUAL(hit_rate_fresh, 1.0);
    // Check that we have a more than 2x better hit rate on stale elements than
    // erased elements.
    BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);
}
BOOST_AUTO_TEST_CASE(cuckoocache_erase_ok)
{
    // Exercise erase/freshness behavior on a 4 MiB signature cache.
    test_cache_erase<CuckooCache::cache<uint256, SignatureCacheHasher>>(4);
}
// Same scenario as test_cache_erase, but the erase-marking pass runs on
// three concurrent threads; a shared_mutex brackets the phases so inserts
// are published before, and erases observed after, the threaded section.
template <typename Cache>
void test_cache_erase_parallel(size_t megabytes)
{
    double load = 1;
    // Deterministic seed so results are reproducible.
    local_rand_ctx = FastRandomContext(true);
    std::vector<uint256> hashes;
    Cache set{};
    size_t bytes = megabytes * (1 << 20);
    set.setup_bytes(bytes);
    uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
    hashes.resize(n_insert);
    // Fill each 256-bit hash with eight random 32-bit words.
    for (uint32_t i = 0; i < n_insert; ++i) {
        uint32_t* ptr = (uint32_t*)hashes[i].begin();
        for (uint8_t j = 0; j < 8; ++j)
            *(ptr++) = local_rand_ctx.rand32();
    }
    /** We make a copy of the hashes because future optimizations of the
     * cuckoocache may overwrite the inserted element, so the test is
     * "future proofed".
     */
    std::vector<uint256> hashes_insert_copy = hashes;
    boost::shared_mutex mtx;
    {
        /** Grab lock to make sure we release inserts */
        boost::unique_lock<boost::shared_mutex> l(mtx);
        /** Insert the first half */
        for (uint32_t i = 0; i < (n_insert / 2); ++i)
            set.insert(hashes_insert_copy[i]);
    }
    /** Spin up 3 threads to run contains with erase.
     */
    std::vector<std::thread> threads;
    /** Erase the first quarter */
    for (uint32_t x = 0; x < 3; ++x)
        /** Each thread is emplaced with x copy-by-value
         */
        threads.emplace_back([&, x] {
            // Shared (reader) lock: the three erase-marking threads may run
            // concurrently with each other, each over its own slice.
            boost::shared_lock<boost::shared_mutex> l(mtx);
            size_t ntodo = (n_insert/4)/3;
            size_t start = ntodo*x;
            size_t end = ntodo*(x+1);
            for (uint32_t i = start; i < end; ++i)
                set.contains(hashes[i], true);
        });
    /** Wait for all threads to finish
     */
    for (std::thread& t : threads)
        t.join();
    /** Grab lock to make sure we observe erases */
    boost::unique_lock<boost::shared_mutex> l(mtx);
    /** Insert the second half */
    for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
        set.insert(hashes_insert_copy[i]);
    /** elements that we marked erased but that are still there */
    size_t count_erased_but_contained = 0;
    /** elements that we did not erase but are older */
    size_t count_stale = 0;
    /** elements that were most recently inserted */
    size_t count_fresh = 0;
    for (uint32_t i = 0; i < (n_insert / 4); ++i)
        count_erased_but_contained += set.contains(hashes[i], false);
    for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
        count_stale += set.contains(hashes[i], false);
    for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
        count_fresh += set.contains(hashes[i], false);
    // Hit rates per category, normalized by the size of each slice.
    double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
    double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
    double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);
    // Check that our hit_rate_fresh is perfect
    BOOST_CHECK_EQUAL(hit_rate_fresh, 1.0);
    // Check that we have a more than 2x better hit rate on stale elements than
    // erased elements.
    BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);
}
BOOST_AUTO_TEST_CASE(cuckoocache_erase_parallel_ok)
{
    // Threaded variant of the erase/freshness regression, 4 MiB cache.
    test_cache_erase_parallel<CuckooCache::cache<uint256, SignatureCacheHasher>>(4);
}
template <typename Cache>
void test_cache_generations()
{
    // This test checks that for a simulation of network activity, the fresh hit
    // rate is never below 99%, and the number of times that it is worse than
    // 99.9% are less than 1% of the time.
    double min_hit_rate = 0.99;
    double tight_hit_rate = 0.999;
    double max_rate_less_than_tight_hit_rate = 0.01;
    // A cache that meets this specification is therefore shown to have a hit
    // rate of at least tight_hit_rate * (1 - max_rate_less_than_tight_hit_rate) +
    // min_hit_rate*max_rate_less_than_tight_hit_rate = 0.999*99%+0.99*1% == 99.89%
    // hit rate with low variance.
    // We use deterministic values, but this test has also passed on many
    // iterations with non-deterministic values, so it isn't "overfit" to the
    // specific entropy in FastRandomContext(true) and implementation of the
    // cache.
    local_rand_ctx = FastRandomContext(true);
    // block_activity models a chunk of network activity. n_insert elements are
    // added to the cache. The first and last n/4 are stored for removal later
    // and the middle n/2 are not stored. This models a network which uses half
    // the signatures of recently (since the last block) added transactions
    // immediately and never uses the other half.
    struct block_activity {
        std::vector<uint256> reads;
        block_activity(uint32_t n_insert, Cache& c) : reads()
        {
            std::vector<uint256> inserts;
            inserts.resize(n_insert);
            reads.reserve(n_insert / 2);
            // Random hashes for this simulated block.
            for (uint32_t i = 0; i < n_insert; ++i) {
                uint32_t* ptr = (uint32_t*)inserts[i].begin();
                for (uint8_t j = 0; j < 8; ++j)
                    *(ptr++) = local_rand_ctx.rand32();
            }
            // Remember the first and last quarters for later lookup/erase.
            for (uint32_t i = 0; i < n_insert / 4; ++i)
                reads.push_back(inserts[i]);
            for (uint32_t i = n_insert - (n_insert / 4); i < n_insert; ++i)
                reads.push_back(inserts[i]);
            for (auto h : inserts)
                c.insert(h);
        }
    };
    const uint32_t BLOCK_SIZE = 1000;
    // We expect window size 60 to perform reasonably given that each epoch
    // stores 45% of the cache size (~472k).
    const uint32_t WINDOW_SIZE = 60;
    const uint32_t POP_AMOUNT = (BLOCK_SIZE / WINDOW_SIZE) / 2;
    const double load = 10;
    const size_t megabytes = 4;
    const size_t bytes = megabytes * (1 << 20);
    const uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
    std::vector<block_activity> hashes;
    Cache set{};
    set.setup_bytes(bytes);
    hashes.reserve(n_insert / BLOCK_SIZE);
    std::deque<block_activity> last_few;
    uint32_t out_of_tight_tolerance = 0;
    uint32_t total = n_insert / BLOCK_SIZE;
    // we use the deque last_few to model a sliding window of blocks. at each
    // step, each of the last WINDOW_SIZE block_activities checks the cache for
    // POP_AMOUNT of the hashes that they inserted, and marks these erased.
    for (uint32_t i = 0; i < total; ++i) {
        if (last_few.size() == WINDOW_SIZE)
            last_few.pop_front();
        last_few.emplace_back(BLOCK_SIZE, set);
        uint32_t count = 0;
        for (auto& act : last_few)
            for (uint32_t k = 0; k < POP_AMOUNT; ++k) {
                count += set.contains(act.reads.back(), true);
                act.reads.pop_back();
            }
        // We use last_few.size() rather than WINDOW_SIZE for the correct
        // behavior on the first WINDOW_SIZE iterations where the deque is not
        // full yet.
        double hit = (double(count)) / (last_few.size() * POP_AMOUNT);
        // Loose Check that hit rate is above min_hit_rate
        BOOST_CHECK(hit > min_hit_rate);
        // Tighter check, count number of times we are less than tight_hit_rate
        // (and implicitly, greater than min_hit_rate)
        out_of_tight_tolerance += hit < tight_hit_rate;
    }
    // Check that being out of tolerance happens less than
    // max_rate_less_than_tight_hit_rate of the time
    BOOST_CHECK(double(out_of_tight_tolerance) / double(total) < max_rate_less_than_tight_hit_rate);
}
BOOST_AUTO_TEST_CASE(cuckoocache_generations)
{
    // Sliding-window "generations" simulation over the signature cache.
    test_cache_generations<CuckooCache::cache<uint256, SignatureCacheHasher>>();
}
BOOST_AUTO_TEST_SUITE_END();
|
//
// Copyright (c) 2019 Maxime Pinard
//
// Distributed under the MIT license
// See accompanying file LICENSE or copy at
// https://opensource.org/licenses/MIT
//
#include <sul/dynamic_bitset.hpp>
#include <iostream>
// Demonstration of the sul::dynamic_bitset API: construction, modification,
// queries, iteration, conversions and operators.
int main()
{
    std::cout << std::boolalpha;
    // declare bitset with 12 bits from a value
    // NOTE(review): the literal has 13 significant bits; presumably only the
    // low 12 are kept — confirm against the dynamic_bitset constructor docs
    sul::dynamic_bitset<uint32_t> a(12, 0b0100010110111);
    // remove all bits, resize to 0
    a.clear();
    // add a bit at 1 to the end of the bitset
    a.push_back(true);
    // remove last bit if not empty
    a.pop_back();
    // append a full block to the bitset (including the leftmost leading 0s)
    // in this case append 32 bits
    a.append(314153u);
    // same as above with multiple blocks
    a.append({112u, 5146u, 546u});
    // resize the bitset, keep 12 first bits, discard others
    a.resize(12);
    // set 4 bits starting at position 3 (bits 3..6) to 1
    a.set(3, 4, true);
    // set bit 2 to 0
    a.set(2, false);
    // set bit 1 to 1
    a.set(1);
    // set all bits to 1
    a.set();
    // reset 5 bits starting at position 4 (bits 4..8) to 0
    a.reset(4, 5);
    // reset bit 8 to 0
    a.reset(8);
    // reset all bits to 0
    a.reset();
    // flip 3 bits starting at position 5 (bits 5..7)
    a.flip(5, 3);
    // flip bit 2
    a.flip(2);
    // flip all bits
    a.flip();
    std::cout << "a = " << a << std::endl;
    // test the value of bit 2
    std::cout << "Bit 2 is on? " << a.test(2) << std::endl;
    // test bit 4 and set it to 0
    std::cout << "Bit 4 is on? " << a.test_set(4, false) << " (set it to 0)" << std::endl;
    std::cout << "a = " << a << std::endl;
    // test bit 7 and set it to 1
    std::cout << "Bit 7 is on? " << a.test_set(7) << " (set it to 1)" << std::endl;
    std::cout << "a = " << a << std::endl;
    // test if the bitset is empty (size 0)
    std::cout << "Is the bitset empty? " << a.empty() << std::endl;
    // get bitset size
    std::cout << "Bitset size: " << a.size() << std::endl;
    // test if all bits are 1
    std::cout << "All bits are on? " << a.all() << std::endl;
    // test if there is at least one bit at 1
    std::cout << "Any bits are on? " << a.any() << std::endl;
    // test if all bits are 0
    std::cout << "All bits are off? " << a.none() << std::endl;
    // count bits to 1
    std::cout << "Number of bits on: " << a.count() << std::endl;
    // get number of blocks used by the bitset
    std::cout << "Number of blocks used by the bitset: " << a.num_blocks() << std::endl;
    // get bitset capacity
    std::cout << "Bitset capacity: " << a.capacity() << std::endl;
    // find position of the first bit to 1
    const size_t pos = a.find_first();
    std::cout << "First bit on position: " << pos << std::endl;
    // find position of the next bit to 1, searching after `pos`
    std::cout << "Second bit on position: " << a.find_next(pos) << std::endl;
    // conversion to string with . and *
    std::cout << "String representation (. and *): " << a.to_string('.', '*') << std::endl;
    // conversion to string with 0 and 1
    std::cout << "String representation (0 and 1): " << a.to_string() << std::endl;
    // iterate on bits on
    std::cout << "Bits on: ";
    a.iterate_bits_on([](size_t bit_pos) noexcept { std::cout << bit_pos << ' '; });
    std::cout << std::endl;
    // (it is possible to pass parameters and return a 'continue' bool)
    size_t bit_counter = 0;
    std::cout << "3 first bits on: ";
    a.iterate_bits_on(
        [&bit_counter](size_t bit_pos, size_t limit) noexcept {
            std::cout << bit_pos << ' ';
            return ++bit_counter < limit;
        },
        3);
    std::cout << std::endl;
    // reserve 64 bits
    a.reserve(64);
    // requests the removal of unused capacity
    a.shrink_to_fit();
    // get allocator
    const sul::dynamic_bitset<uint32_t>::allocator_type allocator = a.get_allocator();
    // get pointer to the underlying array of blocks
    const uint32_t* data = a.data();
    std::cout << "address of the underlying array of blocks: " << data << std::endl;
    // declare bitset from string
    sul::dynamic_bitset<uint32_t> b("011001010101");
    std::cout << "b = " << b << std::endl;
    // determine if a bitset is a subset of another bitset
    // (if it only contain bits from the other bitset)
    std::cout << "a is a subset of b? " << a.is_subset_of(b) << std::endl;
    // determine if a bitset is a proper subset of another bitset
    // (if it is different and only contain bits from the other bitset)
    std::cout << "a is a proper subset of b? " << a.is_proper_subset_of(b) << std::endl;
    // determine if a bitset intersect another bitset
    // (if they have common bits to 1)
    std::cout << "Does a intersect b? " << a.intersects(b) << std::endl;
    // operators
    std::cout << "~a: " << ~a << std::endl;
    std::cout << "a << 3: " << (a << 3) << std::endl;
    std::cout << "a >> 3: " << (a >> 3) << std::endl;
    std::cout << "a & b: " << (a & b) << std::endl;
    std::cout << "a | b: " << (a | b) << std::endl;
    std::cout << "a ^ b: " << (a ^ b) << std::endl;
    std::cout << "a - b: " << (a - b) << std::endl;
    std::cout << "a == b: " << (a == b) << std::endl;
    std::cout << "a < b: " << (a < b) << std::endl;
    // operator[] access (returns a proxy reference)
    std::cout << "a[3]: " << a[3] << std::endl;
    std::cout << "~a[3]: " << ~a[3] << std::endl;
    a[3] = false;
    a[3].set();
    a[3].reset();
    a[3].flip();
    a[3].assign(true);
    a[3] = a[2];
    a[3] &= a[2];
    a[3] |= a[2];
    a[3] ^= a[2];
    a[3] -= a[2];
    return 0;
}
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2018 The Zumba Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "base58.h"
#include "clientversion.h"
#include "init.h"
#include "main.h"
#include "net.h"
#include "netbase.h"
#include "rpcserver.h"
#include "timedata.h"
#include "txmempool.h"
#include "util.h"
#include "spork.h"
#include "utilstrencodings.h"
#ifdef ENABLE_WALLET
#include "masternode-sync.h"
#include "wallet/wallet.h"
#include "wallet/walletdb.h"
#endif
#include <stdint.h>
#include <boost/assign/list_of.hpp>
#include <boost/algorithm/string.hpp>
#include <univalue.h>
using namespace std;
/**
* @note Do not add or change anything in the information returned by this
* method. `getinfo` exists for backwards-compatibility only. It combines
* information from wildly different sources in the program, which is a mess,
* and is thus planned to be deprecated eventually.
*
* Based on the source of the information, new information should be added to:
* - `getblockchaininfo`,
* - `getnetworkinfo` or
* - `getwalletinfo`
*
* Or alternatively, create a specific query method for the information.
**/
// Legacy catch-all status RPC. Aggregates client, wallet (only when compiled
// with ENABLE_WALLET and a wallet is loaded), chain, network and fee state
// into one JSON object. Takes no parameters; throws runtime_error carrying
// the usage text when misused or when fHelp is set. See the deprecation note
// in the comment preceding this function.
UniValue getinfo(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() != 0)
throw runtime_error(
"getinfo\n"
"Returns an object containing various state info.\n"
"\nResult:\n"
"{\n"
" \"version\": xxxxx, (numeric) the server version\n"
" \"protocolversion\": xxxxx, (numeric) the protocol version\n"
" \"walletversion\": xxxxx, (numeric) the wallet version\n"
" \"balance\": xxxxxxx, (numeric) the total zumba balance of the wallet\n"
" \"privatesend_balance\": xxxxxx, (numeric) the anonymized zumba balance of the wallet\n"
" \"blocks\": xxxxxx, (numeric) the current number of blocks processed in the server\n"
" \"timeoffset\": xxxxx, (numeric) the time offset\n"
" \"connections\": xxxxx, (numeric) the number of connections\n"
" \"proxy\": \"host:port\", (string, optional) the proxy used by the server\n"
" \"difficulty\": xxxxxx, (numeric) the current difficulty\n"
" \"testnet\": true|false, (boolean) if the server is using testnet or not\n"
" \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since GMT epoch) of the oldest pre-generated key in the key pool\n"
" \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated\n"
" \"unlocked_until\": ttt, (numeric) the timestamp in seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is unlocked for transfers, or 0 if the wallet is locked\n"
" \"paytxfee\": x.xxxx, (numeric) the transaction fee set in " + CURRENCY_UNIT + "/kB\n"
" \"relayfee\": x.xxxx, (numeric) minimum relay fee for non-free transactions in " + CURRENCY_UNIT + "/kB\n"
" \"errors\": \"...\" (string) any error messages\n"
"}\n"
"\nExamples:\n"
+ HelpExampleCli("getinfo", "")
+ HelpExampleRpc("getinfo", "")
);
// Hold cs_main (and cs_wallet when a wallet exists) so the snapshot of chain
// and wallet state below is internally consistent.
#ifdef ENABLE_WALLET
LOCK2(cs_main, pwalletMain ? &pwalletMain->cs_wallet : NULL);
#else
LOCK(cs_main);
#endif
// NOTE(review): only the IPv4 proxy setting is queried/reported here.
proxyType proxy;
GetProxy(NET_IPV4, proxy);
UniValue obj(UniValue::VOBJ);
obj.push_back(Pair("version", CLIENT_VERSION));
obj.push_back(Pair("protocolversion", PROTOCOL_VERSION));
#ifdef ENABLE_WALLET
// Wallet fields are emitted only when a wallet is actually loaded.
if (pwalletMain) {
obj.push_back(Pair("walletversion", pwalletMain->GetVersion()));
obj.push_back(Pair("balance", ValueFromAmount(pwalletMain->GetBalance())));
// privatesend_balance is suppressed in lite mode.
if(!fLiteMode)
obj.push_back(Pair("privatesend_balance", ValueFromAmount(pwalletMain->GetAnonymizedBalance())));
}
#endif
obj.push_back(Pair("blocks", (int)chainActive.Height()));
obj.push_back(Pair("timeoffset", GetTimeOffset()));
obj.push_back(Pair("connections", (int)vNodes.size()));
// Empty string when no proxy is configured (field documented as optional).
obj.push_back(Pair("proxy", (proxy.IsValid() ? proxy.proxy.ToStringIPPort() : string())));
obj.push_back(Pair("difficulty", (double)GetDifficulty()));
obj.push_back(Pair("testnet", Params().TestnetToBeDeprecatedFieldRPC()));
#ifdef ENABLE_WALLET
if (pwalletMain) {
obj.push_back(Pair("keypoololdest", pwalletMain->GetOldestKeyPoolTime()));
obj.push_back(Pair("keypoolsize", (int)pwalletMain->GetKeyPoolSize()));
}
// unlocked_until only appears for encrypted wallets.
if (pwalletMain && pwalletMain->IsCrypted())
obj.push_back(Pair("unlocked_until", nWalletUnlockTime));
obj.push_back(Pair("paytxfee", ValueFromAmount(payTxFee.GetFeePerK())));
#endif
obj.push_back(Pair("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK())));
obj.push_back(Pair("errors", GetWarnings("statusbar")));
return obj;
}
// Toggle debug logging categories at runtime without restarting the daemon.
// The single parameter is a comma-separated category list ("0" disables).
UniValue debug(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "debug ( 0|1|addrman|alert|bench|coindb|db|lock|rand|rpc|selectcoins|mempool"
            "|mempoolrej|net|proxy|prune|http|libevent|tor|zmq|"
            "zumba|privatesend|instantsend|masternode|spork|keepass|mnpayments|gobject )\n"
            "Change debug category on the fly. Specify single category or use comma to specify many.\n"
            "\nExamples:\n"
            + HelpExampleCli("debug", "zumba")
            + HelpExampleRpc("debug", "zumba,net")
        );
    std::string strCategories = params[0].get_str();
    // Rewrite the stored -debug arguments exactly as startup parsing would
    // have produced them from the same command line.
    std::vector<std::string>& vCategories = mapMultiArgs["-debug"];
    vCategories.clear();
    boost::split(vCategories, strCategories, boost::is_any_of(","));
    // As with repeated command-line flags, mapArgs keeps only the last value.
    mapArgs["-debug"] = vCategories.back();
    fDebug = mapArgs["-debug"] != "0";
    return "Debug mode: " + (fDebug ? strCategories : "off");
}
// Inspect or drive the masternode sync state machine:
//   status - report the current sync asset and progress flags
//   next   - force a jump to the next sync asset
//   reset  - restart the sync from scratch
UniValue mnsync(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "mnsync [status|next|reset]\n"
            "Returns the sync status, updates to the next step or resets it entirely.\n"
        );
    const std::string strMode = params[0].get_str();
    if (strMode == "status") {
        UniValue obj(UniValue::VOBJ);
        obj.push_back(Pair("AssetID", masternodeSync.GetAssetID()));
        obj.push_back(Pair("AssetName", masternodeSync.GetAssetName()));
        obj.push_back(Pair("Attempt", masternodeSync.GetAttempt()));
        obj.push_back(Pair("IsBlockchainSynced", masternodeSync.IsBlockchainSynced()));
        obj.push_back(Pair("IsMasternodeListSynced", masternodeSync.IsMasternodeListSynced()));
        obj.push_back(Pair("IsWinnersListSynced", masternodeSync.IsWinnersListSynced()));
        obj.push_back(Pair("IsSynced", masternodeSync.IsSynced()));
        obj.push_back(Pair("IsFailed", masternodeSync.IsFailed()));
        return obj;
    } else if (strMode == "next") {
        masternodeSync.SwitchToNextAsset();
        return "sync updated to " + masternodeSync.GetAssetName();
    } else if (strMode == "reset") {
        masternodeSync.Reset();
        return "success";
    }
    // Unrecognized mode string.
    return "failure";
}
#ifdef ENABLE_WALLET
// boost::static_visitor that renders a CTxDestination variant as a JSON
// object of destination-specific fields; used by validateaddress below.
// Wallet lookups are best-effort: pubkey/script details appear only when
// pwalletMain is loaded and knows the key/script.
class DescribeAddressVisitor : public boost::static_visitor<UniValue>
{
public:
// Invalid/empty destination: nothing to describe, return an empty object.
UniValue operator()(const CNoDestination &dest) const { return UniValue(UniValue::VOBJ); }
// Key-hash destination: isscript=false, plus pubkey details when the wallet
// has the full public key.
UniValue operator()(const CKeyID &keyID) const {
UniValue obj(UniValue::VOBJ);
CPubKey vchPubKey;
obj.push_back(Pair("isscript", false));
if (pwalletMain && pwalletMain->GetPubKey(keyID, vchPubKey)) {
obj.push_back(Pair("pubkey", HexStr(vchPubKey)));
obj.push_back(Pair("iscompressed", vchPubKey.IsCompressed()));
}
return obj;
}
// Script-hash destination: isscript=true, plus the decoded script, its
// constituent addresses, and (for multisig) the signature threshold when
// the wallet stores the redeem script.
UniValue operator()(const CScriptID &scriptID) const {
UniValue obj(UniValue::VOBJ);
CScript subscript;
obj.push_back(Pair("isscript", true));
if (pwalletMain && pwalletMain->GetCScript(scriptID, subscript)) {
std::vector<CTxDestination> addresses;
txnouttype whichType;
int nRequired;
ExtractDestinations(subscript, whichType, addresses, nRequired);
obj.push_back(Pair("script", GetTxnOutputType(whichType)));
obj.push_back(Pair("hex", HexStr(subscript.begin(), subscript.end())));
UniValue a(UniValue::VARR);
BOOST_FOREACH(const CTxDestination& addr, addresses)
a.push_back(CBitcoinAddress(addr).ToString());
obj.push_back(Pair("addresses", a));
// sigsrequired only makes sense for multisig scripts.
if (whichType == TX_MULTISIG)
obj.push_back(Pair("sigsrequired", nRequired));
}
return obj;
}
};
#endif
/*
Used for updating/reading spork settings on the network
*/
// Read or update spork settings:
//   spork show          - dump every known spork with its raw value
//   spork active        - dump every known spork with its active flag
//   spork <name> <val>  - sign and broadcast a new value for one spork
UniValue spork(const UniValue& params, bool fHelp)
{
    if (params.size() == 1 && (params[0].get_str() == "show" || params[0].get_str() == "active")) {
        // Both listing modes walk the same ID range; they differ only in
        // whether the raw value or the active flag is reported.
        const bool fShowValue = (params[0].get_str() == "show");
        UniValue ret(UniValue::VOBJ);
        for (int nSporkID = SPORK_START; nSporkID <= SPORK_END; nSporkID++) {
            if (sporkManager.GetSporkNameByID(nSporkID) != "Unknown") {
                if (fShowValue)
                    ret.push_back(Pair(sporkManager.GetSporkNameByID(nSporkID), sporkManager.GetSporkValue(nSporkID)));
                else
                    ret.push_back(Pair(sporkManager.GetSporkNameByID(nSporkID), sporkManager.IsSporkActive(nSporkID)));
            }
        }
        return ret;
    }
    if (params.size() == 2) {
        int nSporkID = sporkManager.GetSporkIDByName(params[0].get_str());
        if (nSporkID == -1)
            return "Invalid spork name";
        // SPORK VALUE
        int64_t nValue = params[1].get_int64();
        // broadcast new spork
        if (sporkManager.UpdateSpork(nSporkID, nValue)) {
            sporkManager.ExecuteSpork(nSporkID, nValue);
            return "success";
        }
        return "failure";
    }
    throw runtime_error(
        "spork <name> [<value>]\n"
        "<name> is the corresponding spork name, or 'show' to show all current spork settings, active to show which sporks are active"
        "<value> is a epoch datetime to enable or disable spork"
        + HelpRequiringPassphrase());
}
// Validate a base58check address and, when valid, describe it. Wallet-only
// fields (ismine, iswatchonly, pubkey details, account) are emitted only in
// ENABLE_WALLET builds; when the address is invalid the result contains just
// {"isvalid": false}.
UniValue validateaddress(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() != 1)
throw runtime_error(
"validateaddress \"zumbaaddress\"\n"
"\nReturn information about the given zumba address.\n"
"\nArguments:\n"
"1. \"zumbaaddress\" (string, required) The zumba address to validate\n"
"\nResult:\n"
"{\n"
" \"isvalid\" : true|false, (boolean) If the address is valid or not. If not, this is the only property returned.\n"
" \"address\" : \"zumbaaddress\", (string) The zumba address validated\n"
" \"scriptPubKey\" : \"hex\", (string) The hex encoded scriptPubKey generated by the address\n"
" \"ismine\" : true|false, (boolean) If the address is yours or not\n"
" \"iswatchonly\" : true|false, (boolean) If the address is watchonly\n"
" \"isscript\" : true|false, (boolean) If the key is a script\n"
" \"pubkey\" : \"publickeyhex\", (string) The hex value of the raw public key\n"
" \"iscompressed\" : true|false, (boolean) If the address is compressed\n"
" \"account\" : \"account\" (string) DEPRECATED. The account associated with the address, \"\" is the default account\n"
"}\n"
"\nExamples:\n"
+ HelpExampleCli("validateaddress", "\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"")
+ HelpExampleRpc("validateaddress", "\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"")
);
// Lock chain (and wallet when present) state for a consistent description.
#ifdef ENABLE_WALLET
LOCK2(cs_main, pwalletMain ? &pwalletMain->cs_wallet : NULL);
#else
LOCK(cs_main);
#endif
CBitcoinAddress address(params[0].get_str());
bool isValid = address.IsValid();
UniValue ret(UniValue::VOBJ);
ret.push_back(Pair("isvalid", isValid));
if (isValid)
{
CTxDestination dest = address.Get();
string currentAddress = address.ToString();
ret.push_back(Pair("address", currentAddress));
CScript scriptPubKey = GetScriptForDestination(dest);
ret.push_back(Pair("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end())));
#ifdef ENABLE_WALLET
// Without a wallet this build still reports ismine/iswatchonly as false.
isminetype mine = pwalletMain ? IsMine(*pwalletMain, dest) : ISMINE_NO;
ret.push_back(Pair("ismine", (mine & ISMINE_SPENDABLE) ? true : false));
ret.push_back(Pair("iswatchonly", (mine & ISMINE_WATCH_ONLY) ? true: false));
// Merge in destination-specific fields (isscript, pubkey, script, ...).
UniValue detail = boost::apply_visitor(DescribeAddressVisitor(), dest);
ret.pushKVs(detail);
if (pwalletMain && pwalletMain->mapAddressBook.count(dest))
ret.push_back(Pair("account", pwalletMain->mapAddressBook[dest].name));
#endif
}
return ret;
}
/**
* Used by addmultisigaddress / createmultisig:
*/
// Build an n-of-m multisig redeem script from RPC parameters:
// params[0] = required signature count, params[1] = array of keys given
// either as wallet addresses (ENABLE_WALLET builds only) or as hex pubkeys.
// Throws runtime_error on any invalid/unusable key or size violation.
CScript _createmultisig_redeemScript(const UniValue& params)
{
int nRequired = params[0].get_int();
const UniValue& keys = params[1].get_array();
// Gather public keys
if (nRequired < 1)
throw runtime_error("a multisignature address must require at least one key to redeem");
if ((int)keys.size() < nRequired)
throw runtime_error(
strprintf("not enough keys supplied "
"(got %u keys, but need at least %d to redeem)", keys.size(), nRequired));
if (keys.size() > 16)
throw runtime_error("Number of addresses involved in the multisignature address creation > 16\nReduce the number");
std::vector<CPubKey> pubkeys;
pubkeys.resize(keys.size());
for (unsigned int i = 0; i < keys.size(); i++)
{
const std::string& ks = keys[i].get_str();
#ifdef ENABLE_WALLET
// Case 1: Zumba address and we have full public key:
CBitcoinAddress address(ks);
if (pwalletMain && address.IsValid())
{
CKeyID keyID;
if (!address.GetKeyID(keyID))
throw runtime_error(
strprintf("%s does not refer to a key",ks));
CPubKey vchPubKey;
if (!pwalletMain->GetPubKey(keyID, vchPubKey))
throw runtime_error(
strprintf("no full public key for address %s",ks));
if (!vchPubKey.IsFullyValid())
throw runtime_error(" Invalid public key: "+ks);
pubkeys[i] = vchPubKey;
}
// Case 2: hex public key
else
// CAUTION: this `else` deliberately straddles the #endif so that the
// IsHex() branch below is the else-arm in wallet builds but the first
// (unconditional) check in no-wallet builds. Do not reorder.
#endif
if (IsHex(ks))
{
CPubKey vchPubKey(ParseHex(ks));
if (!vchPubKey.IsFullyValid())
throw runtime_error(" Invalid public key: "+ks);
pubkeys[i] = vchPubKey;
}
else
{
throw runtime_error(" Invalid public key: "+ks);
}
}
CScript result = GetScriptForMultisig(nRequired, pubkeys);
// Redeem scripts must fit in a single push; reject oversized ones.
if (result.size() > MAX_SCRIPT_ELEMENT_SIZE)
throw runtime_error(
strprintf("redeemScript exceeds size limit: %d > %d", result.size(), MAX_SCRIPT_ELEMENT_SIZE));
return result;
}
// Create an n-of-m multisig P2SH address from a required-signature count and
// a key array (addresses or hex pubkeys). Returns {"address", "redeemScript"}.
// Key validation and script construction are delegated to
// _createmultisig_redeemScript, which throws on any bad input.
UniValue createmultisig(const UniValue& params, bool fHelp)
{
    // Simplified from `params.size() < 2 || params.size() > 2`: exactly two
    // parameters are accepted.
    if (fHelp || params.size() != 2)
    {
        string msg = "createmultisig nrequired [\"key\",...]\n"
            "\nCreates a multi-signature address with n signature of m keys required.\n"
            "It returns a json object with the address and redeemScript.\n"
            "\nArguments:\n"
            "1. nrequired (numeric, required) The number of required signatures out of the n keys or addresses.\n"
            "2. \"keys\" (string, required) A json array of keys which are zumba addresses or hex-encoded public keys\n"
            " [\n"
            " \"key\" (string) zumba address or hex-encoded public key\n"
            " ,...\n"
            " ]\n"
            "\nResult:\n"
            "{\n"
            " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n"
            " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n"
            "}\n"
            "\nExamples:\n"
            "\nCreate a multisig address from 2 addresses\n"
            + HelpExampleCli("createmultisig", "2 \"[\\\"Xt4qk9uKvQYAonVGSZNXqxeDmtjaEWgfrs\\\",\\\"XoSoWQkpgLpppPoyyzbUFh1fq2RBvW6UK1\\\"]\"") +
            "\nAs a json rpc call\n"
            + HelpExampleRpc("createmultisig", "2, \"[\\\"Xt4qk9uKvQYAonVGSZNXqxeDmtjaEWgfrs\\\",\\\"XoSoWQkpgLpppPoyyzbUFh1fq2RBvW6UK1\\\"]\"")
        ;
        throw runtime_error(msg);
    }
    // Construct using pay-to-script-hash:
    CScript inner = _createmultisig_redeemScript(params);
    CScriptID innerID(inner);
    CBitcoinAddress address(innerID);
    UniValue result(UniValue::VOBJ);
    result.push_back(Pair("address", address.ToString()));
    result.push_back(Pair("redeemScript", HexStr(inner.begin(), inner.end())));
    return result;
}
// Verify that a base64 compact signature over (strMessageMagic || message)
// was produced by the key behind the given address. Returns true/false;
// throws JSONRPCError for malformed address or signature encoding.
UniValue verifymessage(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 3)
        throw runtime_error(
            "verifymessage \"zumbaaddress\" \"signature\" \"message\"\n"
            "\nVerify a signed message\n"
            "\nArguments:\n"
            "1. \"zumbaaddress\" (string, required) The zumba address to use for the signature.\n"
            "2. \"signature\" (string, required) The signature provided by the signer in base 64 encoding (see signmessage).\n"
            "3. \"message\" (string, required) The message that was signed.\n"
            "\nResult:\n"
            "true|false (boolean) If the signature is verified or not.\n"
            "\nExamples:\n"
            "\nUnlock the wallet for 30 seconds\n"
            + HelpExampleCli("walletpassphrase", "\"mypassphrase\" 30") +
            "\nCreate the signature\n"
            + HelpExampleCli("signmessage", "\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\" \"my message\"") +
            "\nVerify the signature\n"
            + HelpExampleCli("verifymessage", "\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\" \"signature\" \"my message\"") +
            "\nAs json rpc\n"
            + HelpExampleRpc("verifymessage", "\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\", \"signature\", \"my message\"")
        );
    LOCK(cs_main);
    const string strAddress = params[0].get_str();
    const string strSignature = params[1].get_str();
    const string strMessage = params[2].get_str();
    CBitcoinAddress addr(strAddress);
    if (!addr.IsValid())
        throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address");
    CKeyID keyID;
    if (!addr.GetKeyID(keyID))
        throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
    bool fInvalid = false;
    vector<unsigned char> vchSig = DecodeBase64(strSignature.c_str(), &fInvalid);
    if (fInvalid)
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Malformed base64 encoding");
    // Hash the magic prefix plus the message, then recover the signing key.
    CHashWriter ss(SER_GETHASH, 0);
    ss << strMessageMagic << strMessage;
    CPubKey pubkey;
    if (!pubkey.RecoverCompact(ss.GetHash(), vchSig))
        return false;
    // The signature is valid iff the recovered key hashes to the address.
    return (pubkey.GetID() == keyID);
}
// Regression-test-only RPC: override the node clock with a fixed timestamp
// (0 restores the system clock). Rejected outside -regtest.
UniValue setmocktime(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "setmocktime timestamp\n"
            "\nSet the local time to given timestamp (-regtest only)\n"
            "\nArguments:\n"
            "1. timestamp (integer, required) Unix seconds-since-epoch timestamp\n"
            " Pass 0 to go back to using the system time."
        );
    if (!Params().MineBlocksOnDemand())
        throw runtime_error("setmocktime for regression testing (-regtest mode) only");
    // Hold cs_vNodes so the per-peer send/receive stamps are refreshed
    // atomically with the clock change; otherwise peers could look stale
    // and get disconnected for apparent inactivity.
    LOCK2(cs_main, cs_vNodes);
    RPCTypeCheck(params, boost::assign::list_of(UniValue::VNUM));
    SetMockTime(params[0].get_int64());
    const uint64_t nNow = GetTime();
    BOOST_FOREACH(CNode* pnode, vNodes) {
        pnode->nLastSend = pnode->nLastRecv = nNow;
    }
    return NullUniValue;
}
// Render an address-index (hash, type) pair back into a base58check string.
// type 2 wraps the hash as a script ID, type 1 as a key ID; any other type
// code is unknown and yields false without touching `address` meaningfully.
bool getAddressFromIndex(const int &type, const uint160 &hash, std::string &address)
{
    switch (type) {
    case 2:
        address = CBitcoinAddress(CScriptID(hash)).ToString();
        return true;
    case 1:
        address = CBitcoinAddress(CKeyID(hash)).ToString();
        return true;
    default:
        return false;
    }
}
bool getAddressesFromParams(const UniValue& params, std::vector<std::pair<uint160, int> > &addresses)
{
if (params[0].isStr()) {
CBitcoinAddress address(params[0].get_str());
uint160 hashBytes;
int type = 0;
if (!address.GetIndexKey(hashBytes, type)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
}
addresses.push_back(std::make_pair(hashBytes, type));
} else if (params[0].isObject()) {
UniValue addressValues = find_value(params[0].get_obj(), "addresses");
if (!addressValues.isArray()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Addresses is expected to be an array");
}
std::vector<UniValue> values = addressValues.getValues();
for (std::vector<UniValue>::iterator it = values.begin(); it != values.end(); ++it) {
CBitcoinAddress address(it->get_str());
uint160 hashBytes;
int type = 0;
if (!address.GetIndexKey(hashBytes, type)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
}
addresses.push_back(std::make_pair(hashBytes, type));
}
} else {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
}
return true;
}
bool heightSort(std::pair<CAddressUnspentKey, CAddressUnspentValue> a,
std::pair<CAddressUnspentKey, CAddressUnspentValue> b) {
return a.second.blockHeight < b.second.blockHeight;
}
bool timestampSort(std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> a,
std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> b) {
return a.second.time < b.second.time;
}
// Address-index RPC: list every mempool delta touching the given address(es),
// sorted by the time each transaction entered the mempool. Requires the
// mempool address index to be enabled.
UniValue getaddressmempool(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() != 1)
throw runtime_error(
"getaddressmempool\n"
"\nReturns all mempool deltas for an address (requires addressindex to be enabled).\n"
"\nArguments:\n"
"{\n"
" \"addresses\"\n"
" [\n"
" \"address\" (string) The base58check encoded address\n"
" ,...\n"
" ]\n"
"}\n"
"\nResult:\n"
"[\n"
" {\n"
" \"address\" (string) The base58check encoded address\n"
" \"txid\" (string) The related txid\n"
" \"index\" (number) The related input or output index\n"
" \"satoshis\" (number) The difference of satoshis\n"
" \"timestamp\" (number) The time the transaction entered the mempool (seconds)\n"
" \"prevtxid\" (string) The previous txid (if spending)\n"
" \"prevout\" (string) The previous transaction output index (if spending)\n"
" }\n"
"]\n"
"\nExamples:\n"
+ HelpExampleCli("getaddressmempool", "'{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}'")
+ HelpExampleRpc("getaddressmempool", "{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}")
);
// Accepts a single address string or {"addresses": [...]}; throws on bad input.
std::vector<std::pair<uint160, int> > addresses;
if (!getAddressesFromParams(params, addresses)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
}
std::vector<std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> > indexes;
if (!mempool.getAddressIndex(addresses, indexes)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
}
// Present deltas in mempool-entry-time order.
std::sort(indexes.begin(), indexes.end(), timestampSort);
UniValue result(UniValue::VARR);
for (std::vector<std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> >::iterator it = indexes.begin();
it != indexes.end(); it++) {
std::string address;
if (!getAddressFromIndex(it->first.type, it->first.addressBytes, address)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Unknown address type");
}
UniValue delta(UniValue::VOBJ);
delta.push_back(Pair("address", address));
delta.push_back(Pair("txid", it->first.txhash.GetHex()));
delta.push_back(Pair("index", (int)it->first.index));
delta.push_back(Pair("satoshis", it->second.amount));
delta.push_back(Pair("timestamp", it->second.time));
// Negative amounts are spends; include what is being spent.
if (it->second.amount < 0) {
delta.push_back(Pair("prevtxid", it->second.prevhash.GetHex()));
delta.push_back(Pair("prevout", (int)it->second.prevout));
}
result.push_back(delta);
}
return result;
}
// Address-index RPC: list all unspent outputs for the given address(es),
// sorted by block height. Requires -addressindex.
// Fixes: help-text typo "(strin)" -> "(string)"; iterators advanced with
// pre-increment and the read-only loop uses const_iterator.
UniValue getaddressutxos(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "getaddressutxos\n"
            "\nReturns all unspent outputs for an address (requires addressindex to be enabled).\n"
            "\nArguments:\n"
            "{\n"
            " \"addresses\"\n"
            " [\n"
            " \"address\" (string) The base58check encoded address\n"
            " ,...\n"
            " ]\n"
            "}\n"
            "\nResult\n"
            "[\n"
            " {\n"
            " \"address\" (string) The address base58check encoded\n"
            " \"txid\" (string) The output txid\n"
            " \"height\" (number) The block height\n"
            " \"outputIndex\" (number) The output index\n"
            " \"script\" (string) The script hex encoded\n"
            " \"satoshis\" (number) The number of satoshis of the output\n"
            " }\n"
            "]\n"
            "\nExamples:\n"
            + HelpExampleCli("getaddressutxos", "'{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}'")
            + HelpExampleRpc("getaddressutxos", "{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}")
        );
    // Accepts a single address string or {"addresses": [...]}; throws on bad input.
    std::vector<std::pair<uint160, int> > addresses;
    if (!getAddressesFromParams(params, addresses)) {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
    }
    // Collect unspent outputs for every requested address into one list.
    std::vector<std::pair<CAddressUnspentKey, CAddressUnspentValue> > unspentOutputs;
    for (std::vector<std::pair<uint160, int> >::iterator it = addresses.begin(); it != addresses.end(); ++it) {
        if (!GetAddressUnspent(it->first, it->second, unspentOutputs)) {
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
        }
    }
    std::sort(unspentOutputs.begin(), unspentOutputs.end(), heightSort);
    UniValue result(UniValue::VARR);
    for (std::vector<std::pair<CAddressUnspentKey, CAddressUnspentValue> >::const_iterator it = unspentOutputs.begin(); it != unspentOutputs.end(); ++it) {
        UniValue output(UniValue::VOBJ);
        std::string address;
        if (!getAddressFromIndex(it->first.type, it->first.hashBytes, address)) {
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Unknown address type");
        }
        output.push_back(Pair("address", address));
        output.push_back(Pair("txid", it->first.txhash.GetHex()));
        output.push_back(Pair("outputIndex", (int)it->first.index));
        output.push_back(Pair("script", HexStr(it->second.script.begin(), it->second.script.end())));
        output.push_back(Pair("satoshis", it->second.satoshis));
        output.push_back(Pair("height", it->second.blockHeight));
        result.push_back(output);
    }
    return result;
}
// Address-index RPC: list every balance change (delta) for the given
// address(es), optionally restricted to a [start, end] block-height window.
// Requires -addressindex; the parameter must be a JSON object.
UniValue getaddressdeltas(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() != 1 || !params[0].isObject())
throw runtime_error(
"getaddressdeltas\n"
"\nReturns all changes for an address (requires addressindex to be enabled).\n"
"\nArguments:\n"
"{\n"
" \"addresses\"\n"
" [\n"
" \"address\" (string) The base58check encoded address\n"
" ,...\n"
" ]\n"
" \"start\" (number) The start block height\n"
" \"end\" (number) The end block height\n"
"}\n"
"\nResult:\n"
"[\n"
" {\n"
" \"satoshis\" (number) The difference of satoshis\n"
" \"txid\" (string) The related txid\n"
" \"index\" (number) The related input or output index\n"
" \"height\" (number) The block height\n"
" \"address\" (string) The base58check encoded address\n"
" }\n"
"]\n"
"\nExamples:\n"
+ HelpExampleCli("getaddressdeltas", "'{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}'")
+ HelpExampleRpc("getaddressdeltas", "{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}")
);
// The height window applies only when BOTH start and end are numeric.
UniValue startValue = find_value(params[0].get_obj(), "start");
UniValue endValue = find_value(params[0].get_obj(), "end");
int start = 0;
int end = 0;
if (startValue.isNum() && endValue.isNum()) {
start = startValue.get_int();
end = endValue.get_int();
// NOTE(review): equal start/end is accepted despite the "greater than" wording.
if (end < start) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "End value is expected to be greater than start");
}
}
std::vector<std::pair<uint160, int> > addresses;
if (!getAddressesFromParams(params, addresses)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
}
// Collect deltas for all addresses; the windowed overload is used only when
// both bounds are positive (so start=0 or end=0 means "full history").
std::vector<std::pair<CAddressIndexKey, CAmount> > addressIndex;
for (std::vector<std::pair<uint160, int> >::iterator it = addresses.begin(); it != addresses.end(); it++) {
if (start > 0 && end > 0) {
if (!GetAddressIndex((*it).first, (*it).second, addressIndex, start, end)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
}
} else {
if (!GetAddressIndex((*it).first, (*it).second, addressIndex)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
}
}
}
UniValue result(UniValue::VARR);
for (std::vector<std::pair<CAddressIndexKey, CAmount> >::const_iterator it=addressIndex.begin(); it!=addressIndex.end(); it++) {
std::string address;
if (!getAddressFromIndex(it->first.type, it->first.hashBytes, address)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Unknown address type");
}
UniValue delta(UniValue::VOBJ);
delta.push_back(Pair("satoshis", it->second));
delta.push_back(Pair("txid", it->first.txhash.GetHex()));
delta.push_back(Pair("index", (int)it->first.index));
// blockindex is the transaction's position within its block.
delta.push_back(Pair("blockindex", (int)it->first.txindex));
delta.push_back(Pair("height", it->first.blockHeight));
delta.push_back(Pair("address", address));
result.push_back(delta);
}
return result;
}
// Address-index RPC: report the net balance and total received amount
// (in satoshis) across the given address(es). Requires -addressindex.
UniValue getaddressbalance(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "getaddressbalance\n"
            "\nReturns the balance for an address(es) (requires addressindex to be enabled).\n"
            "\nArguments:\n"
            "{\n"
            " \"addresses\"\n"
            " [\n"
            " \"address\" (string) The base58check encoded address\n"
            " ,...\n"
            " ]\n"
            "}\n"
            "\nResult:\n"
            "{\n"
            " \"balance\" (string) The current balance in satoshis\n"
            " \"received\" (string) The total number of satoshis received (including change)\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("getaddressbalance", "'{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}'")
            + HelpExampleRpc("getaddressbalance", "{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}")
        );
    std::vector<std::pair<uint160, int> > addresses;
    if (!getAddressesFromParams(params, addresses)) {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
    }
    // Gather the full delta history for every requested address.
    std::vector<std::pair<CAddressIndexKey, CAmount> > addressIndex;
    for (std::vector<std::pair<uint160, int> >::iterator it = addresses.begin(); it != addresses.end(); ++it) {
        if (!GetAddressIndex(it->first, it->second, addressIndex)) {
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
        }
    }
    // balance nets all deltas; received sums only the positive ones.
    CAmount balance = 0;
    CAmount received = 0;
    for (std::vector<std::pair<CAddressIndexKey, CAmount> >::const_iterator it = addressIndex.begin(); it != addressIndex.end(); ++it) {
        const CAmount nDelta = it->second;
        balance += nDelta;
        if (nDelta > 0) {
            received += nDelta;
        }
    }
    UniValue result(UniValue::VOBJ);
    result.push_back(Pair("balance", balance));
    result.push_back(Pair("received", received));
    return result;
}
// Address-index RPC: list the txids touching the given address(es),
// optionally limited to a [start, end] height window. Requires -addressindex.
UniValue getaddresstxids(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() != 1)
throw runtime_error(
"getaddresstxids\n"
"\nReturns the txids for an address(es) (requires addressindex to be enabled).\n"
"\nArguments:\n"
"{\n"
" \"addresses\"\n"
" [\n"
" \"address\" (string) The base58check encoded address\n"
" ,...\n"
" ]\n"
" \"start\" (number) The start block height\n"
" \"end\" (number) The end block height\n"
"}\n"
"\nResult:\n"
"[\n"
" \"transactionid\" (string) The transaction id\n"
" ,...\n"
"]\n"
"\nExamples:\n"
+ HelpExampleCli("getaddresstxids", "'{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}'")
+ HelpExampleRpc("getaddresstxids", "{\"addresses\": [\"XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg\"]}")
);
std::vector<std::pair<uint160, int> > addresses;
if (!getAddressesFromParams(params, addresses)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
}
// start/end are only honored when the parameter is an object form and both
// values are numeric; otherwise the full history is returned.
int start = 0;
int end = 0;
if (params[0].isObject()) {
UniValue startValue = find_value(params[0].get_obj(), "start");
UniValue endValue = find_value(params[0].get_obj(), "end");
if (startValue.isNum() && endValue.isNum()) {
start = startValue.get_int();
end = endValue.get_int();
}
}
std::vector<std::pair<CAddressIndexKey, CAmount> > addressIndex;
for (std::vector<std::pair<uint160, int> >::iterator it = addresses.begin(); it != addresses.end(); it++) {
if (start > 0 && end > 0) {
if (!GetAddressIndex((*it).first, (*it).second, addressIndex, start, end)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
}
} else {
if (!GetAddressIndex((*it).first, (*it).second, addressIndex)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available for address");
}
}
}
// Deduplicate txids via the (height, txid) set. With a single address the
// index is already in order, so unique txids are emitted immediately as
// encountered; with multiple addresses everything is collected first and
// emitted afterwards in the set's (height, txid) sorted order.
std::set<std::pair<int, std::string> > txids;
UniValue result(UniValue::VARR);
for (std::vector<std::pair<CAddressIndexKey, CAmount> >::const_iterator it=addressIndex.begin(); it!=addressIndex.end(); it++) {
int height = it->first.blockHeight;
std::string txid = it->first.txhash.GetHex();
if (addresses.size() > 1) {
txids.insert(std::make_pair(height, txid));
} else {
// insert().second is true only for a txid not seen before.
if (txids.insert(std::make_pair(height, txid)).second) {
result.push_back(txid);
}
}
}
if (addresses.size() > 1) {
for (std::set<std::pair<int, std::string> >::const_iterator it=txids.begin(); it!=txids.end(); it++) {
result.push_back(it->second);
}
}
return result;
}
// Spent-index RPC: given a txid and output index, report the transaction
// and input index that spent that output (plus the spending block height).
UniValue getspentinfo(const UniValue& params, bool fHelp)
{
    if (fHelp || params.size() != 1 || !params[0].isObject())
        throw runtime_error(
            "getspentinfo\n"
            "\nReturns the txid and index where an output is spent.\n"
            "\nArguments:\n"
            "{\n"
            " \"txid\" (string) The hex string of the txid\n"
            " \"index\" (number) The start block height\n"
            "}\n"
            "\nResult:\n"
            "{\n"
            " \"txid\" (string) The transaction id\n"
            " \"index\" (number) The spending input index\n"
            " ,...\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("getspentinfo", "'{\"txid\": \"0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9\", \"index\": 0}'")
            + HelpExampleRpc("getspentinfo", "{\"txid\": \"0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9\", \"index\": 0}")
        );
    const UniValue txidValue = find_value(params[0].get_obj(), "txid");
    const UniValue indexValue = find_value(params[0].get_obj(), "index");
    if (!txidValue.isStr() || !indexValue.isNum())
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid txid or index");
    const uint256 hashTx = ParseHashV(txidValue, "txid");
    const int nOutputIndex = indexValue.get_int();
    // Look up the outpoint in the spent index.
    CSpentIndexKey key(hashTx, nOutputIndex);
    CSpentIndexValue value;
    if (!GetSpentIndex(key, value))
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Unable to get spent info");
    UniValue obj(UniValue::VOBJ);
    obj.push_back(Pair("txid", value.txid.GetHex()));
    obj.push_back(Pair("index", (int)value.inputIndex));
    obj.push_back(Pair("height", value.blockHeight));
    return obj;
}
|
// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com)
// (C) Copyright 2004-2007 Jonathan Turkanis
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.)
// See http://www.boost.org/libs/iostreams for documentation.
#ifndef BOOST_IOSTREAMS_FILTER_STREAM_HPP_INCLUDED
#define BOOST_IOSTREAMS_FILTER_STREAM_HPP_INCLUDED
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include <memory> // allocator.
#include <boost/iostreams/detail/access_control.hpp>
#include <boost/iostreams/detail/char_traits.hpp>
#include <boost/iostreams/detail/iostream.hpp> // standard streams.
#include <boost/iostreams/detail/push.hpp>
#include <boost/iostreams/detail/select.hpp>
#include <boost/iostreams/detail/streambuf.hpp> // pubsync.
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/mpl/and.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/is_convertible.hpp>
// Must come last.
#include <boost/iostreams/detail/config/disable_warnings.hpp> // MSVC.
namespace boost { namespace iostreams {
//--------------Definition of filtered_istream--------------------------------//
namespace detail {
// Maps an i/o Mode onto (a) the standard stream base class a filtering
// stream must derive from, and (b) the matching category tag:
//   input+output -> basic_iostream / iostream_tag
//   input only   -> basic_istream  / istream_tag
//   otherwise    -> basic_ostream  / ostream_tag
template<typename Mode, typename Ch, typename Tr>
struct filtering_stream_traits {
    typedef typename
            iostreams::select<  // Disambiguation for Tru64
                mpl::and_<
                    is_convertible<Mode, input>,
                    is_convertible<Mode, output>
                >,
                BOOST_IOSTREAMS_BASIC_IOSTREAM(Ch, Tr),
                is_convertible<Mode, input>,
                BOOST_IOSTREAMS_BASIC_ISTREAM(Ch, Tr),
                else_,
                BOOST_IOSTREAMS_BASIC_OSTREAM(Ch, Tr)
            >::type stream_type;
    typedef typename
            iostreams::select<  // Disambiguation required for Tru64.
                mpl::and_<
                    is_convertible<Mode, input>,
                    is_convertible<Mode, output>
                >,
                iostream_tag,
                is_convertible<Mode, input>,
                istream_tag,
                else_,
                ostream_tag
            >::type stream_tag;
};
// Common base for the filtering stream templates: owns the filter Chain and
// derives from the standard stream type selected by filtering_stream_traits.
// Access (public_/protected_) controls visibility of the chain-client API.
template<typename Chain, typename Access>
class filtering_stream_base
    : public access_control<
          boost::iostreams::detail::chain_client<Chain>,
          Access
      >,
      public filtering_stream_traits<
          typename Chain::mode,
          typename Chain::char_type,
          typename Chain::traits_type
      >::stream_type
{
public:
    typedef Chain chain_type;
    typedef access_control<
                boost::iostreams::detail::chain_client<Chain>,
                Access
            > client_type;
protected:
    typedef typename
            filtering_stream_traits<
                typename Chain::mode,
                typename Chain::char_type,
                typename Chain::traits_type
            >::stream_type stream_type;
    // Start with a null streambuf; notify() installs one once the chain
    // has at least one element.
    filtering_stream_base() : stream_type(0) { this->set_chain(&chain_); }
private:
    // Called by the chain when its contents change: point the stream at the
    // chain's front streambuf, or back at null when the chain is empty.
    void notify() { this->rdbuf(chain_.empty() ? 0 : &chain_.front()); }
    Chain chain_;
};
} // End namespace detail.
//
// Macro: BOOST_IOSTREAMS_DEFINE_FILTER_STREAM(name_, chain_type_, default_char_)
// Description: Defines a stream template (derived, via filtering_stream_base,
// from the appropriate std::basic_istream/ostream/iostream) which uses
// a chain to perform i/o. The template has the following parameters:
// Mode - the i/o mode.
// Ch - The character type.
// Tr - The character traits type.
// Alloc - The allocator type.
// Access - Indicates accessibility of the chain interface; must be either
// public_ or protected_; defaults to public_.
// Macro parameters:
// name_ - The name of the template to be defined.
// chain_type_ - The name of the chain template.
// default_char_ - The default value for the char template parameter.
// NOTE: the destructor flushes (pubsync) a complete chain; comments cannot be
// placed inside the macro body below without breaking line continuation.
//
#define BOOST_IOSTREAMS_DEFINE_FILTER_STREAM(name_, chain_type_, default_char_) \
    template< typename Mode, \
              typename Ch = default_char_, \
              typename Tr = BOOST_IOSTREAMS_CHAR_TRAITS(Ch), \
              typename Alloc = std::allocator<Ch>, \
              typename Access = public_ > \
    class name_ \
        : public boost::iostreams::detail::filtering_stream_base< \
              chain_type_<Mode, Ch, Tr, Alloc>, Access \
          > \
    { \
    public: \
        typedef Ch char_type; \
        struct category \
            : Mode, \
              closable_tag, \
              detail::filtering_stream_traits<Mode, Ch, Tr>::stream_tag \
        { }; \
        BOOST_IOSTREAMS_STREAMBUF_TYPEDEFS(Tr) \
        typedef Mode mode; \
        typedef chain_type_<Mode, Ch, Tr, Alloc> chain_type; \
        name_() { } \
        BOOST_IOSTREAMS_DEFINE_PUSH_CONSTRUCTOR(name_, mode, Ch, push_impl) \
        ~name_() { \
            if (this->is_complete()) \
                this->rdbuf()->BOOST_IOSTREAMS_PUBSYNC(); \
        } \
    private: \
        typedef access_control< \
                    boost::iostreams::detail::chain_client< \
                        chain_type_<Mode, Ch, Tr, Alloc> \
                    >, \
                    Access \
                > client_type; \
        template<typename T> \
        void push_impl(const T& t BOOST_IOSTREAMS_PUSH_PARAMS()) \
        { client_type::push(t BOOST_IOSTREAMS_PUSH_ARGS()); } \
    }; \
    /**/
// Concrete filtering stream templates for narrow and wide characters.
BOOST_IOSTREAMS_DEFINE_FILTER_STREAM(filtering_stream, boost::iostreams::chain, char)
BOOST_IOSTREAMS_DEFINE_FILTER_STREAM(wfiltering_stream, boost::iostreams::chain, wchar_t)
// Convenience aliases for the common single-direction cases.
typedef filtering_stream<input> filtering_istream;
typedef filtering_stream<output> filtering_ostream;
typedef wfiltering_stream<input> filtering_wistream;
typedef wfiltering_stream<output> filtering_wostream;
//----------------------------------------------------------------------------//
} } // End namespace iostreams, boost
#include <boost/iostreams/detail/config/enable_warnings.hpp> // MSVC
#endif // #ifndef BOOST_IOSTREAMS_FILTER_STREAM_HPP_INCLUDED
|
#include <atomic>
#include <boost/test/unit_test.hpp>
#include <boost/throw_exception.hpp>
#include <glo.hpp>
using namespace glo;
using namespace std;
BOOST_AUTO_TEST_CASE(test_format_pointer_to_updating_string)
{
    // A group registered with a pointer to a string must format the
    // pointee's current value, not a snapshot taken at add() time.
    string val = "str";
    group g;
    g.add(&val, "a_str", {tag::COUNT}, 0, "A string.");

    // Render the group's items to their JSON representation.
    auto render = [&g] {
        stringstream out;
        const char* sep = "";
        g.json_format_items(out, "", sep);
        return out.str();
    };

    BOOST_CHECK_EQUAL(R""({"key":"a_str:count","level":0,"desc":"A string.","value":"str"})"", render());

    val = "123";
    BOOST_CHECK_EQUAL(R""({"key":"a_str:count","level":0,"desc":"A string.","value":"123"})"", render());
}
BOOST_AUTO_TEST_CASE(test_format_pointer_to_updating_uint32)
{
    // A registered uint32_t pointer must be re-read on every format call.
    uint32_t val = 12;
    group g;
    g.add(&val, "an_int", {tag::COUNT}, 0, "An int.");

    // Render the group's items to their JSON representation.
    auto render = [&g] {
        stringstream out;
        const char* sep = "";
        g.json_format_items(out, "", sep);
        return out.str();
    };

    BOOST_CHECK_EQUAL(R""({"key":"an_int:count","level":0,"desc":"An int.","value":12})"", render());

    val = 123;
    BOOST_CHECK_EQUAL(R""({"key":"an_int:count","level":0,"desc":"An int.","value":123})"", render());
}
BOOST_AUTO_TEST_CASE(test_format_ref_to_updating_int64)
{
    // A value registered via std::ref must track the referenced int64_t;
    // also exercises a non-zero level (level::LOW formats as 3).
    int64_t val = -12;
    group g;
    g.add(ref(val), "neg_int", {tag::LAST}, level::LOW, "Negative int.");

    // Render the group's items to their JSON representation.
    auto render = [&g] {
        stringstream out;
        const char* sep = "";
        g.json_format_items(out, "", sep);
        return out.str();
    };

    BOOST_CHECK_EQUAL(R""({"key":"neg_int:last","level":3,"desc":"Negative int.","value":-12})"", render());

    val = -123;
    BOOST_CHECK_EQUAL(R""({"key":"neg_int:last","level":3,"desc":"Negative int.","value":-123})"", render());
}
BOOST_AUTO_TEST_CASE(test_format_cref_to_updating_bool)
{
    // A bool registered via std::cref must track the referenced variable
    // and format as a bare JSON true/false literal.
    bool val = false;
    group g;
    g.add(cref(val), "bool", {tag::LAST}, level::LOW, "Bool.");

    // Render the group's items to their JSON representation.
    auto render = [&g] {
        stringstream out;
        const char* sep = "";
        g.json_format_items(out, "", sep);
        return out.str();
    };

    BOOST_CHECK_EQUAL(R""({"key":"bool:last","level":3,"desc":"Bool.","value":false})"", render());

    val = true;
    BOOST_CHECK_EQUAL(R""({"key":"bool:last","level":3,"desc":"Bool.","value":true})"", render());
}
BOOST_AUTO_TEST_CASE(test_format_pointer_to_updating_atomic_uint8)
{
    // An atomic<uint8_t> pointer must be re-read on every format call and
    // rendered as a number (not as a character).
    atomic<uint8_t> val(12);
    group g;
    g.add(&val, "atomic", {tag::COUNT}, 0, "Atomic.");

    // Render the group's items to their JSON representation.
    auto render = [&g] {
        stringstream out;
        const char* sep = "";
        g.json_format_items(out, "", sep);
        return out.str();
    };

    BOOST_CHECK_EQUAL(R""({"key":"atomic:count","level":0,"desc":"Atomic.","value":12})"", render());

    val = 123;
    BOOST_CHECK_EQUAL(R""({"key":"atomic:count","level":0,"desc":"Atomic.","value":123})"", render());
}
BOOST_AUTO_TEST_CASE(test_format_shared_ptr_to_updating_int8)
{
    // A shared_ptr<int8_t> must be dereferenced at format time so later
    // writes through the pointer are visible.
    auto val = make_shared<int8_t>(-12);
    group g;
    g.add(val, "shared", {tag::COUNT}, 0, "Shared.");

    // Render the group's items to their JSON representation.
    auto render = [&g] {
        stringstream out;
        const char* sep = "";
        g.json_format_items(out, "", sep);
        return out.str();
    };

    BOOST_CHECK_EQUAL(R""({"key":"shared:count","level":0,"desc":"Shared.","value":-12})"", render());

    *val = 123;
    BOOST_CHECK_EQUAL(R""({"key":"shared:count","level":0,"desc":"Shared.","value":123})"", render());
}
BOOST_AUTO_TEST_CASE(test_format_json_escaping)
{
    // Plain ASCII passes through untouched.
    BOOST_CHECK_EQUAL("abc", escape_json("abc"));
    // Quote and newline are replaced by \uXXXX escapes.
    BOOST_CHECK_EQUAL("\\u0022", escape_json("\""));
    BOOST_CHECK_EQUAL("\\u000a", escape_json("\n"));
    // Non-ASCII UTF-8 bytes are passed through unescaped.
    BOOST_CHECK_EQUAL("ä", escape_json("ä"));
}
BOOST_AUTO_TEST_CASE(test_json_format)
{
    // Format a single value through json_format and capture the output.
    auto fmt = [](const auto& value) {
        stringstream out;
        json_format(out, value);
        return out.str();
    };

    BOOST_CHECK_EQUAL("\"abc\"", fmt("abc"s)); // std::string -> quoted
    BOOST_CHECK_EQUAL("\"abc\"", fmt("abc"));  // C string -> quoted
    BOOST_CHECK_EQUAL("\"a\"", fmt('a'));      // char -> quoted
    BOOST_CHECK_EQUAL("false", fmt(false));    // bool -> bare literal
}
|
/*
Copyright (c) 2009-2010 Christopher A. Taylor. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of LibCat nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <cat/crypt/rand/Fortuna.hpp>
using namespace cat;
// Used for MacOSX, iPhone, PS3, XBox, and others (for now)
// I want to have more of these operating systems defined
#if !defined(CAT_OS_WINDOWS) && !defined(CAT_OS_LINUX)
#include <fcntl.h>
#include <unistd.h>
#if !defined(CAT_NO_ENTROPY_THREAD)
// Entropy-thread entry point. On these generic platforms entropy is
// gathered synchronously, so this is a no-op that reports success.
bool FortunaFactory::ThreadFunction(void *)
{
    // Generic version does not spawn a thread
    return true;
}
#endif // !defined(CAT_NO_ENTROPY_THREAD)
// Prime the generator with one poll of the invariant sources; there is
// nothing persistent to set up on generic platforms.
bool FortunaFactory::InitializeEntropySources()
{
    // Fire poll for entropy; all of it goes into pool 0
    PollInvariantSources(0);
    return true;
}
// Nothing to tear down: InitializeEntropySources() acquires no persistent
// handles or threads on generic platforms.
void FortunaFactory::ShutdownEntropySources()
{
}
// Feed the given entropy pool with a timing/OS-entropy sample:
// cycle counter, up to 32 bytes from /dev/random, cycle counter again.
// The whole sample struct is crunched into the pool's Skein state.
//
// Fix vs. original: the return value of read() was ignored, so a short or
// failed read silently folded *indeterminate* stack bytes into the pool
// (and triggers -Wunused-result on fortified glibc). The buffer is now
// zero-initialized and the read loops until full or the source fails.
void FortunaFactory::PollInvariantSources(int pool_index)
{
    Skein &pool = Pool[pool_index];

    struct {
        u32 cycles_start;
        u8 system_prng[32];
        u32 cycles_end;
    } Sources = {}; // zero-init: never crunch indeterminate bytes

    // Cycles at the start
    Sources.cycles_start = Clock::cycles();

    // /dev/random large request; read() may deliver fewer bytes than asked
    // for, so keep reading until the buffer is full or the source fails.
    int random_fd = open("/dev/random", O_RDONLY);
    if (random_fd >= 0)
    {
        size_t offset = 0;
        while (offset < sizeof(Sources.system_prng))
        {
            ssize_t bytes = read(random_fd, Sources.system_prng + offset,
                                 sizeof(Sources.system_prng) - offset);
            if (bytes <= 0) break; // error or EOF: keep whatever we got
            offset += (size_t)bytes;
        }
        close(random_fd);
    }

    // Cycles at the end
    Sources.cycles_end = Clock::cycles();

    pool.Crunch(&Sources, sizeof(Sources));
}
// Slow-poll hook: not implemented on generic platforms; all entropy in
// this build comes from PollInvariantSources().
void FortunaFactory::PollSlowEntropySources(int pool_index)
{
}
// Fast-poll hook: likewise not implemented on generic platforms.
void FortunaFactory::PollFastEntropySources(int pool_index)
{
}
#endif
|
#ifndef DISKCHANGER_HH
#define DISKCHANGER_HH
#include "DiskContainer.hh"
#include "StateChangeListener.hh"
#include "serialize_meta.hh"
#include "span.hh"
#include <functional>
#include <memory>
#include <string>
namespace openmsx {
class CommandController;
class StateChangeDistributor;
class Scheduler;
class MSXMotherBoard;
class Reactor;
class Disk;
class DiskCommand;
class TclObject;
class DiskName;
// A removable-disk drive: owns the currently inserted Disk image, exposes
// it through the DiskContainer interface, and participates in replay via
// StateChangeListener.
class DiskChanger final : public DiskContainer, private StateChangeListener
{
public:
    DiskChanger(MSXMotherBoard& board,
                std::string driveName,
                bool createCmd = true,
                bool doubleSidedDrive = true,
                std::function<void()> preChangeCallback = {});
    DiskChanger(Reactor& reactor,
                std::string driveName); // for virtual_drive
    ~DiskChanger() override;

    // Registers the Tcl disk command (used when the ctor was asked not to).
    void createCommand();

    [[nodiscard]] const std::string& getDriveName() const { return driveName; }
    [[nodiscard]] const DiskName& getDiskName() const;
    // Read the changed flag without modifying it (diskChanged() is the
    // consuming variant — implementation not in this file).
    [[nodiscard]] bool peekDiskChanged() const { return diskChangedFlag; }
    void forceDiskChange() { diskChangedFlag = true; }
    [[nodiscard]] Disk& getDisk() { return *disk; }

    // DiskContainer
    [[nodiscard]] SectorAccessibleDisk* getSectorAccessibleDisk() override;
    [[nodiscard]] std::string_view getContainerName() const override;
    bool diskChanged() override;
    int insertDisk(const std::string& filename) override;

    // for NowindCommand
    void changeDisk(std::unique_ptr<Disk> newDisk);

    // for DirAsDSK
    [[nodiscard]] Scheduler* getScheduler() const { return scheduler; }
    [[nodiscard]] bool isDoubleSidedDrive() const { return doubleSidedDrive; }

    template<typename Archive>
    void serialize(Archive& ar, unsigned version);

private:
    void init(std::string_view prefix, bool createCmd);
    void insertDisk(span<const TclObject> args);
    void ejectDisk();
    void sendChangeDiskEvent(span<std::string> args);

    // StateChangeListener
    void signalStateChange(const std::shared_ptr<StateChange>& event) override;
    void stopReplay(EmuTime::param time) override;

private:
    Reactor& reactor;
    CommandController& controller;
    StateChangeDistributor* stateChangeDistributor;
    Scheduler* scheduler;
    std::function<void()> preChangeCallback;

    const std::string driveName;
    std::unique_ptr<Disk> disk;

    friend class DiskCommand;
    std::unique_ptr<DiskCommand> diskCommand; // must come after driveName

    const bool doubleSidedDrive; // for DirAsDSK
    bool diskChangedFlag;
};
SERIALIZE_CLASS_VERSION(DiskChanger, 2);
} // namespace openmsx
#endif
|
// This file was generated based on '(multiple files)'.
// WARNING: Changes might be lost if you edit this file directly.
#include <duktape_helpers.h>
#include <Fuse.Scripting.Callback.h>
#include <Fuse.Scripting.Duktape.callback.h>
#include <Fuse.Scripting.Duktape.CallbackClosure.h>
#include <Fuse.Scripting.Duktape.CompileFlag.h>
#include <Fuse.Scripting.Duktape.Context.h>
#include <Fuse.Scripting.Duktape.duktape.h>
#include <Fuse.Scripting.Duktape.EnumFlags.h>
#include <Fuse.Scripting.Duktape.JSArray.h>
#include <Fuse.Scripting.Duktape.JSFunction.h>
#include <Fuse.Scripting.Duktape.JSObject.h>
#include <Fuse.Scripting.Duktape.JSONObject.h>
#include <Fuse.Scripting.Function.h>
#include <Fuse.Scripting.Object.h>
#include <Fuse.Scripting.ScriptException.h>
#include <Uno.Action-1.h>
#include <Uno.Bool.h>
#include <Uno.Collections.Dictionary-2.h>
#include <Uno.Collections.List-1.h>
#include <Uno.Delegate.h>
#include <Uno.Double.h>
#include <Uno.Exception.h>
#include <Uno.Int.h>
#include <Uno.IntPtr.h>
#include <Uno.Long.h>
#include <Uno.Object.h>
#include <Uno.String.h>
#include <Uno.Threading.IDispatcher.h>
#include <Uno.UInt.h>
// Interned-string and cached-type tables, populated lazily by the
// *_typeof() functions below (generated code).
static uString* STRINGS[16];
static uType* TYPES[18];
namespace g{
namespace Fuse{
namespace Scripting{
namespace Duktape{
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(551)
// ------------------------------------------------------------------------------------------
// internal extern delegate int callback(Uno.IntPtr ctx) :551
// Builds (once) and returns the reflection type object for the native
// duktape callback delegate: int callback(Uno.IntPtr ctx).
uDelegateType* callback_typeof()
{
    static uSStrong<uDelegateType*> type;
    if (type != NULL) return type; // cached after first call
    type = uDelegateType::New("Fuse.Scripting.Duktape.callback", 1, 0);
    type->SetSignature(::g::Uno::Int_typeof(),
        ::g::Uno::IntPtr_typeof());
    return type;
}
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(91)
// -----------------------------------------------------------------------------------------
// internal sealed extern class CallbackClosure :91
// {
// Reflection metadata for CallbackClosure: registers its two fields
// (_callback delegate, _context duktape handle) with the Uno runtime.
uType* CallbackClosure_typeof()
{
    static uSStrong<uType*> type;
    if (type != NULL) return type; // cached after first call
    uTypeOptions options;
    options.FieldCount = 2;
    options.ObjectSize = sizeof(CallbackClosure);
    options.TypeSize = sizeof(uType);
    type = uClassType::New("Fuse.Scripting.Duktape.CallbackClosure", options);
    ::TYPES[0] = ::g::Fuse::Scripting::Callback_typeof();
    ::TYPES[1] = ::g::Fuse::Scripting::ScriptException_typeof();
    type->SetFields(0,
        ::g::Fuse::Scripting::Callback_typeof(), offsetof(::g::Fuse::Scripting::Duktape::CallbackClosure, _callback), 0,
        ::g::Uno::IntPtr_typeof(), offsetof(::g::Fuse::Scripting::Duktape::CallbackClosure, _context), 0);
    return type;
}
// The *_fn wrappers below are the C-ABI thunks the Uno runtime calls;
// each simply forwards to the corresponding member function.
// public CallbackClosure(Uno.IntPtr context, Fuse.Scripting.Callback callback) :96
void CallbackClosure__ctor__fn(CallbackClosure* __this, void** context, uDelegate* callback)
{
    __this->ctor_(*context, callback);
}
// public CallbackClosure New(Uno.IntPtr context, Fuse.Scripting.Callback callback) :96
void CallbackClosure__New1_fn(void** context, uDelegate* callback, CallbackClosure** __retval)
{
    *__retval = CallbackClosure::New1(*context, callback);
}
// public object Proxy(object[] args) :102
void CallbackClosure__Proxy_fn(CallbackClosure* __this, uArray* args, uObject** __retval)
{
    *__retval = __this->Proxy(args);
}
// public CallbackClosure(Uno.IntPtr context, Fuse.Scripting.Callback callback) [instance] :96
void CallbackClosure::ctor_(void* context, uDelegate* callback)
{
    uStackFrame __("Fuse.Scripting.Duktape.CallbackClosure", ".ctor(Uno.IntPtr,Fuse.Scripting.Callback)");
    _context = context;
    _callback = callback;
}
// public object Proxy(object[] args) [instance] :102
// Invokes the wrapped callback; a ScriptException is translated into a
// duktape error on the stored context and NULL is returned. Any other
// throwable is rethrown unchanged.
uObject* CallbackClosure::Proxy(uArray* args)
{
    uStackFrame __("Fuse.Scripting.Duktape.CallbackClosure", "Proxy(object[])");
    try
    {
        uObject* res = uPtr(_callback)->Invoke(1, args);
        return res;
    }
    catch (const uThrowable& __t)
    {
        if (uIs(__t.Exception, ::TYPES[1/*Fuse.Scripting.ScriptException*/]))
        {
            ::g::Fuse::Scripting::ScriptException* e = (::g::Fuse::Scripting::ScriptException*)__t.Exception;
            ::g::Fuse::Scripting::Duktape::duktape::error(_context, uPtr(e)->Message());
            return NULL;
        }
        else throw __t;
    }
}
// public CallbackClosure New(Uno.IntPtr context, Fuse.Scripting.Callback callback) [static] :96
CallbackClosure* CallbackClosure::New1(void* context, uDelegate* callback)
{
    CallbackClosure* obj1 = (CallbackClosure*)uNew(CallbackClosure_typeof());
    obj1->ctor_(context, callback);
    return obj1;
}
// }
// }
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(554)
// ------------------------------------------------------------------------------------------
// internal extern enum CompileFlag :554
// Reflection type for the CompileFlag enum: bit flags (values 1..64)
// mirroring duktape's DUK_COMPILE_* constants, backed by uint.
uEnumType* CompileFlag_typeof()
{
    static uSStrong<uEnumType*> type;
    if (type != NULL) return type; // cached after first call
    type = uEnumType::New("Fuse.Scripting.Duktape.CompileFlag", ::g::Uno::UInt_typeof(), 7);
    type->SetLiterals(
        "DUK_COMPILE_EVAL", 1LL,
        "DUK_COMPILE_FUNCTION", 2LL,
        "DUK_COMPILE_STRICT", 4LL,
        "DUK_COMPILE_SAFE", 8LL,
        "DUK_COMPILE_NORESULT", 16LL,
        "DUK_COMPILE_NOSOURCE", 32LL,
        "DUK_COMPILE_STRLEN", 64LL);
    return type;
}
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(118)
// ------------------------------------------------------------------------------------------
// public sealed extern class Context :118
// {
// static Context() :118
// Static constructor: allocates the shared zero-length argument array.
static void Context__cctor__fn(uType* __type)
{
    Context::_emptyArgs_ = uArray::New(::TYPES[2/*object[]*/], 0);
}
// Reflection metadata for Context: base class, IDisposable interface,
// virtual-slot bindings, interned strings, cached types, field layout and
// the reflection-visible functions. Built once and cached; the assignment
// order of STRINGS/TYPES matches the generator's index comments elsewhere.
::g::Fuse::Scripting::Context_type* Context_typeof()
{
    static uSStrong< ::g::Fuse::Scripting::Context_type*> type;
    if (type != NULL) return type; // cached after first call
    uTypeOptions options;
    options.FieldCount = 10;
    options.InterfaceCount = 1;
    options.ObjectSize = sizeof(Context);
    options.TypeSize = sizeof(::g::Fuse::Scripting::Context_type);
    type = (::g::Fuse::Scripting::Context_type*)uClassType::New("Fuse.Scripting.Duktape.Context", options);
    type->SetBase(::g::Fuse::Scripting::Context_typeof());
    type->fp_cctor_ = Context__cctor__fn;
    type->fp_Dispose = (void(*)(::g::Fuse::Scripting::Context*))Context__Dispose_fn;
    type->fp_Evaluate1 = (void(*)(::g::Fuse::Scripting::Context*, uString*, uString*, uObject**))Context__Evaluate1_fn;
    type->fp_get_GlobalObject = (void(*)(::g::Fuse::Scripting::Context*, ::g::Fuse::Scripting::Object**))Context__get_GlobalObject_fn;
    type->interface0.fp_Dispose = (void(*)(uObject*))Context__Dispose_fn;
    ::STRINGS[0] = uString::Const("var __global_object__ = (1,eval)('this');");
    ::STRINGS[1] = uString::Const("name");
    ::STRINGS[2] = uString::Const("message");
    ::STRINGS[3] = uString::Const("fileName");
    ::STRINGS[4] = uString::Const("lineNumber");
    ::STRINGS[5] = uString::Const("stack");
    ::STRINGS[6] = uString::Const("Could not convert index to object");
    ::STRINGS[7] = uString::Const("__callback_proxy__");
    ::STRINGS[8] = uString::Const("function() { return __global_object__.__callback_proxy__(arguments, ");
    ::STRINGS[9] = uString::Const("); }");
    ::STRINGS[10] = uString::Const("Cannot push value: ");
    ::STRINGS[11] = uString::Const("__stashKey");
    ::TYPES[2] = uObject_typeof()->Array();
    ::TYPES[3] = ::g::Uno::Collections::Dictionary_typeof()->MakeType(::g::Uno::Int_typeof(), ::g::Fuse::Scripting::Callback_typeof());
    ::TYPES[0] = ::g::Fuse::Scripting::Callback_typeof();
    ::TYPES[4] = uObject_typeof();
    ::TYPES[5] = ::g::Uno::Action1_typeof()->MakeType(::g::Uno::IntPtr_typeof());
    ::TYPES[6] = ::g::Uno::Int_typeof();
    ::TYPES[7] = ::g::Uno::Double_typeof();
    ::TYPES[8] = ::g::Uno::String_typeof();
    ::TYPES[9] = ::g::Uno::Bool_typeof();
    ::TYPES[10] = ::g::Uno::Delegate_typeof();
    ::TYPES[11] = ::g::Fuse::Scripting::Duktape::JSFunction_typeof();
    ::TYPES[12] = ::g::Fuse::Scripting::Duktape::JSArray_typeof();
    ::TYPES[13] = ::g::Fuse::Scripting::Duktape::JSObject_typeof();
    ::TYPES[14] = ::g::Uno::UInt_typeof();
    ::TYPES[15] = ::g::Fuse::Scripting::Object_typeof();
    type->SetInterfaces(
        ::g::Uno::IDisposable_typeof(), offsetof(::g::Fuse::Scripting::Context_type, interface0));
    type->SetFields(2,
        ::g::Uno::Collections::Dictionary_typeof()->MakeType(::g::Uno::Int_typeof(), ::g::Fuse::Scripting::Callback_typeof()), offsetof(::g::Fuse::Scripting::Duktape::Context, _callbacks), 0,
        ::g::Uno::Int_typeof(), offsetof(::g::Fuse::Scripting::Duktape::Context, _callbacksCount), 0,
        ::g::Fuse::Scripting::Duktape::JSObject_typeof(), offsetof(::g::Fuse::Scripting::Duktape::Context, _globalObject), 0,
        ::g::Uno::IntPtr_typeof(), offsetof(::g::Fuse::Scripting::Duktape::Context, _handle), 0,
        ::g::Uno::Bool_typeof(), offsetof(::g::Fuse::Scripting::Duktape::Context, _isAlive), 0,
        ::g::Uno::Action1_typeof()->MakeType(::g::Uno::IntPtr_typeof()), offsetof(::g::Fuse::Scripting::Duktape::Context, _proxyCallback), 0,
        ::g::Uno::Int_typeof(), offsetof(::g::Fuse::Scripting::Duktape::Context, _stashKey), 0,
        uObject_typeof()->Array(), (uintptr_t)&::g::Fuse::Scripting::Duktape::Context::_emptyArgs_, uFieldFlagsStatic);
    type->Reflection.SetFunctions(5,
        new uFunction("Evaluate", NULL, (void*)Context__Evaluate2_fn, 0, false, uObject_typeof(), 1, ::g::Uno::String_typeof()),
        new uFunction("EvaluateNoResult", NULL, (void*)Context__EvaluateNoResult_fn, 0, false, uVoid_typeof(), 1, ::g::Uno::String_typeof()),
        new uFunction("MakeArray", NULL, (void*)Context__MakeArray_fn, 0, false, ::g::Fuse::Scripting::Duktape::JSArray_typeof(), 0),
        new uFunction("MakeObject", NULL, (void*)Context__MakeObject_fn, 0, false, ::g::Fuse::Scripting::Duktape::JSObject_typeof(), 0),
        new uFunction(".ctor", NULL, (void*)Context__New1_fn, 0, true, Context_typeof(), 1, ::g::Uno::Threading::IDispatcher_typeof()));
    return type;
}
// C-ABI thunks for Context members (generated). Most simply dereference
// by-value parameters and forward to the member function; the exceptions
// (Dispose, Evaluate, get_GlobalObject) carry inline bodies and are
// commented individually below.
// public Context(Uno.Threading.IDispatcher dispatcher) :149
void Context__ctor_1_fn(Context* __this, uObject* dispatcher)
{
    __this->ctor_1(dispatcher);
}
// internal int AddCallback(Fuse.Scripting.Callback callback) :158
void Context__AddCallback_fn(Context* __this, uDelegate* callback, int* __retval)
{
    *__retval = __this->AddCallback(callback);
}
// internal void CheckError(int errorCode) :420
void Context__CheckError_fn(Context* __this, int* errorCode)
{
    __this->CheckError(*errorCode);
}
// internal void Construct(int argc) :502
void Context__Construct_fn(Context* __this, int* argc)
{
    __this->Construct(*argc);
}
// internal bool DelProperty(int index, string key) :434
void Context__DelProperty2_fn(Context* __this, int* index, uString* key, bool* __retval)
{
    *__retval = __this->DelProperty2(*index, key);
}
// public override sealed void Dispose() :242
// Destroys the duktape heap and marks the context dead.
void Context__Dispose_fn(Context* __this)
{
    uStackFrame __("Fuse.Scripting.Duktape.Context", "Dispose()");
    ::g::Fuse::Scripting::Duktape::duktape::destroy_heap(__this->_handle);
    __this->_isAlive = false;
}
// internal bool EnumeratorNext(int index, bool getValue) :524
void Context__EnumeratorNext_fn(Context* __this, int* index, bool* getValue, bool* __retval)
{
    *__retval = __this->EnumeratorNext(*index, *getValue);
}
// public object Evaluate(string code) :215
void Context__Evaluate2_fn(Context* __this, uString* code, uObject** __retval)
{
    *__retval = __this->Evaluate2(code);
}
// public override sealed object Evaluate(string fileName, string code) :196
// Pushes code + fileName, safe-compiles and safe-calls, then converts the
// top-of-stack result to an Uno object and pops it.
void Context__Evaluate1_fn(Context* __this, uString* fileName, uString* code, uObject** __retval)
{
    uStackFrame __("Fuse.Scripting.Duktape.Context", "Evaluate(string,string)");
    __this->Push5(code);
    __this->Push5(fileName);
    __this->CheckError(__this->SafeCompile(0));
    __this->CheckError(__this->SafeCall(0));
    uObject* result = __this->IndexToObject(__this->GetTop() - 1);
    __this->Pop();
    return *__retval = result, void();
}
// public void EvaluateNoResult(string code) :209
void Context__EvaluateNoResult_fn(Context* __this, uString* code)
{
    __this->EvaluateNoResult(code);
}
// internal bool GetBool(int index) :436
void Context__GetBool_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->GetBool(*index);
}
// internal Fuse.Scripting.ScriptException GetError(int index) :372
void Context__GetError_fn(Context* __this, int* index, ::g::Fuse::Scripting::ScriptException** __retval)
{
    *__retval = __this->GetError(*index);
}
// internal Uno.IntPtr GetHeapPtr(int index) :488
void Context__GetHeapPtr_fn(Context* __this, int* index, void** __retval)
{
    *__retval = __this->GetHeapPtr(*index);
}
// internal int GetInt(int index) :468
void Context__GetInt_fn(Context* __this, int* index, int* __retval)
{
    *__retval = __this->GetInt(*index);
}
// internal long GetLength(int index) :490
void Context__GetLength_fn(Context* __this, int* index, int64_t* __retval)
{
    *__retval = __this->GetLength(*index);
}
// internal double GetNumber(int index) :466
void Context__GetNumber_fn(Context* __this, int* index, double* __retval)
{
    *__retval = __this->GetNumber(*index);
}
// internal bool GetProperty(int index, int arrIndex) :438
void Context__GetProperty_fn(Context* __this, int* index, int* arrIndex, bool* __retval)
{
    *__retval = __this->GetProperty(*index, *arrIndex);
}
// internal bool GetProperty(int index, string key) :440
void Context__GetProperty1_fn(Context* __this, int* index, uString* key, bool* __retval)
{
    *__retval = __this->GetProperty1(*index, key);
}
// internal string GetString(int index) :492
void Context__GetString_fn(Context* __this, int* index, uString** __retval)
{
    *__retval = __this->GetString(*index);
}
// internal int GetTop() :470
void Context__GetTop_fn(Context* __this, int* __retval)
{
    *__retval = __this->GetTop();
}
// public override sealed Fuse.Scripting.Object get_GlobalObject() :123
// Lazily wraps the duktape global object in a JSObject and caches it.
void Context__get_GlobalObject_fn(Context* __this, ::g::Fuse::Scripting::Object** __retval)
{
    uStackFrame __("Fuse.Scripting.Duktape.Context", "get_GlobalObject()");
    if (__this->_globalObject == NULL)
    {
        __this->PushGlobalObject();
        void* handle = __this->GetHeapPtr(__this->GetTop() - 1);
        __this->Pop();
        __this->_globalObject = ::g::Fuse::Scripting::Duktape::JSObject::New1(__this, handle);
    }
    return *__retval = __this->_globalObject, void();
}
// More generated C-ABI thunks for Context members: stack predicates
// (Is*), stack accessors (Get*/Has*/Put*), push helpers (Push*), and the
// safe-call/compile entry points. Each forwards to the member function.
// internal bool HasProperty(int index, string key) :464
void Context__HasProperty_fn(Context* __this, int* index, uString* key, bool* __retval)
{
    *__retval = __this->HasProperty(*index, key);
}
// internal object IndexToObject(int index) :248
void Context__IndexToObject_fn(Context* __this, int* index, uObject** __retval)
{
    *__retval = __this->IndexToObject(*index);
}
// private void InitCallbacks() :166
void Context__InitCallbacks_fn(Context* __this)
{
    __this->InitCallbacks();
}
// internal bool IsArray(int index) :442
void Context__IsArray_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsArray(*index);
}
// internal bool IsBool(int index) :444
void Context__IsBool_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsBool(*index);
}
// internal bool IsFunction(int index) :446
void Context__IsFunction_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsFunction(*index);
}
// internal bool IsNullOrUndefined(int index) :452
void Context__IsNullOrUndefined_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsNullOrUndefined(*index);
}
// internal bool IsNumber(int index) :454
void Context__IsNumber_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsNumber(*index);
}
// internal bool IsObject(int index) :456
void Context__IsObject_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsObject(*index);
}
// internal bool IsString(int index) :458
void Context__IsString_fn(Context* __this, int* index, bool* __retval)
{
    *__retval = __this->IsString(*index);
}
// public Fuse.Scripting.Duktape.JSArray MakeArray() :233
void Context__MakeArray_fn(Context* __this, ::g::Fuse::Scripting::Duktape::JSArray** __retval)
{
    *__retval = __this->MakeArray();
}
// public Fuse.Scripting.Duktape.JSObject MakeObject() :224
void Context__MakeObject_fn(Context* __this, ::g::Fuse::Scripting::Duktape::JSObject** __retval)
{
    *__retval = __this->MakeObject();
}
// public Context New(Uno.Threading.IDispatcher dispatcher) :149
void Context__New1_fn(uObject* dispatcher, Context** __retval)
{
    *__retval = Context::New1(dispatcher);
}
// internal void Pop() :508
void Context__Pop_fn(Context* __this)
{
    __this->Pop();
}
// internal void Pop2() :510
void Context__Pop2_fn(Context* __this)
{
    __this->Pop2();
}
// private void ProxyCallback(Uno.IntPtr ctx) :177
void Context__ProxyCallback_fn(Context* __this, void** ctx)
{
    __this->ProxyCallback(*ctx);
}
// internal void Push(bool val) :514
void Context__Push_fn(Context* __this, bool* val)
{
    __this->Push(*val);
}
// internal void Push(double val) :516
void Context__Push1_fn(Context* __this, double* val)
{
    __this->Push1(*val);
}
// internal void Push(Fuse.Scripting.Callback callback) :365
void Context__Push2_fn(Context* __this, uDelegate* callback)
{
    __this->Push2(callback);
}
// internal void Push(int val) :518
void Context__Push3_fn(Context* __this, int* val)
{
    __this->Push3(*val);
}
// internal void Push(object value) :282
void Context__Push4_fn(Context* __this, uObject* value)
{
    __this->Push4(value);
}
// internal void Push(string str) :520
void Context__Push5_fn(Context* __this, uString* str)
{
    __this->Push5(str);
}
// internal int PushArray() :472
void Context__PushArray_fn(Context* __this, int* __retval)
{
    *__retval = __this->PushArray();
}
// internal void PushCallbackProxy(int argc) :526
void Context__PushCallbackProxy_fn(Context* __this, int* argc)
{
    __this->PushCallbackProxy(*argc);
}
// internal void PushDelegate(Uno.Action<Uno.IntPtr> del) :528
void Context__PushDelegate_fn(Context* __this, uDelegate* del)
{
    __this->PushDelegate(del);
}
// internal void PushEnumerator(int index, Fuse.Scripting.Duktape.EnumFlags flags) :522
void Context__PushEnumerator_fn(Context* __this, int* index, int* flags)
{
    __this->PushEnumerator(*index, *flags);
}
// internal void PushGlobalObject() :530
void Context__PushGlobalObject_fn(Context* __this)
{
    __this->PushGlobalObject();
}
// internal void PushGlobalStash() :532
void Context__PushGlobalStash_fn(Context* __this)
{
    __this->PushGlobalStash();
}
// internal int PushHeapPtr(Uno.IntPtr ptr) :474
void Context__PushHeapPtr_fn(Context* __this, void** ptr, int* __retval)
{
    *__retval = __this->PushHeapPtr(*ptr);
}
// internal void PushNull() :534
void Context__PushNull_fn(Context* __this)
{
    __this->PushNull();
}
// internal int PushObject() :476
void Context__PushObject_fn(Context* __this, int* __retval)
{
    *__retval = __this->PushObject();
}
// internal bool PutProperty(int index, int arrIndex) :460
void Context__PutProperty_fn(Context* __this, int* index, int* arrIndex, bool* __retval)
{
    *__retval = __this->PutProperty(*index, *arrIndex);
}
// internal bool PutProperty(int index, string key) :462
void Context__PutProperty1_fn(Context* __this, int* index, uString* key, bool* __retval)
{
    *__retval = __this->PutProperty1(*index, key);
}
// internal int SafeCall(int argc) :478
void Context__SafeCall_fn(Context* __this, int* argc, int* __retval)
{
    *__retval = __this->SafeCall(*argc);
}
// internal int SafeCallMethod(int argc) :480
void Context__SafeCallMethod_fn(Context* __this, int* argc, int* __retval)
{
    *__retval = __this->SafeCallMethod(*argc);
}
// internal int SafeCompile(Fuse.Scripting.Duktape.CompileFlag flags) :484
void Context__SafeCompile_fn(Context* __this, uint32_t* flags, int* __retval)
{
*__retval = __this->SafeCompile(*flags);
}
// internal int SafeCompileFunction(string source) :482
void Context__SafeCompileFunction_fn(Context* __this, uString* source, int* __retval)
{
*__retval = __this->SafeCompileFunction(source);
}
// internal int SafeEval(string code) :486
void Context__SafeEval_fn(Context* __this, uString* code, int* __retval)
{
*__retval = __this->SafeEval(code);
}
// internal string SafeToString(int index) :494
void Context__SafeToString_fn(Context* __this, int* index, uString** __retval)
{
*__retval = __this->SafeToString(*index);
}
// internal string Stash(Uno.IntPtr heapPtr) :346
void Context__Stash_fn(Context* __this, void** heapPtr, uString** __retval)
{
*__retval = __this->Stash(*heapPtr);
}
// Static backing field for the shared, empty object[] used when a JS->Uno
// callback is invoked with zero arguments (see ProxyCallback).
uSStrong<uArray*> Context::_emptyArgs_;
// public Context(Uno.Threading.IDispatcher dispatcher) [instance] :149
// Instance constructor body. Order matters: the Duktape heap is created
// first, then the int -> Fuse.Scripting.Callback dictionary, then the
// bootstrap script (STRINGS[0], "var __globa...") is evaluated, and
// finally the callback proxy is installed on the global object.
void Context::ctor_1(uObject* dispatcher)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", ".ctor(Uno.Threading.IDispatcher)");
ctor_(dispatcher);
_isAlive = true;
_handle = ::g::Fuse::Scripting::Duktape::duktape::create_heap_default();
_callbacks = ((::g::Uno::Collections::Dictionary*)::g::Uno::Collections::Dictionary::New1(::TYPES[3/*Uno.Collections.Dictionary<int, Fuse.Scripting.Callback>*/]));
EvaluateNoResult(::STRINGS[0/*"var __globa...*/]);
InitCallbacks();
}
// internal int AddCallback(Fuse.Scripting.Callback callback) [instance] :158
// Registers `callback` in the _callbacks dictionary under the next integer
// id and returns that id (post-increments _callbacksCount). The callback is
// wrapped in a CallbackClosure proxy bound to this context's Duktape heap.
int Context::AddCallback(uDelegate* callback)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "AddCallback(Fuse.Scripting.Callback)");
::g::Uno::Collections::Dictionary__Add_fn(uPtr(_callbacks), uCRef<int>(_callbacksCount), uDelegate::New(::TYPES[0/*Fuse.Scripting.Callback*/], (void*)::g::Fuse::Scripting::Duktape::CallbackClosure__Proxy_fn, ::g::Fuse::Scripting::Duktape::CallbackClosure::New1(_handle, callback)));
return _callbacksCount++;
}
// internal void CheckError(int errorCode) [instance] :420
// Converts a nonzero Duktape status code into a thrown ScriptException.
// On failure Duktape leaves the error object on top of the value stack;
// it is converted via GetError(top-1), popped, and then thrown.
void Context::CheckError(int errorCode)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "CheckError(int)");
if (errorCode != 0)
{
::g::Fuse::Scripting::ScriptException* e = GetError(GetTop() - 1);
Pop();
U_THROW(e);
}
}
// internal void Construct(int argc) [instance] :502
// Invokes the constructor on the stack with `argc` arguments (duk_new).
void Context::Construct(int argc)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Construct(int)");
::g::Fuse::Scripting::Duktape::duktape::new_(_handle, argc);
}
// internal bool DelProperty(int index, string key) [instance] :434
// Deletes property `key` from the object at stack `index`.
bool Context::DelProperty2(int index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "DelProperty(int,string)");
return ::g::Fuse::Scripting::Duktape::duktape::del_prop_string(_handle, index, key);
}
// internal bool EnumeratorNext(int index, bool getValue) [instance] :524
// Advances the enumerator at stack `index`; when getValue is true the
// value is pushed along with the key (duk_next semantics).
bool Context::EnumeratorNext(int index, bool getValue)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "EnumeratorNext(int,bool)");
return ::g::Fuse::Scripting::Duktape::duktape::next(_handle, index, getValue);
}
// public object Evaluate(string code) [instance] :215
// Evaluates `code`, throwing ScriptException on error (via CheckError).
// On success the eval result sits on top of the stack; it is converted to
// a Uno object and then popped before returning.
uObject* Context::Evaluate2(uString* code)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Evaluate(string)");
CheckError(SafeEval(code));
uObject* result = IndexToObject(GetTop() - 1);
Pop();
return result;
}
// public void EvaluateNoResult(string code) [instance] :209
// Evaluates `code` and discards the result (still pushed by eval, hence
// the Pop()); throws ScriptException on error.
void Context::EvaluateNoResult(uString* code)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "EvaluateNoResult(string)");
CheckError(SafeEval(code));
Pop();
}
// internal bool GetBool(int index) [instance] :436
bool Context::GetBool(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetBool(int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_boolean(_handle, index);
}
// internal Fuse.Scripting.ScriptException GetError(int index) [instance] :372
::g::Fuse::Scripting::ScriptException* Context::GetError(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetError(int)");
uString* name = NULL;
uString* message = NULL;
uString* fileName = NULL;
int lineNumber = -1;
uString* stack = NULL;
if (IsObject(index))
{
GetProperty1(index, ::STRINGS[1/*"name"*/]);
if (IsString(-1))
name = GetString(-1);
Pop();
GetProperty1(index, ::STRINGS[2/*"message"*/]);
if (IsString(-1))
message = GetString(-1);
Pop();
GetProperty1(index, ::STRINGS[3/*"fileName"*/]);
if (IsString(-1))
fileName = GetString(-1);
Pop();
GetProperty1(index, ::STRINGS[4/*"lineNumber"*/]);
lineNumber = GetInt(-1);
Pop();
GetProperty1(index, ::STRINGS[5/*"stack"*/]);
if (IsString(-1))
stack = GetString(-1);
Pop();
}
else
message = SafeToString(index);
return ::g::Fuse::Scripting::ScriptException::New4(name, message, fileName, lineNumber, NULL, stack);
}
// -----------------------------------------------------------------------------
// Thin value-stack accessors: each forwards directly to the corresponding
// duktape:: static wrapper using this context's heap handle (_handle).
// -----------------------------------------------------------------------------
// internal Uno.IntPtr GetHeapPtr(int index) [instance] :488
void* Context::GetHeapPtr(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetHeapPtr(int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_heapptr(_handle, index);
}
// internal int GetInt(int index) [instance] :468
int Context::GetInt(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetInt(int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_int(_handle, index);
}
// internal long GetLength(int index) [instance] :490
int64_t Context::GetLength(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetLength(int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_length(_handle, index);
}
// internal double GetNumber(int index) [instance] :466
double Context::GetNumber(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetNumber(int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_number(_handle, index);
}
// internal bool GetProperty(int index, int arrIndex) [instance] :438
// Pushes object[arrIndex] from the object at stack `index`.
bool Context::GetProperty(int index, int arrIndex)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetProperty(int,int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_prop_index(_handle, index, arrIndex);
}
// internal bool GetProperty(int index, string key) [instance] :440
// Pushes object[key] from the object at stack `index`.
bool Context::GetProperty1(int index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetProperty(int,string)");
return ::g::Fuse::Scripting::Duktape::duktape::get_prop_string(_handle, index, key);
}
// internal string GetString(int index) [instance] :492
uString* Context::GetString(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetString(int)");
return ::g::Fuse::Scripting::Duktape::duktape::get_string(_handle, index);
}
// internal int GetTop() [instance] :470
int Context::GetTop()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "GetTop()");
return ::g::Fuse::Scripting::Duktape::duktape::get_top(_handle);
}
// internal bool HasProperty(int index, string key) [instance] :464
bool Context::HasProperty(int index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "HasProperty(int,string)");
return ::g::Fuse::Scripting::Duktape::duktape::has_prop_string(_handle, index, key);
}
// internal object IndexToObject(int index) [instance] :248
// Converts the JS value at stack `index` into a Uno object:
// function -> JSFunction, array -> JSArray, other object -> JSObject,
// number -> boxed double, string -> uString, boolean -> boxed bool,
// null/undefined -> NULL; anything else throws.
// The check ORDER is significant: functions and arrays also satisfy
// IsObject, so they must be tested before the generic object case.
uObject* Context::IndexToObject(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IndexToObject(int)");
if (IsFunction(index))
return ::g::Fuse::Scripting::Duktape::JSFunction::New1(this, GetHeapPtr(index));
if (IsArray(index))
return ::g::Fuse::Scripting::Duktape::JSArray::New1(this, GetHeapPtr(index));
if (IsObject(index))
return ::g::Fuse::Scripting::Duktape::JSObject::New1(this, GetHeapPtr(index));
if (IsNumber(index))
return uBox(::TYPES[7/*double*/], GetNumber(index));
if (IsString(index))
return GetString(index);
if (IsBool(index))
return uBox(::TYPES[9/*bool*/], GetBool(index));
if (IsNullOrUndefined(index))
return NULL;
U_THROW(::g::Uno::Exception::New2(::STRINGS[6/*"Could not c...*/]));
}
// private void InitCallbacks() [instance] :166
// Installs the native callback proxy: wraps ProxyCallback in a delegate,
// pushes it plus the global object plus the callback-proxy function, then
// stores the proxy on the global object under STRINGS[7] ("__callback_...").
// The final Pop() removes the global object, rebalancing the stack.
void Context::InitCallbacks()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "InitCallbacks()");
_proxyCallback = uDelegate::New(::TYPES[5/*Uno.Action<Uno.IntPtr>*/], (void*)Context__ProxyCallback_fn, this);
PushDelegate(_proxyCallback);
PushGlobalObject();
PushCallbackProxy(-1);
PutProperty1(GetTop() - 2, ::STRINGS[7/*"__callback_...*/]);
Pop();
}
// -----------------------------------------------------------------------------
// Thin type predicates on the value stack; each forwards to the matching
// duktape:: static wrapper with this context's heap handle.
// -----------------------------------------------------------------------------
// internal bool IsArray(int index) [instance] :442
bool Context::IsArray(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsArray(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_array(_handle, index);
}
// internal bool IsBool(int index) [instance] :444
bool Context::IsBool(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsBool(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_boolean(_handle, index);
}
// internal bool IsFunction(int index) [instance] :446
bool Context::IsFunction(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsFunction(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_function(_handle, index);
}
// internal bool IsNullOrUndefined(int index) [instance] :452
bool Context::IsNullOrUndefined(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsNullOrUndefined(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_null_or_undefined(_handle, index);
}
// internal bool IsNumber(int index) [instance] :454
bool Context::IsNumber(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsNumber(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_number(_handle, index);
}
// internal bool IsObject(int index) [instance] :456
bool Context::IsObject(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsObject(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_object(_handle, index);
}
// internal bool IsString(int index) [instance] :458
bool Context::IsString(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "IsString(int)");
return ::g::Fuse::Scripting::Duktape::duktape::is_string(_handle, index);
}
// public Fuse.Scripting.Duktape.JSArray MakeArray() [instance] :233
::g::Fuse::Scripting::Duktape::JSArray* Context::MakeArray()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "MakeArray()");
int index = PushArray();
void* ptr = GetHeapPtr(index);
::g::Fuse::Scripting::Duktape::JSArray* array = ::g::Fuse::Scripting::Duktape::JSArray::New1(this, ptr);
Pop();
return array;
}
// public Fuse.Scripting.Duktape.JSObject MakeObject() [instance] :224
::g::Fuse::Scripting::Duktape::JSObject* Context::MakeObject()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "MakeObject()");
int index = PushObject();
void* ptr = GetHeapPtr(index);
::g::Fuse::Scripting::Duktape::JSObject* obj = ::g::Fuse::Scripting::Duktape::JSObject::New1(this, ptr);
Pop();
return obj;
}
// internal void Pop() [instance] :508
// Removes the top value from the value stack.
void Context::Pop()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Pop()");
::g::Fuse::Scripting::Duktape::duktape::pop(_handle);
}
// internal void Pop2() [instance] :510
// Removes the top two values from the value stack.
void Context::Pop2()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Pop2()");
::g::Fuse::Scripting::Duktape::duktape::pop_2(_handle);
}
// private void ProxyCallback(Uno.IntPtr ctx) [instance] :177
// Native entry point invoked from JS through the callback proxy.
// Stack layout (as read here): index 0 holds the argument array and the
// top value holds the integer callback id. Arguments are unpacked into an
// object[] (the shared _emptyArgs when there are none), the registered
// Fuse.Scripting.Callback is looked up by id in _callbacks and invoked,
// and its result is pushed back for the JS side via Push4.
void Context::ProxyCallback(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "ProxyCallback(Uno.IntPtr)");
uDelegate* ret2;
int argc = (int)GetLength(0);
uArray* args = Context::_emptyArgs();
if (argc != 0)
{
args = uArray::New(::TYPES[2/*object[]*/], argc);
for (int i = 0; i < argc; i++)
{
GetProperty(0, i);
uPtr(args)->Strong<uObject*>(i) = IndexToObject(GetTop() - 1);
Pop();
}
}
int index = GetInt(GetTop() - 1);
uObject* result = uPtr((::g::Uno::Collections::Dictionary__get_Item_fn(uPtr(_callbacks), uCRef<int>(index), &ret2), ret2))->Invoke(1, args);
Push4(result);
}
// internal void Push(bool val) [instance] :514
void Context::Push(bool val)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Push(bool)");
::g::Fuse::Scripting::Duktape::duktape::push_boolean(_handle, val);
}
// internal void Push(double val) [instance] :516
void Context::Push1(double val)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Push(double)");
::g::Fuse::Scripting::Duktape::duktape::push_number(_handle, val);
}
// internal void Push(Fuse.Scripting.Callback callback) [instance] :365
// Pushes a Uno callback as a JS function: registers it to get an integer
// id, then compiles a small JS trampoline (STRINGS[8] + id + STRINGS[9],
// i.e. "function() ... <id> ); }") that routes calls through the installed
// callback proxy. The compiled function is left on the value stack.
void Context::Push2(uDelegate* callback)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Push(Fuse.Scripting.Callback)");
int index = AddCallback(callback);
int result = SafeCompileFunction(::g::Uno::String::op_Addition2(::g::Uno::String::op_Addition2(::STRINGS[8/*"function() ...*/], ::g::Uno::Int::ToString(index, ::TYPES[6/*int*/])), ::STRINGS[9/*"); }"*/]));
CheckError(result);
}
// internal void Push(int val) [instance] :518
void Context::Push3(int val)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Push(int)");
::g::Fuse::Scripting::Duktape::duktape::push_int(_handle, val);
}
// internal void Push(object value) [instance] :282
// Pushes an arbitrary Uno object onto the value stack by dispatching on
// its runtime type: int, double, string, bool, null, Callback, then the
// JSFunction/JSArray/JSObject wrappers (pushed via their heap pointers).
// Note the null check comes AFTER the boxed-value checks — uIs on NULL is
// relied upon to fail for the preceding cases. Unknown types throw.
void Context::Push4(uObject* value)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Push(object)");
if (uIs(value, ::TYPES[6/*int*/]))
{
Push3(uUnbox<int>(::TYPES[6/*int*/], value));
return;
}
if (uIs(value, ::TYPES[7/*double*/]))
{
Push1(uUnbox<double>(::TYPES[7/*double*/], value));
return;
}
if (uIs(value, ::TYPES[8/*string*/]))
{
Push5(uCast<uString*>(value, ::TYPES[8/*string*/]));
return;
}
if (uIs(value, ::TYPES[9/*bool*/]))
{
Push(uUnbox<bool>(::TYPES[9/*bool*/], value));
return;
}
if (value == NULL)
{
PushNull();
return;
}
uDelegate* c = uAs<uDelegate*>(value, ::TYPES[0/*Fuse.Scripting.Callback*/]);
if (::g::Uno::Delegate::op_Inequality(c, NULL))
{
Push2(c);
return;
}
::g::Fuse::Scripting::Duktape::JSFunction* f = uAs< ::g::Fuse::Scripting::Duktape::JSFunction*>(value, ::TYPES[11/*Fuse.Scripting.Duktape.JSFunction*/]);
if (f != NULL)
{
PushHeapPtr(uPtr(f)->Handle());
return;
}
::g::Fuse::Scripting::Duktape::JSArray* a = uAs< ::g::Fuse::Scripting::Duktape::JSArray*>(value, ::TYPES[12/*Fuse.Scripting.Duktape.JSArray*/]);
if (a != NULL)
{
PushHeapPtr(uPtr(a)->Handle());
return;
}
::g::Fuse::Scripting::Duktape::JSObject* o = uAs< ::g::Fuse::Scripting::Duktape::JSObject*>(value, ::TYPES[13/*Fuse.Scripting.Duktape.JSObject*/]);
if (o != NULL)
{
PushHeapPtr(uPtr(o)->Handle());
return;
}
U_THROW(::g::Uno::Exception::New2(::g::Uno::String::op_Addition1(::STRINGS[10/*"Cannot push...*/], value)));
}
// -----------------------------------------------------------------------------
// Thin push/put/call wrappers; each forwards to the matching duktape::
// static with this context's heap handle.
// -----------------------------------------------------------------------------
// internal void Push(string str) [instance] :520
void Context::Push5(uString* str)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Push(string)");
::g::Fuse::Scripting::Duktape::duktape::push_string(_handle, str);
}
// internal int PushArray() [instance] :472
// Pushes an empty array; returns its stack index.
int Context::PushArray()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushArray()");
return ::g::Fuse::Scripting::Duktape::duktape::push_array(_handle);
}
// internal void PushCallbackProxy(int argc) [instance] :526
void Context::PushCallbackProxy(int argc)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushCallbackProxy(int)");
::g::Fuse::Scripting::Duktape::duktape::push_callback_proxy(_handle, argc);
}
// internal void PushDelegate(Uno.Action<Uno.IntPtr> del) [instance] :528
void Context::PushDelegate(uDelegate* del)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushDelegate(Uno.Action<Uno.IntPtr>)");
::g::Fuse::Scripting::Duktape::duktape::push_delegate(_handle, del);
}
// internal void PushEnumerator(int index, Fuse.Scripting.Duktape.EnumFlags flags) [instance] :522
// Creates an enumerator for the object at `index` (duk_enum semantics).
void Context::PushEnumerator(int index, int flags)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushEnumerator(int,Fuse.Scripting.Duktape.EnumFlags)");
::g::Fuse::Scripting::Duktape::duktape::enum_(_handle, index, flags);
}
// internal void PushGlobalObject() [instance] :530
void Context::PushGlobalObject()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushGlobalObject()");
::g::Fuse::Scripting::Duktape::duktape::push_global_object(_handle);
}
// internal void PushGlobalStash() [instance] :532
void Context::PushGlobalStash()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushGlobalStash()");
::g::Fuse::Scripting::Duktape::duktape::push_global_stash(_handle);
}
// internal int PushHeapPtr(Uno.IntPtr ptr) [instance] :474
// Pushes the heap object referenced by `ptr`; returns its stack index.
int Context::PushHeapPtr(void* ptr)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushHeapPtr(Uno.IntPtr)");
return ::g::Fuse::Scripting::Duktape::duktape::push_heapptr(_handle, ptr);
}
// internal void PushNull() [instance] :534
void Context::PushNull()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushNull()");
::g::Fuse::Scripting::Duktape::duktape::push_null(_handle);
}
// internal int PushObject() [instance] :476
// Pushes an empty object; returns its stack index.
int Context::PushObject()
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PushObject()");
return ::g::Fuse::Scripting::Duktape::duktape::push_object(_handle);
}
// internal bool PutProperty(int index, int arrIndex) [instance] :460
// Stores the top of the stack as object[arrIndex] on the object at `index`.
bool Context::PutProperty(int index, int arrIndex)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PutProperty(int,int)");
return ::g::Fuse::Scripting::Duktape::duktape::put_prop_index(_handle, index, arrIndex);
}
// internal bool PutProperty(int index, string key) [instance] :462
// Stores the top of the stack as object[key] on the object at `index`.
bool Context::PutProperty1(int index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "PutProperty(int,string)");
return ::g::Fuse::Scripting::Duktape::duktape::put_prop_string(_handle, index, key);
}
// internal int SafeCall(int argc) [instance] :478
// Protected call (duk_pcall); returns a status code for CheckError.
int Context::SafeCall(int argc)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "SafeCall(int)");
return ::g::Fuse::Scripting::Duktape::duktape::pcall(_handle, argc);
}
// internal int SafeCallMethod(int argc) [instance] :480
// Protected method call (duk_pcall_method); returns a status code.
int Context::SafeCallMethod(int argc)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "SafeCallMethod(int)");
return ::g::Fuse::Scripting::Duktape::duktape::pcall_method(_handle, argc);
}
// internal int SafeCompile(Fuse.Scripting.Duktape.CompileFlag flags) [instance] :484
// Protected compile (duk_pcompile); returns a status code.
int Context::SafeCompile(uint32_t flags)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "SafeCompile(Fuse.Scripting.Duktape.CompileFlag)");
return ::g::Fuse::Scripting::Duktape::duktape::pcompile(_handle, flags);
}
// internal int SafeCompileFunction(string source) [instance] :482
// Protected compile of `source` as a function expression; returns a status
// code for CheckError. The hard-coded flag 2U is presumably
// DUK_COMPILE_FUNCTION — TODO confirm against the Duktape headers.
int Context::SafeCompileFunction(uString* source)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "SafeCompileFunction(string)");
return ::g::Fuse::Scripting::Duktape::duktape::pcompile_string(_handle, 2U, source);
}
// internal int SafeEval(string code) [instance] :486
// Protected eval (duk_peval_string); on success the result is left on the
// stack. Returns a status code for CheckError.
int Context::SafeEval(uString* code)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "SafeEval(string)");
return ::g::Fuse::Scripting::Duktape::duktape::peval_string(_handle, code);
}
// internal string SafeToString(int index) [instance] :494
// Coerces the value at stack `index` to a string without throwing.
uString* Context::SafeToString(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "SafeToString(int)");
return ::g::Fuse::Scripting::Duktape::duktape::safe_to_string(_handle, index);
}
// internal string Stash(Uno.IntPtr heapPtr) [instance] :346
// Pins `heapPtr` into the Duktape global stash (protecting it from GC)
// under a freshly generated key ("__stashKey" + running counter) and
// returns that key. The final Pop() removes the stash object, leaving the
// value stack balanced.
uString* Context::Stash(void* heapPtr)
{
uStackFrame __("Fuse.Scripting.Duktape.Context", "Stash(Uno.IntPtr)");
uString* key = ::g::Uno::String::op_Addition2(::STRINGS[11/*"__stashKey"*/], ::g::Uno::Int::ToString(_stashKey++, ::TYPES[6/*int*/]));
PushGlobalStash();
PushHeapPtr(heapPtr);
PutProperty1(GetTop() - 2, key);
Pop();
return key;
}
// public Context New(Uno.Threading.IDispatcher dispatcher) [static] :149
// Factory: allocates an uninitialized Context via the Uno runtime and runs
// the instance constructor body (ctor_1).
Context* Context::New1(uObject* dispatcher)
{
Context* obj1 = (Context*)uNew(Context_typeof());
obj1->ctor_1(dispatcher);
return obj1;
}
// }
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(579)
// ------------------------------------------------------------------------------------------
// internal static extern class duktape :579
// {
// Lazily creates and caches the Uno runtime type object for the static
// extern class Fuse.Scripting.Duktape.duktape. The cache is a function-
// local uSStrong so the type is registered at most once.
uClassType* duktape_typeof()
{
static uSStrong<uClassType*> type;
if (type != NULL) return type;
uTypeOptions options;
options.TypeSize = sizeof(uClassType);
type = uClassType::New("Fuse.Scripting.Duktape.duktape", options);
return type;
}
// -----------------------------------------------------------------------------
// Generated C-style glue thunks for the static extern class duktape.
// Same Uno marshalling convention as the Context thunks above: value-type
// arguments (including the Uno.IntPtr heap handle) arrive as pointers and
// are dereferenced on forwarding; results go out via `__retval`.
// -----------------------------------------------------------------------------
// internal static Uno.IntPtr create_heap_default() :813
void duktape__create_heap_default_fn(void** __retval)
{
*__retval = duktape::create_heap_default();
}
// internal static bool del_prop_string(Uno.IntPtr ctx, int obj_index, string key) :750
void duktape__del_prop_string_fn(void** ctx, int* obj_index, uString* key, bool* __retval)
{
*__retval = duktape::del_prop_string(*ctx, *obj_index, key);
}
// internal static void destroy_heap(Uno.IntPtr ctx) :855
void duktape__destroy_heap_fn(void** ctx)
{
duktape::destroy_heap(*ctx);
}
// internal static void enum_(Uno.IntPtr ctx, int index, Fuse.Scripting.Duktape.EnumFlags flags) :595
void duktape__enum__fn(void** ctx, int* index, int* flags)
{
duktape::enum_(*ctx, *index, *flags);
}
// internal static void error(Uno.IntPtr ctx, string message) :785
void duktape__error_fn(void** ctx, uString* message)
{
duktape::error(*ctx, message);
}
// internal static bool get_boolean(Uno.IntPtr ctx, int index) :985
void duktape__get_boolean_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::get_boolean(*ctx, *index);
}
// internal static Uno.IntPtr get_heapptr(Uno.IntPtr ctx, int index) :955
void duktape__get_heapptr_fn(void** ctx, int* index, void** __retval)
{
*__retval = duktape::get_heapptr(*ctx, *index);
}
// internal static int get_int(Uno.IntPtr ctx, int index) :1030
void duktape__get_int_fn(void** ctx, int* index, int* __retval)
{
*__retval = duktape::get_int(*ctx, *index);
}
// internal static long get_length(Uno.IntPtr ctx, int index) :970
void duktape__get_length_fn(void** ctx, int* index, int64_t* __retval)
{
*__retval = duktape::get_length(*ctx, *index);
}
// internal static double get_number(Uno.IntPtr ctx, int index) :965
void duktape__get_number_fn(void** ctx, int* index, double* __retval)
{
*__retval = duktape::get_number(*ctx, *index);
}
// internal static bool get_prop_index(Uno.IntPtr ctx, int index, int arr_index) :995
void duktape__get_prop_index_fn(void** ctx, int* index, int* arr_index, bool* __retval)
{
*__retval = duktape::get_prop_index(*ctx, *index, *arr_index);
}
// internal static bool get_prop_string(Uno.IntPtr ctx, int obj_index, string key) :885
void duktape__get_prop_string_fn(void** ctx, int* obj_index, uString* key, bool* __retval)
{
*__retval = duktape::get_prop_string(*ctx, *obj_index, key);
}
// internal static string get_string(Uno.IntPtr ctx, int index) :975
void duktape__get_string_fn(void** ctx, int* index, uString** __retval)
{
*__retval = duktape::get_string(*ctx, *index);
}
// internal static int get_top(Uno.IntPtr ctx) :828
void duktape__get_top_fn(void** ctx, int* __retval)
{
*__retval = duktape::get_top(*ctx);
}
// internal static bool has_prop_string(Uno.IntPtr ctx, int index, string key) :1040
void duktape__has_prop_string_fn(void** ctx, int* index, uString* key, bool* __retval)
{
*__retval = duktape::has_prop_string(*ctx, *index, key);
}
// internal static bool is_array(Uno.IntPtr ctx, int index) :890
void duktape__is_array_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_array(*ctx, *index);
}
// internal static bool is_boolean(Uno.IntPtr ctx, int index) :895
void duktape__is_boolean_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_boolean(*ctx, *index);
}
// internal static bool is_function(Uno.IntPtr ctx, int index) :905
void duktape__is_function_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_function(*ctx, *index);
}
// internal static bool is_null_or_undefined(Uno.IntPtr ctx, int index) :920
void duktape__is_null_or_undefined_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_null_or_undefined(*ctx, *index);
}
// internal static bool is_number(Uno.IntPtr ctx, int index) :925
void duktape__is_number_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_number(*ctx, *index);
}
// internal static bool is_object(Uno.IntPtr ctx, int index) :930
void duktape__is_object_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_object(*ctx, *index);
}
// internal static bool is_string(Uno.IntPtr ctx, int index) :935
void duktape__is_string_fn(void** ctx, int* index, bool* __retval)
{
*__retval = duktape::is_string(*ctx, *index);
}
// internal static void new_(Uno.IntPtr ctx, int nargs) :1045
void duktape__new__fn(void** ctx, int* nargs)
{
duktape::new_(*ctx, *nargs);
}
// internal static bool next(Uno.IntPtr ctx, int index, bool getValue) :600
void duktape__next_fn(void** ctx, int* index, bool* getValue, bool* __retval)
{
*__retval = duktape::next(*ctx, *index, *getValue);
}
// internal static int pcall(Uno.IntPtr ctx, int nargs) :615
void duktape__pcall_fn(void** ctx, int* nargs, int* __retval)
{
*__retval = duktape::pcall(*ctx, *nargs);
}
// internal static int pcall_method(Uno.IntPtr ctx, int nargs) :625
void duktape__pcall_method_fn(void** ctx, int* nargs, int* __retval)
{
*__retval = duktape::pcall_method(*ctx, *nargs);
}
// internal static int pcompile(Uno.IntPtr ctx, uint flags) :699
void duktape__pcompile_fn(void** ctx, uint32_t* flags, int* __retval)
{
*__retval = duktape::pcompile(*ctx, *flags);
}
// internal static int pcompile_string(Uno.IntPtr ctx, uint flags, string src) :694
void duktape__pcompile_string_fn(void** ctx, uint32_t* flags, uString* src, int* __retval)
{
*__retval = duktape::pcompile_string(*ctx, *flags, src);
}
// internal static int peval_string(Uno.IntPtr ctx, string code) :850
void duktape__peval_string_fn(void** ctx, uString* code, int* __retval)
{
*__retval = duktape::peval_string(*ctx, code);
}
// internal static void pop(Uno.IntPtr ctx) :823
void duktape__pop_fn(void** ctx)
{
duktape::pop(*ctx);
}
// internal static void pop_2(Uno.IntPtr ctx) :1005
void duktape__pop_2_fn(void** ctx)
{
duktape::pop_2(*ctx);
}
// internal static int push_array(Uno.IntPtr ctx) :1015
void duktape__push_array_fn(void** ctx, int* __retval)
{
*__retval = duktape::push_array(*ctx);
}
// internal static void push_boolean(Uno.IntPtr ctx, bool value) :990
void duktape__push_boolean_fn(void** ctx, bool* value)
{
duktape::push_boolean(*ctx, *value);
}
// internal static void push_callback_proxy(Uno.IntPtr ctx, int argc) :1025
void duktape__push_callback_proxy_fn(void** ctx, int* argc)
{
duktape::push_callback_proxy(*ctx, *argc);
}
// internal static void push_delegate(Uno.IntPtr ctx, Uno.Action<Uno.IntPtr> del) :1035
void duktape__push_delegate_fn(void** ctx, uDelegate* del)
{
duktape::push_delegate(*ctx, del);
}
// internal static void push_global_object(Uno.IntPtr ctx) :818
void duktape__push_global_object_fn(void** ctx)
{
duktape::push_global_object(*ctx);
}
// internal static void push_global_stash(Uno.IntPtr ctx) :1020
void duktape__push_global_stash_fn(void** ctx)
{
duktape::push_global_stash(*ctx);
}
// internal static int push_heapptr(Uno.IntPtr ctx, Uno.IntPtr ptr) :950
void duktape__push_heapptr_fn(void** ctx, void** ptr, int* __retval)
{
*__retval = duktape::push_heapptr(*ctx, *ptr);
}
// internal static void push_int(Uno.IntPtr ctx, int val) :875
void duktape__push_int_fn(void** ctx, int* val)
{
duktape::push_int(*ctx, *val);
}
// internal static void push_null(Uno.IntPtr ctx) :960
void duktape__push_null_fn(void** ctx)
{
duktape::push_null(*ctx);
}
// internal static void push_number(Uno.IntPtr ctx, double val) :839
void duktape__push_number_fn(void** ctx, double* val)
{
duktape::push_number(*ctx, *val);
}
// internal static int push_object(Uno.IntPtr ctx) :860
void duktape__push_object_fn(void** ctx, int* __retval)
{
*__retval = duktape::push_object(*ctx);
}
// internal static void push_string(Uno.IntPtr ctx, string str) :880
void duktape__push_string_fn(void** ctx, uString* str)
{
duktape::push_string(*ctx, str);
}
// internal static bool put_prop_index(Uno.IntPtr ctx, int index, int arr_index) :1000
void duktape__put_prop_index_fn(void** ctx, int* index, int* arr_index, bool* __retval)
{
*__retval = duktape::put_prop_index(*ctx, *index, *arr_index);
}
// internal static bool put_prop_string(Uno.IntPtr ctx, int obj_index, string key) :844
void duktape__put_prop_string_fn(void** ctx, int* obj_index, uString* key, bool* __retval)
{
*__retval = duktape::put_prop_string(*ctx, *obj_index, key);
}
// internal static string safe_to_string(Uno.IntPtr ctx, int index) :980
void duktape__safe_to_string_fn(void** ctx, int* index, uString** __retval)
{
*__retval = duktape::safe_to_string(*ctx, *index);
}
// -----------------------------------------------------------------------------
// Static wrappers over the native duk_* C API. The Uno.IntPtr heap handle
// is a raw void* here; uStringToCStr converts Uno strings for the C API.
// -----------------------------------------------------------------------------
// internal static Uno.IntPtr create_heap_default() [static] :813
void* duktape::create_heap_default()
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "create_heap_default()");
return duk_create_heap_default();
}
// internal static bool del_prop_string(Uno.IntPtr ctx, int obj_index, string key) [static] :750
bool duktape::del_prop_string(void* ctx, int obj_index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "del_prop_string(Uno.IntPtr,int,string)");
return duk_del_prop_string(ctx, obj_index, uStringToCStr(key));
}
// internal static void destroy_heap(Uno.IntPtr ctx) [static] :855
void duktape::destroy_heap(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "destroy_heap(Uno.IntPtr)");
duk_destroy_heap(ctx);
}
// internal static void enum_(Uno.IntPtr ctx, int index, Fuse.Scripting.Duktape.EnumFlags flags) [static] :595
void duktape::enum_(void* ctx, int index, int flags)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "enum_(Uno.IntPtr,int,Fuse.Scripting.Duktape.EnumFlags)");
duk_enum(ctx, index, flags);
}
// internal static void error(Uno.IntPtr ctx, string message) [static] :785
// Raises a Duktape Error with `message` as its text (longjmps; does not
// return normally — duk_error semantics).
void duktape::error(void* ctx, uString* message)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "error(Uno.IntPtr,string)");
// Pass the message through an explicit "%s" instead of using it as the
// format string itself: duk_error()'s third argument is a printf-style
// format, so a script-supplied message containing '%' conversions would
// previously trigger undefined behavior (format-string bug).
duk_error(ctx, DUK_ERR_ERROR, "%s", uStringToCStr(message));
}
// -----------------------------------------------------------------------------
// Static wrappers over the native duk_get_* / duk_has_* C API (continued).
// -----------------------------------------------------------------------------
// internal static bool get_boolean(Uno.IntPtr ctx, int index) [static] :985
bool duktape::get_boolean(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_boolean(Uno.IntPtr,int)");
return duk_get_boolean(ctx, index);
}
// internal static Uno.IntPtr get_heapptr(Uno.IntPtr ctx, int index) [static] :955
void* duktape::get_heapptr(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_heapptr(Uno.IntPtr,int)");
return duk_get_heapptr(ctx, index);
}
// internal static int get_int(Uno.IntPtr ctx, int index) [static] :1030
int duktape::get_int(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_int(Uno.IntPtr,int)");
return duk_get_int(ctx, index);
}
// internal static long get_length(Uno.IntPtr ctx, int index) [static] :970
int64_t duktape::get_length(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_length(Uno.IntPtr,int)");
return duk_get_length(ctx, index);
}
// internal static double get_number(Uno.IntPtr ctx, int index) [static] :965
double duktape::get_number(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_number(Uno.IntPtr,int)");
return duk_get_number(ctx, index);
}
// internal static bool get_prop_index(Uno.IntPtr ctx, int index, int arr_index) [static] :995
bool duktape::get_prop_index(void* ctx, int index, int arr_index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_prop_index(Uno.IntPtr,int,int)");
return duk_get_prop_index(ctx, index, arr_index);
}
// internal static bool get_prop_string(Uno.IntPtr ctx, int obj_index, string key) [static] :885
bool duktape::get_prop_string(void* ctx, int obj_index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_prop_string(Uno.IntPtr,int,string)");
return duk_get_prop_string(ctx, obj_index, uStringToCStr(key));
}
// internal static string get_string(Uno.IntPtr ctx, int index) [static] :975
// Converts the UTF-8 C string from Duktape into a Uno string.
uString* duktape::get_string(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_string(Uno.IntPtr,int)");
return uNewStringUtf8(duk_get_string(ctx, index));
}
// internal static int get_top(Uno.IntPtr ctx) [static] :828
int duktape::get_top(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "get_top(Uno.IntPtr)");
return duk_get_top(ctx);
}
// internal static bool has_prop_string(Uno.IntPtr ctx, int index, string key) [static] :1040
bool duktape::has_prop_string(void* ctx, int index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "has_prop_string(Uno.IntPtr,int,string)");
return duk_has_prop_string(ctx, index, uStringToCStr(key));
}
// internal static bool is_array(Uno.IntPtr ctx, int index) [static] :890
bool duktape::is_array(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_array(Uno.IntPtr,int)");
return duk_is_array(ctx, index);
}
// internal static bool is_boolean(Uno.IntPtr ctx, int index) [static] :895
bool duktape::is_boolean(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_boolean(Uno.IntPtr,int)");
return duk_is_boolean(ctx, index);
}
// internal static bool is_function(Uno.IntPtr ctx, int index) [static] :905
bool duktape::is_function(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_function(Uno.IntPtr,int)");
return duk_is_function(ctx, index);
}
// internal static bool is_null_or_undefined(Uno.IntPtr ctx, int index) [static] :920
bool duktape::is_null_or_undefined(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_null_or_undefined(Uno.IntPtr,int)");
return duk_is_null_or_undefined(ctx, index);
}
// internal static bool is_number(Uno.IntPtr ctx, int index) [static] :925
bool duktape::is_number(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_number(Uno.IntPtr,int)");
return duk_is_number(ctx, index);
}
// internal static bool is_object(Uno.IntPtr ctx, int index) [static] :930
bool duktape::is_object(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_object(Uno.IntPtr,int)");
return duk_is_object(ctx, index);
}
// internal static bool is_string(Uno.IntPtr ctx, int index) [static] :935
bool duktape::is_string(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "is_string(Uno.IntPtr,int)");
return duk_is_string(ctx, index);
}
// internal static void new_(Uno.IntPtr ctx, int nargs) [static] :1045
void duktape::new_(void* ctx, int nargs)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "new_(Uno.IntPtr,int)");
duk_new(ctx, nargs);
}
// internal static bool next(Uno.IntPtr ctx, int index, bool getValue) [static] :600
bool duktape::next(void* ctx, int index, bool getValue)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "next(Uno.IntPtr,int,bool)");
return duk_next(ctx, index, getValue);
}
// internal static int pcall(Uno.IntPtr ctx, int nargs) [static] :615
int duktape::pcall(void* ctx, int nargs)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "pcall(Uno.IntPtr,int)");
return duk_pcall(ctx, nargs);
}
// internal static int pcall_method(Uno.IntPtr ctx, int nargs) [static] :625
int duktape::pcall_method(void* ctx, int nargs)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "pcall_method(Uno.IntPtr,int)");
return duk_pcall_method(ctx, nargs);
}
// internal static int pcompile(Uno.IntPtr ctx, uint flags) [static] :699
int duktape::pcompile(void* ctx, uint32_t flags)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "pcompile(Uno.IntPtr,uint)");
return duk_pcompile(ctx, flags);
}
// internal static int pcompile_string(Uno.IntPtr ctx, uint flags, string src) [static] :694
int duktape::pcompile_string(void* ctx, uint32_t flags, uString* src)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "pcompile_string(Uno.IntPtr,uint,string)");
return duk_pcompile_string(ctx, flags, uStringToCStr(src));
}
// internal static int peval_string(Uno.IntPtr ctx, string code) [static] :850
int duktape::peval_string(void* ctx, uString* code)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "peval_string(Uno.IntPtr,string)");
return duk_peval_string(ctx, uStringToCStr(code));
}
// ---------------------------------------------------------------------------
// Generated shims: value-stack push/pop and property-write helpers.
// ---------------------------------------------------------------------------
// internal static void pop(Uno.IntPtr ctx) [static] :823
void duktape::pop(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "pop(Uno.IntPtr)");
duk_pop(ctx);
}
// internal static void pop_2(Uno.IntPtr ctx) [static] :1005
// Pops the top two values in one call.
void duktape::pop_2(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "pop_2(Uno.IntPtr)");
duk_pop_2(ctx);
}
// internal static int push_array(Uno.IntPtr ctx) [static] :1015
// Pushes a new empty array; returns its stack index.
int duktape::push_array(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_array(Uno.IntPtr)");
return duk_push_array(ctx);
}
// internal static void push_boolean(Uno.IntPtr ctx, bool value) [static] :990
void duktape::push_boolean(void* ctx, bool value)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_boolean(Uno.IntPtr,bool)");
duk_push_boolean(ctx, value);
}
// internal static void push_callback_proxy(Uno.IntPtr ctx, int argc) [static] :1025
// NOTE(review): duk_push_callback_proxy is not part of the stock Duktape API;
// presumably a project-local helper bridging JS calls to Uno — confirm.
void duktape::push_callback_proxy(void* ctx, int argc)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_callback_proxy(Uno.IntPtr,int)");
duk_push_callback_proxy(ctx, argc);
}
// internal static void push_delegate(Uno.IntPtr ctx, Uno.Action<Uno.IntPtr> del) [static] :1035
// NOTE(review): duk_push_delegate is likewise a project-local extension.
void duktape::push_delegate(void* ctx, uDelegate* del)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_delegate(Uno.IntPtr,Uno.Action<Uno.IntPtr>)");
duk_push_delegate(ctx, del);
}
// internal static void push_global_object(Uno.IntPtr ctx) [static] :818
void duktape::push_global_object(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_global_object(Uno.IntPtr)");
duk_push_global_object(ctx);
}
// internal static void push_global_stash(Uno.IntPtr ctx) [static] :1020
// The global stash is an internal object usable for keeping values reachable
// (and thus GC-safe) from the native side.
void duktape::push_global_stash(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_global_stash(Uno.IntPtr)");
duk_push_global_stash(ctx);
}
// internal static int push_heapptr(Uno.IntPtr ctx, Uno.IntPtr ptr) [static] :950
// Re-pushes a previously obtained heap pointer; returns its stack index.
int duktape::push_heapptr(void* ctx, void* ptr)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_heapptr(Uno.IntPtr,Uno.IntPtr)");
return duk_push_heapptr(ctx, ptr);
}
// internal static void push_int(Uno.IntPtr ctx, int val) [static] :875
void duktape::push_int(void* ctx, int val)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_int(Uno.IntPtr,int)");
duk_push_int(ctx, val);
}
// internal static void push_null(Uno.IntPtr ctx) [static] :960
void duktape::push_null(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_null(Uno.IntPtr)");
duk_push_null(ctx);
}
// internal static void push_number(Uno.IntPtr ctx, double val) [static] :839
void duktape::push_number(void* ctx, double val)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_number(Uno.IntPtr,double)");
duk_push_number(ctx, val);
}
// internal static int push_object(Uno.IntPtr ctx) [static] :860
// Pushes a new empty object; returns its stack index.
int duktape::push_object(void* ctx)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_object(Uno.IntPtr)");
return duk_push_object(ctx);
}
// internal static void push_string(Uno.IntPtr ctx, string str) [static] :880
// duk_push_string copies the bytes, so the temporary from uStringToCStr only
// needs to live for the duration of this call.
void duktape::push_string(void* ctx, uString* str)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "push_string(Uno.IntPtr,string)");
duk_push_string(ctx, uStringToCStr(str));
}
// internal static bool put_prop_index(Uno.IntPtr ctx, int index, int arr_index) [static] :1000
// Writes obj[arr_index] = <top of stack>, popping the value.
bool duktape::put_prop_index(void* ctx, int index, int arr_index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "put_prop_index(Uno.IntPtr,int,int)");
return duk_put_prop_index(ctx, index, arr_index);
}
// internal static bool put_prop_string(Uno.IntPtr ctx, int obj_index, string key) [static] :844
// Writes obj[key] = <top of stack>, popping the value.
bool duktape::put_prop_string(void* ctx, int obj_index, uString* key)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "put_prop_string(Uno.IntPtr,int,string)");
return duk_put_prop_string(ctx, obj_index, uStringToCStr(key));
}
// internal static string safe_to_string(Uno.IntPtr ctx, int index) [static] :980
// ToString() coercion that never throws; result is copied into managed memory.
uString* duktape::safe_to_string(void* ctx, int index)
{
uStackFrame __("Fuse.Scripting.Duktape.duktape", "safe_to_string(Uno.IntPtr,int)");
return uNewStringUtf8(duk_safe_to_string(ctx, index));
}
// }
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(566)
// ------------------------------------------------------------------------------------------
// internal extern enum EnumFlags :566
// Lazily builds (and caches in a GC-strong static) the reflection type object
// for the EnumFlags enum. The literal values mirror Duktape's DUK_ENUM_* bit
// flags, so the managed enum can be passed straight through to duk_enum().
uEnumType* EnumFlags_typeof()
{
static uSStrong<uEnumType*> type;
// Fast path: type object already built.
if (type != NULL) return type;
type = uEnumType::New("Fuse.Scripting.Duktape.EnumFlags", ::g::Uno::Int_typeof(), 6);
type->SetLiterals(
"DUK_ENUM_INCLUDE_NONENUMERABLE", 1LL,
"DUK_ENUM_INCLUDE_INTERNAL", 2LL,
"DUK_ENUM_OWN_PROPERTIES_ONLY", 4LL,
"DUK_ENUM_ARRAY_INDICES_ONLY", 8LL,
"DUK_ENUM_SORT_ARRAY_INDICES", 16LL,
"DUK_ENUM_NO_PROXY_BEHAVIOR", 32LL);
return type;
}
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(10)
// -----------------------------------------------------------------------------------------
// public sealed extern class JSArray :10
// {
// Lazily builds the reflection type for JSArray, wiring its virtual-method
// slots to the generated trampolines and registering the three instance
// fields (_ctx, _handle, _stashKey). Registration order is significant:
// field offsets and TYPES[] slot indices are fixed by the code generator.
::g::Fuse::Scripting::Array_type* JSArray_typeof()
{
static uSStrong< ::g::Fuse::Scripting::Array_type*> type;
if (type != NULL) return type;
uTypeOptions options;
options.FieldCount = 3;
options.ObjectSize = sizeof(JSArray);
options.TypeSize = sizeof(::g::Fuse::Scripting::Array_type);
type = (::g::Fuse::Scripting::Array_type*)uClassType::New("Fuse.Scripting.Duktape.JSArray", options);
type->SetBase(::g::Fuse::Scripting::Array_typeof());
type->fp_Equals2 = (void(*)(::g::Fuse::Scripting::Array*, ::g::Fuse::Scripting::Array*, bool*))JSArray__Equals2_fn;
type->fp_GetHashCode = (void(*)(uObject*, int*))JSArray__GetHashCode_fn;
type->fp_get_Item = (void(*)(::g::Fuse::Scripting::Array*, int*, uObject**))JSArray__get_Item_fn;
type->fp_set_Item = (void(*)(::g::Fuse::Scripting::Array*, int*, uObject*))JSArray__set_Item_fn;
type->fp_get_Length = (void(*)(::g::Fuse::Scripting::Array*, int*))JSArray__get_Length_fn;
// Populate shared type-cache slots used by the trampolines below.
::TYPES[16] = ::g::Uno::IntPtr_typeof();
::TYPES[6] = ::g::Uno::Int_typeof();
type->SetFields(0,
::g::Fuse::Scripting::Duktape::Context_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSArray, _ctx), 0,
::g::Uno::IntPtr_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSArray, _handle), 0,
::g::Uno::String_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSArray, _stashKey), 0);
type->Reflection.SetFunctions(1,
new uFunction(".ctor", NULL, (void*)JSArray__New1_fn, 0, true, JSArray_typeof(), 2, ::g::Fuse::Scripting::Duktape::Context_typeof(), ::g::Uno::IntPtr_typeof()));
return type;
}
// ---------------------------------------------------------------------------
// Generated trampolines for JSArray: adapt the reflection calling convention
// (by-pointer args/retval) to the ordinary C++ member functions below.
// ---------------------------------------------------------------------------
// public JSArray(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) :18
void JSArray__ctor_1_fn(JSArray* __this, ::g::Fuse::Scripting::Duktape::Context* ctx, void** handle)
{
__this->ctor_1(ctx, *handle);
}
// public override sealed bool Equals(Fuse.Scripting.Array a) :48
// Two JSArrays are equal iff they wrap the same heap pointer in the same
// script context; anything that is not a JSArray compares unequal.
void JSArray__Equals2_fn(JSArray* __this, ::g::Fuse::Scripting::Array* a, bool* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "Equals(Fuse.Scripting.Array)");
JSArray* ja = uAs<JSArray*>(a, JSArray_typeof());
if (ja == NULL)
return *__retval = false, void();
return *__retval = (__this->_ctx == uPtr(ja)->_ctx) && ::g::Uno::IntPtr::op_Equality(__this->_handle, uPtr(ja)->_handle), void();
}
// private object Get(int index) :61
void JSArray__Get_fn(JSArray* __this, int* index, uObject** __retval)
{
*__retval = __this->Get(*index);
}
// public override sealed int GetHashCode() :56
// Hash is derived from the wrapped heap pointer, consistent with Equals.
void JSArray__GetHashCode_fn(JSArray* __this, int* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "GetHashCode()");
void* ind1;
return *__retval = ::g::Uno::IntPtr::GetHashCode((ind1 = __this->_handle, ind1), ::TYPES[16/*Uno.IntPtr*/]), void();
}
// internal Uno.IntPtr get_Handle() :12
void JSArray__get_Handle_fn(JSArray* __this, void** __retval)
{
*__retval = __this->Handle();
}
// public override sealed object get_Item(int index) :44
void JSArray__get_Item_fn(JSArray* __this, int* index, uObject** __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "get_Item(int)");
int index_ = *index;
return *__retval = __this->Get(index_), void();
}
// public override sealed void set_Item(int index, object value) :45
void JSArray__set_Item_fn(JSArray* __this, int* index, uObject* value)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "set_Item(int,object)");
int index_ = *index;
__this->Set(index_, value);
}
// public override sealed int get_Length() :33
// Pushes the wrapped array, reads its length, then restores the stack.
// The 64-bit Duktape length is truncated to int for the managed property.
void JSArray__get_Length_fn(JSArray* __this, int* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "get_Length()");
int index = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
int64_t l = uPtr(__this->_ctx)->GetLength(index);
uPtr(__this->_ctx)->Pop();
return *__retval = (int)l, void();
}
// public JSArray New(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) :18
void JSArray__New1_fn(::g::Fuse::Scripting::Duktape::Context* ctx, void** handle, JSArray** __retval)
{
*__retval = JSArray::New1(ctx, *handle);
}
// private void Set(int index, object value) :70
void JSArray__Set_fn(JSArray* __this, int* index, uObject* value)
{
__this->Set(*index, value);
}
// public JSArray(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) [instance] :18
// Wraps a Duktape array heap pointer; stashing the handle keeps the JS value
// reachable (GC-safe) for the lifetime of this wrapper.
void JSArray::ctor_1(::g::Fuse::Scripting::Duktape::Context* ctx, void* handle)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", ".ctor(Fuse.Scripting.Duktape.Context,Uno.IntPtr)");
ctor_();
_ctx = ctx;
_handle = handle;
_stashKey = uPtr(_ctx)->Stash(_handle);
}
// private object Get(int index) [instance] :61
// Reads this[index]. Stack discipline: push array, push property value,
// convert top-of-stack to a managed object, then pop both (Pop2).
// The push/pop ordering is exact — do not reorder these calls.
uObject* JSArray::Get(int index)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "Get(int)");
int objIndex = uPtr(_ctx)->PushHeapPtr(_handle);
uPtr(_ctx)->GetProperty(objIndex, index);
uObject* res = uPtr(_ctx)->IndexToObject(uPtr(_ctx)->GetTop() - 1);
uPtr(_ctx)->Pop2();
return res;
}
// internal Uno.IntPtr get_Handle() [instance] :12
// Borrowed Duktape heap pointer for the wrapped array.
void* JSArray::Handle()
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "get_Handle()");
return _handle;
}
// private void Set(int index, object value) [instance] :70
// Writes this[index] = value. PutProperty pops the pushed value; the final
// Pop removes the array pushed by PushHeapPtr.
void JSArray::Set(int index, uObject* value)
{
uStackFrame __("Fuse.Scripting.Duktape.JSArray", "Set(int,object)");
int objIndex = uPtr(_ctx)->PushHeapPtr(_handle);
uPtr(_ctx)->Push4(value);
uPtr(_ctx)->PutProperty(objIndex, index);
uPtr(_ctx)->Pop();
}
// public JSArray New(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) [static] :18
// Factory: allocate an uninitialized JSArray and run the wrapping ctor.
JSArray* JSArray::New1(::g::Fuse::Scripting::Duktape::Context* ctx, void* handle)
{
JSArray* obj2 = (JSArray*)uNew(JSArray_typeof());
obj2->ctor_1(ctx, handle);
return obj2;
}
// }
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(1063)
// -------------------------------------------------------------------------------------------
// public sealed extern class JSFunction :1063
// {
// Lazily builds the reflection type for JSFunction (Call/Construct/Equals/
// GetHashCode overrides) and registers its three fields. Slot indices and
// field order are fixed by the code generator — do not reorder.
::g::Fuse::Scripting::Function_type* JSFunction_typeof()
{
static uSStrong< ::g::Fuse::Scripting::Function_type*> type;
if (type != NULL) return type;
uTypeOptions options;
options.FieldCount = 3;
options.ObjectSize = sizeof(JSFunction);
options.TypeSize = sizeof(::g::Fuse::Scripting::Function_type);
type = (::g::Fuse::Scripting::Function_type*)uClassType::New("Fuse.Scripting.Duktape.JSFunction", options);
type->SetBase(::g::Fuse::Scripting::Function_typeof());
type->fp_Call = (void(*)(::g::Fuse::Scripting::Function*, uArray*, uObject**))JSFunction__Call_fn;
type->fp_Construct = (void(*)(::g::Fuse::Scripting::Function*, uArray*, ::g::Fuse::Scripting::Object**))JSFunction__Construct_fn;
type->fp_Equals2 = (void(*)(::g::Fuse::Scripting::Function*, ::g::Fuse::Scripting::Function*, bool*))JSFunction__Equals2_fn;
type->fp_GetHashCode = (void(*)(uObject*, int*))JSFunction__GetHashCode_fn;
::TYPES[2] = uObject_typeof()->Array();
::TYPES[15] = ::g::Fuse::Scripting::Object_typeof();
::TYPES[16] = ::g::Uno::IntPtr_typeof();
type->SetFields(0,
::g::Fuse::Scripting::Duktape::Context_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSFunction, _ctx), 0,
::g::Uno::IntPtr_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSFunction, _handle), 0,
::g::Uno::String_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSFunction, _stashKey), 0);
return type;
}
// internal JSFunction(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) :1071
void JSFunction__ctor_1_fn(JSFunction* __this, ::g::Fuse::Scripting::Duktape::Context* ctx, void** handle)
{
__this->ctor_1(ctx, *handle);
}
// public override sealed object Call(object[] args) :1111
// Invokes the wrapped JS function: push function, push each argument, do a
// protected call (SafeCall), surface any JS error via CheckError, then
// convert and pop the result. The push/pop ordering is exact.
void JSFunction__Call_fn(JSFunction* __this, uArray* args, uObject** __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSFunction", "Call(object[])");
uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
int argc = uPtr(args)->Length();
for (int i = 0; i < argc; i++)
uPtr(__this->_ctx)->Push4(uPtr(args)->Strong<uObject*>(i));
int result = uPtr(__this->_ctx)->SafeCall(argc);
uPtr(__this->_ctx)->CheckError(result);
int index = uPtr(__this->_ctx)->GetTop() - 1;
uObject* returnValue = uPtr(__this->_ctx)->IndexToObject(index);
uPtr(__this->_ctx)->Pop();
return *__retval = returnValue, void();
}
// public override sealed Fuse.Scripting.Object Construct(object[] args) :1097
// Like Call, but performs a constructor ("new") invocation and casts the
// resulting value to a script Object.
// NOTE(review): unlike Call, the Construct result is not routed through
// CheckError — presumably Construct() throws internally on error; confirm.
void JSFunction__Construct_fn(JSFunction* __this, uArray* args, ::g::Fuse::Scripting::Object** __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSFunction", "Construct(object[])");
uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
int argc = uPtr(args)->Length();
for (int i = 0; i < argc; i++)
uPtr(__this->_ctx)->Push4(uPtr(args)->Strong<uObject*>(i));
uPtr(__this->_ctx)->Construct(argc);
uObject* returnValue = uPtr(__this->_ctx)->IndexToObject(uPtr(__this->_ctx)->GetTop() - 1);
uPtr(__this->_ctx)->Pop();
return *__retval = uCast< ::g::Fuse::Scripting::Object*>(returnValue, ::TYPES[15/*Fuse.Scripting.Object*/]), void();
}
// public override sealed bool Equals(Fuse.Scripting.Function a) :1084
// Identity: same context and same wrapped heap pointer.
void JSFunction__Equals2_fn(JSFunction* __this, ::g::Fuse::Scripting::Function* a, bool* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSFunction", "Equals(Fuse.Scripting.Function)");
JSFunction* f = uAs<JSFunction*>(a, JSFunction_typeof());
if (f == NULL)
return *__retval = false, void();
return *__retval = (__this->_ctx == uPtr(f)->_ctx) && ::g::Uno::IntPtr::op_Equality(__this->_handle, uPtr(f)->_handle), void();
}
// public override sealed int GetHashCode() :1092
// Hash of the wrapped heap pointer, consistent with Equals above.
void JSFunction__GetHashCode_fn(JSFunction* __this, int* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSFunction", "GetHashCode()");
void* ind1;
return *__retval = ::g::Uno::IntPtr::GetHashCode((ind1 = __this->_handle, ind1), ::TYPES[16/*Uno.IntPtr*/]), void();
}
// internal Uno.IntPtr get_Handle() :1065
void JSFunction__get_Handle_fn(JSFunction* __this, void** __retval)
{
*__retval = __this->Handle();
}
// internal JSFunction New(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) :1071
void JSFunction__New1_fn(::g::Fuse::Scripting::Duktape::Context* ctx, void** handle, JSFunction** __retval)
{
*__retval = JSFunction::New1(ctx, *handle);
}
// internal JSFunction(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) [instance] :1071
// Wraps a Duktape function heap pointer; stashing keeps the JS value
// reachable (GC-safe) while this wrapper is alive.
void JSFunction::ctor_1(::g::Fuse::Scripting::Duktape::Context* ctx, void* handle)
{
uStackFrame __("Fuse.Scripting.Duktape.JSFunction", ".ctor(Fuse.Scripting.Duktape.Context,Uno.IntPtr)");
ctor_();
_ctx = ctx;
_handle = handle;
_stashKey = uPtr(_ctx)->Stash(_handle);
}
// internal Uno.IntPtr get_Handle() [instance] :1065
// Borrowed Duktape heap pointer for the wrapped function.
void* JSFunction::Handle()
{
uStackFrame __("Fuse.Scripting.Duktape.JSFunction", "get_Handle()");
return _handle;
}
// internal JSFunction New(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) [static] :1071
// Factory: allocate an uninitialized JSFunction and run the wrapping ctor.
JSFunction* JSFunction::New1(::g::Fuse::Scripting::Duktape::Context* ctx, void* handle)
{
JSFunction* obj2 = (JSFunction*)uNew(JSFunction_typeof());
obj2->ctor_1(ctx, handle);
return obj2;
}
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(1195)
// -------------------------------------------------------------------------------------------
// public sealed extern class JSObject :1195
// {
// Lazily builds the reflection type for JSObject. Besides wiring the method
// slots, it interns the helper strings used by InstanceOf ("__a__"/"__b__"
// temporaries on the global object) into the shared STRINGS[] cache.
// Slot/field order is fixed by the code generator — do not reorder.
::g::Fuse::Scripting::Object_type* JSObject_typeof()
{
static uSStrong< ::g::Fuse::Scripting::Object_type*> type;
if (type != NULL) return type;
uTypeOptions options;
options.FieldCount = 3;
options.ObjectSize = sizeof(JSObject);
options.TypeSize = sizeof(::g::Fuse::Scripting::Object_type);
type = (::g::Fuse::Scripting::Object_type*)uClassType::New("Fuse.Scripting.Duktape.JSObject", options);
type->SetBase(::g::Fuse::Scripting::Object_typeof());
type->fp_CallMethod = (void(*)(::g::Fuse::Scripting::Object*, uString*, uArray*, uObject**))JSObject__CallMethod_fn;
type->fp_ContainsKey = (void(*)(::g::Fuse::Scripting::Object*, uString*, bool*))JSObject__ContainsKey_fn;
type->fp_Equals2 = (void(*)(::g::Fuse::Scripting::Object*, ::g::Fuse::Scripting::Object*, bool*))JSObject__Equals2_fn;
type->fp_GetHashCode = (void(*)(uObject*, int*))JSObject__GetHashCode_fn;
type->fp_InstanceOf = (void(*)(::g::Fuse::Scripting::Object*, ::g::Fuse::Scripting::Function*, bool*))JSObject__InstanceOf_fn;
type->fp_get_Item = (void(*)(::g::Fuse::Scripting::Object*, uString*, uObject**))JSObject__get_Item_fn;
type->fp_set_Item = (void(*)(::g::Fuse::Scripting::Object*, uString*, uObject*))JSObject__set_Item_fn;
type->fp_get_Keys = (void(*)(::g::Fuse::Scripting::Object*, uArray**))JSObject__get_Keys_fn;
::STRINGS[12] = uString::Const("__a__");
::STRINGS[13] = uString::Const("__b__");
::STRINGS[14] = uString::Const("__a__ instanceof __b__");
::TYPES[2] = uObject_typeof()->Array();
::TYPES[16] = ::g::Uno::IntPtr_typeof();
::TYPES[11] = ::g::Fuse::Scripting::Duktape::JSFunction_typeof();
::TYPES[9] = ::g::Uno::Bool_typeof();
::TYPES[17] = ::g::Uno::Collections::List_typeof()->MakeType(::g::Uno::String_typeof());
type->SetFields(0,
::g::Fuse::Scripting::Duktape::Context_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSObject, _ctx), 0,
::g::Uno::IntPtr_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSObject, _handle), 0,
::g::Uno::String_typeof(), offsetof(::g::Fuse::Scripting::Duktape::JSObject, _stashKey), 0);
type->Reflection.SetFunctions(1,
new uFunction(".ctor", NULL, (void*)JSObject__New1_fn, 0, true, JSObject_typeof(), 2, ::g::Fuse::Scripting::Duktape::Context_typeof(), ::g::Uno::IntPtr_typeof()));
return type;
}
// public JSObject(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) :1203
// Generated trampoline: unwraps the by-pointer handle and forwards to ctor_1.
void JSObject__ctor_1_fn(JSObject* __this, ::g::Fuse::Scripting::Duktape::Context* ctx, void** handle)
{
__this->ctor_1(ctx, *handle);
}
// public override sealed object CallMethod(string name, object[] args) :1297
// Invokes this[name](args...) on the wrapped JS object.
// Stack discipline: push object, push this[name] (the method), push the
// object again as the receiver, push each argument, then a protected method
// call replaces method+receiver+args with the result. Pop2 removes the
// result and the object pushed first.
void JSObject__CallMethod_fn(JSObject* __this, uString* name, uArray* args, uObject** __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "CallMethod(string,object[])");
int index = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
uPtr(__this->_ctx)->GetProperty1(index, name);
uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
// Hoist the length once: null-checks 'args' up front and avoids re-reading
// it every iteration; the original also mixed uPtr(args)->Length() with a
// bare args->Length() — this makes the block consistent with
// JSFunction__Call_fn, which uses the same hoisted-argc pattern.
int argc = uPtr(args)->Length();
for (int i = 0; i < argc; i++)
uPtr(__this->_ctx)->Push4(uPtr(args)->Strong<uObject*>(i));
int result = uPtr(__this->_ctx)->SafeCallMethod(argc);
uPtr(__this->_ctx)->CheckError(result);
uObject* returnVal = uPtr(__this->_ctx)->IndexToObject(uPtr(__this->_ctx)->GetTop() - 1);
uPtr(__this->_ctx)->Pop2();
return *__retval = returnVal, void();
}
// public override sealed bool ContainsKey(string key) :1316
// True iff the wrapped object has a property named 'key'.
void JSObject__ContainsKey_fn(JSObject* __this, uString* key, bool* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "ContainsKey(string)");
int index = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
bool result = uPtr(__this->_ctx)->HasProperty(index, key);
uPtr(__this->_ctx)->Pop();
return *__retval = result, void();
}
// public override sealed bool Equals(Fuse.Scripting.Object obj) :1216
// Identity: same context and same wrapped heap pointer.
void JSObject__Equals2_fn(JSObject* __this, ::g::Fuse::Scripting::Object* obj, bool* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "Equals(Fuse.Scripting.Object)");
JSObject* o = uAs<JSObject*>(obj, JSObject_typeof());
if (o == NULL)
return *__retval = false, void();
return *__retval = (__this->_ctx == uPtr(o)->_ctx) && ::g::Uno::IntPtr::op_Equality(__this->_handle, uPtr(o)->_handle), void();
}
// public override sealed int GetHashCode() :1223
// Hash of the wrapped heap pointer, consistent with Equals above.
void JSObject__GetHashCode_fn(JSObject* __this, int* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "GetHashCode()");
void* ind1;
return *__retval = ::g::Uno::IntPtr::GetHashCode((ind1 = __this->_handle, ind1), ::TYPES[16/*Uno.IntPtr*/]), void();
}
// internal Uno.IntPtr get_Handle() :1197
void JSObject__get_Handle_fn(JSObject* __this, void** __retval)
{
*__retval = __this->Handle();
}
// public override sealed bool InstanceOf(Fuse.Scripting.Function type) :1228
// Implements JS 'instanceof' by temporarily publishing both operands as
// global properties __a__/__b__, evaluating "__a__ instanceof __b__", then
// deleting the temporaries. Returns false for non-Duktape function types.
// The stack index arithmetic (GetTop() - 1 / - 2) is exact-order-dependent;
// do not reorder any of these calls.
void JSObject__InstanceOf_fn(JSObject* __this, ::g::Fuse::Scripting::Function* type, bool* __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "InstanceOf(Fuse.Scripting.Function)");
::g::Fuse::Scripting::Duktape::JSFunction* func = uAs< ::g::Fuse::Scripting::Duktape::JSFunction*>(type, ::TYPES[11/*Fuse.Scripting.Duktape.JSFunction*/]);
if (func != NULL)
{
uPtr(__this->_ctx)->PushGlobalObject();
int index = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
uPtr(__this->_ctx)->PutProperty1(uPtr(__this->_ctx)->GetTop() - 2, ::STRINGS[12/*"__a__"*/]);
int k = uPtr(__this->_ctx)->PushHeapPtr(uPtr(func)->Handle());
uPtr(__this->_ctx)->PutProperty1(uPtr(__this->_ctx)->GetTop() - 2, ::STRINGS[13/*"__b__"*/]);
bool res = uUnbox<bool>(::TYPES[9/*bool*/], uPtr(__this->_ctx)->Evaluate2(::STRINGS[14/*"__a__ insta...*/]));
uPtr(__this->_ctx)->DelProperty2(uPtr(__this->_ctx)->GetTop() - 1, ::STRINGS[12/*"__a__"*/]);
uPtr(__this->_ctx)->DelProperty2(uPtr(__this->_ctx)->GetTop() - 1, ::STRINGS[13/*"__b__"*/]);
uPtr(__this->_ctx)->Pop();
return *__retval = res, void();
}
return *__retval = false, void();
}
// public override sealed object get_Item(string key) :1257
// Reads this[key]: push object, push property value, convert top-of-stack to
// a managed object, then pop both (Pop2).
void JSObject__get_Item_fn(JSObject* __this, uString* key, uObject** __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "get_Item(string)");
int objIndex = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
uPtr(__this->_ctx)->GetProperty1(objIndex, key);
uObject* res = uPtr(__this->_ctx)->IndexToObject(uPtr(__this->_ctx)->GetTop() - 1);
uPtr(__this->_ctx)->Pop2();
return *__retval = res, void();
}
// public override sealed void set_Item(string key, object value) :1266
// Writes this[key] = value. PutProperty1 pops the pushed value; the final
// Pop removes the object pushed by PushHeapPtr.
void JSObject__set_Item_fn(JSObject* __this, uString* key, uObject* value)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "set_Item(string,object)");
int objIndex = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
uPtr(__this->_ctx)->Push4(value);
uPtr(__this->_ctx)->PutProperty1(objIndex, key);
uPtr(__this->_ctx)->Pop();
}
// public override sealed string[] get_Keys() :1277
// Enumerates own properties (flag 4, per EnumFlags: DUK_ENUM_OWN_PROPERTIES_ONLY)
// and collects the key names. Each EnumeratorNext pushes a key that is read
// and popped inside the loop; Pop2 finally removes enumerator + object.
void JSObject__get_Keys_fn(JSObject* __this, uArray** __retval)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "get_Keys()");
::g::Uno::Collections::List* keys = (::g::Uno::Collections::List*)::g::Uno::Collections::List::New1(::TYPES[17/*Uno.Collections.List<string>*/]);
int index = uPtr(__this->_ctx)->PushHeapPtr(__this->_handle);
uPtr(__this->_ctx)->PushEnumerator(index, 4);
while (uPtr(__this->_ctx)->EnumeratorNext(uPtr(__this->_ctx)->GetTop() - 1, false))
{
uString* key = uPtr(__this->_ctx)->GetString(uPtr(__this->_ctx)->GetTop() - 1);
::g::Uno::Collections::List__Add_fn(uPtr(keys), key);
uPtr(__this->_ctx)->Pop();
}
uPtr(__this->_ctx)->Pop2();
return *__retval = (uArray*)keys->ToArray(), void();
}
// public JSObject New(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) :1203
// Generated trampoline for the reflection factory below.
void JSObject__New1_fn(::g::Fuse::Scripting::Duktape::Context* ctx, void** handle, JSObject** __retval)
{
*__retval = JSObject::New1(ctx, *handle);
}
// public JSObject(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) [instance] :1203
// Wraps a Duktape object heap pointer; stashing keeps the JS value reachable
// (GC-safe) while this wrapper is alive.
void JSObject::ctor_1(::g::Fuse::Scripting::Duktape::Context* ctx, void* handle)
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", ".ctor(Fuse.Scripting.Duktape.Context,Uno.IntPtr)");
ctor_();
_ctx = ctx;
_handle = handle;
_stashKey = uPtr(_ctx)->Stash(_handle);
}
// internal Uno.IntPtr get_Handle() [instance] :1197
// Borrowed Duktape heap pointer for the wrapped object.
void* JSObject::Handle()
{
uStackFrame __("Fuse.Scripting.Duktape.JSObject", "get_Handle()");
return _handle;
}
// public JSObject New(Fuse.Scripting.Duktape.Context ctx, Uno.IntPtr handle) [static] :1203
// Factory: allocate an uninitialized JSObject and run the wrapping ctor.
JSObject* JSObject::New1(::g::Fuse::Scripting::Duktape::Context* ctx, void* handle)
{
JSObject* obj2 = (JSObject*)uNew(JSObject_typeof());
obj2->ctor_1(ctx, handle);
return obj2;
}
// }
// ../../../../../../../usr/local/share/uno/Packages/Fuse.Scripting.Duktape/0.24.6/$.uno(1142)
// -------------------------------------------------------------------------------------------
// public static extern class JSONObject :1142
// {
// Lazily builds the reflection type for the static JSONObject class and
// registers its single reflected function (Stringify).
uClassType* JSONObject_typeof()
{
static uSStrong<uClassType*> type;
if (type != NULL) return type;
uTypeOptions options;
options.TypeSize = sizeof(uClassType);
type = uClassType::New("Fuse.Scripting.Duktape.JSONObject", options);
::STRINGS[15] = uString::Const("Could not convert value to JSON: ");
type->Reflection.SetFunctions(1,
new uFunction("Stringify", NULL, (void*)JSONObject__Stringify_fn, 0, true, ::g::Uno::String_typeof(), 1, uObject_typeof()));
return type;
}
// public static string Stringify(object value) :1144
void JSONObject__Stringify_fn(uObject* value, uString** __retval)
{
*__retval = JSONObject::Stringify(value);
}
// public static string Stringify(object value) [static] :1144
// Not implemented for this backend: unconditionally throws an Uno.Exception
// describing the value that could not be converted.
uString* JSONObject::Stringify(uObject* value)
{
uStackFrame __("Fuse.Scripting.Duktape.JSONObject", "Stringify(object)");
U_THROW(::g::Uno::Exception::New2(::g::Uno::String::op_Addition1(::STRINGS[15/*"Could not c...*/], value)));
}
// }
}}}} // ::g::Fuse::Scripting::Duktape
|
//==-- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions --*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling and other late optimizations. This
// pass should be run after register allocation but before the post-regalloc
// scheduling pass.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
namespace {
/// Post-register-allocation pass that rewrites AArch64 pseudo instructions
/// (immediate moves, address materialization, compare-and-swap loops, ...)
/// into real target instructions.  Intended to run after regalloc but before
/// the post-RA scheduler (see file header).
class AArch64ExpandPseudo : public MachineFunctionPass {
public:
  static char ID;
  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
    initializeAArch64ExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  // Target instruction info; cached once per function in
  // runOnMachineFunction().
  const AArch64InstrInfo *TII;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }

private:
  // Expand every pseudo in one block; returns true if anything changed.
  bool expandMBB(MachineBasicBlock &MBB);
  // Expand one instruction.  NextMBBI is updated by expansions that split
  // the block (the CMP_SWAP family) so iteration can resume correctly.
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                    unsigned BitSize);

  bool expandCMP_SWAP(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                      unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
                      unsigned ExtendImm, unsigned ZeroReg,
                      MachineBasicBlock::iterator &NextMBBI);
  bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          MachineBasicBlock::iterator &NextMBBI);
};
char AArch64ExpandPseudo::ID = 0;
}

INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
                AARCH64_EXPAND_PSEUDO_NAME, false, false)
/// \brief Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion: implicit uses go onto \p UseMI,
/// implicit defs onto \p DefMI.
static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
                           MachineInstrBuilder &DefMI) {
  // Operands beyond those described by the MCInstrDesc are the implicit
  // register operands that were appended to the pseudo.
  const unsigned FirstImpOp = OldMI.getDesc().getNumOperands();
  const unsigned NumOps = OldMI.getNumOperands();
  for (unsigned Idx = FirstImpOp; Idx < NumOps; ++Idx) {
    const MachineOperand &Op = OldMI.getOperand(Idx);
    assert(Op.isReg() && Op.getReg());
    (Op.isUse() ? UseMI : DefMI).addOperand(Op);
  }
}
/// \brief Extract the 16-bit chunk of \p Imm at position \p ChunkIdx.
/// Chunk 0 holds bits [15:0], chunk 3 holds bits [63:48].
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");
  const unsigned Shift = ChunkIdx * 16;
  return (Imm >> Shift) & 0xFFFFULL;
}
/// \brief Copy the 16-bit chunk at index \p FromIdx of \p Imm into the chunk
/// slot \p ToIdx and return the result.  Indices correspond to element
/// numbers in a v4i16 (index 0 is the least significant chunk).
static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
  assert((FromIdx < 4) && (ToIdx < 4) && "Out of range chunk index specified!");
  const uint64_t ChunkMask = 0xFFFFULL;
  const unsigned DstShift = ToIdx * 16;
  // Pull out the source chunk, punch a hole at the destination slot, then
  // drop the source chunk into the hole.
  const uint64_t Src = (Imm >> (FromIdx * 16)) & ChunkMask;
  const uint64_t Cleared = Imm & ~(ChunkMask << DstShift);
  return Cleared | (Src << DstShift);
}
/// \brief Helper function which tries to materialize a 64-bit value with an
/// ORR + MOVK instruction sequence.
///
/// \p OrrImm is a candidate constant (the real constant \p UImm with one
/// chunk replaced) that may be encodable as a logical immediate; \p ChunkIdx
/// names the chunk the follow-up MOVK must restore to its real value.
/// Returns true (and erases \p MI) on success, or false if \p OrrImm is not
/// a valid 64-bit logical immediate.
static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
                       MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator &MBBI,
                       const AArch64InstrInfo *TII, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");
  const unsigned ShiftAmt = ChunkIdx * 16;

  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding)) {
    // Create the ORR-immediate instruction (ORR dst, xzr, #OrrImm).
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
            .addOperand(MI.getOperand(0))
            .addReg(AArch64::XZR)
            .addImm(Encoding);

    // Create the MOVK instruction that overwrites the replaced chunk with
    // its real 16-bit value.
    const unsigned Imm16 = getChunk(UImm, ChunkIdx);
    const unsigned DstReg = MI.getOperand(0).getReg();
    const bool DstIsDead = MI.getOperand(0).isDead();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
            .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
            .addReg(DstReg)
            .addImm(Imm16)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
    // Implicit uses land on the ORR, implicit defs on the final MOVK.
    transferImpOps(MI, MIB, MIB1);
    MI.eraseFromParent();
    return true;
  }
  return false;
}
/// \brief Check whether the given 16-bit chunk, replicated into all four
/// chunk positions of a 64-bit value, can be materialized with a single ORR
/// instruction.  On success \p Encoding receives the logical-immediate
/// encoding.
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
  const uint64_t Replicated =
      (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
  return AArch64_AM::processLogicalImmediate(Replicated, 64, Encoding);
}
/// \brief Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// 16-bit chunks will be materialized with MOVK instructions.
///
/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
/// an ORR instruction.
///
/// Returns true (and erases \p MI) when an ORR + one/two MOVK sequence was
/// emitted, false if no chunk qualifies.
static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
                                 MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator &MBBI,
                                 const AArch64InstrInfo *TII) {
  typedef DenseMap<uint64_t, unsigned> CountMap;
  CountMap Counts;

  // Scan the constant and count how often every chunk occurs.
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    ++Counts[getChunk(UImm, Idx)];

  // Traverse the chunks to find one which occurs more than once.
  for (CountMap::const_iterator Chunk = Counts.begin(), End = Counts.end();
       Chunk != End; ++Chunk) {
    const uint64_t ChunkVal = Chunk->first;
    const unsigned Count = Chunk->second;

    uint64_t Encoding = 0;

    // We are looking for chunks which have two or three instances and can be
    // materialized with an ORR instruction.
    if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
      continue;

    const bool CountThree = Count == 3;
    // Create the ORR-immediate instruction (materializes |A|A|A|A|).
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
            .addOperand(MI.getOperand(0))
            .addReg(AArch64::XZR)
            .addImm(Encoding);

    const unsigned DstReg = MI.getOperand(0).getReg();
    const bool DstIsDead = MI.getOperand(0).isDead();

    unsigned ShiftAmt = 0;
    uint64_t Imm16 = 0;
    // Find the first chunk not materialized with the ORR instruction.
    for (; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
      if (Imm16 != ChunkVal)
        break;
    }

    // Create the first MOVK instruction.  It only carries the dead flag when
    // it is also the last instruction of the expansion (CountThree case).
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
            .addReg(DstReg,
                    RegState::Define | getDeadRegState(DstIsDead && CountThree))
            .addReg(DstReg)
            .addImm(Imm16)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));

    // In case we have three instances the whole constant is now materialized
    // and we can exit.
    if (CountThree) {
      transferImpOps(MI, MIB, MIB1);
      MI.eraseFromParent();
      return true;
    }

    // Find the remaining chunk which needs to be materialized.
    for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
      if (Imm16 != ChunkVal)
        break;
    }

    // Create the second MOVK instruction.
    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
            .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
            .addReg(DstReg)
            .addImm(Imm16)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
    transferImpOps(MI, MIB, MIB2);
    MI.eraseFromParent();
    return true;
  }

  return false;
}
/// \brief Check whether this chunk matches the pattern '1...0...'. This
/// pattern starts a contiguous sequence of ones if we look at the bits from
/// the LSB towards the MSB.  All-zero and all-one chunks are rejected.
static bool isStartChunk(uint64_t Chunk) {
  return Chunk != 0 && Chunk != UINT64_MAX && isMask_64(~Chunk);
}
/// \brief Check whether this chunk matches the pattern '0...1...'. This
/// pattern ends a contiguous sequence of ones if we look at the bits from
/// the LSB towards the MSB.  All-zero and all-one chunks are rejected.
static bool isEndChunk(uint64_t Chunk) {
  return Chunk != 0 && Chunk != UINT64_MAX && isMask_64(Chunk);
}
/// \brief Return \p Imm with the 16-bit chunk at index \p Idx forced to all
/// zeros (\p Clear == true) or all ones (\p Clear == false).
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t ChunkMask = 0xFFFFULL << (Idx * 16);
  // Either knock the chunk out of the immediate or saturate it.
  return Clear ? (Imm & ~ChunkMask) : (Imm | ChunkMask);
}
/// \brief Check whether the constant contains a sequence of contiguous ones,
/// which might be interrupted by one or two chunks. If so, materialize the
/// sequence of contiguous ones with an ORR instruction.
/// Materialize the chunks which are either interrupting the sequence or outside
/// of the sequence with a MOVK instruction.
///
/// Assuming S is a chunk which starts the sequence (1...0...), E is a chunk
/// which ends the sequence (0...1...). Then we are looking for constants which
/// contain at least one S and E chunk.
/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
///
/// We are also looking for constants like |S|A|B|E| where the contiguous
/// sequence of ones wraps around the MSB into the LSB.
///
/// Returns true (and erases \p MI) when the ORR + MOVK(s) sequence was
/// emitted, false if no start/end chunk pair exists.
static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              const AArch64InstrInfo *TII) {
  const int NotSet = -1;
  const uint64_t Mask = 0xFFFF;

  int StartIdx = NotSet;
  int EndIdx = NotSet;
  // Try to find the chunks which start/end a contiguous sequence of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    int64_t Chunk = getChunk(UImm, Idx);
    // Sign extend the 16-bit chunk to 64-bit.
    Chunk = (Chunk << 48) >> 48;

    if (isStartChunk(Chunk))
      StartIdx = Idx;
    else if (isEndChunk(Chunk))
      EndIdx = Idx;
  }

  // Early exit in case we can't find a start/end chunk.
  if (StartIdx == NotSet || EndIdx == NotSet)
    return false;

  // Outside of the contiguous sequence of ones everything needs to be zero.
  uint64_t Outside = 0;
  // Chunks between the start and end chunk need to have all their bits set.
  uint64_t Inside = Mask;

  // If our contiguous sequence of ones wraps around from the MSB into the LSB,
  // just swap indices and pretend we are materializing a contiguous sequence
  // of zeros surrounded by a contiguous sequence of ones.
  if (StartIdx > EndIdx) {
    std::swap(StartIdx, EndIdx);
    std::swap(Outside, Inside);
  }

  uint64_t OrrImm = UImm;
  int FirstMovkIdx = NotSet;  // Index of the first chunk MOVK must patch.
  int SecondMovkIdx = NotSet; // Index of the (optional) second such chunk.

  // Find out which chunks we need to patch up to obtain a contiguous sequence
  // of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    const uint64_t Chunk = getChunk(UImm, Idx);

    // Check whether we are looking at a chunk which is not part of the
    // contiguous sequence of ones.
    if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
      OrrImm = updateImm(OrrImm, Idx, Outside == 0);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;

      // Check whether we are looking a chunk which is part of the contiguous
      // sequence of ones.
    } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
      OrrImm = updateImm(OrrImm, Idx, Inside != Mask);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;
    }
  }
  assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");

  // Create the ORR-immediate instruction.  OrrImm is a contiguous run of
  // ones by construction, so the encoding always succeeds.
  uint64_t Encoding = 0;
  AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
  MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
          .addOperand(MI.getOperand(0))
          .addReg(AArch64::XZR)
          .addImm(Encoding);

  const unsigned DstReg = MI.getOperand(0).getReg();
  const bool DstIsDead = MI.getOperand(0).isDead();

  const bool SingleMovk = SecondMovkIdx == NotSet;
  // Create the first MOVK instruction; it only carries the dead flag when it
  // is also the last instruction of the expansion.
  MachineInstrBuilder MIB1 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
          .addReg(DstReg,
                  RegState::Define | getDeadRegState(DstIsDead && SingleMovk))
          .addReg(DstReg)
          .addImm(getChunk(UImm, FirstMovkIdx))
          .addImm(
              AArch64_AM::getShifterImm(AArch64_AM::LSL, FirstMovkIdx * 16));

  // Early exit in case we only need to emit a single MOVK instruction.
  if (SingleMovk) {
    transferImpOps(MI, MIB, MIB1);
    MI.eraseFromParent();
    return true;
  }

  // Create the second MOVK instruction.
  MachineInstrBuilder MIB2 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
          .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
          .addReg(DstReg)
          .addImm(getChunk(UImm, SecondMovkIdx))
          .addImm(
              AArch64_AM::getShifterImm(AArch64_AM::LSL, SecondMovkIdx * 16));
  transferImpOps(MI, MIB, MIB2);
  MI.eraseFromParent();
  return true;
}
/// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
///
/// Strategies are tried in order of preference:
///   1. a single ORR-immediate against the zero register;
///   2. (64-bit) ORR of a replicated-chunk pattern + one MOVK;
///   3. (64-bit) ORR of a repeated chunk + one or two MOVKs;
///   4. (64-bit) ORR of a contiguous-ones mask + one or two MOVKs;
///   5. fallback: MOVZ/MOVN for the top chunk + MOVKs for the rest.
/// Always returns true and erases the pseudo.
bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned BitSize) {
  MachineInstr &MI = *MBBI;
  unsigned DstReg = MI.getOperand(0).getReg();
  uint64_t Imm = MI.getOperand(1).getImm();
  const unsigned Mask = 0xFFFF;

  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
    // Useless def, and we don't want to risk creating an invalid ORR (which
    // would really write to sp).
    MI.eraseFromParent();
    return true;
  }

  // Try a MOVI instruction (aka ORR-immediate with the zero register).
  // First zero-extend Imm to exactly BitSize bits.
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
            .addOperand(MI.getOperand(0))
            .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
            .addImm(Encoding);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }

  // Scan the immediate and count the number of 16-bit chunks which are either
  // all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }

  // Since we can't materialize the constant with a single ORR instruction,
  // let's see whether we can materialize 3/4 of the constant with an ORR
  // instruction and use an additional MOVK instruction to materialize the
  // remaining 1/4.
  //
  // We are looking for constants with a pattern like: |A|X|B|X| or |X|A|X|B|.
  //
  // E.g. assuming |A|X|A|X| is a pattern which can be materialized with ORR,
  // we would create the following instruction sequence:
  //
  // ORR x0, xzr, |A|X|A|X|
  // MOVK x0, |B|, LSL #16
  //
  // Only look at 64-bit constants which can't be materialized with a single
  // instruction e.g. which have less than either three all zero or all one
  // chunks.
  //
  // Ignore 32-bit constants here, they always can be materialized with a
  // MOVZ/MOVN + MOVK pair. Since the 32-bit constant can't be materialized
  // with a single ORR, the best sequence we can achieve is a ORR + MOVK pair.
  // Thus we fall back to the default code below which in the best case creates
  // a single MOVZ/MOVN instruction (in case one chunk is all zero or all one).
  //
  if (BitSize == 64 && OneChunks < 3 && ZeroChunks < 3) {
    // If we interpret the 64-bit constant as a v4i16, are elements 0 and 2
    // identical?
    if (getChunk(UImm, 0) == getChunk(UImm, 2)) {
      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 3 into element 1.
      uint64_t OrrImm = replicateChunk(UImm, 3, 1);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 1))
        return true;

      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 1 into element 3.
      OrrImm = replicateChunk(UImm, 1, 3);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 3))
        return true;

      // If we interpret the 64-bit constant as a v4i16, are elements 1 and 3
      // identical?
    } else if (getChunk(UImm, 1) == getChunk(UImm, 3)) {
      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 2 into element 0.
      uint64_t OrrImm = replicateChunk(UImm, 2, 0);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 0))
        return true;

      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 0 into element 2.
      OrrImm = replicateChunk(UImm, 0, 2);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 2))
        return true;
    }
  }

  // Check for identical 16-bit chunks within the constant and if so materialize
  // them with a single ORR instruction. The remaining one or two 16-bit chunks
  // will be materialized with MOVK instructions.
  if (BitSize == 64 && tryToreplicateChunks(UImm, MI, MBB, MBBI, TII))
    return true;

  // Check whether the constant contains a sequence of contiguous ones, which
  // might be interrupted by one or two chunks. If so, materialize the sequence
  // of contiguous ones with an ORR instruction. Materialize the chunks which
  // are either interrupting the sequence or outside of the sequence with a
  // MOVK instruction.
  if (BitSize == 64 && trySequenceOfOnes(UImm, MI, MBB, MBBI, TII))
    return true;

  // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
  // more MOVK instructions to insert additional 16-bit portions into the
  // lower bits.
  bool isNeg = false;

  // Use MOVN to materialize the high bits if we have more all one chunks
  // than all zero chunks.
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for high bits with MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for last MOVK
  if (Imm != 0) {
    unsigned LZ = countLeadingZeros(Imm);
    unsigned TZ = countTrailingZeros(Imm);
    // Highest and lowest non-zero chunk positions bound the MOVK range.
    Shift = ((63 - LZ) / 16) * 16;
    LastShift = (TZ / 16) * 16;
  }
  unsigned Imm16 = (Imm >> Shift) & Mask;
  bool DstIsDead = MI.getOperand(0).isDead();
  MachineInstrBuilder MIB1 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(FirstOpc))
          .addReg(DstReg, RegState::Define |
                              getDeadRegState(DstIsDead && Shift == LastShift))
          .addImm(Imm16)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  if (Shift == LastShift) {
    // A single MOVZ/MOVN was enough.
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }

  MachineInstrBuilder MIB2;
  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift != LastShift) {
    Shift -= 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit portion is already set correctly.
    MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
               .addReg(DstReg,
                       RegState::Define |
                           getDeadRegState(DstIsDead && Shift == LastShift))
               .addReg(DstReg)
               .addImm(Imm16)
               .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
  }

  transferImpOps(MI, MIB1, MIB2);
  MI.eraseFromParent();
  return true;
}
/// \brief Mark every register in \p LiveRegs as a live-in of \p MBB.
static void addPostLoopLiveIns(MachineBasicBlock *MBB, LivePhysRegs &LiveRegs) {
  for (auto Reg : LiveRegs)
    MBB->addLiveIn(Reg);
}
/// \brief Expand a CMP_SWAP_<8|16|32|64> pseudo into an explicit
/// load-exclusive / compare / store-exclusive loop:
///
///   .Lloadcmp: ldaxr  Dest, [Addr]
///              cmp    Dest, Desired      (CmpOp with ExtendImm)
///              b.ne   .Ldone
///   .Lstore:   stlxr  Status, New, [Addr]
///              cbnz   Status, .Lloadcmp
///   .Ldone:
///
/// The containing block is split into LoadCmpBB/StoreBB/DoneBB; NextMBBI is
/// set to MBB.end() so the caller resumes iteration after the split point.
/// Returns true unconditionally.
bool AArch64ExpandPseudo::expandCMP_SWAP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
    unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineOperand &Dest = MI.getOperand(0);
  unsigned StatusReg = MI.getOperand(1).getReg();
  MachineOperand &Addr = MI.getOperand(2);
  MachineOperand &Desired = MI.getOperand(3);
  MachineOperand &New = MI.getOperand(4);

  // Compute the set of registers live after the pseudo; the new blocks must
  // declare them as live-ins.
  LivePhysRegs LiveRegs(&TII->getRegisterInfo());
  LiveRegs.addLiveOuts(MBB);
  for (auto I = std::prev(MBB.end()); I != MBBI; --I)
    LiveRegs.stepBackward(*I);

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     ldaxr xDest, [xAddr]
  //     cmp xDest, xDesired
  //     b.ne .Ldone
  LoadCmpBB->addLiveIn(Addr.getReg());
  LoadCmpBB->addLiveIn(Dest.getReg());
  LoadCmpBB->addLiveIn(Desired.getReg());
  addPostLoopLiveIns(LoadCmpBB, LiveRegs);

  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
      .addReg(Addr.getReg());
  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
      .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
      .addOperand(Desired)
      .addImm(ExtendImm);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
      .addImm(AArch64CC::NE)
      .addMBB(DoneBB)
      .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxr wStatus, xNew, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  StoreBB->addLiveIn(Addr.getReg());
  StoreBB->addLiveIn(New.getReg());
  addPostLoopLiveIns(StoreBB, LiveRegs);

  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
      .addOperand(New)
      .addOperand(Addr);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, RegState::Kill)
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  // Everything after the pseudo moves into DoneBB, which inherits MBB's
  // successors.
  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  addPostLoopLiveIns(DoneBB, LiveRegs);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}
/// \brief Expand a CMP_SWAP_128 pseudo into a 128-bit LDAXP/STLXP loop.
/// The 128-bit compare is done as two 64-bit SUBS, with CSINC folding the
/// two per-half results into StatusReg so a single CBNZ decides the outcome.
/// The containing block is split into LoadCmpBB/StoreBB/DoneBB; NextMBBI is
/// set to MBB.end() for the caller.  Returns true unconditionally.
bool AArch64ExpandPseudo::expandCMP_SWAP_128(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineOperand &DestLo = MI.getOperand(0);
  MachineOperand &DestHi = MI.getOperand(1);
  unsigned StatusReg = MI.getOperand(2).getReg();
  MachineOperand &Addr = MI.getOperand(3);
  MachineOperand &DesiredLo = MI.getOperand(4);
  MachineOperand &DesiredHi = MI.getOperand(5);
  MachineOperand &NewLo = MI.getOperand(6);
  MachineOperand &NewHi = MI.getOperand(7);

  // Compute the set of registers live after the pseudo; the new blocks must
  // declare them as live-ins.
  LivePhysRegs LiveRegs(&TII->getRegisterInfo());
  LiveRegs.addLiveOuts(MBB);
  for (auto I = std::prev(MBB.end()); I != MBBI; --I)
    LiveRegs.stepBackward(*I);

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     ldaxp xDestLo, xDestHi, [xAddr]
  //     cmp xDestLo, xDesiredLo
  //     sbcs xDestHi, xDesiredHi
  //     b.ne .Ldone
  LoadCmpBB->addLiveIn(Addr.getReg());
  LoadCmpBB->addLiveIn(DestLo.getReg());
  LoadCmpBB->addLiveIn(DestHi.getReg());
  LoadCmpBB->addLiveIn(DesiredLo.getReg());
  LoadCmpBB->addLiveIn(DesiredHi.getReg());
  addPostLoopLiveIns(LoadCmpBB, LiveRegs);

  BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
      .addReg(DestLo.getReg(), RegState::Define)
      .addReg(DestHi.getReg(), RegState::Define)
      .addReg(Addr.getReg());
  // Low halves: SUBS sets flags; CSINC makes StatusReg 1 iff they were equal.
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
      .addOperand(DesiredLo)
      .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
      .addImm(AArch64CC::EQ);
  // High halves: combine with the low-half result via CSINC on StatusReg.
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
      .addOperand(DesiredHi)
      .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(StatusReg, RegState::Kill)
      .addUse(StatusReg, RegState::Kill)
      .addImm(AArch64CC::EQ);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
      .addUse(StatusReg, RegState::Kill)
      .addMBB(DoneBB);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxp wStatus, xNewLo, xNewHi, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  StoreBB->addLiveIn(Addr.getReg());
  StoreBB->addLiveIn(NewLo.getReg());
  StoreBB->addLiveIn(NewHi.getReg());
  addPostLoopLiveIns(StoreBB, LiveRegs);
  BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
      .addOperand(NewLo)
      .addOperand(NewHi)
      .addOperand(Addr);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, RegState::Kill)
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  // Everything after the pseudo moves into DoneBB, which inherits MBB's
  // successors.
  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  addPostLoopLiveIns(DoneBB, LiveRegs);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}
/// \brief If MBBI references a pseudo instruction that should be expanded
/// here, do the expansion and return true.  Otherwise return false.
/// NextMBBI may be updated by expansions that split the block.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;

  // Two-register arithmetic/logical forms are rewritten to the equivalent
  // shifted-register form with an explicit LSL #0 operand.
  case AArch64::ADDWrr:
  case AArch64::SUBWrr:
  case AArch64::ADDXrr:
  case AArch64::SUBXrr:
  case AArch64::ADDSWrr:
  case AArch64::SUBSWrr:
  case AArch64::ADDSXrr:
  case AArch64::SUBSXrr:
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::ANDSWrr:
  case AArch64::ANDSXrr:
  case AArch64::BICSWrr:
  case AArch64::BICSXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr: {
    // Map each *rr opcode to its *rs counterpart.
    unsigned Opcode;
    switch (MI.getOpcode()) {
    default:
      return false;
    case AArch64::ADDWrr:      Opcode = AArch64::ADDWrs; break;
    case AArch64::SUBWrr:      Opcode = AArch64::SUBWrs; break;
    case AArch64::ADDXrr:      Opcode = AArch64::ADDXrs; break;
    case AArch64::SUBXrr:      Opcode = AArch64::SUBXrs; break;
    case AArch64::ADDSWrr:     Opcode = AArch64::ADDSWrs; break;
    case AArch64::SUBSWrr:     Opcode = AArch64::SUBSWrs; break;
    case AArch64::ADDSXrr:     Opcode = AArch64::ADDSXrs; break;
    case AArch64::SUBSXrr:     Opcode = AArch64::SUBSXrs; break;
    case AArch64::ANDWrr:      Opcode = AArch64::ANDWrs; break;
    case AArch64::ANDXrr:      Opcode = AArch64::ANDXrs; break;
    case AArch64::BICWrr:      Opcode = AArch64::BICWrs; break;
    case AArch64::BICXrr:      Opcode = AArch64::BICXrs; break;
    case AArch64::ANDSWrr:     Opcode = AArch64::ANDSWrs; break;
    case AArch64::ANDSXrr:     Opcode = AArch64::ANDSXrs; break;
    case AArch64::BICSWrr:     Opcode = AArch64::BICSWrs; break;
    case AArch64::BICSXrr:     Opcode = AArch64::BICSXrs; break;
    case AArch64::EONWrr:      Opcode = AArch64::EONWrs; break;
    case AArch64::EONXrr:      Opcode = AArch64::EONXrs; break;
    case AArch64::EORWrr:      Opcode = AArch64::EORWrs; break;
    case AArch64::EORXrr:      Opcode = AArch64::EORXrs; break;
    case AArch64::ORNWrr:      Opcode = AArch64::ORNWrs; break;
    case AArch64::ORNXrr:      Opcode = AArch64::ORNXrs; break;
    case AArch64::ORRWrr:      Opcode = AArch64::ORRWrs; break;
    case AArch64::ORRXrr:      Opcode = AArch64::ORRXrs; break;
    }
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
                MI.getOperand(0).getReg())
            .addOperand(MI.getOperand(1))
            .addOperand(MI.getOperand(2))
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::LOADgot: {
    // Expand into ADRP + LDR.
    unsigned DstReg = MI.getOperand(0).getReg();
    const MachineOperand &MO1 = MI.getOperand(1);
    unsigned Flags = MO1.getTargetFlags();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
            .addOperand(MI.getOperand(0))
            .addReg(DstReg);

    // The GOT slot operand may be a global, an external symbol, or a
    // constant-pool entry; attach the page / page-offset relocation flags
    // to the ADRP / LDR halves respectively.
    if (MO1.isGlobal()) {
      MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
      MIB2.addGlobalAddress(MO1.getGlobal(), 0,
                            Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    } else if (MO1.isSymbol()) {
      MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
      MIB2.addExternalSymbol(MO1.getSymbolName(),
                             Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    } else {
      assert(MO1.isCPI() &&
             "Only expect globals, externalsymbols, or constant pools");
      MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                Flags | AArch64II::MO_PAGE);
      MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                Flags | AArch64II::MO_PAGEOFF |
                                    AArch64II::MO_NC);
    }
    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVaddr:
  case AArch64::MOVaddrJT:
  case AArch64::MOVaddrCP:
  case AArch64::MOVaddrBA:
  case AArch64::MOVaddrTLS:
  case AArch64::MOVaddrEXT: {
    // Expand into ADRP + ADD.
    unsigned DstReg = MI.getOperand(0).getReg();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
            .addOperand(MI.getOperand(1));
    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
            .addOperand(MI.getOperand(0))
            .addReg(DstReg)
            .addOperand(MI.getOperand(2))
            .addImm(0);
    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVi32imm:
    return expandMOVImm(MBB, MBBI, 32);
  case AArch64::MOVi64imm:
    return expandMOVImm(MBB, MBBI, 64);
  case AArch64::RET_ReallyLR: {
    // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
    // function and missing live-ins. We are fine in practice because callee
    // saved register handling ensures the register value is restored before
    // RET, but we need the undef flag here to appease the MachineVerifier
    // liveness checks.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
            .addReg(AArch64::LR, RegState::Undef);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }

  // Compare-and-swap pseudos: sub-word variants compare with an extending
  // SUBS (UXTB/UXTH), word/doubleword variants with a shifted SUBS.
  case AArch64::CMP_SWAP_8:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_16:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_32:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
                          AArch64::SUBSWrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_64:
    return expandCMP_SWAP(MBB, MBBI,
                          AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::XZR, NextMBBI);
  case AArch64::CMP_SWAP_128:
    return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
  }
  return false;
}
/// \brief Iterate over the instructions in basic block MBB and expand any
/// pseudo instructions. Return true if anything was modified.
bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    // NMBBI is passed by reference: expansions that split the block
    // (CMP_SWAP) move it so iteration resumes at the correct instruction.
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}
/// Pass entry point: cache the target instruction info, then expand pseudos
/// in every basic block.  Returns true if any instruction was rewritten.
bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());

  bool Changed = false;
  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
    Changed |= expandMBB(*BB);
  return Changed;
}
/// \brief Returns an instance of the pseudo instruction expansion pass.
/// Factory used by the AArch64 target's pass pipeline setup.
FunctionPass *llvm::createAArch64ExpandPseudoPass() {
  return new AArch64ExpandPseudo();
}
|
/**!
* \file strongly_connected_components_test.cpp
* \brief Tests: Algorithm for strongly connected components
*/
#include <algorithm>
#include <gtest/gtest.h>
#include "algolib/graphs/algorithms/strongly_connected_components.hpp"
namespace algr = algolib::graphs;
using graph_t = algr::directed_simple_graph<>;
using graph_v = graph_t::vertex_type;
// Directed graph with four strongly connected components:
// {0,1,3,4}, {2}, {5} and {6,7,8,9}.  find_scc must report exactly these
// sets (in any order).
TEST(StronglyConnectedComponentsTest, findSCC_WhenManyComponents_ThenAllListed)
{
    // given
    graph_t graph({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
    graph.add_edge_between(graph[0], graph[4]);
    graph.add_edge_between(graph[0], graph[5]);
    graph.add_edge_between(graph[1], graph[0]);
    graph.add_edge_between(graph[2], graph[3]);
    graph.add_edge_between(graph[3], graph[1]);
    graph.add_edge_between(graph[4], graph[1]);
    graph.add_edge_between(graph[4], graph[3]);
    graph.add_edge_between(graph[6], graph[5]);
    graph.add_edge_between(graph[6], graph[9]);
    graph.add_edge_between(graph[7], graph[4]);
    graph.add_edge_between(graph[7], graph[6]);
    graph.add_edge_between(graph[8], graph[3]);
    graph.add_edge_between(graph[8], graph[7]);
    graph.add_edge_between(graph[9], graph[8]);

    std::vector<std::unordered_set<graph_v>> expected = {{graph[0], graph[1], graph[3], graph[4]},
                                                         {graph[2]},
                                                         {graph[5]},
                                                         {graph[6], graph[7], graph[8], graph[9]}};
    // when
    std::vector<std::unordered_set<graph_v>> result = algr::find_scc(graph);
    // then
    // Component order is unspecified, so check membership rather than
    // sequence equality.
    ASSERT_EQ(4, result.size());

    for(auto && scc : expected)
        EXPECT_TRUE(std::find(result.begin(), result.end(), scc) != result.end());
}
// Fixed typo in the test name: "Singele" -> "Single". Test names are not
// referenced by other code, so this is safe.
TEST(StronglyConnectedComponentsTest, findSCC_WhenSingleComponent_ThenAllVertices)
{
    // given: a directed cycle through all vertices, so the whole graph forms
    // one strongly connected component
    graph_t graph({0, 1, 2, 3, 4, 5, 6});
    graph.add_edge_between(graph[0], graph[1]);
    graph.add_edge_between(graph[1], graph[2]);
    graph.add_edge_between(graph[2], graph[3]);
    graph.add_edge_between(graph[3], graph[4]);
    graph.add_edge_between(graph[4], graph[5]);
    graph.add_edge_between(graph[5], graph[6]);
    graph.add_edge_between(graph[6], graph[0]);

    std::vector<std::unordered_set<graph_v>> expected = {
            {graph[0], graph[1], graph[2], graph[3], graph[4], graph[5], graph[6]}};

    // when
    std::vector<std::unordered_set<graph_v>> result = algr::find_scc(graph);

    // then
    ASSERT_EQ(1, result.size());
    EXPECT_EQ(expected, result);
}
TEST(StronglyConnectedComponentsTest, findSCC_WhenEmptyGraph_ThenEachVertexIsComponent)
{
    // given: vertices only, no edges
    graph_t graph({0, 1, 2, 3});

    // when
    std::vector<std::unordered_set<graph_v>> result = algr::find_scc(graph);

    // then: every vertex is its own singleton component
    ASSERT_EQ(4, result.size());

    for(int index = 0; index < 4; ++index)
    {
        std::unordered_set<graph_v> singleton = {graph[index]};
        EXPECT_NE(result.end(), std::find(result.begin(), result.end(), singleton));
    }
}
|
#include <bits/stdc++.h>
#define REP(i, n) for(int i = 0; i < n; i++)
#define FOR(i, a, b) for (int i = a; i < b; i++)
#define CLR(t, value) memset(t, value, sizeof(t))
#define ALL(v) v.begin(), v.end()
#define SZ(v) ((int)(v).size())
#define TEST(x) cerr << "test " << #x << " " << x << endl;
#define sc(x) scanf("%d", &x)
using namespace std;
typedef long long Long;
typedef vector<int> vInt;
typedef pair<int,int> Pair;
const int N = 1e5 + 2;
const int INF = 1e9 + 7;
const int MOD = 1e9 + 7;
const double EPS = 1e-8;
/************************************/
// Reads n commands of the form "<word> <number>" after an initial count, and
// prints the (currently constant) answer. NOTE(review): `ans` is never
// updated and the reversed vector A is never consumed afterwards — this looks
// like an unfinished solution; behavior is preserved as-is.
int main() {
    int n;
    uint p;
    char ord[10];

    cin >> n;

    long long ans = 1;
    vector <pair <bool, uint> > A;

    REP(i, n) {
        // Fix: "%9s" bounds the read to the 10-byte buffer, and "%u" matches
        // the `unsigned int*` argument — the original "%d" with an unsigned
        // pointer is undefined behavior per the C standard.
        scanf("%9s%u", ord, &p);
        // ord[1] distinguishes commands whose second letter is 'D'.
        A.push_back({ord[1] == 'D', p});
    }

    reverse(ALL(A));

    cout << ans << endl;
    return 0;
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "kudu/tserver/tablet_server.h"
#include <stdlib.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <initializer_list>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <unordered_set>
#include <utility>
#include <vector>
#include <boost/bind.hpp>
#include <boost/optional/optional.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <google/protobuf/util/message_differencer.h>
#include <gtest/gtest.h>
#include "kudu/clock/clock.h"
#include "kudu/clock/hybrid_clock.h"
#include "kudu/common/common.pb.h"
#include "kudu/common/encoded_key.h"
#include "kudu/common/partial_row.h"
#include "kudu/common/partition.h"
#include "kudu/common/row_operations.h"
#include "kudu/common/schema.h"
#include "kudu/common/timestamp.h"
#include "kudu/common/wire_protocol-test-util.h"
#include "kudu/common/wire_protocol.h"
#include "kudu/common/wire_protocol.pb.h"
#include "kudu/consensus/log-test-base.h"
#include "kudu/consensus/log.h"
#include "kudu/consensus/metadata.pb.h"
#include "kudu/consensus/raft_consensus.h"
#include "kudu/fs/block_id.h"
#include "kudu/fs/block_manager.h"
#include "kudu/fs/data_dirs.h"
#include "kudu/fs/fs-test-util.h"
#include "kudu/fs/fs.pb.h"
#include "kudu/fs/fs_manager.h"
#include "kudu/gutil/basictypes.h"
#include "kudu/gutil/callback.h"
#include "kudu/gutil/casts.h"
#include "kudu/gutil/gscoped_ptr.h"
#include "kudu/gutil/map-util.h"
#include "kudu/gutil/port.h"
#include "kudu/gutil/ref_counted.h"
#include "kudu/gutil/stringprintf.h"
#include "kudu/gutil/strings/escaping.h"
#include "kudu/gutil/strings/join.h"
#include "kudu/gutil/strings/substitute.h"
#include "kudu/rpc/messenger.h"
#include "kudu/rpc/rpc_controller.h"
#include "kudu/rpc/rpc_header.pb.h"
#include "kudu/rpc/user_credentials.h"
#include "kudu/server/rpc_server.h"
#include "kudu/server/server_base.pb.h"
#include "kudu/server/server_base.proxy.h"
#include "kudu/tablet/local_tablet_writer.h"
#include "kudu/tablet/metadata.pb.h"
#include "kudu/tablet/tablet.h"
#include "kudu/tablet/tablet_metadata.h"
#include "kudu/tablet/tablet_replica.h"
#include "kudu/tserver/heartbeater.h"
#include "kudu/tserver/mini_tablet_server.h"
#include "kudu/tserver/scanners.h"
#include "kudu/tserver/tablet_server-test-base.h"
#include "kudu/tserver/tablet_server_options.h"
#include "kudu/tserver/tablet_server_test_util.h"
#include "kudu/tserver/ts_tablet_manager.h"
#include "kudu/tserver/tserver.pb.h"
#include "kudu/tserver/tserver_admin.pb.h"
#include "kudu/tserver/tserver_admin.proxy.h"
#include "kudu/tserver/tserver_service.pb.h"
#include "kudu/tserver/tserver_service.proxy.h"
#include "kudu/util/countdown_latch.h"
#include "kudu/util/crc.h"
#include "kudu/util/curl_util.h"
#include "kudu/util/debug/sanitizer_scopes.h"
#include "kudu/util/env.h"
#include "kudu/util/faststring.h"
#include "kudu/util/hdr_histogram.h"
#include "kudu/util/jsonwriter.h"
#include "kudu/util/logging_test_util.h"
#include "kudu/util/metrics.h"
#include "kudu/util/monotime.h"
#include "kudu/util/net/sockaddr.h"
#include "kudu/util/path_util.h"
#include "kudu/util/pb_util.h"
#include "kudu/util/scoped_cleanup.h"
#include "kudu/util/slice.h"
#include "kudu/util/status.h"
#include "kudu/util/stopwatch.h"
#include "kudu/util/test_macros.h"
#include "kudu/util/test_util.h"
#include "kudu/util/thread.h"
#include "kudu/util/zlib.h"
using google::protobuf::util::MessageDifferencer;
using kudu::clock::Clock;
using kudu::clock::HybridClock;
using kudu::consensus::ConsensusStatePB;
using kudu::fs::BlockManager;
using kudu::fs::CreateCorruptBlock;
using kudu::fs::DataDirManager;
using kudu::pb_util::SecureDebugString;
using kudu::pb_util::SecureShortDebugString;
using kudu::rpc::Messenger;
using kudu::rpc::MessengerBuilder;
using kudu::rpc::RpcController;
using kudu::tablet::LocalTabletWriter;
using kudu::tablet::RowSetDataPB;
using kudu::tablet::Tablet;
using kudu::tablet::TabletReplica;
using kudu::tablet::TabletStatePB;
using kudu::tablet::TabletSuperBlockPB;
using std::map;
using std::pair;
using std::set;
using std::shared_ptr;
using std::string;
using std::thread;
using std::unique_ptr;
using std::unordered_set;
using std::vector;
using strings::Substitute;
DEFINE_int32(single_threaded_insert_latency_bench_warmup_rows, 100,
"Number of rows to insert in the warmup phase of the single threaded"
" tablet server insert latency micro-benchmark");
DEFINE_int32(single_threaded_insert_latency_bench_insert_rows, 1000,
"Number of rows to insert in the testing phase of the single threaded"
" tablet server insert latency micro-benchmark");
DEFINE_int32(delete_tablet_bench_num_flushes, 200,
"Number of disk row sets to flush in the delete tablet benchmark");
DECLARE_bool(crash_on_eio);
DECLARE_bool(enable_flush_deltamemstores);
DECLARE_bool(enable_flush_memrowset);
DECLARE_bool(enable_maintenance_manager);
DECLARE_bool(enable_rowset_compaction);
DECLARE_bool(fail_dns_resolution);
DECLARE_bool(rowset_metadata_store_keys);
DECLARE_double(cfile_inject_corruption);
DECLARE_double(env_inject_eio);
DECLARE_double(env_inject_full);
DECLARE_int32(flush_threshold_mb);
DECLARE_int32(flush_threshold_secs);
DECLARE_int32(fs_data_dirs_available_space_cache_seconds);
DECLARE_int32(fs_target_data_dirs_per_tablet);
DECLARE_int32(maintenance_manager_num_threads);
DECLARE_int32(maintenance_manager_polling_interval_ms);
DECLARE_int32(memory_pressure_percentage);
DECLARE_int32(metrics_retirement_age_ms);
DECLARE_int32(scanner_batch_size_rows);
DECLARE_int32(scanner_gc_check_interval_us);
DECLARE_int32(scanner_ttl_ms);
DECLARE_string(block_manager);
DECLARE_string(env_inject_eio_globs);
DECLARE_string(env_inject_full_globs);
// Declare these metrics prototypes for simpler unit testing of their behavior.
METRIC_DECLARE_counter(block_manager_total_bytes_read);
METRIC_DECLARE_counter(log_block_manager_holes_punched);
METRIC_DECLARE_counter(rows_inserted);
METRIC_DECLARE_counter(rows_updated);
METRIC_DECLARE_counter(rows_deleted);
METRIC_DECLARE_counter(scanners_expired);
METRIC_DECLARE_gauge_uint64(log_block_manager_blocks_under_management);
METRIC_DECLARE_gauge_uint64(log_block_manager_containers);
METRIC_DECLARE_gauge_size(active_scanners);
METRIC_DECLARE_gauge_size(tablet_active_scanners);
METRIC_DECLARE_gauge_size(num_rowsets_on_disk);
METRIC_DECLARE_histogram(flush_dms_duration);
namespace kudu {
namespace tablet {
class RowSet;
}
namespace tserver {
class TabletServerTest : public TabletServerTestBase {
 public:
  // Brings up the base test environment and immediately starts a tablet
  // server with a single data directory. Subclasses that need to start the
  // server later (or differently) override this.
  void SetUp() OVERRIDE {
    NO_FATALS(TabletServerTestBase::SetUp());
    NO_FATALS(StartTabletServer(/*num_data_dirs=*/1));
  }

  void DoOrderedScanTest(const Schema& projection, const string& expected_rows_as_string);

  void ScanYourWritesTest(uint64_t propagated_timestamp, ScanResponsePB* resp);
};
TEST_F(TabletServerTest, TestPingServer) {
  // A successful Ping RPC round-trip is all this test verifies.
  PingRequestPB ping_req;
  PingResponsePB ping_resp;
  RpcController rpc;
  ASSERT_OK(proxy_->Ping(ping_req, &ping_resp, &rpc));
}
TEST_F(TabletServerTest, TestStatus) {
  // Get the server's status.
  server::GetStatusRequestPB req;
  server::GetStatusResponsePB resp;
  RpcController controller;
  ASSERT_OK(generic_proxy_->GetStatus(req, &resp, &controller));
  ASSERT_TRUE(resp.has_status());
  ASSERT_TRUE(resp.status().has_node_instance());
  ASSERT_EQ(mini_server_->uuid(), resp.status().node_instance().permanent_uuid());

  // Regression test for KUDU-2148: try to get the status as the server is
  // starting. To surface this more frequently, we restart the server a number
  // of times.
  CountDownLatch latch(1);
  // Background thread that hammers GetStatus in a loop until the latch is
  // counted down by the cleanup block below.
  thread status_thread([&](){
    server::GetStatusRequestPB req;
    server::GetStatusResponsePB resp;
    RpcController controller;
    while (latch.count() > 0) {
      // Reset RPC state so the controller/response can be reused each round.
      controller.Reset();
      resp.Clear();
      Status s = generic_proxy_->GetStatus(req, &resp, &controller);
      if (s.ok()) {
        // These two fields are guaranteed even if the request yielded an error.
        CHECK(resp.has_status());
        CHECK(resp.status().has_node_instance());
        if (resp.has_error()) {
          // But this one isn't set if the request yielded an error.
          CHECK(!resp.status().has_version_info());
        }
      }
    }
  });
  // Stop and join the poller thread no matter how the test body exits.
  SCOPED_CLEANUP({
    latch.CountDown();
    status_thread.join();
  });
  // Can't safely restart unless we allow the replica to be destroyed.
  tablet_replica_.reset();
  // Restart repeatedly while the poller races against server startup.
  for (int i = 0; i < (AllowSlowTests() ? 100 : 10); i++) {
    mini_server_->Shutdown();
    ASSERT_OK(mini_server_->Restart());
  }
}
TEST_F(TabletServerTest, TestServerClock) {
  // Ask the server for a clock reading, then check that the server's clock
  // has since advanced past the timestamp it returned.
  server::ServerClockRequestPB clock_req;
  server::ServerClockResponsePB clock_resp;
  RpcController rpc;
  ASSERT_OK(generic_proxy_->ServerClock(clock_req, &clock_resp, &rpc));
  ASSERT_GT(mini_server_->server()->clock()->Now().ToUint64(),
            clock_resp.timestamp());
}
TEST_F(TabletServerTest, TestGetFlags) {
  server::GenericServiceProxy proxy(
      client_messenger_, mini_server_->bound_rpc_addr(),
      mini_server_->bound_rpc_addr().host());

  server::GetFlagsRequestPB req;
  server::GetFlagsResponsePB resp;

  // Returns true iff the most recent response listed a flag with this name.
  const auto ReturnedFlag = [&resp](const string& flag_name) {
    return std::any_of(resp.flags().begin(), resp.flags().end(),
                       [&flag_name](const server::GetFlagsResponsePB::Flag& flag) {
                         return flag.name() == flag_name;
                       });
  };

  // Check that a default request returns flags set to a non-default value and
  // does not return flags set to a default value.
  // Throughout, we make the reasonable assumption that the -fs_wal_dir flag
  // will have a non-default value, and the -help and unsafe -logemaillevel
  // flags will have default values.
  {
    RpcController controller;
    ASSERT_OK(proxy.GetFlags(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_TRUE(ReturnedFlag("log_dir"));
    EXPECT_FALSE(ReturnedFlag("help"));
  }

  // Check that specifying all flags returns even flags with default values.
  {
    RpcController controller;
    req.set_all_flags(true);
    ASSERT_OK(proxy.GetFlags(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_TRUE(ReturnedFlag("log_dir"));
    EXPECT_TRUE(ReturnedFlag("help"));
    EXPECT_TRUE(ReturnedFlag("logemaillevel"));
  }

  // Check that filtering on tags excludes flags with no matching tag.
  // (all_flags remains set from the previous step.)
  {
    RpcController controller;
    req.add_tags("stable");
    ASSERT_OK(proxy.GetFlags(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_TRUE(ReturnedFlag("log_dir"));
    EXPECT_TRUE(ReturnedFlag("help"));
    EXPECT_FALSE(ReturnedFlag("logemaillevel"));
  }

  // Check that we get flags with -flags.
  {
    RpcController controller;
    req.Clear();
    req.add_flags("log_dir");
    req.add_flags("logemaillevel");
    ASSERT_OK(proxy.GetFlags(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_TRUE(ReturnedFlag("log_dir"));
    EXPECT_FALSE(ReturnedFlag("help"));
    EXPECT_TRUE(ReturnedFlag("logemaillevel"));
  }

  // Check -flags will ignore -all_flags.
  {
    RpcController controller;
    req.Clear();
    req.set_all_flags(true);
    req.add_flags("logemaillevel");
    ASSERT_OK(proxy.GetFlags(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_FALSE(ReturnedFlag("log_dir"));
    EXPECT_FALSE(ReturnedFlag("help"));
    EXPECT_TRUE(ReturnedFlag("logemaillevel"));
  }

  // Check -flag_tags filter to matching tags with -flags.
  {
    RpcController controller;
    req.Clear();
    req.add_flags("logemaillevel");
    req.add_tags("stable");
    ASSERT_OK(proxy.GetFlags(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_FALSE(ReturnedFlag("log_dir"));
    EXPECT_FALSE(ReturnedFlag("help"));
    EXPECT_FALSE(ReturnedFlag("logemaillevel"));
  }
}
TEST_F(TabletServerTest, TestSetFlags) {
  server::GenericServiceProxy proxy(
      client_messenger_, mini_server_->bound_rpc_addr(),
      mini_server_->bound_rpc_addr().host());

  server::SetFlagRequestPB req;
  server::SetFlagResponsePB resp;

  // Set an invalid flag.
  {
    RpcController controller;
    req.set_flag("foo");
    req.set_value("bar");
    ASSERT_OK(proxy.SetFlag(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_EQ(server::SetFlagResponsePB::NO_SUCH_FLAG, resp.result());
    EXPECT_TRUE(resp.msg().empty());
  }

  // Set a valid flag to a valid value.
  {
    // Capture the current value so we can verify it is echoed back.
    int32_t old_val = FLAGS_metrics_retirement_age_ms;
    RpcController controller;
    req.set_flag("metrics_retirement_age_ms");
    req.set_value("12345");
    ASSERT_OK(proxy.SetFlag(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_EQ(server::SetFlagResponsePB::SUCCESS, resp.result());
    EXPECT_EQ(resp.msg(), "metrics_retirement_age_ms set to 12345\n");
    EXPECT_EQ(Substitute("$0", old_val), resp.old_value());
    EXPECT_EQ(12345, FLAGS_metrics_retirement_age_ms);
  }

  // Set a valid flag to an invalid value.
  {
    RpcController controller;
    req.set_flag("metrics_retirement_age_ms");
    req.set_value("foo");
    ASSERT_OK(proxy.SetFlag(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_EQ(server::SetFlagResponsePB::BAD_VALUE, resp.result());
    EXPECT_EQ(resp.msg(), "Unable to set flag: bad value");
    // The previously-set value (12345) must be left untouched by the failed
    // update.
    EXPECT_EQ(12345, FLAGS_metrics_retirement_age_ms);
  }

  // Try setting a flag which isn't runtime-modifiable
  {
    RpcController controller;
    req.set_flag("tablet_bloom_target_fp_rate");
    req.set_value("1.0");
    ASSERT_OK(proxy.SetFlag(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_EQ(server::SetFlagResponsePB::NOT_SAFE, resp.result());
  }

  // Try again, but with the force flag.
  {
    RpcController controller;
    req.set_flag("tablet_bloom_target_fp_rate");
    req.set_value("1.0");
    req.set_force(true);
    ASSERT_OK(proxy.SetFlag(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    EXPECT_EQ(server::SetFlagResponsePB::SUCCESS, resp.result());
  }
}
// Smoke test of the tablet server's embedded web UI and debug endpoints:
// tablet listing/detail pages, JSON metrics, tracing, and pprof handlers.
TEST_F(TabletServerTest, TestWebPages) {
  EasyCurl c;
  faststring buf;
  string addr = mini_server_->bound_http_addr().ToString();

  // Tablets page should list tablet.
  ASSERT_OK(c.FetchURL(Substitute("http://$0/tablets", addr),
                       &buf));
  ASSERT_STR_CONTAINS(buf.ToString(), kTabletId);
  ASSERT_STR_CONTAINS(buf.ToString(), "RANGE (key) PARTITION UNBOUNDED");

  // Tablet page should include the schema.
  ASSERT_OK(c.FetchURL(Substitute("http://$0/tablet?id=$1", addr, kTabletId),
                       &buf));
  ASSERT_STR_CONTAINS(buf.ToString(), "key");
  ASSERT_STR_CONTAINS(buf.ToString(), "STRING NULLABLE");

  // Test fetching metrics.
  // Fetching metrics has the side effect of retiring metrics, but not in a single pass.
  // So, we check a couple of times in a loop -- thus, if we had a bug where one of these
  // metrics was accidentally un-referenced too early, we'd cause it to get retired.
  // If the metrics survive several passes of fetching, then we are pretty sure they will
  // stick around properly for the whole lifetime of the server.
  FLAGS_metrics_retirement_age_ms = 0;
  for (int i = 0; i < 3; i++) {
    SCOPED_TRACE(i);
    ASSERT_OK(c.FetchURL(strings::Substitute("http://$0/jsonmetricz", addr), &buf));

    // Check that the tablet entry shows up.
    ASSERT_STR_CONTAINS(buf.ToString(), "\"type\": \"tablet\"");
    ASSERT_STR_CONTAINS(buf.ToString(), "\"id\": \"ffffffffffffffffffffffffffffffff\"");
    ASSERT_STR_CONTAINS(buf.ToString(), "\"partition\": \"RANGE (key) PARTITION UNBOUNDED");

    // Check entity attributes.
    ASSERT_STR_CONTAINS(buf.ToString(), "\"table_name\": \"TestTable\"");
    ASSERT_STR_CONTAINS(buf.ToString(), "\"table_id\": \"TestTable\"");

    // Check for the existence of some particular metrics for which we've had early-retirement
    // bugs in the past.
    ASSERT_STR_CONTAINS(buf.ToString(), "hybrid_clock_timestamp");
    ASSERT_STR_CONTAINS(buf.ToString(), "active_scanners");
    ASSERT_STR_CONTAINS(buf.ToString(), "threads_started");
    ASSERT_STR_CONTAINS(buf.ToString(), "code_cache_queries");
#ifdef TCMALLOC_ENABLED
    ASSERT_STR_CONTAINS(buf.ToString(), "tcmalloc_max_total_thread_cache_bytes");
#endif
    ASSERT_STR_CONTAINS(buf.ToString(), "glog_info_messages");
  }

  // Smoke-test the tracing infrastructure.
  ASSERT_OK(c.FetchURL(
      Substitute("http://$0/tracing/json/get_buffer_percent_full", addr, kTabletId),
      &buf));
  ASSERT_EQ(buf.ToString(), "0");

  // Begin/end a recording session (both plain and compressed variants) and
  // check that the returned trace JSON looks well-formed.
  string enable_req_json = "{\"categoryFilter\":\"*\", \"useContinuousTracing\": \"true\","
      " \"useSampling\": \"false\"}";
  string req_b64;
  strings::Base64Escape(enable_req_json, &req_b64);

  for (bool compressed : {false, true}) {
    ASSERT_OK(c.FetchURL(Substitute("http://$0/tracing/json/begin_recording?$1",
                                    addr,
                                    req_b64), &buf));
    ASSERT_EQ(buf.ToString(), "");
    ASSERT_OK(c.FetchURL(Substitute("http://$0/tracing/json/end_recording$1", addr,
                                    compressed ? "_compressed" : ""),
                         &buf));
    string json;
    if (compressed) {
      std::ostringstream ss;
      ASSERT_OK(zlib::Uncompress(buf, &ss));
      json = ss.str();
    } else {
      json = buf.ToString();
    }
    ASSERT_STR_CONTAINS(json, "__metadata");
  }

  ASSERT_OK(c.FetchURL(Substitute("http://$0/tracing/json/categories", addr),
                       &buf));
  ASSERT_STR_CONTAINS(buf.ToString(), "\"log\"");

  // Smoke test the pprof contention profiler handler.
  ASSERT_OK(c.FetchURL(Substitute("http://$0/pprof/contention?seconds=1", addr),
                       &buf));
  ASSERT_STR_CONTAINS(buf.ToString(), "discarded samples = 0");
#if defined(__linux__)
  // The executable name appears as part of the dump of /proc/self/maps, which
  // only exists on Linux.
  ASSERT_STR_CONTAINS(buf.ToString(), "tablet_server-test");
#endif
}
// Ensure that when a replica is in a failed / shutdown state, it returns an
// error for ConsensusState() requests.
TEST_F(TabletServerTest, TestFailedTabletsRejectConsensusState) {
  TSTabletManager* manager = mini_server_->server()->tablet_manager();
  scoped_refptr<TabletReplica> replica;
  ASSERT_TRUE(manager->LookupTablet(kTabletId, &replica));

  // Force the replica into the FAILED state before shutting it down.
  replica->SetError(Status::IOError("This error will leave the replica FAILED state at shutdown"));
  replica->Shutdown();
  ASSERT_EQ(tablet::FAILED, replica->state());

  // ConsensusState() on the failed replica must report IllegalState.
  auto consensus = replica->shared_consensus();
  ASSERT_TRUE(consensus);
  ConsensusStatePB cstate;
  const Status s = consensus->ConsensusState(&cstate);
  ASSERT_TRUE(s.IsIllegalState()) << s.ToString();
  ASSERT_STR_CONTAINS(s.ToString(), "Tablet replica is shutdown");
}
// Test that tablet replicas that get failed and deleted will eventually show
// up as failed tombstones on the web UI.
TEST_F(TabletServerTest, TestFailedTabletsOnWebUI) {
  TSTabletManager* manager = mini_server_->server()->tablet_manager();
  scoped_refptr<TabletReplica> replica;
  ASSERT_TRUE(manager->LookupTablet(kTabletId, &replica));

  // Fail the replica and shut it down so it ends up in the FAILED state.
  replica->SetError(Status::IOError("This error will leave the replica FAILED state at shutdown"));
  replica->Shutdown();
  ASSERT_EQ(tablet::FAILED, replica->state());

  // Now delete the replica and leave it tombstoned, e.g. as if the failed
  // replica were deleted.
  TabletServerErrorPB::Code error_code;
  ASSERT_OK(manager->DeleteTablet(kTabletId, tablet::TABLET_DATA_TOMBSTONED,
                                  boost::none, &error_code));

  // The webui should have a record of a FAILED and tombstoned tablet replica.
  EasyCurl curl;
  faststring page;
  const string root = mini_server_->bound_http_addr().ToString();
  ASSERT_OK(curl.FetchURL(Substitute("http://$0/tablets", root), &page));
  ASSERT_STR_CONTAINS(page.ToString(), "FAILED (TABLET_DATA_TOMBSTONED)");
}
// Test that tombstoned tablets are displayed correctly in the web ui:
// - After restart, status message of "Tombstoned" instead of "Tablet initializing...".
// - No consensus configuration.
TEST_F(TabletServerTest, TestTombstonedTabletOnWebUI) {
  TSTabletManager* manager = mini_server_->server()->tablet_manager();
  TabletServerErrorPB::Code error_code;
  ASSERT_OK(manager->DeleteTablet(kTabletId,
                                  tablet::TABLET_DATA_TOMBSTONED,
                                  boost::none,
                                  &error_code));

  // Restart the server. We drop the tablet_replica_ reference since it becomes
  // invalid when the server shuts down.
  tablet_replica_.reset();
  mini_server_->Shutdown();
  ASSERT_OK(mini_server_->Restart());
  ASSERT_OK(mini_server_->WaitStarted());

  EasyCurl curl;
  faststring page;
  const string root = mini_server_->bound_http_addr().ToString();
  ASSERT_OK(curl.FetchURL(Substitute("http://$0/tablets", root), &page));

  // Check the page contains a tombstoned tablet, and its state is not
  // "Tablet initializing...".
  const string page_text = page.ToString();
  ASSERT_STR_CONTAINS(page_text, "TABLET_DATA_TOMBSTONED");
  ASSERT_STR_NOT_CONTAINS(page_text, "Tablet initializing...");
  // Since the consensus config shouldn't be displayed, the page should not
  // contain the server's RPC address.
  ASSERT_STR_NOT_CONTAINS(page_text, mini_server_->bound_rpc_addr().ToString());
}
// Parameterized on the block manager type ("file" / "log").
class TabletServerDiskSpaceTest : public TabletServerTestBase,
                                  public testing::WithParamInterface<string> {
 public:
  void SetUp() override {
    // The test parameter selects which block manager implementation to run
    // against; it must be set before the server starts.
    FLAGS_block_manager = GetParam();
    NO_FATALS(TabletServerTestBase::SetUp());
    NO_FATALS(StartTabletServer(/*num_data_dirs=*/kNumDirs));
  }

 protected:
  // One more directory than a tablet's dir group uses, so a full group has
  // somewhere to expand into.
  const int kNumDirs = FLAGS_fs_target_data_dirs_per_tablet + 1;
};
// Test that when there isn't enough space in a tablet's data directory group
// and there are additional directories available, directories are added to the
// group, and the new groups are persisted to disk.
TEST_P(TabletServerDiskSpaceTest, TestFullGroupAddsDir) {
  DataDirManager* dd_manager = mini_server_->server()->fs_manager()->dd_manager();
  vector<string> dir_group;
  ASSERT_OK(dd_manager->FindDataDirsByTabletId(kTabletId, &dir_group));
  ASSERT_EQ(kNumDirs - 1, dir_group.size());

  // Make every directory currently in the group report itself as 100% full
  // (and disable caching of the free-space check so it takes effect
  // immediately).
  FLAGS_fs_data_dirs_available_space_cache_seconds = 0;
  FLAGS_env_inject_full_globs = JoinStrings(dir_group, ",");
  FLAGS_env_inject_full = 1.0;

  // Insert some data and flush. This should lead to the creation of a block,
  // and the addition of a new directory in the dir group.
  unordered_set<string> old_group(dir_group.begin(), dir_group.end());
  NO_FATALS(InsertTestRowsRemote(1, 1));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  ASSERT_OK(dd_manager->FindDataDirsByTabletId(kTabletId, &dir_group));
  ASSERT_EQ(kNumDirs, dir_group.size());

  // Grab the newly added directory and check that failing it means the tablet
  // is in a failed directory.
  string new_dir;
  for (const auto& dir : dir_group) {
    if (!ContainsKey(old_group, dir)) {
      new_dir = dir;
      break;
    }
  }
  ASSERT_FALSE(new_dir.empty());
  string new_uuid;
  ASSERT_TRUE(dd_manager->FindUuidByRoot(DirName(new_dir), &new_uuid));
  dd_manager->MarkDirFailedByUuid(new_uuid);
  ASSERT_TRUE(dd_manager->IsTabletInFailedDir(kTabletId));

  // The group should be the updated even after restarting the tablet server.
  NO_FATALS(ShutdownAndRebuildTablet(kNumDirs));
  dd_manager = mini_server_->server()->fs_manager()->dd_manager();
  ASSERT_OK(dd_manager->FindDataDirsByTabletId(kTabletId, &dir_group));
  ASSERT_EQ(kNumDirs, dir_group.size());
  // Repeat the fail-the-new-dir check against the rebuilt manager to prove
  // the expanded group was persisted.
  ASSERT_TRUE(dd_manager->FindUuidByRoot(DirName(new_dir), &new_uuid));
  dd_manager->MarkDirFailedByUuid(new_uuid);
  ASSERT_TRUE(dd_manager->IsTabletInFailedDir(kTabletId));
}
// Run the disk-space tests once per available block manager implementation.
INSTANTIATE_TEST_CASE_P(BlockManager, TabletServerDiskSpaceTest,
    ::testing::ValuesIn(BlockManager::block_manager_types()));
// Kind of failure injected by TabletServerDiskErrorTest (see the switch in
// TestRandomOpSequence: DISK_FAILURE sets --env_inject_eio,
// CFILE_CORRUPTION sets --cfile_inject_corruption).
enum class ErrorType {
  DISK_FAILURE,
  CFILE_CORRUPTION
};
// Parameterized on the kind of error to inject (disk EIO vs cfile
// corruption).
class TabletServerDiskErrorTest : public TabletServerTestBase,
                                  public testing::WithParamInterface<ErrorType> {
 public:
  void SetUp() override {
    const int kNumDirs = 5;
    NO_FATALS(TabletServerTestBase::SetUp());

    // Ensure the server will flush frequently: enable the maintenance
    // manager, give it one thread per data dir, and use tiny flush
    // thresholds.
    FLAGS_enable_maintenance_manager = true;
    FLAGS_maintenance_manager_num_threads = kNumDirs;
    FLAGS_flush_threshold_mb = 1;
    FLAGS_flush_threshold_secs = 1;

    // Create a brand new tablet server with multiple disks, ensuring it can
    // survive at least one disk failure.
    NO_FATALS(StartTabletServer(/*num_data_dirs=*/ kNumDirs));
  }
};
// Run the disk-error tests once for each kind of injected failure.
INSTANTIATE_TEST_CASE_P(ErrorType, TabletServerDiskErrorTest, ::testing::Values(
    ErrorType::DISK_FAILURE, ErrorType::CFILE_CORRUPTION));
// Test that applies random write operations to a tablet with a high
// maintenance manager load and a non-zero error injection rate.
TEST_P(TabletServerDiskErrorTest, TestRandomOpSequence) {
  if (!AllowSlowTests()) {
    LOG(INFO) << "Not running slow test. To run, use KUDU_ALLOW_SLOW_TESTS=1";
    return;
  }
  typedef vector<RowOperationsPB::Type> OpTypeList;
  // Legal operations depending on whether the key already exists in the
  // tablet.
  const OpTypeList kOpsIfKeyNotPresent = { RowOperationsPB::INSERT, RowOperationsPB::UPSERT };
  const OpTypeList kOpsIfKeyPresent = { RowOperationsPB::UPSERT, RowOperationsPB::UPDATE,
                                        RowOperationsPB::DELETE };
  const int kMaxKey = 100000;

  if (GetParam() == ErrorType::DISK_FAILURE) {
    // Set these way up-front so we can change a single value to actually start
    // injecting errors. Inject errors into all data dirs but one.
    FLAGS_crash_on_eio = false;
    // Iterator-pair construction: all data roots except the first.
    const vector<string> failed_dirs = { mini_server_->options()->fs_opts.data_roots.begin() + 1,
                                         mini_server_->options()->fs_opts.data_roots.end() };
    FLAGS_env_inject_eio_globs = JoinStrings(JoinPathSegmentsV(failed_dirs, "**"), ",");
  }

  // Tracks which keys are currently present in the tablet, so we know which
  // op types are legal for a given key.
  set<int> keys;
  const auto GetRandomString = [] {
    return StringPrintf("%d", rand() % kMaxKey);
  };

  // Perform a random op (insert, update, upsert, or delete).
  const auto PerformOp = [&] {
    // Set up the request.
    WriteRequestPB req;
    req.set_tablet_id(kTabletId);
    RETURN_NOT_OK(SchemaToPB(schema_, req.mutable_schema()));

    // Set up the other state.
    WriteResponsePB resp;
    RpcController controller;
    RowOperationsPB::Type op_type;
    int key = rand() % kMaxKey;
    auto key_iter = keys.find(key);
    if (key_iter == keys.end()) {
      // The key is not yet present: we can insert or upsert.
      op_type = kOpsIfKeyNotPresent[rand() % kOpsIfKeyNotPresent.size()];
    } else {
      // ... else we can do anything but insert.
      op_type = kOpsIfKeyPresent[rand() % kOpsIfKeyPresent.size()];
    }

    // Add the op to the request.
    if (op_type != RowOperationsPB::DELETE) {
      AddTestRowToPB(op_type, schema_, key, key, GetRandomString(),
                     req.mutable_row_operations());
      keys.insert(key);
    } else {
      AddTestKeyToPB(RowOperationsPB::DELETE, schema_, key, req.mutable_row_operations());
      keys.erase(key_iter);
    }

    // Finally, write to the server and log the response.
    RETURN_NOT_OK_PREPEND(proxy_->Write(req, &resp, &controller), "Failed to write");
    LOG(INFO) << "Tablet server responded with: " << SecureDebugString(resp);
    return resp.has_error() ? StatusFromPB(resp.error().status()) : Status::OK();
  };

  // Perform some arbitrarily large number of ops, with some pauses to encourage flushes.
  for (int i = 0; i < 500; i++) {
    // Note: sleeps on 9 out of every 10 iterations (whenever i % 10 != 0).
    if (i % 10) {
      SleepFor(MonoDelta::FromMilliseconds(100));
    }
    ASSERT_OK(PerformOp());
  }

  // At this point, a bunch of operations have gone through successfully. Start
  // injecting errors.
  switch (GetParam()) {
    case ErrorType::DISK_FAILURE:
      FLAGS_env_inject_eio = 0.01;
      break;
    case ErrorType::CFILE_CORRUPTION:
      FLAGS_cfile_inject_corruption = 0.01;
      break;
  }

  // The tablet will eventually be failed and will not be able to accept
  // updates. Keep on inserting until that happens.
  ASSERT_EVENTUALLY([&] {
    Status s;
    for (int i = 0; i < 150 && s.ok(); i++) {
      s = PerformOp();
    }
    ASSERT_FALSE(s.ok());
  });
  LOG(INFO) << "Failure was caught by an op!";

  // The background maintenance threads should eventually fail the replica
  // itself.
  ASSERT_EVENTUALLY([&] {
    ASSERT_EQ(tablet::FAILED, tablet_replica_->state());
  });
  LOG(INFO) << "Tablet was successfully failed";
}
// Regression test for KUDU-2635.
TEST_F(TabletServerTest, TestEIODuringDelete) {
  // Delete some blocks, but don't always delete them persistently so we're
  // left with some orphaned blocks in the orphaned blocks list. We'll do this
  // by injecting some EIOs.
  NO_FATALS(InsertTestRowsRemote(1, 1));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  NO_FATALS(UpdateTestRowRemote(1, 2));
  ASSERT_OK(tablet_replica_->tablet()->FlushAllDMSForTests());

  FsManager* fs = mini_server_->server()->fs_manager();
  FLAGS_env_inject_eio_globs = JoinPathSegments(fs->GetDataRootDirs()[0], "**");
  FLAGS_env_inject_eio = 0.5;
  // Best-effort: the compaction may fail due to the injected EIOs, which is
  // the point of the test, so its status is deliberately ignored.
  ignore_result(tablet_replica_->tablet()->MajorCompactAllDeltaStoresForTests());

  // Delete the tablet while still injecting failures. Even if we aren't
  // successful in deleting our orphaned blocks list, we shouldn't crash.
  DeleteTabletRequestPB delete_req;
  DeleteTabletResponsePB delete_resp;
  delete_req.set_dest_uuid(fs->uuid());
  delete_req.set_tablet_id(kTabletId);
  delete_req.set_delete_type(tablet::TABLET_DATA_DELETED);
  RpcController rpc;
  ASSERT_OK(admin_proxy_->DeleteTablet(delete_req, &delete_resp, &rpc));
}
// Test that adding directories enables tablet placement in the new
// directories, and that removing directories fails tablets that are striped
// across the removed directories.
TEST_F(TabletServerTest, TestAddRemoveDirectory) {
  // Start with multiple data dirs so the dirs are suffixed with numbers, and
  // so when we remove a data dir, we'll be using the same set of dirs.
  NO_FATALS(ShutdownAndRebuildTablet(/*num_data_dirs*/2));
  const char* kFooTablet1 = "fffffffffffffffffffffffffffffff1";
  ASSERT_OK(mini_server_->AddTestTablet("footable", kFooTablet1, schema_));
  ASSERT_OK(WaitForTabletRunning(kFooTablet1));
  // Shut down and restart with a new directory. This is allowed, and the
  // tablet server will be able to use the new directory if we create a new
  // tablet.
  NO_FATALS(ShutdownAndRebuildTablet(/*num_data_dirs*/3));
  const char* kFooTablet2 = "fffffffffffffffffffffffffffffff2";
  ASSERT_OK(mini_server_->AddTestTablet("footable", kFooTablet2, schema_));
  ASSERT_OK(WaitForTabletRunning(kFooTablet2));
  // Now open up again with our original two directories. The second tablet
  // should fail because it should have been striped across the third
  // directory. The first tablet should be unaffected.
  NO_FATALS(ShutdownAndRebuildTablet(/*num_data_dirs*/2));
  ASSERT_EVENTUALLY([&] {
    scoped_refptr<TabletReplica> replica1;
    ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kFooTablet1, &replica1));
    ASSERT_EQ(TabletStatePB::RUNNING, replica1->state());
    scoped_refptr<TabletReplica> replica2;
    ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kFooTablet2, &replica2));
    ASSERT_EQ(TabletStatePB::FAILED, replica2->state());
  });
}
// Fixture that runs the tablet server with the maintenance manager enabled
// and permanent (simulated) memory pressure, so tests can exercise how
// maintenance ops are scheduled while under pressure.
class TabletServerMaintenanceMemoryPressureTest : public TabletServerTestBase {
 public:
  void SetUp() override {
    NO_FATALS(TabletServerTestBase::SetUp());
    FLAGS_enable_maintenance_manager = true;
    // Make time-based flushes eligible quickly.
    FLAGS_flush_threshold_secs = 1;
    // A threshold of 0 means the process always considers itself under
    // memory pressure.
    FLAGS_memory_pressure_percentage = 0;
    // For the sake of easier setup, slow down our maintenance polling interval.
    FLAGS_maintenance_manager_polling_interval_ms = 1000;
    // While setting up rowsets, disable compactions and flushing. Do this
    // before doing anything so we can have tighter control over the flushing
    // of our rowsets.
    FLAGS_enable_rowset_compaction = false;
    FLAGS_enable_flush_deltamemstores = false;
    FLAGS_enable_flush_memrowset = false;
    NO_FATALS(StartTabletServer(/*num_data_dirs=*/1));
  }
};
// Regression test for KUDU-3002. Previously, when under memory pressure, we
// might starve older (usually small) DMS flushes in favor of (usually larger)
// MRS flushes.
TEST_F(TabletServerMaintenanceMemoryPressureTest, TestDontStarveDMSWhileUnderMemoryPressure) {
  // First, set up a rowset with a delta.
  NO_FATALS(InsertTestRowsDirect(1, 1));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  NO_FATALS(UpdateTestRowRemote(1, 2));
  // Roll onto a new log segment so our DMS anchors some WAL bytes.
  ASSERT_OK(tablet_replica_->log()->WaitUntilAllFlushed());
  ASSERT_OK(tablet_replica_->log()->AllocateSegmentAndRollOverForTests());
  // Now start inserting to the tablet so every time we pick a maintenance op,
  // we'll have a sizeable MRS.
  std::atomic<bool> keep_inserting(true);
  thread insert_thread([&] {
    int cur_row = 2;
    while (keep_inserting) {
      // Ignore TSAN warnings that complain about a race in gtest between this
      // check for fatal failures and the check for fatal failures in the below
      // AssertEventually.
      debug::ScopedTSANIgnoreReadsAndWrites ignore_tsan;
      NO_FATALS(InsertTestRowsDirect(cur_row++, 1));
    }
  });
  // Stop and join the inserter on scope exit, even if an assertion below
  // fails first.
  SCOPED_CLEANUP({
    keep_inserting = false;
    insert_thread.join();
  });
  // Wait a bit for the MRS to build up and then enable flushing.
  SleepFor(MonoDelta::FromSeconds(1));
  FLAGS_enable_flush_memrowset = true;
  FLAGS_enable_flush_deltamemstores = true;
  // Despite always having a large MRS, we should eventually flush the DMS,
  // since it anchors WALs.
  scoped_refptr<Histogram> dms_flushes =
      METRIC_flush_dms_duration.Instantiate(tablet_replica_->tablet()->GetMetricEntity());
  ASSERT_EVENTUALLY([&] {
    ASSERT_EQ(1, dms_flushes->histogram()->TotalCount());
  });
}
// Regression test for KUDU-2929. Previously, when under memory pressure, we
// would never compact, even if there were something else to do. We'll simulate
// this by flushing some overlapping rowsets and then making sure we compact.
TEST_F(TabletServerMaintenanceMemoryPressureTest, TestCompactWhileUnderMemoryPressure) {
  // Insert sets of overlapping rows.
  // Since we're under memory pressure, we'll flush as soon as we're able.
  NO_FATALS(InsertTestRowsDirect(1, 1));
  NO_FATALS(InsertTestRowsDirect(3, 1));
  FLAGS_enable_flush_memrowset = true;
  ASSERT_EVENTUALLY([&] {
    ASSERT_EQ(1, tablet_replica_->tablet()->num_rowsets());
  });
  // Key 2 falls inside the first rowset's key range, so flushing it creates a
  // second, overlapping rowset — a compaction candidate.
  NO_FATALS(InsertTestRowsDirect(2, 1));
  ASSERT_EVENTUALLY([&] {
    ASSERT_EQ(2, tablet_replica_->tablet()->num_rowsets());
  });
  // Even though we're under memory pressure, we should see compactions because
  // there's nothing else to do.
  FLAGS_enable_rowset_compaction = true;
  ASSERT_EVENTUALLY([&] {
    ASSERT_EQ(1, tablet_replica_->tablet()->num_rowsets());
  });
}
// End-to-end coverage of the Write RPC insert path: schema validation, empty
// batches, successful inserts, per-row errors, the rows_inserted metric, and
// durability across a restart (with clock monotonicity across replay).
TEST_F(TabletServerTest, TestInsert) {
  WriteRequestPB req;
  req.set_tablet_id(kTabletId);
  WriteResponsePB resp;
  RpcController controller;
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  scoped_refptr<Counter> rows_inserted =
      METRIC_rows_inserted.Instantiate(tablet->tablet()->GetMetricEntity());
  ASSERT_EQ(0, rows_inserted->value());
  // Drop our reference to the replica so it isn't pinned by this test when
  // the server is shut down and rebuilt below.
  tablet.reset();
  // Send a bad insert which has an empty schema. This should result
  // in an error.
  {
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1234, 5678, "hello world via RPC",
                   req.mutable_row_operations());
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::MISMATCHED_SCHEMA, resp.error().code());
    Status s = StatusFromPB(resp.error().status());
    EXPECT_TRUE(s.IsInvalidArgument());
    ASSERT_STR_CONTAINS(s.ToString(),
                        "Client missing required column: key INT32 NOT NULL");
    req.clear_row_operations();
  }
  // Send an empty request with the correct schema.
  // This should succeed and do nothing.
  {
    controller.Reset();
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
    req.clear_row_operations();
  }
  // Send an actual row insert.
  {
    controller.Reset();
    RowOperationsPB* data = req.mutable_row_operations();
    data->Clear();
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1234, 5678,
                   "hello world via RPC", data);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
    req.clear_row_operations();
    ASSERT_EQ(1, rows_inserted->value());
  }
  // Send a batch with multiple rows, one of which is a duplicate of
  // the above insert, and one of which has a too-large value.
  // This should generate two errors into per_row_errors.
  {
    // 100KiB exceeds the server's maximum cell size, triggering a per-row
    // error rather than a request-level one.
    const string kTooLargeValue(100 * 1024, 'x');
    controller.Reset();
    RowOperationsPB* data = req.mutable_row_operations();
    data->Clear();
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1, 1, "ceci n'est pas une dupe", data);
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 2, 1, "also not a dupe key", data);
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1234, 1, "I am a duplicate key", data);
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 3, 1, kTooLargeValue, data);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(3, rows_inserted->value()); // This counter only counts successful inserts.
    ASSERT_EQ(2, resp.per_row_errors().size());
    // Check the duplicate key error.
    ASSERT_EQ(2, resp.per_row_errors().Get(0).row_index());
    Status s = StatusFromPB(resp.per_row_errors().Get(0).error());
    ASSERT_STR_CONTAINS(s.ToString(), "Already present");
    // Check the value-too-large error.
    ASSERT_EQ(3, resp.per_row_errors().Get(1).row_index());
    s = StatusFromPB(resp.per_row_errors().Get(1).error());
    ASSERT_STR_CONTAINS(s.ToString(), "Invalid argument");
  }
  // get the clock's current timestamp
  Timestamp now_before = mini_server_->server()->clock()->Now();
  // Release the metric ref before tearing down the server it belongs to.
  rows_inserted = nullptr;
  NO_FATALS(ShutdownAndRebuildTablet());
  VerifyRows(schema_, { KeyValue(1, 1), KeyValue(2, 1), KeyValue(1234, 5678) });
  // get the clock's timestamp after replay
  Timestamp now_after = mini_server_->server()->clock()->Now();
  // make sure 'now_after' is greater than or equal to 'now_before'
  ASSERT_GE(now_after.value(), now_before.value());
}
// Verifies CLIENT_PROPAGATED external consistency: a write carrying a
// propagated timestamp from the future must bump the server's clock so the
// write timestamp shares the client's physical time with an incremented
// logical component.
TEST_F(TabletServerTest, TestExternalConsistencyModes_ClientPropagated) {
  WriteRequestPB req;
  req.set_tablet_id(kTabletId);
  WriteResponsePB resp;
  RpcController controller;
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(
      mini_server_->server()->tablet_manager()->LookupTablet(kTabletId,
                                                             &tablet));
  scoped_refptr<Counter> rows_inserted =
      METRIC_rows_inserted.Instantiate(tablet->tablet()->GetMetricEntity());
  ASSERT_EQ(0, rows_inserted->value());
  // get the current time
  Timestamp current = mini_server_->server()->clock()->Now();
  // advance current to some time in the future. we do 5 secs to make
  // sure this timestamp will still be in the future when it reaches the
  // server.
  current = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(current) + 5000000);
  // Send an actual row insert.
  ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
  AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1234, 5678, "hello world via RPC",
                 req.mutable_row_operations());
  // set the external consistency mode and the timestamp
  req.set_external_consistency_mode(CLIENT_PROPAGATED);
  req.set_propagated_timestamp(current.ToUint64());
  SCOPED_TRACE(SecureDebugString(req));
  ASSERT_OK(proxy_->Write(req, &resp, &controller));
  SCOPED_TRACE(SecureDebugString(resp));
  ASSERT_FALSE(resp.has_error());
  req.clear_row_operations();
  ASSERT_EQ(1, rows_inserted->value());
  // make sure the server returned a write timestamp where only
  // the logical value was increased since it should have updated
  // its clock with the client's value.
  Timestamp write_timestamp(resp.timestamp());
  ASSERT_EQ(HybridClock::GetPhysicalValueMicros(current),
            HybridClock::GetPhysicalValueMicros(write_timestamp));
  ASSERT_EQ(HybridClock::GetLogicalValue(current) + 1,
            HybridClock::GetLogicalValue(write_timestamp));
}
// Verifies COMMIT_WAIT external consistency: the server must delay the write
// until the assigned timestamp is guaranteed to be in the past, so the write
// timestamp exceeds the pre-write clock reading and the RPC takes at least
// the clock's error bound to complete.
TEST_F(TabletServerTest, TestExternalConsistencyModes_CommitWait) {
  WriteRequestPB req;
  req.set_tablet_id(kTabletId);
  WriteResponsePB resp;
  RpcController controller;
  // COMMIT_WAIT requires HybridClock-specific APIs (NowWithError).
  HybridClock* hclock = down_cast<HybridClock*, Clock>(mini_server_->server()->clock());
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(
      mini_server_->server()->tablet_manager()->LookupTablet(kTabletId,
                                                             &tablet));
  scoped_refptr<Counter> rows_inserted =
      METRIC_rows_inserted.Instantiate(
          tablet->tablet()->GetMetricEntity());
  ASSERT_EQ(0, rows_inserted->value());
  // get current time, with and without error
  Timestamp now_before;
  uint64_t error_before;
  hclock->NowWithError(&now_before, &error_before);
  uint64_t now_before_usec = HybridClock::GetPhysicalValueMicros(now_before);
  LOG(INFO) << "Submitting write with commit wait at: " << now_before_usec << " us +- "
            << error_before << " us";
  // Send an actual row insert.
  ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
  AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1234, 5678, "hello world via RPC",
                 req.mutable_row_operations());
  // set the external consistency mode to COMMIT_WAIT
  req.set_external_consistency_mode(COMMIT_WAIT);
  SCOPED_TRACE(SecureDebugString(req));
  ASSERT_OK(proxy_->Write(req, &resp, &controller));
  SCOPED_TRACE(SecureDebugString(resp));
  ASSERT_FALSE(resp.has_error());
  req.clear_row_operations();
  ASSERT_EQ(1, rows_inserted->value());
  // Two things must have happened.
  // 1 - The write timestamp must be greater than 'now_before'
  // 2 - The write must have taken at least 'error_before' to complete (two
  //     times more in average).
  Timestamp now_after;
  uint64_t error_after;
  hclock->NowWithError(&now_after, &error_after);
  Timestamp write_timestamp(resp.timestamp());
  uint64_t write_took = HybridClock::GetPhysicalValueMicros(now_after) -
      HybridClock::GetPhysicalValueMicros(now_before);
  LOG(INFO) << "Write applied at: " << HybridClock::GetPhysicalValueMicros(write_timestamp)
            << " us, current time: " << HybridClock::GetPhysicalValueMicros(now_after)
            << " us, write took: " << write_took << " us";
  ASSERT_GT(write_timestamp.value(), now_before.value());
  // see HybridClockTest.TestWaitUntilAfter_TestCase2
  if (error_after >= error_before) {
    ASSERT_GE(write_took, 2 * error_before);
  } else {
    ASSERT_GE(write_took, error_before);
  }
}
// Exercises the full insert/update/delete cycle over the Write RPC, checking
// per-row errors, the rows_inserted/updated/deleted metrics, mixed batches,
// and durability of the final state across a restart.
TEST_F(TabletServerTest, TestInsertAndMutate) {
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  scoped_refptr<Counter> rows_inserted =
      METRIC_rows_inserted.Instantiate(tablet->tablet()->GetMetricEntity());
  scoped_refptr<Counter> rows_updated =
      METRIC_rows_updated.Instantiate(tablet->tablet()->GetMetricEntity());
  scoped_refptr<Counter> rows_deleted =
      METRIC_rows_deleted.Instantiate(tablet->tablet()->GetMetricEntity());
  ASSERT_EQ(0, rows_inserted->value());
  ASSERT_EQ(0, rows_updated->value());
  ASSERT_EQ(0, rows_deleted->value());
  // Drop our reference to the replica so it isn't pinned across the server
  // restart at the end of the test.
  tablet.reset();
  RpcController controller;
  // Insert three rows.
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    req.set_tablet_id(kTabletId);
    RowOperationsPB* data = req.mutable_row_operations();
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1, 1, "original1", data);
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 2, 2, "original2", data);
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 3, 3, "original3", data);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(0, resp.per_row_errors().size());
    ASSERT_EQ(3, rows_inserted->value());
    ASSERT_EQ(0, rows_updated->value());
    controller.Reset();
  }
  // Try and mutate the rows inserted above
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    req.set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 1, 2, "mutation1",
                   req.mutable_row_operations());
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 2, 3, "mutation2",
                   req.mutable_row_operations());
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 3, 4, "mutation3",
                   req.mutable_row_operations());
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(0, resp.per_row_errors().size());
    ASSERT_EQ(3, rows_inserted->value());
    ASSERT_EQ(3, rows_updated->value());
    controller.Reset();
  }
  // Try and mutate a non existent row key (should get an error)
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    req.set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 1234, 2, "mutated",
                   req.mutable_row_operations());
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(1, resp.per_row_errors().size());
    // The failed update must not bump the metric.
    ASSERT_EQ(3, rows_updated->value());
    controller.Reset();
  }
  // Try and delete 1 row
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    req.set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    AddTestKeyToPB(RowOperationsPB::DELETE, schema_, 1, req.mutable_row_operations());
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(0, resp.per_row_errors().size());
    ASSERT_EQ(3, rows_updated->value());
    ASSERT_EQ(1, rows_deleted->value());
    controller.Reset();
  }
  // Now try and mutate a row we just deleted, we should get an error
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    req.set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 1, 2, "mutated1",
                   req.mutable_row_operations());
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(1, resp.per_row_errors().size());
    controller.Reset();
  }
  ASSERT_EQ(3, rows_inserted->value());
  ASSERT_EQ(3, rows_updated->value());
  // At this point, we have two rows left (row key 2 and 3).
  VerifyRows(schema_, { KeyValue(2, 3), KeyValue(3, 4) });
  // Do a mixed operation (some insert, update, and delete, some of which fail)
  {
    // 100KiB exceeds the maximum cell size, so op 4 below fails per-row.
    const string kTooLargeValue(100 * 1024, 'x');
    WriteRequestPB req;
    WriteResponsePB resp;
    req.set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
    RowOperationsPB* ops = req.mutable_row_operations();
    // op 0: Mutate row 1, which doesn't exist. This should fail.
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 1, 3, "mutate_should_fail", ops);
    // op 1: Insert a new row 4 (succeeds)
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 4, 4, "new row 4", ops);
    // op 2: Delete a non-existent row 5 (should fail)
    AddTestKeyToPB(RowOperationsPB::DELETE, schema_, 5, ops);
    // op 3: Insert a new row 6 (succeeds)
    AddTestRowToPB(RowOperationsPB::INSERT, schema_, 6, 6, "new row 6", ops);
    // op 4: update a row with a too-large value (fail)
    AddTestRowToPB(RowOperationsPB::UPDATE, schema_, 4, 6, kTooLargeValue, ops);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error()) << SecureShortDebugString(resp);
    ASSERT_EQ(3, resp.per_row_errors().size());
    EXPECT_EQ("row_index: 0 error { code: NOT_FOUND message: \"key not found\" }",
              SecureShortDebugString(resp.per_row_errors(0)));
    EXPECT_EQ("row_index: 2 error { code: NOT_FOUND message: \"key not found\" }",
              SecureShortDebugString(resp.per_row_errors(1)));
    EXPECT_EQ("row_index: 4 error { code: INVALID_ARGUMENT message: "
              "\"value too large for column \\'string_val\\' (102400 bytes, "
              "maximum is 65536 bytes)\" }",
              SecureShortDebugString(resp.per_row_errors(2)));
    controller.Reset();
  }
  // get the clock's current timestamp
  Timestamp now_before = mini_server_->server()->clock()->Now();
  // Release metric refs before tearing down the server they belong to.
  rows_inserted = nullptr;
  rows_updated = nullptr;
  NO_FATALS(ShutdownAndRebuildTablet());
  VerifyRows(schema_, { KeyValue(2, 3), KeyValue(3, 4), KeyValue(4, 4), KeyValue(6, 6) });
  // get the clock's timestamp after replay
  Timestamp now_after = mini_server_->server()->clock()->Now();
  // make sure 'now_after' is greater than or equal to 'now_before'
  ASSERT_GE(now_after.value(), now_before.value());
}
// Try sending write requests that do not contain write operations. Make sure
// we get an error that makes sense.
TEST_F(TabletServerTest, TestInvalidWriteRequest_WrongOpType) {
  // Helper that issues a single-row write using 'bad_type' as the operation
  // type and hands back the server's response.
  const auto attempt_write = [&] (RowOperationsPB::Type bad_type) {
    WriteRequestPB write_req;
    WriteResponsePB write_resp;
    RpcController rpc;
    write_req.set_tablet_id(kTabletId);
    CHECK_OK(SchemaToPB(schema_, write_req.mutable_schema()));
    RowOperationsPB* ops = write_req.mutable_row_operations();
    AddTestRowToPB(bad_type, schema_, 1234, 5678, "foo", ops);
    SCOPED_TRACE(SecureDebugString(write_req));
    CHECK_OK(proxy_->Write(write_req, &write_resp, &rpc));
    return write_resp;
  };
  // Send a bunch of op types that are inappropriate for write requests: all
  // of these are partition-boundary markers, not row operations.
  const vector<RowOperationsPB::Type> wrong_op_types = {
    RowOperationsPB::SPLIT_ROW,
    RowOperationsPB::RANGE_LOWER_BOUND,
    RowOperationsPB::RANGE_UPPER_BOUND,
    RowOperationsPB::EXCLUSIVE_RANGE_LOWER_BOUND,
    RowOperationsPB::INCLUSIVE_RANGE_UPPER_BOUND,
  };
  for (const auto& bad_type : wrong_op_types) {
    WriteResponsePB write_resp = attempt_write(bad_type);
    SCOPED_TRACE(SecureDebugString(write_resp));
    ASSERT_TRUE(write_resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::MISMATCHED_SCHEMA, write_resp.error().code());
    ASSERT_EQ(AppStatusPB::INVALID_ARGUMENT, write_resp.error().status().code());
    ASSERT_STR_CONTAINS(write_resp.error().status().message(),
                        "Invalid write operation type");
  }
  {
    // Do the same for UNKNOWN, which is an unexpected operation type in all
    // cases, and thus results in a different error message.
    WriteResponsePB write_resp = attempt_write(RowOperationsPB::UNKNOWN);
    SCOPED_TRACE(SecureDebugString(write_resp));
    ASSERT_TRUE(write_resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::MISMATCHED_SCHEMA, write_resp.error().code());
    ASSERT_EQ(AppStatusPB::NOT_SUPPORTED, write_resp.error().status().code());
    ASSERT_STR_CONTAINS(write_resp.error().status().message(),
                        "Unknown row operation type");
  }
}
// Test that passing a schema with fields not present in the tablet schema
// throws an exception.
TEST_F(TabletServerTest, TestInvalidWriteRequest_BadSchema) {
  // Build a client schema that adds one column the tablet doesn't have; one
  // copy with column IDs (never legal in a client request) and one without.
  SchemaBuilder schema_builder(schema_);
  ASSERT_OK(schema_builder.AddColumn("col_doesnt_exist", INT32));
  Schema bad_schema_with_ids = schema_builder.Build();
  Schema bad_schema = schema_builder.BuildWithoutIds();
  // Send a row insert with an extra column
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    RpcController controller;
    req.set_tablet_id(kTabletId);
    RowOperationsPB* data = req.mutable_row_operations();
    ASSERT_OK(SchemaToPB(bad_schema, req.mutable_schema()));
    KuduPartialRow row(&bad_schema);
    CHECK_OK(row.SetInt32("key", 1234));
    CHECK_OK(row.SetInt32("int_val", 5678));
    CHECK_OK(row.SetStringCopy("string_val", "hello world via RPC"));
    CHECK_OK(row.SetInt32("col_doesnt_exist", 91011));
    RowOperationsPBEncoder enc(data);
    enc.Add(RowOperationsPB::INSERT, row);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::MISMATCHED_SCHEMA, resp.error().code());
    ASSERT_STR_CONTAINS(resp.error().status().message(),
                        "Client provided column col_doesnt_exist INT32 NOT NULL"
                        " not present in tablet");
  }
  // Send a row mutation with an extra column and IDs
  {
    WriteRequestPB req;
    WriteResponsePB resp;
    RpcController controller;
    req.set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToPB(bad_schema_with_ids, req.mutable_schema()));
    AddTestKeyToPB(RowOperationsPB::UPDATE, bad_schema_with_ids, 1,
                   req.mutable_row_operations());
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    // Column IDs in a client schema are rejected with a different error code.
    ASSERT_EQ(TabletServerErrorPB::INVALID_SCHEMA, resp.error().code());
    ASSERT_STR_CONTAINS(resp.error().status().message(),
                        "User requests should not have Column IDs");
  }
}
// Executes mutations each time a Tablet goes through a compaction/flush
// lifecycle hook. This allows to create mutations of all possible types
// deterministically. The purpose is to make sure such mutations are replayed
// correctly on tablet bootstrap.
//
// Each hook updates a distinct row key (1-7) to 10*key + iteration_, so the
// expected post-flush/compaction values are easy to enumerate in tests.
class MyCommonHooks : public Tablet::FlushCompactCommonHooks,
                      public Tablet::FlushFaultHooks,
                      public Tablet::CompactionFaultHooks {
 public:
  explicit MyCommonHooks(TabletServerTest* test)
      : test_(test),
        iteration_(0) {}
  // Issues a remote update of 'key' to 'new_int_val' through the owning test.
  Status DoHook(int32_t key, int32_t new_int_val) {
    test_->UpdateTestRowRemote(key, new_int_val);
    return Status::OK();
  }
  // This should go in pre-flush and get flushed
  virtual Status PostSwapNewMemRowSet() OVERRIDE {
    return DoHook(1, 10 + iteration_);
  }
  // This should go in after the flush, but before
  // the duplicating row set, i.e., this should appear as
  // a missed delta.
  virtual Status PostTakeMvccSnapshot() OVERRIDE {
    return DoHook(2, 20 + iteration_);
  }
  // This too should appear as a missed delta.
  virtual Status PostWriteSnapshot() OVERRIDE {
    return DoHook(3, 30 + iteration_);
  }
  // This should appear as a duplicated mutation
  virtual Status PostSwapInDuplicatingRowSet() OVERRIDE {
    return DoHook(4, 40 + iteration_);
  }
  // This too should appear as a duplicated mutation
  virtual Status PostReupdateMissedDeltas() OVERRIDE {
    return DoHook(5, 50 + iteration_);
  }
  // This should go into the new delta.
  virtual Status PostSwapNewRowSet() OVERRIDE {
    return DoHook(6, 60 + iteration_);
  }
  // This should go in pre-flush (only on compactions)
  virtual Status PostSelectIterators() OVERRIDE {
    return DoHook(7, 70 + iteration_);
  }
  // Bumps the value written by every hook; call between flush/compact rounds.
  void increment_iteration() {
    iteration_++;
  }
 protected:
  TabletServerTest* test_;  // not owned
  int iteration_;
};
// Tests performing mutations that are going to the initial MRS
// or to a DMS, when the MRS is flushed. This also tests that the
// log produced on recovery allows to re-recover the original state.
TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushing) {
  InsertTestRowsRemote(1, 7);
  // Install the mutation-generating hooks so that every stage of the flush
  // below produces an update that must be replayed on bootstrap.
  shared_ptr<MyCommonHooks> hooks = std::make_shared<MyCommonHooks>(this);
  auto* tablet = tablet_replica_->tablet();
  tablet->SetFlushHooksForTests(hooks);
  tablet->SetCompactionHooksForTests(hooks);
  tablet->SetFlushCompactCommonHooksForTests(hooks);
  ASSERT_OK(tablet->Flush());
  // Restart twice: the first bootstrap replays the log produced by the flush
  // above (recovery flushed no state, but produced a new log); the second
  // replays the log generated during the first recovery. Both must yield the
  // same rows.
  for (int restart = 0; restart < 2; restart++) {
    NO_FATALS(ShutdownAndRebuildTablet());
    VerifyRows(schema_, { KeyValue(1, 10),
                          KeyValue(2, 20),
                          KeyValue(3, 30),
                          KeyValue(4, 40),
                          KeyValue(5, 50),
                          KeyValue(6, 60),
                          // the last hook only fires on compaction
                          // so this isn't mutated
                          KeyValue(7, 7) });
  }
}
// Tests performing mutations that are going to a DMS or to the following
// DMS, when the initial one is flushed.
TEST_F(TabletServerTest, TestRecoveryWithMutationsWhileFlushingAndCompacting) {
  InsertTestRowsRemote(1, 7);
  shared_ptr<MyCommonHooks> hooks(new MyCommonHooks(this));
  tablet_replica_->tablet()->SetFlushHooksForTests(hooks);
  tablet_replica_->tablet()->SetCompactionHooksForTests(hooks);
  tablet_replica_->tablet()->SetFlushCompactCommonHooksForTests(hooks);
  // flush the first time
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  NO_FATALS(ShutdownAndRebuildTablet());
  // Iteration 0: each hook wrote 10*key + 0; key 7's hook only fires on
  // compaction, so it keeps its inserted value.
  VerifyRows(schema_, { KeyValue(1, 10),
                        KeyValue(2, 20),
                        KeyValue(3, 30),
                        KeyValue(4, 40),
                        KeyValue(5, 50),
                        KeyValue(6, 60),
                        KeyValue(7, 7) });
  hooks->increment_iteration();
  // set the hooks on the new tablet (the restart replaced the old instance)
  tablet_replica_->tablet()->SetFlushHooksForTests(hooks);
  tablet_replica_->tablet()->SetCompactionHooksForTests(hooks);
  tablet_replica_->tablet()->SetFlushCompactCommonHooksForTests(hooks);
  // insert an additional row so that we can flush
  InsertTestRowsRemote(8, 1);
  // flush an additional MRS so that we have two DiskRowSets and then compact
  // them making sure that mutations executed mid compaction are replayed as
  // expected
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  // Iteration 1: hooks wrote 10*key + 1 during this second flush.
  VerifyRows(schema_, { KeyValue(1, 11),
                        KeyValue(2, 21),
                        KeyValue(3, 31),
                        KeyValue(4, 41),
                        KeyValue(5, 51),
                        KeyValue(6, 61),
                        KeyValue(7, 7),
                        KeyValue(8, 8) });
  hooks->increment_iteration();
  ASSERT_OK(tablet_replica_->tablet()->Compact(Tablet::FORCE_COMPACT_ALL));
  // get the clock's current timestamp
  Timestamp now_before = mini_server_->server()->clock()->Now();
  // Shutdown the tserver and try and rebuild the tablet from the log
  // produced on recovery (recovery flushed no state, but produced a new
  // log).
  NO_FATALS(ShutdownAndRebuildTablet());
  // Iteration 2: the compaction fired every hook, including PostSelectIterators
  // which finally mutates key 7. Key 1's hook fires pre-flush only, so it
  // retains its iteration-1 value.
  VerifyRows(schema_, { KeyValue(1, 11),
                        KeyValue(2, 22),
                        KeyValue(3, 32),
                        KeyValue(4, 42),
                        KeyValue(5, 52),
                        KeyValue(6, 62),
                        KeyValue(7, 72),
                        KeyValue(8, 8) });
  // get the clock's timestamp after replay
  Timestamp now_after = mini_server_->server()->clock()->Now();
  // make sure 'now_after' is greater than or equal to 'now_before'
  ASSERT_GE(now_after.value(), now_before.value());
}
// Shorthand for NO_FATALS, used by the recovery regression tests below.
#define ANFF NO_FATALS
// Regression test for KUDU-176. Ensures that after a major delta compaction,
// restarting properly recovers the tablet.
TEST_F(TabletServerTest, TestKUDU_176_RecoveryAfterMajorDeltaCompaction) {
  // Flush a DRS with 1 row.
  NO_FATALS(InsertTestRowsRemote(1, 1));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  ANFF(VerifyRows(schema_, { KeyValue(1, 1) }));
  // Update it, flush deltas.
  ANFF(UpdateTestRowRemote(1, 2));
  ASSERT_OK(tablet_replica_->tablet()->FlushBiggestDMS());
  ANFF(VerifyRows(schema_, { KeyValue(1, 2) }));
  // Major compact deltas.
  {
    vector<shared_ptr<tablet::RowSet> > rsets;
    tablet_replica_->tablet()->GetRowSetsForTests(&rsets);
    // Compact the non-key columns (IDs 1 and 2) of the only rowset.
    vector<ColumnId> col_ids = { tablet_replica_->tablet()->schema()->column_id(1),
                                 tablet_replica_->tablet()->schema()->column_id(2) };
    ASSERT_OK(tablet_replica_->tablet()->DoMajorDeltaCompaction(col_ids, rsets[0]));
  }
  // Verify that data is still the same.
  ANFF(VerifyRows(schema_, { KeyValue(1, 2) }));
  // Verify that data remains after a restart.
  ASSERT_OK(ShutdownAndRebuildTablet());
  ANFF(VerifyRows(schema_, { KeyValue(1, 2) }));
}
// Regression test for KUDU-1341, a case in which, during bootstrap,
// we have a DELETE for a row which is still live in multiple on-disk
// rowsets.
TEST_F(TabletServerTest, TestKUDU_1341) {
  // Create three on-disk rowsets that each saw the same row live, then
  // deleted.
  for (int i = 0; i < 3; i++) {
    // Insert a row to DMS and flush it.
    ANFF(InsertTestRowsRemote(1, 1));
    ASSERT_OK(tablet_replica_->tablet()->Flush());
    // Update and delete row (in DMS)
    ANFF(UpdateTestRowRemote(1, i));
    ANFF(DeleteTestRowsRemote(1, 1));
  }
  // Insert row again, update it in MRS before flush, and
  // flush.
  ANFF(InsertTestRowsRemote(1, 1));
  ANFF(UpdateTestRowRemote(1, 12345));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  ANFF(VerifyRows(schema_, { KeyValue(1, 12345) }));
  // Test restart.
  ASSERT_OK(ShutdownAndRebuildTablet());
  ANFF(VerifyRows(schema_, { KeyValue(1, 12345) }));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  ANFF(VerifyRows(schema_, { KeyValue(1, 12345) }));
  // Test compaction after restart.
  ASSERT_OK(tablet_replica_->tablet()->Compact(Tablet::FORCE_COMPACT_ALL));
  ANFF(VerifyRows(schema_, { KeyValue(1, 12345) }));
}
// Verifies exactly-once RPC semantics: a retried write (same RequestID) must
// return the cached response — including its per-row errors — even across
// multiple server restarts, rather than being re-executed or re-validated.
TEST_F(TabletServerTest, TestExactlyOnceForErrorsAcrossRestart) {
  WriteRequestPB req;
  WriteResponsePB resp;
  RpcController rpc;
  // Set up a request to insert two rows.
  req.set_tablet_id(kTabletId);
  AddTestRowToPB(RowOperationsPB::INSERT, schema_, 1234, 5678, "hello world via RPC",
                 req.mutable_row_operations());
  AddTestRowToPB(RowOperationsPB::INSERT, schema_, 12345, 5679, "hello world via RPC2",
                 req.mutable_row_operations());
  ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));
  // Insert it, assuming no errors.
  {
    SCOPED_TRACE(req.DebugString());
    ASSERT_OK(proxy_->Write(req, &resp, &rpc));
    SCOPED_TRACE(resp.DebugString());
    ASSERT_FALSE(resp.has_error());
    ASSERT_EQ(0, resp.per_row_errors_size());
  }
  // Set up a RequestID to use in the later requests.
  rpc::RequestIdPB req_id;
  req_id.set_client_id("client-id");
  req_id.set_seq_no(1);
  req_id.set_first_incomplete_seq_no(1);
  req_id.set_attempt_no(1);
  // Insert the row again, with the request ID specified. We should expect an
  // "ALREADY_PRESENT" error.
  {
    rpc.Reset();
    rpc.SetRequestIdPB(unique_ptr<rpc::RequestIdPB>(new rpc::RequestIdPB(req_id)));
    ASSERT_OK(proxy_->Write(req, &resp, &rpc));
    SCOPED_TRACE(resp.DebugString());
    ASSERT_FALSE(resp.has_error());
    ASSERT_EQ(2, resp.per_row_errors_size());
  }
  // Restart the tablet server several times, and after each restart, send a new attempt of the
  // same request. We make the request itself invalid by clearing the schema and ops, but
  // that shouldn't matter since it's just hitting the ResultTracker and returning the
  // cached response. If the ResultTracker didn't have a cached response, then we'd get an
  // error about an invalid request.
  req.clear_schema();
  req.clear_row_operations();
  for (int i = 1; i <= 5; i++) {
    SCOPED_TRACE(Substitute("restart attempt #$0", i));
    NO_FATALS(ShutdownAndRebuildTablet());
    rpc.Reset();
    // Bump only the attempt number: same seq_no means "retry of request 1".
    req_id.set_attempt_no(req_id.attempt_no() + 1);
    rpc.SetRequestIdPB(unique_ptr<rpc::RequestIdPB>(new rpc::RequestIdPB(req_id)));
    ASSERT_OK(proxy_->Write(req, &resp, &rpc));
    SCOPED_TRACE(resp.DebugString());
    ASSERT_FALSE(resp.has_error());
    ASSERT_EQ(2, resp.per_row_errors_size());
  }
}
// Regression test for KUDU-177. Ensures that after a major delta compaction,
// rows that were in the old DRS's DMS are properly replayed.
TEST_F(TabletServerTest, TestKUDU_177_RecoveryOfDMSEditsAfterMajorDeltaCompaction) {
  // Flush a DRS with 1 rows.
  ANFF(InsertTestRowsRemote(1, 1));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  ANFF(VerifyRows(schema_, { KeyValue(1, 1) }));

  // Update it, flush deltas.
  ANFF(UpdateTestRowRemote(1, 2));
  ASSERT_OK(tablet_replica_->tablet()->FlushBiggestDMS());

  // Update it again, so this last update is in the DMS.
  ANFF(UpdateTestRowRemote(1, 3));
  ANFF(VerifyRows(schema_, { KeyValue(1, 3) }));

  // Major compact deltas. This doesn't include the DMS, but the old
  // DMS should "move over" to the output of the delta compaction.
  {
    vector<shared_ptr<tablet::RowSet> > rsets;
    tablet_replica_->tablet()->GetRowSetsForTests(&rsets);
    // Compact the two value columns (column ids 1 and 2) of the first
    // (and only) rowset.
    vector<ColumnId> col_ids = { tablet_replica_->tablet()->schema()->column_id(1),
                                 tablet_replica_->tablet()->schema()->column_id(2) };
    ASSERT_OK(tablet_replica_->tablet()->DoMajorDeltaCompaction(col_ids, rsets[0]));
  }

  // Verify that data is still the same.
  ANFF(VerifyRows(schema_, { KeyValue(1, 3) }));

  // Verify that the update remains after a restart.
  ASSERT_OK(ShutdownAndRebuildTablet());
  ANFF(VerifyRows(schema_, { KeyValue(1, 3) }));
}
// When WAL recovery fails (here: due to deliberate log corruption), the tablet
// must end up FAILED, writes must get a clean TABLET_FAILED error, and the
// replica's status message must record the corruption.
TEST_F(TabletServerTest, TestClientGetsErrorBackWhenRecoveryFailed) {
  ANFF(InsertTestRowsRemote(1, 7));
  ASSERT_OK(tablet_replica_->tablet()->Flush());

  // Save the log path before shutting down the tablet (and destroying
  // the TabletReplica).
  string log_path = tablet_replica_->log()->ActiveSegmentPathForTests();
  ShutdownTablet();

  // Flip a byte at offset 300 of the WAL segment so bootstrap will fail.
  ASSERT_OK(log::CorruptLogFile(env_, log_path, log::FLIP_BYTE, 300));
  ASSERT_FALSE(ShutdownAndRebuildTablet().ok());

  // Connect to it.
  CreateTsClientProxies(mini_server_->bound_rpc_addr(),
                        client_messenger_,
                        &tablet_copy_proxy_, &proxy_, &admin_proxy_, &consensus_proxy_,
                        &generic_proxy_);

  WriteRequestPB req;
  req.set_tablet_id(kTabletId);
  WriteResponsePB resp;
  rpc::RpcController controller;

  // We're expecting the write to fail.
  ASSERT_OK(DCHECK_NOTNULL(proxy_.get())->Write(req, &resp, &controller));
  ASSERT_EQ(TabletServerErrorPB::TABLET_FAILED, resp.error().code());
  ASSERT_STR_CONTAINS(resp.error().status().message(), "Tablet not RUNNING: FAILED");

  // Check that the TabletReplica's status message is updated with the failure.
  ASSERT_STR_CONTAINS(tablet_replica_->last_status(),
                      "Log file corruption detected");
}
// Inserts rows, scans them all back in the default (READ_LATEST) mode, and
// verifies both the returned rows and the active-scanner metrics (server-wide
// and per-tablet) as the scanner is created and then auto-unregistered.
TEST_F(TabletServerTest, TestReadLatest) {
  int num_rows = AllowSlowTests() ? 10000 : 1000;
  InsertTestRowsDirect(0, num_rows);

  // Instantiate scanner metrics.
  ASSERT_TRUE(mini_server_->server()->metric_entity());
  // We don't care what the function is, since the metric is already instantiated.
  auto active_scanners = METRIC_active_scanners.InstantiateFunctionGauge(
      mini_server_->server()->metric_entity(), Callback<size_t(void)>());
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  ASSERT_TRUE(tablet->tablet()->GetMetricEntity());
  scoped_refptr<AtomicGauge<size_t>> tablet_active_scanners =
      METRIC_tablet_active_scanners.Instantiate(tablet->tablet()->GetMetricEntity(), 0);

  ScanResponsePB resp;
  NO_FATALS(OpenScannerWithAllColumns(&resp));

  // Ensure that the scanner ID came back and got inserted into the
  // ScannerManager map.
  string scanner_id = resp.scanner_id();
  ASSERT_TRUE(!scanner_id.empty());
  {
    SharedScanner junk;
    TabletServerErrorPB::Code error_code;
    ASSERT_OK(mini_server_->server()->scanner_manager()->LookupScanner(
        scanner_id, proxy_->user_credentials().real_user(), &error_code, &junk));
  }

  // Ensure that the scanner shows up in the server and tablet's metrics.
  ASSERT_EQ(1, active_scanners->value());
  ASSERT_EQ(1, tablet_active_scanners->value());

  // Drain all the rows from the scanner.
  vector<string> results;
  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  ASSERT_EQ(num_rows, results.size());

  // Verify the returned rows match what was inserted, in key order.
  KuduPartialRow row(&schema_);
  for (int i = 0; i < num_rows; i++) {
    BuildTestRow(i, &row);
    string expected = "(" + row.ToString() + ")";
    ASSERT_EQ(expected, results[i]);
  }

  // Since the rows are drained, the scanner should be automatically removed
  // from the scanner manager.
  {
    SharedScanner junk;
    TabletServerErrorPB::Code error_code;
    ASSERT_TRUE(mini_server_->server()->scanner_manager()->LookupScanner(
        scanner_id, proxy_->user_credentials().real_user(), &error_code, &junk).IsNotFound());
    ASSERT_EQ(TabletServerErrorPB::SCANNER_EXPIRED, error_code);
  }

  // Ensure that the metrics have been updated now that the scanner is unregistered.
  ASSERT_EQ(0, active_scanners->value());
  ASSERT_EQ(0, tablet_active_scanners->value());
}
// Parameterizes the scanner-expiration test below over all read modes.
class ExpiredScannerParamTest :
    public TabletServerTest,
    public ::testing::WithParamInterface<ReadMode> {
};
// Opens a scanner, never reads from it, and verifies that it expires: the
// 'scanners_expired' counter ticks, a follow-up Scan RPC gets SCANNER_EXPIRED,
// and the expiration is logged.
TEST_P(ExpiredScannerParamTest, Test) {
  const ReadMode mode = GetParam();

  // Make scanners expire quickly.
  FLAGS_scanner_ttl_ms = 1;

  int num_rows = 100;
  InsertTestRowsDirect(0, num_rows);

  // Instantiate scanners expired metric.
  ASSERT_TRUE(mini_server_->server()->metric_entity());
  scoped_refptr<Counter> scanners_expired = METRIC_scanners_expired.Instantiate(
      mini_server_->server()->metric_entity());

  // Initially, there've been no scanners, so none have expired.
  ASSERT_EQ(0, scanners_expired->value());

  // Capture the glog output so we can ensure the scanner expiration message
  // gets logged.
  StringVectorSink capture_logs;
  ScopedRegisterSink reg(&capture_logs);

  // Open a scanner but don't read from it.
  ScanResponsePB resp;
  NO_FATALS(OpenScannerWithAllColumns(&resp, mode));

  // The scanner should expire after a short time.
  ASSERT_EVENTUALLY([&]() {
    ASSERT_EQ(1, scanners_expired->value());
  });

  // Continue the scan. We should get a SCANNER_EXPIRED error.
  ScanRequestPB req;
  RpcController rpc;
  req.set_scanner_id(resp.scanner_id());
  req.set_call_seq_id(1);
  resp.Clear();
  ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
  ASSERT_TRUE(resp.has_error());
  ASSERT_EQ(TabletServerErrorPB::SCANNER_EXPIRED, resp.error().code());
  ASSERT_STR_MATCHES(resp.error().status().message(), "Scanner .* not found");
  ASSERT_STRINGS_ANY_MATCH(capture_logs.logged_msgs(), "Scan: .* Scanner .* not found .* remote=");
}
// All supported read modes; shared by the parameterized scanner tests below.
static const ReadMode kReadModes[] = {
    READ_LATEST,
    READ_AT_SNAPSHOT,
    READ_YOUR_WRITES,
};

// Run the scanner-expiration test in every read mode.
INSTANTIATE_TEST_CASE_P(Params, ExpiredScannerParamTest,
                        testing::ValuesIn(kReadModes));
// Parameterizes the corrupted-deltas scan test below over all read modes.
class ScanCorruptedDeltasParamTest :
    public TabletServerTest,
    public ::testing::WithParamInterface<ReadMode> {
};
// Replaces the tablet's UNDO delta blocks with corrupted copies (via a
// rewritten superblock), rebuilds the tablet, and verifies that scans fail
// cleanly: first with a corruption/init error, then — once the tablet
// transitions to FAILED — with "not running" errors.
TEST_P(ScanCorruptedDeltasParamTest, Test) {
  const ReadMode mode = GetParam();
  // Ensure some rows get to disk with deltas.
  InsertTestRowsDirect(0, 100);
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  UpdateTestRowRemote(1, 100);
  ASSERT_OK(tablet_replica_->tablet()->Flush());

  // Fudge with some delta blocks.
  TabletSuperBlockPB superblock_pb;
  tablet_replica_->tablet()->metadata()->ToSuperBlock(&superblock_pb);
  FsManager* fs_manager = mini_server_->server()->fs_manager();
  for (int rowset_no = 0; rowset_no < superblock_pb.rowsets_size(); rowset_no++) {
    RowSetDataPB* rowset_pb = superblock_pb.mutable_rowsets(rowset_no);
    for (int id = 0; id < rowset_pb->undo_deltas_size(); id++) {
      BlockId block_id(rowset_pb->undo_deltas(id).block().id());
      BlockId new_block_id;
      // Make a copy of each block and rewrite the superblock to include these
      // newly corrupted blocks.
      ASSERT_OK(CreateCorruptBlock(fs_manager, block_id, 0, 0, &new_block_id));
      // Point the superblock at the corrupted copy instead of the original.
      rowset_pb->mutable_undo_deltas(id)->mutable_block()->set_id(new_block_id.id());
    }
  }

  // Grab the deltafiles and corrupt them.
  const string& meta_path = fs_manager->GetTabletMetadataPath(tablet_replica_->tablet_id());
  ShutdownTablet();

  // Flush the corruption and rebuild the server with the corrupt data.
  ASSERT_OK(pb_util::WritePBContainerToPath(env_,
      meta_path, superblock_pb, pb_util::OVERWRITE, pb_util::SYNC));
  ASSERT_OK(ShutdownAndRebuildTablet());
  LOG(INFO) << Substitute("Rebuilt tablet $0 with broken blocks", tablet_replica_->tablet_id());

  // Now open a scanner for the server.
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  scan->set_read_mode(mode);
  ASSERT_OK(SchemaToColumnPBs(schema_, scan->mutable_projected_columns()));

  // Send the call. This first call should attempt to init the corrupted
  // deltafiles and return with an error. Subsequent calls should see that the
  // previous call to init failed and should return an appropriate error.
  //
  // It's possible for snapshot scans to be waiting in MVCC when the tablet
  // fails. If that happens, the error will be slightly different.
  {
    req.set_batch_size_bytes(10000);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    const auto& s = resp.error().status();
    if (s.code() == AppStatusPB::CORRUPTION) {
      ASSERT_STR_CONTAINS(s.message(), "failed to init CFileReader");
    } else if (s.code() == AppStatusPB::ABORTED) {
      ASSERT_STR_CONTAINS(s.message(), "MVCC is closed");
    } else {
      FAIL() << "Unexpected failure";
    }
  }

  // The tablet will end up transitioning to a failed state and yield "not
  // running" errors.
  for (int i = 0; i < 2; i++) {
    rpc.Reset();
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_TRUE(resp.has_error());
    SCOPED_TRACE(SecureDebugString(resp));
    const auto& s = resp.error().status();
    if (s.code() == AppStatusPB::ILLEGAL_STATE) {
      ASSERT_STR_CONTAINS(s.message(), "Tablet not RUNNING");
    } else if (s.code() == AppStatusPB::ABORTED) {
      ASSERT_STR_CONTAINS(s.message(), "MVCC is closed");
    } else {
      FAIL() << "Unexpected failure";
    }
  }
}
// Run the corrupted-deltas scan test in every read mode.
INSTANTIATE_TEST_CASE_P(Params, ScanCorruptedDeltasParamTest,
                        testing::ValuesIn(kReadModes));
// Parameterizes the scanner-open-at-shutdown test below over all read modes.
class ScannerOpenWhenServerShutsDownParamTest :
    public TabletServerTest,
    public ::testing::WithParamInterface<ReadMode> {
};
// Regression-style check for KUDU-161: leave a scanner open while the tablet
// server shuts down at the end of the test. This used to fail because the
// open scanner (and thus the MRS) stayed alive longer than the anchor
// registry during shutdown.
TEST_P(ScannerOpenWhenServerShutsDownParamTest, Test) {
  const ReadMode read_mode = GetParam();

  // Populate the tablet so both an MRS and a DRS have data:
  // insert + flush, then update + flush.
  InsertTestRowsDirect(0, 100);
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  UpdateTestRowRemote(1, 100);
  ASSERT_OK(tablet_replica_->tablet()->Flush());

  // Open a scanner and deliberately leave it open: the fixture's teardown
  // then shuts the server down with the scanner still registered.
  ScanResponsePB scan_resp;
  NO_FATALS(OpenScannerWithAllColumns(&scan_resp, read_mode));
}
// Run the open-scanner-at-shutdown test in every read mode.
INSTANTIATE_TEST_CASE_P(Params, ScannerOpenWhenServerShutsDownParamTest,
                        testing::ValuesIn(kReadModes));
// Performs a series of batched writes, then for each recorded write timestamp
// runs a READ_AT_SNAPSHOT scan at (timestamp + 1) and verifies that exactly
// the rows written up to that point are returned, in order.
TEST_F(TabletServerTest, TestSnapshotScan) {
  const int num_rows = AllowSlowTests() ? 1000 : 100;
  const int num_batches = AllowSlowTests() ? 100 : 10;
  vector<uint64_t> write_timestamps_collector;

  // Perform a series of writes and collect the timestamps.
  InsertTestRowsRemote(0, num_rows, num_batches, nullptr,
                       kTabletId, &write_timestamps_collector);

  // Now perform snapshot scans.
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  int batch_idx = 1;
  for (uint64_t write_timestamp : write_timestamps_collector) {
    req.Clear();
    resp.Clear();
    rpc.Reset();

    // Set up a new request with no predicates, all columns.
    const Schema& projection = schema_;
    NewScanRequestPB* scan = req.mutable_new_scan_request();
    scan->set_tablet_id(kTabletId);
    scan->set_read_mode(READ_AT_SNAPSHOT);

    // Decode and re-encode the timestamp. Note that a snapshot at 'write_timestamp'
    // does not include the written rows, so we increment that timestamp by one
    // to make sure we get those rows back.
    Timestamp read_timestamp(write_timestamp);
    read_timestamp = Timestamp(read_timestamp.value() + 1);
    scan->set_snap_timestamp(read_timestamp.ToUint64());

    ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
    req.set_call_seq_id(0);

    // Taken before the scan so we can bound the propagated timestamp below.
    const Timestamp pre_scan_ts = mini_server_->server()->clock()->Now();

    // Send the call.
    {
      SCOPED_TRACE(SecureDebugString(req));
      req.set_batch_size_bytes(0); // so it won't return data right away
      ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
      SCOPED_TRACE(SecureDebugString(resp));
      ASSERT_FALSE(resp.has_error());
    }

    // The 'propagated_timestamp' field must be set for 'success' responses.
    ASSERT_TRUE(resp.has_propagated_timestamp());
    ASSERT_GT(mini_server_->server()->clock()->Now().ToUint64(),
              resp.propagated_timestamp());
    ASSERT_LT(pre_scan_ts.ToUint64(), resp.propagated_timestamp());

    ASSERT_TRUE(resp.has_more_results());

    // Drain all the rows from the scanner.
    vector<string> results;
    NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));

    // On each scan we should get (num_rows / num_batches) * batch_idx rows back.
    int expected_num_rows = (num_rows / num_batches) * batch_idx;
    ASSERT_EQ(expected_num_rows, results.size());

    if (VLOG_IS_ON(2)) {
      VLOG(2) << Substitute("Scanner: $0 performing a snapshot read at $1 got back: ",
                            resp.scanner_id(), read_timestamp.ToString());
      for (const string& result : results) {
        VLOG(2) << result;
      }
    }

    // Assert that the first and last rows were the expected ones.
    ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
    ASSERT_EQ(Substitute(R"((int32 key=$0, int32 int_val=$0, string string_val="original$0"))",
                         (batch_idx * (num_rows / num_batches) - 1)), results[results.size() - 1]);
    batch_idx++;
  }
}
// Verifies that a READ_AT_SNAPSHOT scan with no explicit snapshot timestamp
// gets a server-assigned timestamp that is >= the time observed just before
// the scan, and that a propagated timestamp (newer than the snapshot) is
// returned on success.
//
// Fix: the original called req.set_batch_size_bytes(0) twice (once while
// building the request and again inside the send-call scope); the redundant
// second call is removed.
TEST_F(TabletServerTest, TestSnapshotScan_WithoutSnapshotTimestamp) {
  vector<uint64_t> write_timestamps_collector;
  // Perform a write so there is something to scan.
  InsertTestRowsRemote(0, 1, 1, nullptr, kTabletId, &write_timestamps_collector);

  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;

  // Set up a new request with no predicates, all columns.
  const Schema& projection = schema_;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
  req.set_call_seq_id(0);
  req.set_batch_size_bytes(0); // so it won't return data right away
  scan->set_read_mode(READ_AT_SNAPSHOT);
  // Note: no snap_timestamp is set; the server must pick one.

  const Timestamp pre_scan_ts = mini_server_->server()->clock()->Now();
  // Send the call.
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }

  // Make sure that the snapshot timestamp that was selected is >= now.
  ASSERT_GE(resp.snap_timestamp(), pre_scan_ts.ToUint64());

  // The 'propagated_timestamp' field must be set for all successful responses.
  ASSERT_TRUE(resp.has_propagated_timestamp());
  ASSERT_GT(mini_server_->server()->clock()->Now().ToUint64(),
            resp.propagated_timestamp());
  ASSERT_LT(pre_scan_ts.ToUint64(), resp.propagated_timestamp());

  // The propagated timestamp should be after (i.e. greater) than the scan
  // timestamp.
  ASSERT_GT(resp.propagated_timestamp(), resp.snap_timestamp());
}
// Tests that a snapshot in the future (beyond the current time plus maximum
// synchronization error) fails as an invalid snapshot.
TEST_F(TabletServerTest, TestSnapshotScan_SnapshotInTheFutureFails) {
  vector<uint64_t> write_timestamps_collector;
  // Perform a write and remember its timestamp.
  InsertTestRowsRemote(0, 1, 1, nullptr, kTabletId, &write_timestamps_collector);

  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;

  // Set up a new request with no predicates, all columns.
  const Schema& projection = schema_;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
  req.set_call_seq_id(0);
  req.set_batch_size_bytes(0); // so it won't return data right away
  scan->set_read_mode(READ_AT_SNAPSHOT);

  Timestamp read_timestamp(write_timestamps_collector[0]);
  // Increment the write timestamp by 60 secs: the server will definitely consider
  // this in the future.
  read_timestamp = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(read_timestamp) + 60000000);
  scan->set_snap_timestamp(read_timestamp.ToUint64());

  // Send the call; the server must reject the far-future snapshot.
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::INVALID_SNAPSHOT, resp.error().code());
  }
}
// Test retrying a snapshot scan using last_row: an ORDERED snapshot scan is
// repeatedly resumed via last_primary_key/snap_timestamp from the previous
// response, at several client batch sizes, and must always yield all rows in
// key order.
TEST_F(TabletServerTest, TestSnapshotScan_LastRow) {
  // Set the internal batching within the tserver to be small. Otherwise,
  // even though we use a small batch size in our request, we'd end up reading
  // many rows at a time.
  FLAGS_scanner_batch_size_rows = 5;
  const int num_rows = AllowSlowTests() ? 1000 : 100;
  const int num_batches = AllowSlowTests() ? 10 : 5;
  const int batch_size = num_rows / num_batches;

  // Generate some interleaved rows: each pass inserts every batch_size-th
  // key, with a flush before it, so consecutive keys end up in different
  // rowsets.
  for (int i = 0; i < batch_size; i++) {
    ASSERT_OK(tablet_replica_->tablet()->Flush());
    for (int j = 0; j < num_rows; j++) {
      if (j % batch_size == i) {
        InsertTestRowsDirect(j, 1);
      }
    }
  }

  // Remove all the key columns from the projection.
  // This makes sure the scanner adds them in for sorting but removes them before returning
  // to the client.
  SchemaBuilder sb(schema_);
  for (int i = 0; i < schema_.num_key_columns(); i++) {
    sb.RemoveColumn(schema_.column(i).name());
  }
  const Schema& projection = sb.BuildWithoutIds();

  // Scan the whole tablet with a few different batch sizes.
  for (int i = 1; i < 10000; i *= 2) {
    ScanResponsePB resp;
    ScanRequestPB req;
    RpcController rpc;

    // Set up a new snapshot scan without a specified timestamp.
    NewScanRequestPB* scan = req.mutable_new_scan_request();
    scan->set_tablet_id(kTabletId);
    ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
    req.set_call_seq_id(0);
    scan->set_read_mode(READ_AT_SNAPSHOT);
    scan->set_order_mode(ORDERED);

    // Send the call.
    {
      SCOPED_TRACE(SecureDebugString(req));
      req.set_batch_size_bytes(0); // so it won't return data right away
      ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
      SCOPED_TRACE(SecureDebugString(resp));
      ASSERT_FALSE(resp.has_error());
    }

    vector<string> results;
    do {
      rpc.Reset();
      // Send the call.
      {
        SCOPED_TRACE(SecureDebugString(req));
        req.set_batch_size_bytes(i);
        ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
        SCOPED_TRACE(SecureDebugString(resp));
        ASSERT_FALSE(resp.has_error());
      }
      // Save the rows into 'results' vector.
      StringifyRowsFromResponse(projection, rpc, &resp, &results);
      // Retry the scan, setting the last_row_key and snapshot based on the response.
      scan->set_last_primary_key(resp.last_primary_key());
      scan->set_snap_timestamp(resp.snap_timestamp());
    } while (resp.has_more_results());

    ASSERT_EQ(num_rows, results.size());

    // Verify that we get the rows back in order.
    KuduPartialRow row(&projection);
    for (int j = 0; j < num_rows; j++) {
      ASSERT_OK(row.SetInt32(0, j * 2));
      ASSERT_OK(row.SetStringCopy(1, StringPrintf("hello %d", j)));
      string expected = "(" + row.ToString() + ")";
      ASSERT_EQ(expected, results[j]);
    }
  }
}
// Tests that a read in the future succeeds if a propagated_timestamp (that is even
// further in the future) follows along. Also tests that the clock was updated so
// that no writes will ever have a timestamp post this snapshot.
TEST_F(TabletServerTest, TestSnapshotScan_SnapshotInTheFutureWithPropagatedTimestamp) {
  vector<uint64_t> write_timestamps_collector;
  // Perform a write and remember its timestamp.
  InsertTestRowsRemote(0, 1, 1, nullptr, kTabletId, &write_timestamps_collector);

  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;

  // Set up a new request with no predicates, all columns.
  const Schema& projection = schema_;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
  req.set_call_seq_id(0);
  req.set_batch_size_bytes(0); // so it won't return data right away
  scan->set_read_mode(READ_AT_SNAPSHOT);

  Timestamp read_timestamp(write_timestamps_collector[0]);
  // Increment the write timestamp by 5 secs; the server will definitely consider
  // this in the future.
  read_timestamp = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(read_timestamp) + 5000000);
  scan->set_snap_timestamp(read_timestamp.ToUint64());

  // Send a propagated timestamp that is an additional 100 msecs into the future.
  Timestamp propagated_timestamp = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(read_timestamp) + 100000);
  scan->set_propagated_timestamp(propagated_timestamp.ToUint64());

  // Send the call; with the propagated timestamp covering the snapshot,
  // the scan is expected to succeed.
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }

  // Make sure the server's current clock returns a value that is larger than the
  // propagated timestamp. It should have the same physical time, but higher
  // logical time (due to various calls to clock.Now() when processing the request).
  Timestamp now = mini_server_->server()->clock()->Now();
  ASSERT_EQ(HybridClock::GetPhysicalValueMicros(propagated_timestamp),
            HybridClock::GetPhysicalValueMicros(now));
  ASSERT_GT(HybridClock::GetLogicalValue(now),
            HybridClock::GetLogicalValue(propagated_timestamp));

  // The single written row must be visible at the chosen snapshot.
  vector<string> results;
  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  ASSERT_EQ(1, results.size());
  ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
}
// Test that a read in the future fails, even if a propagated_timestamp is sent along,
// if the read_timestamp is beyond the propagated_timestamp.
// NOTE(review): the double underscore in the test name looks like a typo, but
// renaming a registered test would change test-filter behavior, so it is kept.
TEST_F(TabletServerTest, TestSnapshotScan__SnapshotInTheFutureBeyondPropagatedTimestampFails) {
  vector<uint64_t> write_timestamps_collector;
  // Perform a write and remember its timestamp.
  InsertTestRowsRemote(0, 1, 1, nullptr, kTabletId, &write_timestamps_collector);

  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;

  // Set up a new request with no predicates, all columns.
  const Schema& projection = schema_;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
  req.set_call_seq_id(0);
  req.set_batch_size_bytes(0); // so it won't return data right away
  scan->set_read_mode(READ_AT_SNAPSHOT);

  Timestamp read_timestamp(write_timestamps_collector[0]);
  // Increment the write timestamp by 60 secs; the server will definitely consider
  // this in the future.
  read_timestamp = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(read_timestamp) + 60000000);
  scan->set_snap_timestamp(read_timestamp.ToUint64());

  // Send a propagated timestamp that is less than the read timestamp (but still
  // in the future as far as the server is concerned).
  Timestamp propagated_timestamp = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(read_timestamp) - 100000);
  scan->set_propagated_timestamp(propagated_timestamp.ToUint64());

  // Send the call; since the snapshot is beyond the propagated timestamp,
  // the server must reject it.
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::INVALID_SNAPSHOT, resp.error().code());
  }
}
// Scan with READ_YOUR_WRITES mode to ensure it can
// satisfy read-your-writes/read-your-reads session guarantee.
TEST_F(TabletServerTest, TestScanYourWrites) {
  vector<uint64_t> write_timestamps_collector;
  const int kNumRows = 100;
  // Perform a write.
  InsertTestRowsRemote(0, kNumRows, 1, nullptr, kTabletId, &write_timestamps_collector);

  // Scan with READ_YOUR_WRITES mode and use the previous
  // write response as the propagated timestamp.
  ScanResponsePB resp;
  uint64_t propagated_timestamp = write_timestamps_collector[0];
  ScanYourWritesTest(propagated_timestamp, &resp);

  // Store the returned snapshot timestamp as the propagated
  // timestamp for the next read.
  propagated_timestamp = resp.snap_timestamp();

  // Drain all the rows from the scanner.
  vector<string> results;
  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  // All rows written before the scan must be visible (read-your-writes).
  ASSERT_EQ(kNumRows, results.size());
  ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
  ASSERT_EQ(R"((int32 key=99, int32 int_val=99, string string_val="original99"))", results[99]);

  // Rescan the tablet to ensure READ_YOUR_WRITES mode can
  // satisfy read-your-reads session guarantee.
  ScanResponsePB new_resp;
  ScanYourWritesTest(propagated_timestamp, &new_resp);

  // Drain all the rows from the scanner.
  results.clear();
  NO_FATALS(DrainScannerToStrings(new_resp.scanner_id(), schema_, &results));
  // The second read must see at least everything the first read saw
  // (read-your-reads).
  ASSERT_EQ(kNumRows, results.size());
  ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
  ASSERT_EQ(R"((int32 key=99, int32 int_val=99, string string_val="original99"))", results[99]);
}
// A READ_YOUR_WRITES scan must succeed even when the client supplies no
// meaningful propagated timestamp (here: the minimum possible timestamp).
TEST_F(TabletServerTest, TestScanYourWrites_WithoutPropagatedTimestamp) {
  // Write a single row first so the scan has data behind it.
  vector<uint64_t> unused_write_timestamps;
  InsertTestRowsRemote(0, 1, 1, nullptr, kTabletId, &unused_write_timestamps);

  // Run the scan, passing Timestamp::kMin as the propagated timestamp.
  ScanResponsePB scan_resp;
  ScanYourWritesTest(Timestamp::kMin.ToUint64(), &scan_resp);
}
// Tests that a read succeeds even with a future propagated_timestamp. Also
// tests that the clock was updated so that no writes will ever have a
// timestamp before this snapshot.
TEST_F(TabletServerTest, TestScanYourWrites_PropagatedTimestampInTheFuture) {
  vector<uint64_t> write_timestamps_collector;
  // Perform a write.
  InsertTestRowsRemote(0, 1, 1, nullptr, kTabletId, &write_timestamps_collector);

  ScanResponsePB resp;
  // Increment the write timestamp by 5 secs: the server will definitely consider
  // this in the future.
  Timestamp propagated_timestamp(write_timestamps_collector[0]);
  propagated_timestamp = HybridClock::TimestampFromMicroseconds(
      HybridClock::GetPhysicalValueMicros(propagated_timestamp) + 5000000);
  ScanYourWritesTest(propagated_timestamp.ToUint64(), &resp);

  // Make sure the server's current clock returns a value that is larger than the
  // propagated timestamp. It should have the same physical time, but higher
  // logical time (due to various calls to clock.Now() when processing the request).
  Timestamp now = mini_server_->server()->clock()->Now();
  ASSERT_EQ(HybridClock::GetPhysicalValueMicros(propagated_timestamp),
            HybridClock::GetPhysicalValueMicros(now));
  ASSERT_GT(HybridClock::GetLogicalValue(now),
            HybridClock::GetLogicalValue(propagated_timestamp));

  // The single written row should be returned.
  vector<string> results;
  NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  ASSERT_EQ(1, results.size());
  ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="original0"))", results[0]);
}
// Scans with a range predicate on the string column and verifies that only
// the matching rows come back.
TEST_F(TabletServerTest, TestScanWithStringPredicates) {
  InsertTestRowsDirect(0, 100);

  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  req.set_batch_size_bytes(0); // so it won't return data right away
  ASSERT_OK(SchemaToColumnPBs(schema_, scan->mutable_projected_columns()));

  // Set up a range predicate: "hello 50" < string_val <= "hello 59"
  ColumnRangePredicatePB* pred = scan->add_deprecated_range_predicates();
  // Column index 2 is the string column of the test schema.
  pred->mutable_column()->CopyFrom(scan->projected_columns(2));
  pred->set_lower_bound("hello 50");
  pred->set_inclusive_upper_bound("hello 59");

  // Send the call.
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }

  // Drain all the rows from the scanner.
  vector<string> results;
  NO_FATALS(
      DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  // Rows 50..59 match the predicate.
  ASSERT_EQ(10, results.size());
  ASSERT_EQ(R"((int32 key=50, int32 int_val=100, string string_val="hello 50"))", results[0]);
  ASSERT_EQ(R"((int32 key=59, int32 int_val=118, string string_val="hello 59"))", results[9]);
}
// A scan whose limit is zero or negative must short-circuit: the server
// returns an empty result immediately and never registers a scanner.
TEST_F(TabletServerTest, TestNonPositiveLimitsShortCircuit) {
  InsertTestRowsDirect(0, 10);

  const int kNonPositiveLimits[] = { -1, 0 };
  for (const int limit : kNonPositiveLimits) {
    ScanRequestPB scan_req;
    ScanResponsePB scan_resp;
    RpcController controller;

    // Build a full-projection scan carrying the non-positive limit.
    NewScanRequestPB* new_scan = scan_req.mutable_new_scan_request();
    new_scan->set_tablet_id(kTabletId);
    new_scan->set_limit(limit);
    ASSERT_OK(SchemaToColumnPBs(schema_, new_scan->mutable_projected_columns()));

    {
      // Send the request and make sure we get no rows back.
      SCOPED_TRACE(SecureDebugString(scan_req));
      ASSERT_OK(proxy_->Scan(scan_req, &scan_resp, &controller));
      SCOPED_TRACE(SecureDebugString(scan_resp));
      ASSERT_FALSE(scan_resp.has_error());

      // Since the scan short-circuited, no scanner exists to drain; instead
      // just verify the returned row block is empty.
      ASSERT_FALSE(scan_resp.has_scanner_id());
      unique_ptr<RowwiseRowBlockPB> block(scan_resp.release_data());
      ASSERT_EQ(0, block->num_rows());
    }
  }
}
// Randomized test that runs a few scans with varying limits.
//
// Fix: the batch size and row count were drawn as rand() % N, which can be 0.
// A zero batch size would set FLAGS_scanner_batch_size_rows to a degenerate
// value (a scanner batch of zero rows) roughly once in a thousand runs; both
// draws are now offset by one so they are always positive.
TEST_F(TabletServerTest, TestRandomizedScanLimits) {
  // Set a relatively small (but always positive) batch size...
  const int kBatchSizeRows = rand() % 1000 + 1;
  // ...and a decent, non-zero number of rows, such that we can get a good
  // mix of multiple-batch and single-batch scans.
  const int kNumRows = rand() % 2000 + 1;
  FLAGS_scanner_batch_size_rows = kBatchSizeRows;
  InsertTestRowsDirect(0, kNumRows);
  LOG(INFO) << Substitute("Rows inserted: $0, batch size: $1", kNumRows, kBatchSizeRows);

  for (int i = 1; i < 100; i++) {
    // To broaden a range of coverage, gradiate the max limit that we can set.
    const int kMaxLimit = kNumRows * static_cast<double>(0.01 * i);
    // Get a random limit, capped by the max, inclusive.
    // "kMaxLimit" cannot be 0; if it's 0, we set "kLimit" to 1 directly.
    const int kLimit = kMaxLimit == 0 ? 1 : rand() % kMaxLimit + 1;
    LOG(INFO) << "Scanning with a limit of " << kLimit;

    ScanRequestPB req;
    ScanResponsePB resp;
    RpcController rpc;
    NewScanRequestPB* scan = req.mutable_new_scan_request();
    scan->set_tablet_id(kTabletId);
    scan->set_limit(kLimit);
    req.set_batch_size_bytes(0); // so it won't return data right away
    ASSERT_OK(SchemaToColumnPBs(schema_, scan->mutable_projected_columns()));

    // Send the scan.
    {
      SCOPED_TRACE(SecureDebugString(req));
      ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
      SCOPED_TRACE(SecureDebugString(resp));
      ASSERT_FALSE(resp.has_error());
    }

    // Drain all the rows from the scanner.
    vector<string> results;
    NO_FATALS(DrainScannerToStrings(resp.scanner_id(), schema_, &results));
    // The scan must return exactly min(limit, total rows) rows.
    ASSERT_EQ(results.size(), std::min({ kLimit, kNumRows }));
  }
}
// Scans with a range predicate on the int32 key column (bounds passed as raw
// bytes) and verifies the expected row count.
TEST_F(TabletServerTest, TestScanWithPredicates) {
  int num_rows = AllowSlowTests() ? 10000 : 1000;
  InsertTestRowsDirect(0, num_rows);

  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  req.set_batch_size_bytes(0); // so it won't return data right away
  ASSERT_OK(SchemaToColumnPBs(schema_, scan->mutable_projected_columns()));

  // Set up a range predicate: 51 <= key <= 100
  ColumnRangePredicatePB* pred = scan->add_deprecated_range_predicates();
  pred->mutable_column()->CopyFrom(scan->projected_columns(0));
  int32_t lower_bound_int = 51;
  int32_t upper_bound_int = 100;
  // The predicate bounds are the raw in-memory bytes of the int32 values.
  // NOTE(review): this presumes the server interprets the bytes with the same
  // endianness as this host — confirm if this ever runs cross-endian.
  pred->mutable_lower_bound()->append(reinterpret_cast<char*>(&lower_bound_int),
                                      sizeof(lower_bound_int));
  pred->mutable_inclusive_upper_bound()->append(reinterpret_cast<char*>(&upper_bound_int),
                                                sizeof(upper_bound_int));

  // Send the call.
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }

  // Drain all the rows from the scanner.
  vector<string> results;
  NO_FATALS(
      DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  // Keys 51..100 inclusive -> 50 rows.
  ASSERT_EQ(50, results.size());
}
// Exercises a scan bounded by encoded primary keys. The stop key is
// exclusive, so rows with keys 51..59 (9 rows) are expected back.
TEST_F(TabletServerTest, TestScanWithEncodedPredicates) {
  InsertTestRowsDirect(0, 100);
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  req.set_batch_size_bytes(0); // so it won't return data right away
  ASSERT_OK(SchemaToColumnPBs(schema_, scan->mutable_projected_columns()));
  // Set up a range predicate: 51 <= key < 60 (stop key is exclusive)
  // using encoded keys
  int32_t start_key_int = 51;
  int32_t stop_key_int = 60;
  EncodedKeyBuilder ekb(&schema_);
  ekb.AddColumnKey(&start_key_int);
  gscoped_ptr<EncodedKey> start_encoded(ekb.BuildEncodedKey());
  // Reuse the same builder for the stop key.
  ekb.Reset();
  ekb.AddColumnKey(&stop_key_int);
  gscoped_ptr<EncodedKey> stop_encoded(ekb.BuildEncodedKey());
  scan->mutable_start_primary_key()->assign(
      reinterpret_cast<const char*>(start_encoded->encoded_key().data()),
      start_encoded->encoded_key().size());
  scan->mutable_stop_primary_key()->assign(
      reinterpret_cast<const char*>(stop_encoded->encoded_key().data()),
      stop_encoded->encoded_key().size());
  // Send the call
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }
  // Drain all the rows from the scanner.
  vector<string> results;
  NO_FATALS(
      DrainScannerToStrings(resp.scanner_id(), schema_, &results));
  // Nine rows: keys 51..59; 60 is excluded by the exclusive stop key.
  ASSERT_EQ(9, results.size());
  EXPECT_EQ(R"((int32 key=51, int32 int_val=102, string string_val="hello 51"))",
            results.front());
  EXPECT_EQ(R"((int32 key=59, int32 int_val=118, string string_val="hello 59"))",
            results.back());
}
// Verifies that predicates the server can simplify (a key range folded into
// scan bounds, and IS NOT NULL on a non-nullable column) still yield correct
// results, and that the scanner descriptor reports the expected projection,
// predicate count, and per-column iterator stats.
TEST_F(TabletServerTest, TestScanWithSimplifiablePredicates) {
  int num_rows = AllowSlowTests() ? 10000 : 1000;
  InsertTestRowsDirect(0, num_rows);
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  NewScanRequestPB* scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  req.set_batch_size_bytes(0); // so it won't return data right away
  // Set up a projection without the key columns or the column after the last key column
  // (note the '<=' bound deliberately drops one extra column).
  SchemaBuilder sb(schema_);
  for (int i = 0; i <= schema_.num_key_columns(); i++) {
    sb.RemoveColumn(schema_.column(i).name());
  }
  const Schema& projection = sb.BuildWithoutIds();
  ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
  // Set up a key range predicate: 51 <= key < 100
  ColumnPredicatePB* key_predicate = scan->add_column_predicates();
  key_predicate->set_column(schema_.column(0).name());
  ColumnPredicatePB::Range* range = key_predicate->mutable_range();
  int32_t lower_bound_inclusive = 51;
  int32_t upper_bound_exclusive = 100;
  range->mutable_lower()->append(
      reinterpret_cast<char*>(&lower_bound_inclusive), sizeof(lower_bound_inclusive));
  range->mutable_upper()->append(
      reinterpret_cast<char*>(&upper_bound_exclusive), sizeof(upper_bound_exclusive));
  // Set up is not null predicate for not nullable column.
  ColumnPredicatePB* is_not_null_predicate = scan->add_column_predicates();
  is_not_null_predicate->set_column(schema_.column(1).name());
  is_not_null_predicate->mutable_is_not_null();
  // Send the call
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }
  // Ensure that the scanner includes correct columns.
  {
    auto scan_descriptors = mini_server_->server()->scanner_manager()->ListScans();
    ASSERT_EQ(1, projection.columns().size());
    ASSERT_EQ(1, scan_descriptors.size());
    ASSERT_EQ(projection.columns().size(), scan_descriptors[0].projected_columns.size());
    ASSERT_EQ(2, scan_descriptors[0].predicates.size());
    ASSERT_EQ(projection.columns().size(), scan_descriptors[0].iterator_stats.size());
    ASSERT_EQ(projection.column(0).name(), scan_descriptors[0].iterator_stats[0].first);
  }
  // Drain all the rows from the scanner.
  vector<string> results;
  NO_FATALS(
      DrainScannerToStrings(resp.scanner_id(), projection, &results));
  // Keys 51..99 inclusive: 49 rows (the upper bound 100 is exclusive).
  ASSERT_EQ(49, results.size());
}
// Test for diff scan RPC interface.
TEST_F(TabletServerTest, TestDiffScan) {
  // Insert kNumRows (1000) rows with the usual pattern.
  const int kStartRow = 0;
  const int kNumRows = 1000;
  const int kNumToUpdate = 200;
  const int kNumToDelete = 100;
  InsertTestRowsDirect(kStartRow, kNumRows);
  Timestamp before_mutations = tablet_replica_->clock()->Now();
  // Structure: key -> {val, is_deleted}
  map<int32_t, pair<int32_t, bool>> expected;
  vector<int32_t> keys;
  keys.reserve(kNumRows);
  for (int32_t i = 0; i < kNumRows; i++) {
    keys.emplace_back(i);
  }
  // Update some random rows.
  // NOTE(review): std::random_shuffle was removed in C++17; when migrating
  // standards this should become std::shuffle with a seeded engine.
  LocalTabletWriter writer(tablet_replica_->tablet(), &schema_);
  std::random_shuffle(keys.begin(), keys.end());
  for (int i = 0; i < kNumToUpdate; i++) {
    KuduPartialRow row(&schema_);
    int32_t key = keys[i];
    CHECK_OK(row.SetInt32(0, key));
    int32_t new_val = key * 3;
    CHECK_OK(row.SetInt32(1, new_val));
    InsertOrDie(&expected, key, pair<int32_t, bool>(new_val, false));
    CHECK_OK(writer.Update(row));
  }
  // Delete some random rows. A deleted key may also have been updated above;
  // EmplaceOrUpdate replaces any entry recorded by the update loop.
  std::random_shuffle(keys.begin(), keys.end());
  for (int i = 0; i < kNumToDelete; i++) {
    KuduPartialRow row(&schema_);
    int32_t key = keys[i];
    CHECK_OK(row.SetInt32(0, key));
    EmplaceOrUpdate(&expected, key, pair<int32_t, bool>(0 /* ignored */, true));
    CHECK_OK(writer.Delete(row));
  }
  Timestamp after_mutations = tablet_replica_->clock()->Now();
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  // Build a projection with an IS_DELETED column.
  SchemaBuilder builder(*tablet_replica_->tablet()->schema());
  const bool kIsDeletedDefault = false;
  ASSERT_OK(builder.AddColumn("is_deleted", IS_DELETED,
                              /*is_nullable=*/ false,
                              /*read_default=*/ &kIsDeletedDefault,
                              /*write_default=*/ nullptr));
  Schema projection = builder.BuildWithoutIds();
  // Start scan. A diff scan must be ORDERED READ_AT_SNAPSHOT with a start
  // timestamp; here it covers exactly the mutation window.
  auto* new_scan = req.mutable_new_scan_request();
  new_scan->set_tablet_id(kTabletId);
  ASSERT_OK(SchemaToColumnPBs(projection, new_scan->mutable_projected_columns()));
  new_scan->set_read_mode(READ_AT_SNAPSHOT);
  new_scan->set_order_mode(ORDERED);
  new_scan->set_snap_start_timestamp(before_mutations.ToUint64());
  new_scan->set_snap_timestamp(after_mutations.ToUint64());
  int call_seq_id = 0;
  {
    req.set_call_seq_id(call_seq_id);
    req.set_batch_size_bytes(0); // So it won't return data right away.
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
    ASSERT_EQ(0, resp.data().num_rows());
  }
  // Consume the scan results and validate that the values are as expected.
  req.clear_new_scan_request();
  req.set_scanner_id(resp.scanner_id());
  vector<string> results;
  while (resp.has_more_results()) {
    rpc.Reset();
    req.set_call_seq_id(++call_seq_id);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
    NO_FATALS(StringifyRowsFromResponse(projection, rpc, &resp, &results));
  }
  // Verify that the scan results match what we expected.
  ASSERT_EQ(expected.size(), results.size());
  int i = 0;
  for (const auto& entry : expected) {
    int32_t key = entry.first;
    int32_t val = entry.second.first;
    bool is_deleted = entry.second.second;
    string val_str = Substitute("$0", val);
    if (is_deleted) {
      val_str = ".*"; // Match any value on deleted values.
    }
    ASSERT_STR_MATCHES(results[i++],
        Substitute("^\\(int32 key=$0, int32 int_val=$1, string string_val=\"hello $0\", "
                   "is_deleted is_deleted=$2\\)$$", key, val_str, is_deleted));
  }
}
// Send various "bad" diff scan requests and validate that we catch the errors
// and respond with reasonable error messages.
TEST_F(TabletServerTest, TestDiffScanErrors) {
  Timestamp before_insert = tablet_replica_->clock()->Now();
  InsertTestRowsDirect(/*start_row=*/0, /*num_rows=*/100);
  Timestamp after_insert = tablet_replica_->clock()->Now();
  // Build a projection with an IS_DELETED column.
  SchemaBuilder builder(*tablet_replica_->tablet()->schema());
  const bool kIsDeletedDefault = false;
  ASSERT_OK(builder.AddColumn("is_deleted", IS_DELETED,
                              /*is_nullable=*/ false,
                              /*read_default=*/ &kIsDeletedDefault,
                              /*write_default=*/ nullptr));
  Schema projection = builder.BuildWithoutIds();
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  // Set up the RPC request.
  auto* new_scan = req.mutable_new_scan_request();
  new_scan->set_tablet_id(kTabletId);
  ASSERT_OK(SchemaToColumnPBs(projection, new_scan->mutable_projected_columns()));
  new_scan->set_snap_start_timestamp(before_insert.ToUint64());
  new_scan->set_snap_timestamp(after_insert.ToUint64());
  req.set_call_seq_id(0);
  req.set_batch_size_bytes(0); // So it won't return data right away.
  // Sends the current scan request and asserts it fails with the given error
  // code, an InvalidArgument status, and a message containing 'expected_msg'.
  // Captures 'req'/'resp'/'rpc' by reference so callers can mutate 'req'
  // between invocations.
  auto req_assert_invalid_argument = [&](const TabletServerErrorPB::Code expected_code,
                                         const string& expected_msg) {
    rpc.Reset();
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(expected_code, resp.error().code())
        << "Expected " << TabletServerErrorPB::Code_Name(expected_code)
        << ", got " << TabletServerErrorPB::Code_Name(resp.error().code());
    Status s = StatusFromPB(resp.error().status());
    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
    ASSERT_STR_CONTAINS(s.ToString(), expected_msg);
  };
  // Attempt to start a diff scan with an illegal scan mode.
  for (ReadMode read_mode : {READ_YOUR_WRITES, READ_LATEST}) {
    new_scan->set_read_mode(read_mode);
    NO_FATALS(req_assert_invalid_argument(TabletServerErrorPB::INVALID_SCAN_SPEC,
        "scan start timestamp is only supported in READ_AT_SNAPSHOT read mode"));
  }
  new_scan->set_read_mode(READ_AT_SNAPSHOT);
  // Attempt to start a diff scan with an illegal order mode.
  new_scan->set_order_mode(UNORDERED);
  NO_FATALS(req_assert_invalid_argument(TabletServerErrorPB::INVALID_SCAN_SPEC,
      "scan start timestamp is only supported in ORDERED order mode"));
  new_scan->set_order_mode(ORDERED);
  // Attempt to start a diff scan with a too-early start timestamp.
  new_scan->set_snap_start_timestamp(0); // Way before the AHM.
  NO_FATALS(req_assert_invalid_argument(TabletServerErrorPB::INVALID_SNAPSHOT,
      "snapshot scan start timestamp is earlier than the ancient history mark"));
  // Attempt to start a diff scan with a too-early end timestamp.
  new_scan->set_snap_timestamp(1);
  NO_FATALS(req_assert_invalid_argument(TabletServerErrorPB::INVALID_SNAPSHOT,
      "snapshot scan end timestamp is earlier than the ancient history mark"));
  // Attempt to start a diff scan with a start timestamp higher than the end
  // timestamp.
  new_scan->set_snap_start_timestamp(after_insert.ToUint64());
  new_scan->set_snap_timestamp(before_insert.ToUint64());
  NO_FATALS(req_assert_invalid_argument(TabletServerErrorPB::INVALID_SNAPSHOT,
      "must be less than or equal to end timestamp"));
}
// Requesting more rows from a scanner ID the server never issued should come
// back as a SCANNER_EXPIRED error rather than a crash or a hang.
TEST_F(TabletServerTest, TestBadScannerID) {
  ScanRequestPB request;
  ScanResponsePB response;
  RpcController controller;
  request.set_scanner_id("does-not-exist");
  SCOPED_TRACE(SecureDebugString(request));
  ASSERT_OK(proxy_->Scan(request, &response, &controller));
  SCOPED_TRACE(SecureDebugString(response));
  ASSERT_TRUE(response.has_error());
  ASSERT_EQ(TabletServerErrorPB::SCANNER_EXPIRED, response.error().code());
}
// Test passing a scanner ID, but also filling in some of the NewScanRequest
// fields. Parameterized over ReadMode so every read mode is covered.
class InvalidScanRequest_NewScanAndScannerIDParamTest :
    public TabletServerTest,
    public ::testing::WithParamInterface<ReadMode> {
};
// Supplying both a scanner_id and a new_scan_request in one call is
// malformed; the RPC layer should reject it outright for every read mode.
TEST_P(InvalidScanRequest_NewScanAndScannerIDParamTest, Test) {
  const ReadMode read_mode = GetParam();
  ScanRequestPB request;
  ScanResponsePB response;
  RpcController controller;
  NewScanRequestPB* new_scan = request.mutable_new_scan_request();
  new_scan->set_tablet_id(kTabletId);
  new_scan->set_read_mode(read_mode);
  request.set_batch_size_bytes(0); // so it won't return data right away
  request.set_scanner_id("x");
  SCOPED_TRACE(SecureDebugString(request));
  const Status scan_status = proxy_->Scan(request, &response, &controller);
  ASSERT_FALSE(scan_status.ok());
  ASSERT_STR_CONTAINS(scan_status.ToString(),
                      "Must not pass both a scanner_id and new_scan_request");
}
// Instantiate the test above for every supported read mode.
INSTANTIATE_TEST_CASE_P(Params, InvalidScanRequest_NewScanAndScannerIDParamTest,
                        testing::ValuesIn(kReadModes));
// A projection naming a column that does not exist in the tablet schema must
// be rejected with MISMATCHED_SCHEMA.
TEST_F(TabletServerTest, TestInvalidScanRequest_BadProjection) {
  const Schema bad_projection({ ColumnSchema("col_doesnt_exist", INT32) }, 0);
  VerifyScanRequestFailure(
      bad_projection,
      TabletServerErrorPB::MISMATCHED_SCHEMA,
      "Some columns are not present in the current schema: col_doesnt_exist");
}
// Test that passing a projection with mismatched type/nullability throws an exception.
// The same Schema object is Reset() for each of the four mismatch cases;
// ColumnSchema's third argument is the nullability flag.
TEST_F(TabletServerTest, TestInvalidScanRequest_BadProjectionTypes) {
  Schema projection;
  // Verify mismatched nullability for the not-null int field
  ASSERT_OK(
      projection.Reset({ ColumnSchema("int_val", INT32, true) }, // should be NOT NULL
                       0));
  VerifyScanRequestFailure(projection,
                           TabletServerErrorPB::MISMATCHED_SCHEMA,
                           "The column 'int_val' must have type INT32 NOT "
                           "NULL found INT32 NULLABLE");
  // Verify mismatched nullability for the nullable string field
  ASSERT_OK(
      projection.Reset({ ColumnSchema("string_val", STRING, false) }, // should be NULLABLE
                       0));
  VerifyScanRequestFailure(projection,
                           TabletServerErrorPB::MISMATCHED_SCHEMA,
                           "The column 'string_val' must have type STRING "
                           "NULLABLE found STRING NOT NULL");
  // Verify mismatched type for the not-null int field
  ASSERT_OK(
      projection.Reset({ ColumnSchema("int_val", INT16, false) }, // should be INT32 NOT NULL
                       0));
  VerifyScanRequestFailure(projection,
                           TabletServerErrorPB::MISMATCHED_SCHEMA,
                           "The column 'int_val' must have type INT32 NOT "
                           "NULL found INT16 NOT NULL");
  // Verify mismatched type for the nullable string field
  ASSERT_OK(projection.Reset(
      { ColumnSchema("string_val", INT32, true) }, // should be STRING NULLABLE
      0));
  VerifyScanRequestFailure(projection,
                           TabletServerErrorPB::MISMATCHED_SCHEMA,
                           "The column 'string_val' must have type STRING "
                           "NULLABLE found INT32 NULLABLE");
}
// An explicitly-unknown order mode in the scan request should be rejected as
// an invalid scan spec.
TEST_F(TabletServerTest, TestInvalidScanRequest_UnknownOrderMode) {
  NO_FATALS(InsertTestRowsDirect(0, 10));
  ScanRequestPB request;
  NewScanRequestPB* new_scan = request.mutable_new_scan_request();
  new_scan->set_tablet_id(kTabletId);
  new_scan->set_order_mode(OrderMode::UNKNOWN_ORDER_MODE);
  ASSERT_OK(SchemaToColumnPBs(schema_, new_scan->mutable_projected_columns()));
  request.set_call_seq_id(0);
  NO_FATALS(VerifyScanRequestFailure(request,
                                     TabletServerErrorPB::INVALID_SCAN_SPEC,
                                     "Unknown order mode specified"));
}
// Test that passing a projection with Column IDs throws an exception.
// Column IDs are assigned to the user request schema on the tablet server
// based on the latest schema. Parameterized over ReadMode so every read
// mode is covered.
class InvalidScanRequest_WithIdsParamTest :
    public TabletServerTest,
    public ::testing::WithParamInterface<ReadMode> {
};
// A scan projection carrying column IDs must be rejected: IDs are assigned
// server-side from the latest schema, never supplied by the client.
TEST_P(InvalidScanRequest_WithIdsParamTest, Test) {
  const Schema* tablet_schema = tablet_replica_->tablet()->schema();
  ASSERT_TRUE(tablet_schema->has_column_ids());
  VerifyScanRequestFailure(*tablet_schema,
                           TabletServerErrorPB::INVALID_SCHEMA,
                           "User requests should not have Column IDs");
}
// Instantiate the test above for every supported read mode.
INSTANTIATE_TEST_CASE_P(Params, InvalidScanRequest_WithIdsParamTest,
                        testing::ValuesIn(kReadModes));
// Scanning an empty tablet should complete immediately: no error, no rows,
// and no open scanner left behind.
TEST_F(TabletServerTest, TestScan_NoResults) {
  ScanRequestPB request;
  ScanResponsePB response;
  RpcController controller;
  // Build a full-projection scan with no predicates.
  const Schema& full_projection = schema_;
  NewScanRequestPB* new_scan = request.mutable_new_scan_request();
  new_scan->set_tablet_id(kTabletId);
  request.set_batch_size_bytes(0); // so it won't return data right away
  ASSERT_OK(SchemaToColumnPBs(full_projection, new_scan->mutable_projected_columns()));
  request.set_call_seq_id(0);
  {
    SCOPED_TRACE(SecureDebugString(request));
    ASSERT_OK(proxy_->Scan(request, &response, &controller));
    SCOPED_TRACE(SecureDebugString(response));
    ASSERT_FALSE(response.has_error());
    // Because there are no entries, we should immediately return "no results".
    ASSERT_FALSE(response.has_more_results());
  }
}
// Test continuing a scan with an invalid call sequence ID, under every read
// mode. (The previous comment here was a copy-paste of the empty-tablet test.)
class InvalidScanSeqIdParamTest :
    public TabletServerTest,
    public ::testing::WithParamInterface<ReadMode> {
};
// Opens a valid scanner, then tries to continue it with an out-of-sequence
// call ID and expects INVALID_SCAN_CALL_SEQ_ID.
TEST_P(InvalidScanSeqIdParamTest, Test) {
  const ReadMode mode = GetParam();
  InsertTestRowsDirect(0, 10);
  ScanRequestPB req;
  ScanResponsePB resp;
  RpcController rpc;
  {
    // Set up a new scan request with no predicates, all columns.
    const Schema& projection = schema_;
    NewScanRequestPB* scan = req.mutable_new_scan_request();
    scan->set_tablet_id(kTabletId);
    scan->set_read_mode(mode);
    ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
    req.set_call_seq_id(0);
    req.set_batch_size_bytes(0); // so it won't return data right away
    // Create the scanner
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    ASSERT_FALSE(resp.has_error());
    ASSERT_TRUE(resp.has_more_results());
  }
  string scanner_id = resp.scanner_id();
  resp.Clear();
  {
    // Continue the scan with an invalid sequence ID
    req.Clear();
    rpc.Reset();
    req.set_scanner_id(scanner_id);
    req.set_batch_size_bytes(0); // so it won't return data right away
    req.set_call_seq_id(42); // should be 1
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::INVALID_SCAN_CALL_SEQ_ID, resp.error().code());
  }
}
// Instantiate the test above for every supported read mode.
INSTANTIATE_TEST_CASE_P(Params, InvalidScanSeqIdParamTest,
                        testing::ValuesIn(kReadModes));
// Regression test for KUDU-1789: when ScannerKeepAlive is called on a non-existent
// scanner, it should properly respond with an error.
TEST_F(TabletServerTest, TestScan_KeepAliveExpiredScanner) {
  // Capture log messages so we can also assert on the server-side log line.
  StringVectorSink capture_logs;
  ScopedRegisterSink reg(&capture_logs);
  ScannerKeepAliveRequestPB req;
  ScannerKeepAliveResponsePB resp;
  RpcController rpc;
  rpc.set_timeout(MonoDelta::FromSeconds(5));
  req.set_scanner_id("does-not-exist");
  ASSERT_OK(proxy_->ScannerKeepAlive(req, &resp, &rpc));
  // The RPC itself succeeds; the error is carried in the response payload.
  ASSERT_TRUE(resp.has_error()) << SecureShortDebugString(resp);
  ASSERT_EQ(resp.error().code(), TabletServerErrorPB::SCANNER_EXPIRED);
  ASSERT_STR_MATCHES(resp.error().status().message(), "Scanner .* not found");
  ASSERT_STRINGS_ANY_MATCH(capture_logs.logged_msgs(),
      "ScannerKeepAlive: .* Scanner .* not found .* remote=");
}
// Starts a READ_YOUR_WRITES scan, propagating 'propagated_timestamp' unless
// it equals Timestamp::kInvalidTimestamp, and leaves the open-scanner
// response in '*resp'. Verifies the server chose a snapshot timestamp
// strictly greater than the propagated one and returned its own propagated
// timestamp.
void TabletServerTest::ScanYourWritesTest(uint64_t propagated_timestamp,
                                          ScanResponsePB* resp) {
  ScanRequestPB req;
  // Set up a new request with no predicates, all columns.
  const Schema &projection = schema_;
  NewScanRequestPB *scan = req.mutable_new_scan_request();
  scan->set_tablet_id(kTabletId);
  scan->set_read_mode(READ_YOUR_WRITES);
  if (propagated_timestamp != Timestamp::kInvalidTimestamp.ToUint64()) {
    scan->set_propagated_timestamp(propagated_timestamp);
  }
  ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
  req.set_call_seq_id(0);
  req.set_batch_size_bytes(0); // so it won't return data right away
  {
    RpcController rpc;
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Scan(req, resp, &rpc));
    SCOPED_TRACE(SecureDebugString(*resp));
    ASSERT_FALSE(resp->has_error());
  }
  // Make sure that the chosen snapshot timestamp is sent back and
  // it is larger than the previous propagation timestamp.
  ASSERT_TRUE(resp->has_snap_timestamp());
  ASSERT_LT(propagated_timestamp, resp->snap_timestamp());
  // The 'propagated_timestamp' field must be set for 'success' responses.
  ASSERT_TRUE(resp->has_propagated_timestamp());
  ASSERT_TRUE(resp->has_more_results());
}
void TabletServerTest::DoOrderedScanTest(const Schema& projection,
const string& expected_rows_as_string) {
InsertTestRowsDirect(0, 10);
ASSERT_OK(tablet_replica_->tablet()->Flush());
InsertTestRowsDirect(10, 10);
ASSERT_OK(tablet_replica_->tablet()->Flush());
InsertTestRowsDirect(20, 10);
ScanResponsePB resp;
ScanRequestPB req;
RpcController rpc;
// Set up a new snapshot scan without a specified timestamp.
NewScanRequestPB* scan = req.mutable_new_scan_request();
scan->set_tablet_id(kTabletId);
ASSERT_OK(SchemaToColumnPBs(projection, scan->mutable_projected_columns()));
req.set_call_seq_id(0);
scan->set_read_mode(READ_AT_SNAPSHOT);
scan->set_order_mode(ORDERED);
{
SCOPED_TRACE(SecureDebugString(req));
req.set_batch_size_bytes(0); // so it won't return data right away
ASSERT_OK(proxy_->Scan(req, &resp, &rpc));
SCOPED_TRACE(SecureDebugString(resp));
ASSERT_FALSE(resp.has_error());
}
vector<string> results;
NO_FATALS(
DrainScannerToStrings(resp.scanner_id(), projection, &results));
ASSERT_EQ(30, results.size());
for (int i = 0; i < results.size(); ++i) {
ASSERT_EQ(results[i], Substitute(expected_rows_as_string, i, i * 2));
}
}
// Tests for KUDU-967. This test creates multiple row sets and then performs an ordered
// scan including the key columns in the projection but without marking them as keys.
// Without a fix for KUDU-967 the scan will often return out-of-order results.
TEST_F(TabletServerTest, TestOrderedScan_ProjectionWithKeyColumnsInOrder) {
  // Build a projection with all the columns, but don't mark the key columns as such.
  SchemaBuilder sb;
  for (int i = 0; i < schema_.num_columns(); i++) {
    sb.AddColumn(schema_.column(i), false); // false => not marked as a key column
  }
  const Schema& projection = sb.BuildWithoutIds();
  // Template placeholders: $0 = key, $1 = int_val.
  DoOrderedScanTest(projection,
      R"((int32 key=$0, int32 int_val=$1, string string_val="hello $0"))");
}
// Same as above but doesn't add the key columns to the projection.
TEST_F(TabletServerTest, TestOrderedScan_ProjectionWithoutKeyColumns) {
  // Build a projection without the key columns.
  SchemaBuilder sb;
  for (int i = schema_.num_key_columns(); i < schema_.num_columns(); i++) {
    sb.AddColumn(schema_.column(i), false); // false => not marked as a key column
  }
  const Schema& projection = sb.BuildWithoutIds();
  // Template placeholders: $0 = key, $1 = int_val.
  DoOrderedScanTest(projection, R"((int32 int_val=$1, string string_val="hello $0"))");
}
// Same as above but creates a projection with the order of columns reversed.
TEST_F(TabletServerTest, TestOrderedScan_ProjectionWithKeyColumnsOutOfOrder) {
  // Build a projection with the order of the columns reversed.
  SchemaBuilder sb;
  for (int i = schema_.num_columns() - 1; i >= 0; i--) {
    sb.AddColumn(schema_.column(i), false); // false => not marked as a key column
  }
  const Schema& projection = sb.BuildWithoutIds();
  // Template placeholders: $0 = key, $1 = int_val.
  DoOrderedScanTest(projection,
      R"((string string_val="hello $0", int32 int_val=$1, int32 key=$0))");
}
// Flushes a series of disk rowsets, then verifies SplitKeyRange with the
// smallest possible chunk size returns one key range per rowset.
TEST_F(TabletServerTest, TestSplitKeyRange) {
  // 'k'-prefixed names are constants by convention (matches the other tests
  // in this file); declare them const.
  const int kNumRowsets = 10;
  const int kRowsetSize = 10;
  scoped_refptr<TabletReplica> replica;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &replica));
  // Flushing after each batch of inserts creates kNumRowsets disk rowsets.
  for (int i = 0; i < kNumRowsets; i++) {
    InsertTestRowsDirect(kRowsetSize * i, kRowsetSize);
    ASSERT_OK(replica->tablet()->Flush());
  }
  {
    SplitKeyRangeRequestPB req;
    SplitKeyRangeResponsePB resp;
    RpcController rpc;
    req.set_tablet_id(kTabletId);
    // Request the smallest possible chunk size, expecting we get back a range
    // for every rowset.
    req.set_target_chunk_size_bytes(1);
    ColumnSchemaToPB(ColumnSchema("key", INT32), req.add_columns());
    ASSERT_OK(proxy_->SplitKeyRange(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
    ASSERT_EQ(kNumRowsets, resp.ranges_size());
  }
}
// Adds a column "c2" (read default 7, write default 5) via AlterSchema, then
// verifies rows inserted before the alter read back the read default, rows
// inserted after it get the write default, and both survive two log replays.
TEST_F(TabletServerTest, TestAlterSchema) {
  AlterSchemaRequestPB req;
  AlterSchemaResponsePB resp;
  RpcController rpc;
  InsertTestRowsRemote(0, 2);
  // Add one column with a default value
  const int32_t c2_write_default = 5;
  const int32_t c2_read_default = 7;
  SchemaBuilder builder(schema_);
  ASSERT_OK(builder.AddColumn("c2", INT32, false, &c2_read_default, &c2_write_default));
  Schema s2 = builder.Build();
  req.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  req.set_tablet_id(kTabletId);
  req.set_schema_version(1);
  ASSERT_OK(SchemaToPB(s2, req.mutable_schema()));
  // Send the call
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(admin_proxy_->AlterSchema(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }
  {
    // Insert two more rows post-alter and flush so the new data hits disk.
    InsertTestRowsRemote(2, 2);
    scoped_refptr<TabletReplica> tablet;
    ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
    ASSERT_OK(tablet->tablet()->Flush());
  }
  const Schema projection({ ColumnSchema("key", INT32), (ColumnSchema("c2", INT32)) }, 1);
  // Try recovering from the original log
  NO_FATALS(ShutdownAndRebuildTablet());
  VerifyRows(projection, { KeyValue(0, 7),
                           KeyValue(1, 7),
                           KeyValue(2, 5),
                           KeyValue(3, 5) });
  // Try recovering from the log generated on recovery
  NO_FATALS(ShutdownAndRebuildTablet());
  VerifyRows(projection, { KeyValue(0, 7),
                           KeyValue(1, 7),
                           KeyValue(2, 5),
                           KeyValue(3, 5) });
}
// Adds a new column with no "write default", and then restarts the tablet
// server. Inserts that were made before the new column was added should
// still replay properly during bootstrap.
//
// Regression test for KUDU-181.
TEST_F(TabletServerTest, TestAlterSchema_AddColWithoutWriteDefault) {
  AlterSchemaRequestPB req;
  AlterSchemaResponsePB resp;
  RpcController rpc;
  InsertTestRowsRemote(0, 2);
  // Add a column with a read-default but no write-default.
  // The read default must be an int32_t to match the INT32 column type: it is
  // passed by address as the column default. (It was previously declared
  // uint32_t, which only worked by representation coincidence; this now also
  // matches the sibling TestAlterSchema test.)
  const int32_t c2_read_default = 7;
  SchemaBuilder builder(schema_);
  ASSERT_OK(builder.AddColumn("c2", INT32, false, &c2_read_default, nullptr));
  Schema s2 = builder.Build();
  req.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  req.set_tablet_id(kTabletId);
  req.set_schema_version(1);
  ASSERT_OK(SchemaToPB(s2, req.mutable_schema()));
  // Send the call
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(admin_proxy_->AlterSchema(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }
  // Verify that the old data picked up the read default.
  const Schema projection({ ColumnSchema("key", INT32), ColumnSchema("c2", INT32) }, 1);
  VerifyRows(projection, { KeyValue(0, 7), KeyValue(1, 7) });
  // Try recovering from the original log
  NO_FATALS(ShutdownAndRebuildTablet());
  VerifyRows(projection, { KeyValue(0, 7), KeyValue(1, 7) });
  // Try recovering from the log generated on recovery
  NO_FATALS(ShutdownAndRebuildTablet());
  VerifyRows(projection, { KeyValue(0, 7), KeyValue(1, 7) });
}
// Creating a tablet whose ID already exists on the server should fail with
// TABLET_ALREADY_EXISTS.
TEST_F(TabletServerTest, TestCreateTablet_TabletExists) {
  CreateTabletRequestPB request;
  CreateTabletResponsePB response;
  RpcController controller;
  request.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  request.set_table_id("testtb");
  request.set_tablet_id(kTabletId);
  PartitionPB* partition = request.mutable_partition();
  partition->set_partition_key_start(" ");
  partition->set_partition_key_end(" ");
  request.set_table_name("testtb");
  request.mutable_config()->CopyFrom(mini_server_->CreateLocalConfig());
  const Schema tablet_schema = SchemaBuilder(schema_).Build();
  ASSERT_OK(SchemaToPB(tablet_schema, request.mutable_schema()));
  // Issue the create and expect the duplicate-tablet error.
  {
    SCOPED_TRACE(SecureDebugString(request));
    ASSERT_OK(admin_proxy_->CreateTablet(request, &response, &controller));
    SCOPED_TRACE(SecureDebugString(response));
    ASSERT_TRUE(response.has_error());
    ASSERT_EQ(TabletServerErrorPB::TABLET_ALREADY_EXISTS, response.error().code());
  }
}
// Deletes a tablet and verifies it disappears from the tablet map, its data
// blocks are removed from disk (under the log block manager), metrics remain
// fetchable (KUDU-638), and the deletion persists across a server restart.
TEST_F(TabletServerTest, TestDeleteTablet) {
  scoped_refptr<TabletReplica> tablet;
  // Verify that the tablet exists
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  // Fetch the metric for the number of on-disk blocks, so we can later verify
  // that we actually remove data.
  scoped_refptr<AtomicGauge<uint64_t> > ondisk =
      METRIC_log_block_manager_blocks_under_management.Instantiate(
          mini_server_->server()->metric_entity(), 0);
  const int block_count_before_flush = ondisk->value();
  // The block-count metric is only meaningful under the log block manager.
  if (FLAGS_block_manager == "log") {
    ASSERT_EQ(block_count_before_flush, 0);
  }
  // Put some data in the tablet. We flush and insert more rows to ensure that
  // there is data both in the MRS and on disk.
  NO_FATALS(InsertTestRowsRemote(1, 1));
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  NO_FATALS(InsertTestRowsRemote(2, 1));
  const int block_count_after_flush = ondisk->value();
  if (FLAGS_block_manager == "log") {
    ASSERT_GT(block_count_after_flush, block_count_before_flush);
  }
  // Drop any local references to the tablet from within this test,
  // so that when we delete it on the server, it's not held alive
  // by the test code.
  tablet_replica_.reset();
  tablet.reset();
  DeleteTabletRequestPB req;
  DeleteTabletResponsePB resp;
  RpcController rpc;
  req.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  req.set_tablet_id(kTabletId);
  req.set_delete_type(tablet::TABLET_DATA_DELETED);
  // Send the call
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(admin_proxy_->DeleteTablet(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }
  // Verify that the tablet is removed from the tablet map
  ASSERT_FALSE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  // Verify that fetching metrics doesn't crash. Regression test for KUDU-638.
  EasyCurl c;
  faststring buf;
  ASSERT_OK(c.FetchURL(strings::Substitute(
      "http://$0/jsonmetricz",
      mini_server_->bound_http_addr().ToString()),
      &buf));
  // Verify data was actually removed.
  const int block_count_after_delete = ondisk->value();
  if (FLAGS_block_manager == "log") {
    ASSERT_EQ(block_count_after_delete, 0);
  }
  // Verify that after restarting the TS, the tablet is still not in the tablet manager.
  // This ensures that the on-disk metadata got removed.
  Status s = ShutdownAndRebuildTablet();
  ASSERT_TRUE(s.IsNotFound()) << s.ToString();
  ASSERT_FALSE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
}
// Deleting a tablet ID that was never created should report TABLET_NOT_FOUND.
TEST_F(TabletServerTest, TestDeleteTablet_TabletNotCreated) {
  DeleteTabletRequestPB request;
  DeleteTabletResponsePB response;
  RpcController controller;
  request.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  request.set_tablet_id("NotPresentTabletId");
  request.set_delete_type(tablet::TABLET_DATA_DELETED);
  // Issue the delete and expect the not-found error in the response.
  {
    SCOPED_TRACE(SecureDebugString(request));
    ASSERT_OK(admin_proxy_->DeleteTablet(request, &response, &controller));
    SCOPED_TRACE(SecureDebugString(response));
    ASSERT_TRUE(response.has_error());
    ASSERT_EQ(TabletServerErrorPB::TABLET_NOT_FOUND, response.error().code());
  }
}
// Benchmark: times the DeleteTablet RPC after creating many flushed rowsets,
// and logs block-manager metrics around the deletion.
TEST_F(TabletServerTest, TestDeleteTabletBenchmark) {
  // Collect some related metrics.
  scoped_refptr<AtomicGauge<uint64_t>> block_count =
      METRIC_log_block_manager_blocks_under_management.Instantiate(
          mini_server_->server()->metric_entity(), 0);
  scoped_refptr<AtomicGauge<uint64_t>> container =
      METRIC_log_block_manager_containers.Instantiate(
          mini_server_->server()->metric_entity(), 0);
  scoped_refptr<Counter> holes_punched =
      METRIC_log_block_manager_holes_punched.Instantiate(
          mini_server_->server()->metric_entity());
  // Put some data in the tablet. We insert rows and flush immediately to
  // ensure that there are enough blocks on disk to run the benchmark.
  for (int i = 0; i < FLAGS_delete_tablet_bench_num_flushes; i++) {
    NO_FATALS(InsertTestRowsRemote(i, 1));
    ASSERT_OK(tablet_replica_->tablet()->Flush());
  }
  const int block_count_before_delete = block_count->value();
  // Drop any local references to the tablet from within this test,
  // so that when we delete it on the server, it's not held alive
  // by the test code.
  tablet_replica_.reset();
  DeleteTabletRequestPB req;
  DeleteTabletResponsePB resp;
  RpcController rpc;
  req.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  req.set_tablet_id(kTabletId);
  req.set_delete_type(tablet::TABLET_DATA_DELETED);
  // Send the call and measure the time spent deleting the tablet.
  LOG_TIMING(INFO, "deleting tablet") {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(admin_proxy_->DeleteTablet(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }
  // Log the related metrics.
  LOG(INFO) << "block_count_before_delete : " << block_count_before_delete;
  LOG(INFO) << "log_block_manager_containers : " << container->value();
  LOG(INFO) << "log_block_manager_holes_punched : " << holes_punched->value();
}
// Test that with concurrent requests to delete the same tablet, one wins and
// the other fails, with no assertion failures. Regression test for KUDU-345.
TEST_F(TabletServerTest, TestConcurrentDeleteTablet) {
  // Verify that the tablet exists
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  static const int kNumDeletes = 2;
  RpcController rpcs[kNumDeletes];
  DeleteTabletResponsePB responses[kNumDeletes];
  CountDownLatch latch(kNumDeletes);
  DeleteTabletRequestPB req;
  req.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  req.set_tablet_id(kTabletId);
  req.set_delete_type(tablet::TABLET_DATA_DELETED);
  // Fire both deletes asynchronously; the latch counts down as each completes.
  for (int i = 0; i < kNumDeletes; i++) {
    SCOPED_TRACE(SecureDebugString(req));
    admin_proxy_->DeleteTabletAsync(req, &responses[i], &rpcs[i],
                                    boost::bind(&CountDownLatch::CountDown, &latch));
  }
  latch.Wait();
  int num_success = 0;
  for (int i = 0; i < kNumDeletes; i++) {
    ASSERT_TRUE(rpcs[i].finished());
    LOG(INFO) << "STATUS " << i << ": " << rpcs[i].status().ToString();
    LOG(INFO) << "RESPONSE " << i << ": " << SecureDebugString(responses[i]);
    if (!responses[i].has_error()) {
      num_success++;
    }
  }
  // Verify that the tablet is removed from the tablet map
  ASSERT_FALSE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  // Exactly one of the two concurrent deletes should have succeeded.
  ASSERT_EQ(1, num_success);
}
// Single-threaded insert micro-benchmark: records per-insert latency in a
// histogram and logs overall throughput plus the histogram as JSON.
TEST_F(TabletServerTest, TestInsertLatencyMicroBenchmark) {
  METRIC_DEFINE_entity(test);
  METRIC_DEFINE_histogram(test, insert_latency,
                          "Insert Latency",
                          MetricUnit::kMicroseconds,
                          "TabletServer single threaded insert latency.",
                          kudu::MetricLevel::kInfo,
                          10000000,
                          2);
  scoped_refptr<Histogram> histogram = METRIC_insert_latency.Instantiate(ts_test_metric_entity_);

  // Warm-up inserts; these are not timed and not recorded.
  int warmup = AllowSlowTests() ?
      FLAGS_single_threaded_insert_latency_bench_warmup_rows : 10;
  for (int i = 0; i < warmup; i++) {
    InsertTestRowsRemote(i, 1);
  }

  // Timed section: exactly max_rows single-row inserts, with keys continuing
  // after the warm-up range.
  int max_rows = AllowSlowTests() ?
      FLAGS_single_threaded_insert_latency_bench_insert_rows : 100;
  MonoTime start = MonoTime::Now();
  for (int i = warmup; i < warmup + max_rows; i++) {
    MonoTime before = MonoTime::Now();
    InsertTestRowsRemote(i, 1);
    MonoTime after = MonoTime::Now();
    MonoDelta delta = after - before;
    histogram->Increment(delta.ToMicroseconds());
  }
  MonoTime end = MonoTime::Now();

  // The timed loop performs max_rows inserts (i runs from warmup to
  // warmup + max_rows), so the throughput numerator is max_rows. The previous
  // (max_rows - warmup) numerator under-reported the insert rate.
  double throughput = (max_rows * 1.0) / (end - start).ToSeconds();

  // Generate the JSON rendering of the latency histogram.
  std::ostringstream out;
  JsonWriter writer(&out, JsonWriter::PRETTY);
  ASSERT_OK(histogram->WriteAsJson(&writer, MetricJsonOptions()));
  LOG(INFO) << "Throughput: " << throughput << " rows/sec.";
  LOG(INFO) << out.str();
}
// Simple test to ensure we can destroy an RpcServer in different states of
// initialization before Start()ing it.
TEST_F(TabletServerTest, TestRpcServerCreateDestroy) {
  RpcServerOptions opts;
  {
    // Constructed and destroyed without ever being initialized.
    RpcServer server(opts);
  }
  {
    // Init()ed with a messenger, but destroyed before Start().
    RpcServer server(opts);
    MessengerBuilder mb("foo");
    shared_ptr<Messenger> messenger;
    ASSERT_OK(mb.Build(&messenger));
    ASSERT_OK(server.Init(messenger));
  }
}
// Writes addressed to a key outside a tablet's partition range must be
// rejected with a "row not in tablet partition" error.
TEST_F(TabletServerTest, TestWriteOutOfBounds) {
  const char *tabletId = "TestWriteOutOfBoundsTablet";
  Schema schema = SchemaBuilder(schema_).Build();
  PartitionSchema partition_schema;
  CHECK_OK(PartitionSchema::FromPB(PartitionSchemaPB(), schema, &partition_schema));

  // Split the key space at 10 and 20, producing three partitions; the new
  // tablet is created over the middle partition only.
  KuduPartialRow start_row(&schema);
  ASSERT_OK(start_row.SetInt32("key", 10));
  KuduPartialRow end_row(&schema);
  ASSERT_OK(end_row.SetInt32("key", 20));
  vector<Partition> partitions;
  ASSERT_OK(partition_schema.CreatePartitions({ start_row, end_row }, {}, schema, &partitions));
  ASSERT_EQ(3, partitions.size());
  ASSERT_OK(mini_server_->server()->tablet_manager()->CreateNewTablet(
      "TestWriteOutOfBoundsTable", tabletId,
      partitions[1],
      tabletId, schema, partition_schema,
      mini_server_->CreateLocalConfig(), boost::none, boost::none, nullptr));
  ASSERT_OK(WaitForTabletRunning(tabletId));

  WriteRequestPB req;
  WriteResponsePB resp;
  RpcController controller;
  req.set_tablet_id(tabletId);
  ASSERT_OK(SchemaToPB(schema_, req.mutable_schema()));

  // Key 20 falls outside the middle partition, so both an INSERT and an
  // UPDATE of that key must fail with a NotFound partition error.
  vector<RowOperationsPB::Type> ops = { RowOperationsPB::INSERT, RowOperationsPB::UPDATE };
  for (const RowOperationsPB::Type &op : ops) {
    RowOperationsPB* data = req.mutable_row_operations();
    AddTestRowToPB(op, schema_, 20, 1, "1", data);
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(proxy_->Write(req, &resp, &controller));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_TRUE(resp.has_error());
    ASSERT_EQ(TabletServerErrorPB::UNKNOWN_ERROR, resp.error().code());
    Status s = StatusFromPB(resp.error().status());
    EXPECT_TRUE(s.IsNotFound());
    ASSERT_STR_CONTAINS(s.ToString(),
                        "Not found: Row not in tablet partition");
    // Reset request payload and controller for the next operation type.
    data->Clear();
    controller.Reset();
  }
}
// Computes the client-side expectation of the server's checksum contribution
// for a single test row: for each column, the column index and then the column
// value are folded into a CRC32C accumulator. The row presumably has three
// columns — key (int32), an int32 value equal to the key, and a nullable
// string "original$key" (matches the InsertTestRowsRemote data used by the
// callers; confirm against the test schema).
// 'string_field_defined' is the string column's null indicator; the string
// bytes are folded only when it is non-zero.
// NOTE(review): the flag is a uint8_t (not bool) so that exactly one byte is
// folded into the CRC — presumably matching the server's serialization;
// confirm before changing the type.
static uint32_t CalcTestRowChecksum(int32_t key, uint8_t string_field_defined = true) {
  crc::Crc* crc = crc::GetCrc32cInstance();
  uint64_t row_crc = 0;
  string strval = strings::Substitute("original$0", key);

  // Column 0: key.
  uint32_t index = 0;
  crc->Compute(&index, sizeof(index), &row_crc, nullptr);
  crc->Compute(&key, sizeof(int32_t), &row_crc, nullptr);

  // Column 1: int value (same as the key in the test data).
  index = 1;
  crc->Compute(&index, sizeof(index), &row_crc, nullptr);
  crc->Compute(&key, sizeof(int32_t), &row_crc, nullptr);

  // Column 2: nullable string — null indicator first, then the bytes if set.
  index = 2;
  crc->Compute(&index, sizeof(index), &row_crc, nullptr);
  crc->Compute(&string_field_defined, sizeof(string_field_defined), &row_crc, nullptr);
  if (string_field_defined) {
    crc->Compute(strval.c_str(), strval.size(), &row_crc, nullptr);
  }
  return static_cast<uint32_t>(row_crc);
}
// Simple test to check that our checksum scans work as expected.
TEST_F(TabletServerTest, TestChecksumScan) {
  uint64_t total_crc = 0;
  ChecksumRequestPB req;
  req.mutable_new_request()->set_tablet_id(kTabletId);
  req.mutable_new_request()->set_read_mode(READ_LATEST);
  req.set_call_seq_id(0);
  ASSERT_OK(SchemaToColumnPBs(schema_, req.mutable_new_request()->mutable_projected_columns(),
                              SCHEMA_PB_WITHOUT_IDS));
  ChecksumRequestPB new_req = req;  // Cache "new" request.
  ChecksumResponsePB resp;
  RpcController controller;
  ASSERT_OK(proxy_->Checksum(req, &resp, &controller));

  // No rows: the checksum of an empty tablet is the initial value (0).
  ASSERT_EQ(total_crc, resp.checksum());
  ASSERT_FALSE(resp.has_more_results());

  // First row: checksum should equal our locally computed row checksum.
  int32_t key = 1;
  InsertTestRowsRemote(key, 1);
  controller.Reset();
  ASSERT_OK(proxy_->Checksum(req, &resp, &controller));
  total_crc += CalcTestRowChecksum(key);
  uint64_t first_crc = total_crc;  // Cache first record checksum.
  ASSERT_FALSE(resp.has_error()) << SecureDebugString(resp.error());
  ASSERT_EQ(total_crc, resp.checksum());
  ASSERT_FALSE(resp.has_more_results());
  EXPECT_TRUE(resp.has_resource_metrics());
  EXPECT_EQ(1, resp.rows_checksummed());

  // Second row (null string field): checksum accumulates across rows.
  key = 2;
  InsertTestRowsRemote(key, 1, 1, nullptr, kTabletId, nullptr, nullptr, false);
  controller.Reset();
  ASSERT_OK(proxy_->Checksum(req, &resp, &controller));
  total_crc += CalcTestRowChecksum(key, false);
  ASSERT_FALSE(resp.has_error()) << SecureDebugString(resp.error());
  ASSERT_EQ(total_crc, resp.checksum());
  ASSERT_FALSE(resp.has_more_results());

  // Now test the same thing, but with a scan requiring 2 passes (one per row).
  FLAGS_scanner_batch_size_rows = 1;
  req.set_batch_size_bytes(1);
  controller.Reset();
  ASSERT_OK(proxy_->Checksum(req, &resp, &controller));
  string scanner_id = resp.scanner_id();
  ASSERT_TRUE(resp.has_more_results());
  uint64_t agg_checksum = resp.checksum();

  // Second pass: continue the scan, feeding back the partial checksum; the
  // final result must equal the single-pass total.
  req.clear_new_request();
  req.mutable_continue_request()->set_scanner_id(scanner_id);
  req.mutable_continue_request()->set_previous_checksum(agg_checksum);
  req.set_call_seq_id(1);
  controller.Reset();
  ASSERT_OK(proxy_->Checksum(req, &resp, &controller));
  ASSERT_EQ(total_crc, resp.checksum());
  ASSERT_FALSE(resp.has_more_results());

  // Finally, delete row 2, so we're back to the row 1 checksum.
  NO_FATALS(DeleteTestRowsRemote(key, 1));
  FLAGS_scanner_batch_size_rows = 100;
  req = new_req;
  controller.Reset();
  ASSERT_OK(proxy_->Checksum(req, &resp, &controller));
  ASSERT_NE(total_crc, resp.checksum());
  ASSERT_EQ(first_crc, resp.checksum());
  ASSERT_FALSE(resp.has_more_results());
}
// Log fault hook that stalls the log append path: PostAppend() blocks after
// each append until the test calls Continue(), giving the test precise
// control over when append completions (e.g. commit messages) proceed.
class DelayFsyncLogHook : public log::LogFaultHooks {
 public:
  DelayFsyncLogHook() : log_latch1_(1), test_latch1_(1) {}

  // Invoked on the log thread after an append: signal the test that an
  // append occurred, then block until Continue() releases us. The latch is
  // re-armed so the next append blocks again.
  Status PostAppend() override {
    test_latch1_.CountDown();
    log_latch1_.Wait();
    log_latch1_.Reset(1);
    return Status::OK();
  }

  // Invoked by the test: wait until an append has blocked, then release it.
  void Continue() {
    test_latch1_.Wait();
    log_latch1_.CountDown();
  }

 private:
  CountDownLatch log_latch1_;   // counted down by the test to unblock the log
  CountDownLatch test_latch1_;  // counted down by the log to signal the test
};
namespace {

// Deletes one test row (key 10); run on a separate thread so the test can
// hold up its commit via the log fault hook.
void DeleteOneRowAsync(TabletServerTest* test) {
  test->DeleteTestRowsRemote(10, 1);
}

// Runs a forced full compaction, then counts down the latch on completion.
void CompactAsync(Tablet* tablet, CountDownLatch* flush_done_latch) {
  CHECK_OK(tablet->Compact(Tablet::FORCE_COMPACT_ALL));
  flush_done_latch->CountDown();
}

} // namespace
// Tests that in flight transactions are committed and that commit messages
// are durable before a compaction is allowed to flush the tablet metadata.
//
// This test is in preparation for KUDU-120 and should pass before and after
// it, but was also confirmed to fail if the pre-conditions it tests for
// fail. That is if KUDU-120 is implemented without these pre-requisites
// this test is confirmed to fail.
TEST_F(TabletServerTest, TestKudu120PreRequisites) {
  // Insert a few rows...
  InsertTestRowsRemote(0, 10);
  // ... now flush ...
  ASSERT_OK(tablet_replica_->tablet()->Flush());
  // ... insert a few rows...
  InsertTestRowsRemote(10, 10);
  // ... and flush again so that we have two disk row sets.
  ASSERT_OK(tablet_replica_->tablet()->Flush());

  // Add a hook so that we can make the log wait right after an append
  // (before the callback is triggered).
  log::Log* log = tablet_replica_->log();
  shared_ptr<DelayFsyncLogHook> log_hook(new DelayFsyncLogHook);
  log->SetLogFaultHooksForTests(log_hook);

  // Now start a transaction (delete) and stop just before commit.
  scoped_refptr<kudu::Thread> thread1;
  CHECK_OK(kudu::Thread::Create("DeleteThread", "DeleteThread",
                                DeleteOneRowAsync, this, &thread1));

  // Wait for the replicate message to arrive and continue.
  log_hook->Continue();

  // Wait a few msecs to make sure that the transaction is
  // trying to commit.
  usleep(100* 1000); // 100 msecs

  // Now start a compaction before letting the commit message go through.
  scoped_refptr<kudu::Thread> flush_thread;
  CountDownLatch flush_done_latch(1);
  CHECK_OK(kudu::Thread::Create("CompactThread", "CompactThread",
                                CompactAsync,
                                tablet_replica_->tablet(),
                                &flush_done_latch,
                                &flush_thread));

  // At this point we have both a compaction and a transaction going on.
  // If we allow the transaction to return before the commit message is
  // durable (KUDU-120) that means that the mvcc transaction will no longer
  // be in flight at this moment, nonetheless since we're blocking the WAL
  // and not allowing the commit message to go through, the compaction should
  // be forced to wait.
  //
  // We are thus testing two conditions:
  // - That in-flight transactions are committed.
  // - That commit messages for transactions that were in flight are durable.
  //
  // If these pre-conditions are not met, i.e. if the compaction is not forced
  // to wait here for the conditions to be true, then the below assertion
  // will fail, since the transaction's commit write callback will only
  // return when we allow it (in log_hook->Continue());
  CHECK(!flush_done_latch.WaitFor(MonoDelta::FromMilliseconds(300)));

  // Now let the rest go through: one Continue() per blocked append.
  log_hook->Continue();
  log_hook->Continue();
  flush_done_latch.Wait();
}
// Test DNS resolution failure in the master heartbeater.
// Regression test for KUDU-1681.
TEST_F(TabletServerTest, TestFailedDnsResolution) {
  FLAGS_fail_dns_resolution = true;
  mini_server_->server()->heartbeater()->TriggerASAP();
  // Wait to make sure the heartbeater thread attempts the DNS lookup.
  // The test passes as long as the server survives the failed lookup.
  usleep(100 * 1000);
}
// A tablet whose superblock lacks a DataDirGroupPB must get one re-created
// at startup, identical (by default) to the original.
TEST_F(TabletServerTest, TestDataDirGroupsCreated) {
  // Get the original superblock.
  TabletSuperBlockPB superblock;
  tablet_replica_->tablet()->metadata()->ToSuperBlock(&superblock);
  DataDirGroupPB orig_group = superblock.data_dir_group();

  // Remove the DataDirGroupPB on-disk by rewriting the superblock container
  // without it.
  superblock.clear_data_dir_group();
  ASSERT_FALSE(superblock.has_data_dir_group());
  string tablet_meta_path = JoinPathSegments(GetTestPath("TabletServerTest-fsroot"), "tablet-meta");
  string pb_path = JoinPathSegments(tablet_meta_path, tablet_replica_->tablet_id());
  ASSERT_OK(pb_util::WritePBContainerToPath(Env::Default(),
      pb_path, superblock, pb_util::OVERWRITE, pb_util::SYNC));

  // Verify that the on-disk copy has its DataDirGroup missing.
  ASSERT_OK(tablet_replica_->tablet()->metadata()->ReadSuperBlockFromDisk(&superblock));
  ASSERT_FALSE(superblock.has_data_dir_group());

  // Restart the server and check that a new group is created. By default, the
  // group will be created with all data directories and should be identical to
  // the original one.
  ASSERT_OK(ShutdownAndRebuildTablet());
  tablet_replica_->tablet()->metadata()->ToSuperBlock(&superblock);
  DataDirGroupPB new_group = superblock.data_dir_group();
  MessageDifferencer md;
  ASSERT_TRUE(md.Compare(orig_group, new_group));
}
// After a tablet is tombstoned, its metrics entity must eventually disappear
// from /jsonmetricz once the retirement policy has run its course.
TEST_F(TabletServerTest, TestNoMetricsForTombstonedTablet) {
  // Force the metrics to be retired immediately.
  FLAGS_metrics_retirement_age_ms = 0;

  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));

  // Insert one row and check the insertion is recorded in the metrics.
  NO_FATALS(InsertTestRowsRemote(0, 1, 1));
  scoped_refptr<Counter> rows_inserted =
      METRIC_rows_inserted.Instantiate(tablet->tablet()->GetMetricEntity());
  int64_t num_rows_running = rows_inserted->value();
  ASSERT_EQ(1, num_rows_running);

  // Tombstone the tablet.
  DeleteTabletRequestPB req;
  DeleteTabletResponsePB resp;
  RpcController rpc;
  req.set_dest_uuid(mini_server_->server()->fs_manager()->uuid());
  req.set_tablet_id(kTabletId);
  req.set_delete_type(tablet::TABLET_DATA_TOMBSTONED);
  {
    SCOPED_TRACE(SecureDebugString(req));
    ASSERT_OK(admin_proxy_->DeleteTablet(req, &resp, &rpc));
    SCOPED_TRACE(SecureDebugString(resp));
    ASSERT_FALSE(resp.has_error());
  }

  // It takes three calls to /jsonmetricz for the tablet metrics to go away, based on the
  // policy in MetricRegistry::RetireOldMetrics:
  // 1. The entity's metrics are returned, but also marked for retirement.
  // 2. The entity's metrics are returned, but also retired (causing the entity to be retired).
  // 3. The metrics aren't returned -- the entity has been removed from the metrics registry.
  EasyCurl c;
  faststring buf;
  for (int i = 0; i < 3; i++) {
    ASSERT_OK(c.FetchURL(strings::Substitute("http://$0/jsonmetricz",
                                             mini_server_->bound_http_addr().ToString()),
                         &buf));
    if (i < 2) {
      ASSERT_STR_CONTAINS(buf.ToString(), "\"type\": \"tablet\"");
    } else {
      ASSERT_STR_NOT_CONTAINS(buf.ToString(), "\"type\": \"tablet\"");
    }
  }
}
// The num_rowsets_on_disk gauge must track the tablet's diskrowset count:
// zero when empty, one after a single insert + flush.
TEST_F(TabletServerTest, TestTabletNumberOfDiskRowSetsMetric) {
  scoped_refptr<TabletReplica> tablet;
  ASSERT_TRUE(mini_server_->server()->tablet_manager()->LookupTablet(kTabletId, &tablet));
  ASSERT_TRUE(tablet->tablet()->GetMetricEntity());
  // We don't care what the function is, since the metric is already instantiated.
  auto num_diskrowsets = METRIC_num_rowsets_on_disk.InstantiateFunctionGauge(
      tablet->tablet()->GetMetricEntity(), Callback<size_t(void)>());
  // No data, no diskrowsets.
  ASSERT_EQ(0, num_diskrowsets->value());
  // Insert a row and flush. There should be 1 diskrowset.
  NO_FATALS(InsertTestRowsRemote(0, 1, 1));
  ASSERT_OK(tablet->tablet()->Flush());
  ASSERT_EQ(1, num_diskrowsets->value());
}
// Test ensuring that when rowset min/max keys are stored with and read from
// the rowset metadata, the tablet server doesn't read any blocks when
// bootstrapping.
TEST_F(TabletServerTest, TestKeysInRowsetMetadataPreventStartupSeeks) {
  // Write the min/max keys to the rowset metadata. This gives us the option to
  // read from the CFile vs from the rowset metadata.
  FLAGS_rowset_metadata_store_keys = true;
  InsertTestRowsDirect(0, 100);
  ASSERT_OK(tablet_replica_->tablet()->Flush());

  // Disable the maintenance manager so we don't get any seeks from
  // maintenance operations when we restart.
  FLAGS_enable_maintenance_manager = false;

  // Restarts the server with the given key-storage mode and asserts on the
  // block manager's bytes-read counter after bootstrap.
  const auto restart_server_and_check_bytes_read = [&] (bool keys_in_rowset_meta) {
    FLAGS_rowset_metadata_store_keys = keys_in_rowset_meta;
    // Reset the replica to avoid any lingering references.
    // Restart the server and wait for the tablet to bootstrap.
    tablet_replica_.reset();
    mini_server_->Shutdown();
    ASSERT_OK(mini_server_->Restart());
    ASSERT_OK(mini_server_->WaitStarted());
    scoped_refptr<Counter> bytes_read_metric =
        METRIC_block_manager_total_bytes_read.Instantiate(
            mini_server_->server()->metric_entity());
    int64_t bm_bytes_read = bytes_read_metric->value();
    if (keys_in_rowset_meta) {
      ASSERT_EQ(0, bm_bytes_read);
    } else {
      ASSERT_LT(0, bm_bytes_read);
    }
  };

  // Test both reading and not reading the keys from the rowset metadata,
  // making sure we read bytes in the block manager only when expected (no
  // bytes should be read by the BM if storing keys in the rowset metadata).
  restart_server_and_check_bytes_read(/*keys_in_rowset_meta=*/ false);
  restart_server_and_check_bytes_read(/*keys_in_rowset_meta=*/ true);
}
// Test that each scanner can only be accessed by the user who created it.
TEST_F(TabletServerTest, TestScannerCheckMatchingUser) {
  // Create the scanners as "good-guy".
  rpc::UserCredentials user;
  user.set_real_user("good-guy");
  proxy_->set_user_credentials(user);
  InsertTestRowsDirect(0, 100);

  // Open a regular scanner as the user.
  ScanResponsePB resp;
  NO_FATALS(OpenScannerWithAllColumns(&resp));
  const string& scanner_id = resp.scanner_id();
  ASSERT_TRUE(!scanner_id.empty());

  // Now do a checksum scan as the user.
  string checksum_scanner_id;
  uint64_t checksum_val;
  {
    ChecksumRequestPB checksum_req;
    ChecksumResponsePB checksum_resp;
    RpcController rpc;
    ASSERT_OK(FillNewScanRequest(READ_LATEST, checksum_req.mutable_new_request()));
    // Set a batch size of 0 so we don't return rows and can expect the scanner
    // to remain alive.
    checksum_req.set_batch_size_bytes(0);
    ASSERT_OK(proxy_->Checksum(checksum_req, &checksum_resp, &rpc));
    SCOPED_TRACE(checksum_resp.DebugString());
    ASSERT_FALSE(checksum_resp.has_error());
    ASSERT_TRUE(checksum_resp.has_more_results());
    checksum_scanner_id = checksum_resp.scanner_id();
    checksum_val = checksum_resp.checksum();
  }

  // Any access by another principal must fail with a remote authorization
  // error.
  const auto verify_authz_error = [] (const Status& s) {
    EXPECT_TRUE(s.IsRemoteError()) << s.ToString();
    ASSERT_STR_CONTAINS(s.ToString(), "Not authorized");
  };

  // Try as both an anonymous client ("") and a different user ("bad-guy").
  for (const string& other : { "", "bad-guy" }) {
    TabletServerServiceProxy bad_proxy(
        client_messenger_, mini_server_->bound_rpc_addr(),
        mini_server_->bound_rpc_addr().host());
    if (!other.empty()) {
      rpc::UserCredentials other_user;
      other_user.set_real_user(other);
      bad_proxy.set_user_credentials(other_user);
    }
    // Other users and clients with no credentials will be bounced for scans,
    // checksum scans, and keep-alive requests.
    {
      ScanRequestPB req;
      RpcController rpc;
      req.set_scanner_id(scanner_id);
      Status s = bad_proxy.Scan(req, &resp, &rpc);
      SCOPED_TRACE(resp.DebugString());
      NO_FATALS(verify_authz_error(s));
    }
    {
      ChecksumRequestPB req;
      ContinueChecksumRequestPB* continue_req = req.mutable_continue_request();
      continue_req->set_scanner_id(checksum_scanner_id);
      continue_req->set_previous_checksum(checksum_val);
      ChecksumResponsePB resp;
      RpcController rpc;
      Status s = bad_proxy.Checksum(req, &resp, &rpc);
      SCOPED_TRACE(resp.DebugString());
      NO_FATALS(verify_authz_error(s));
    }
    for (const string& id : { scanner_id, checksum_scanner_id }) {
      ScannerKeepAliveRequestPB req;
      req.set_scanner_id(id);
      ScannerKeepAliveResponsePB resp;
      RpcController rpc;
      Status s = bad_proxy.ScannerKeepAlive(req, &resp, &rpc);
      SCOPED_TRACE(resp.DebugString());
      NO_FATALS(verify_authz_error(s));
    }
  }
}
} // namespace tserver
} // namespace kudu
|
// Copyright (c) 2013-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
// Unit tests for alert system
#include "alert.h"
#include "chain.h"
#include "chainparams.h"
#include "clientversion.h"
#include "data/alertTests.raw.h"
#include "main.h" // For PartitionCheck
#include "serialize.h"
#include "streams.h"
#include "util.h"
#include "utilstrencodings.h"
#include "test/test_coin2fly.h"
#include <fstream>
#include <boost/filesystem/operations.hpp>
#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>
//
// Sign a CAlert and append its serialization to src/test/data/alertTests.raw.
// Returns false (after logging) if signing or serialization fails.
//
bool SignAndSave(CAlert &alert)
{
    // Sign
    if(!alert.Sign())
    {
        printf("SignAndSave() : could not sign alert:\n%s", alert.ToString().c_str());
        return false;
    }

    std::string strFilePath = "src/test/data/alertTests.raw";

    // open output file and associate it with CAutoFile
    FILE *file = fopen(strFilePath.c_str(), "ab+");
    CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: Failed to open file %s", __func__, strFilePath);
    try {
        fileout << alert;
    }
    // Catch by const reference (was by non-const reference): avoids slicing
    // and follows C++ Core Guidelines E.15.
    catch (const std::exception& e) {
        return error("%s: Serialize or I/O error - %s", __func__, e.what());
    }
    fileout.fclose();
    return true;
}
//
// alertTests contains 8 alerts, generated with this code
//
// Each SignAndSave() call appends one signed alert to the fixture file, so
// the order of the mutations below determines the order of the 8 alerts
// that ReadAlerts later loads.
void GenerateAlertTests()
{
    CAlert alert;
    alert.nRelayUntil = 60;
    alert.nExpiration = 24 * 60 * 60;
    alert.nID = 1;
    alert.nCancel = 0;  // cancels previous messages up to this ID number
    alert.nMinVer = 0;  // These versions are protocol versions
    alert.nMaxVer = 999001;
    alert.nPriority = 1;
    alert.strComment = "Alert comment";
    alert.strStatusBar = "Alert 1";

    // Alert 1: applies to all sub-versions.
    SignAndSave(alert);

    // Alert 1 restricted to one, then two, Satoshi sub-versions.
    alert.setSubVer.insert(std::string("/Satoshi:0.1.0/"));
    alert.strStatusBar  = "Alert 1 for Satoshi 0.1.0";
    SignAndSave(alert);

    alert.setSubVer.insert(std::string("/Satoshi:0.2.0/"));
    alert.strStatusBar  = "Alert 1 for Satoshi 0.1.0, 0.2.0";
    SignAndSave(alert);

    // Alert 2: higher priority, cancels alert 1.
    alert.setSubVer.clear();
    ++alert.nID;
    alert.nCancel = 1;
    alert.nPriority = 100;
    alert.strStatusBar  = "Alert 2, cancels 1";
    SignAndSave(alert);

    // Same alert with a later expiration.
    alert.nExpiration += 60;
    ++alert.nID;
    SignAndSave(alert);

    // Restricted protocol-version window.
    ++alert.nID;
    alert.nMinVer = 11;
    alert.nMaxVer = 22;
    SignAndSave(alert);

    // Restricted to one sub-version again.
    ++alert.nID;
    alert.strStatusBar  = "Alert 2 for Satoshi 0.1.0";
    alert.setSubVer.insert(std::string("/Satoshi:0.1.0/"));
    SignAndSave(alert);

    // "Evil" alert used to verify that -alertnotify shell-escapes quotes.
    ++alert.nID;
    alert.nMinVer = 0;
    alert.nMaxVer = 999999;
    alert.strStatusBar  = "Evil Alert'; /bin/ls; echo '";
    alert.setSubVer.clear();
    SignAndSave(alert);
}
// Boost test fixture: deserializes the embedded alertTests.raw byte array
// into 'alerts' so each test case starts with the canned alert set.
struct ReadAlerts : public TestingSetup
{
    ReadAlerts()
    {
        std::vector<unsigned char> vch(alert_tests::alertTests, alert_tests::alertTests + sizeof(alert_tests::alertTests));
        CDataStream stream(vch, SER_DISK, CLIENT_VERSION);
        try {
            // Read alerts until the stream is exhausted; a short/truncated
            // trailing record simply ends the loop via the catch below.
            while (!stream.eof())
            {
                CAlert alert;
                stream >> alert;
                alerts.push_back(alert);
            }
        }
        catch (const std::exception&) { }
    }
    ~ReadAlerts() { }

    // Reads the given file line-by-line; used to check -alertnotify output.
    static std::vector<std::string> read_lines(boost::filesystem::path filepath)
    {
        std::vector<std::string> result;
        std::ifstream f(filepath.string().c_str());
        std::string line;
        while (std::getline(f,line))
            result.push_back(line);
        return result;
    }

    std::vector<CAlert> alerts;  // alerts decoded from the fixture data
};
BOOST_FIXTURE_TEST_SUITE(Alert_tests, ReadAlerts)
// Steps to generate alert tests:
// - update alerts in GenerateAlertTests() (optional)
// - enable code below (#if 1)
// - replace "fffffffffffffffffffffffffffffffffffffffffffffffffff" with the actual MAINNET privkey
// - recompile and run "/path/to/test_coin2fly -t Alert_test"
//
// NOTE: make sure to disable code and remove alert privkey when you're done!
//
#if 0
// Disabled by default: regenerates the alertTests.raw fixture. Enable only
// with the real mainnet alert private key substituted for the placeholder,
// and disable again before committing.
BOOST_AUTO_TEST_CASE(GenerateAlerts)
{
    SoftSetArg("-alertkey", "fffffffffffffffffffffffffffffffffffffffffffffffffff");
    GenerateAlertTests();
}
#endif
// Verifies alert signatures against the mainnet alert key, and that
// AppliesTo() honors the protocol-version window and sub-version set.
BOOST_AUTO_TEST_CASE(AlertApplies)
{
    SetMockTime(11);
    const std::vector<unsigned char>& alertKey = Params(CBaseChainParams::MAIN).AlertKey();
    // Every fixture alert must carry a valid mainnet signature.
    BOOST_FOREACH(const CAlert& alert, alerts)
    {
        BOOST_CHECK(alert.CheckSignature(alertKey));
    }
    BOOST_CHECK(alerts.size() >= 3);

    // Matches:
    BOOST_CHECK(alerts[0].AppliesTo(1, ""));
    BOOST_CHECK(alerts[0].AppliesTo(999001, ""));
    BOOST_CHECK(alerts[0].AppliesTo(1, "/Satoshi:11.11.11/"));
    BOOST_CHECK(alerts[1].AppliesTo(1, "/Satoshi:0.1.0/"));
    BOOST_CHECK(alerts[1].AppliesTo(999001, "/Satoshi:0.1.0/"));
    BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.1.0/"));
    BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.2.0/"));

    // Don't match: wrong version window, or sub-version string that isn't an
    // exact member of the alert's setSubVer.
    BOOST_CHECK(!alerts[0].AppliesTo(-1, ""));
    BOOST_CHECK(!alerts[0].AppliesTo(999002, ""));
    BOOST_CHECK(!alerts[1].AppliesTo(1, ""));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0"));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.1.0"));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0/"));
    BOOST_CHECK(!alerts[1].AppliesTo(-1, "/Satoshi:0.1.0/"));
    BOOST_CHECK(!alerts[1].AppliesTo(999002, "/Satoshi:0.1.0/"));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.2.0/"));
    BOOST_CHECK(!alerts[2].AppliesTo(1, "/Satoshi:0.3.0/"));

    SetMockTime(0);
}
// Processes all fixture alerts with -alertnotify set to append to a temp
// file, then checks which status strings were emitted (and that quotes in
// the "evil" alert were stripped before reaching the shell).
BOOST_AUTO_TEST_CASE(AlertNotify)
{
    SetMockTime(11);
    const std::vector<unsigned char>& alertKey = Params(CBaseChainParams::MAIN).AlertKey();

    boost::filesystem::path temp = GetTempPath() /
        boost::filesystem::unique_path("alertnotify-%%%%.txt");

    mapArgs["-alertnotify"] = std::string("echo %s >> ") + temp.string();

    BOOST_FOREACH(CAlert alert, alerts)
        alert.ProcessAlert(alertKey, false);

    std::vector<std::string> r = read_lines(temp);
    BOOST_CHECK_EQUAL(r.size(), 4u);

// Windows built-in echo semantics are different than posixy shells. Quotes and
// whitespace are printed literally.

#ifndef WIN32
    BOOST_CHECK_EQUAL(r[0], "Alert 1");
    BOOST_CHECK_EQUAL(r[1], "Alert 2, cancels 1");
    BOOST_CHECK_EQUAL(r[2], "Alert 2, cancels 1");
    BOOST_CHECK_EQUAL(r[3], "Evil Alert; /bin/ls; echo "); // single-quotes should be removed
#else
    BOOST_CHECK_EQUAL(r[0], "'Alert 1' ");
    BOOST_CHECK_EQUAL(r[1], "'Alert 2, cancels 1' ");
    BOOST_CHECK_EQUAL(r[2], "'Alert 2, cancels 1' ");
    BOOST_CHECK_EQUAL(r[3], "'Evil Alert; /bin/ls; echo ' ");
#endif
    boost::filesystem::remove(temp);

    SetMockTime(0);
}
// Constant-false predicate passed to PartitionCheck in place of the real
// "initial block download" callback.
static bool falseFunc()
{
    return false;
}
// Exercises PartitionCheck against a fake 100-block chain with controlled
// timestamps: normal spacing (no warning), too few blocks (warning), the
// once-per-day suppression, and too many blocks (warning again).
BOOST_AUTO_TEST_CASE(PartitionAlert)
{
    // Test PartitionCheck
    CCriticalSection csDummy;
    CBlockIndex indexDummy[100];
    CChainParams& params = Params(CBaseChainParams::MAIN);
    int64_t nPowTargetSpacing = params.GetConsensus().nPowTargetSpacing;

    // Generate fake blockchain timestamps relative to
    // an arbitrary time:
    int64_t now = 1427379054;
    SetMockTime(now);
    for (int i = 0; i < 100; i++)
    {
        indexDummy[i].phashBlock = NULL;
        if (i == 0) indexDummy[i].pprev = NULL;
        else indexDummy[i].pprev = &indexDummy[i-1];
        indexDummy[i].nHeight = i;
        indexDummy[i].nTime = now - (100-i)*nPowTargetSpacing;
        // Other members don't matter, the partition check code doesn't
        // use them
    }

    strMiscWarning = "";

    // Test 1: chain with blocks every nPowTargetSpacing seconds,
    // as normal, no worries:
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK_MESSAGE(strMiscWarning.empty(), strMiscWarning);

    // Test 2: go 52.5 minutes without a block, expect a warning:
    now += (3*60*60+30*60)/4; // we have 4x faster blocks
    SetMockTime(now);
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(!strMiscWarning.empty());
    BOOST_TEST_MESSAGE(std::string("Got alert text: ")+strMiscWarning);
    strMiscWarning = "";

    // Test 3: test the "partition alerts only go off once per day"
    // code:
    now += 60*10;
    SetMockTime(now);
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(strMiscWarning.empty());

    // Test 4: get 2.5 times as many blocks as expected:
    now += 60*60*24; // Pretend it is a day later
    SetMockTime(now);
    int64_t quickSpacing = nPowTargetSpacing*2/5;
    for (int i = 0; i < 100; i++) // Tweak chain timestamps:
        indexDummy[i].nTime = now - (100-i)*quickSpacing;
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(!strMiscWarning.empty());
    BOOST_TEST_MESSAGE(std::string("Got alert text: ")+strMiscWarning);
    strMiscWarning = "";

    SetMockTime(0);
}
BOOST_AUTO_TEST_SUITE_END()
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file has been auto-generated by code_generator_v8.py. DO NOT MODIFY!
#include "config.h"
#include "V8TestException.h"
#include "bindings/core/v8/ExceptionState.h"
#include "bindings/core/v8/V8DOMConfiguration.h"
#include "bindings/core/v8/V8HiddenValue.h"
#include "bindings/core/v8/V8ObjectConstructor.h"
#include "core/dom/ContextFeatures.h"
#include "core/dom/Document.h"
#include "platform/RuntimeEnabledFeatures.h"
#include "platform/TraceEvent.h"
#include "wtf/GetPtr.h"
#include "wtf/RefPtr.h"
namespace blink {
// Auto-generated wrapper metadata for TestException: wires the V8 template
// factory, ref/deref hooks, trace hook, and wrapper classification used by
// the bindings layer.
const WrapperTypeInfo V8TestException::wrapperTypeInfo = { gin::kEmbedderBlink, V8TestException::domTemplate, V8TestException::refObject, V8TestException::derefObject, V8TestException::trace, 0, 0, V8TestException::installConditionallyEnabledMethods, V8TestException::installConditionallyEnabledProperties, 0, WrapperTypeInfo::WrapperTypeExceptionPrototype, WrapperTypeInfo::ObjectClassId, WrapperTypeInfo::NotInheritFromEventTarget, WrapperTypeInfo::Independent, WrapperTypeInfo::RefCountedObject };

// This static member must be declared by DEFINE_WRAPPERTYPEINFO in TestException.h.
// For details, see the comment of DEFINE_WRAPPERTYPEINFO in
// bindings/core/v8/ScriptWrappable.h.
const WrapperTypeInfo& TestException::s_wrapperTypeInfo = V8TestException::wrapperTypeInfo;
namespace TestExceptionV8Internal {

// Getter for the readonlyUnsignedShortAttribute attribute: unwraps the C++
// impl from the holder and returns the value as an unsigned.
static void readonlyUnsignedShortAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
    v8::Local<v8::Object> holder = info.Holder();
    TestException* impl = V8TestException::toImpl(holder);
    v8SetReturnValueUnsigned(info, impl->readonlyUnsignedShortAttribute());
}

// V8 callback wrapper: brackets the getter with trace sampling-state changes.
static void readonlyUnsignedShortAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("blink", "DOMGetter");
    TestExceptionV8Internal::readonlyUnsignedShortAttributeAttributeGetter(info);
    TRACE_EVENT_SET_SAMPLING_STATE("v8", "V8Execution");
}

// Getter for the readonlyStringAttribute attribute.
static void readonlyStringAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
    v8::Local<v8::Object> holder = info.Holder();
    TestException* impl = V8TestException::toImpl(holder);
    v8SetReturnValueString(info, impl->readonlyStringAttribute(), info.GetIsolate());
}

// V8 callback wrapper for the string attribute getter.
static void readonlyStringAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("blink", "DOMGetter");
    TestExceptionV8Internal::readonlyStringAttributeAttributeGetter(info);
    TRACE_EVENT_SET_SAMPLING_STATE("v8", "V8Execution");
}

// Implementation of the interface's toString() method.
static void toStringMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    TestException* impl = V8TestException::toImpl(info.Holder());
    v8SetReturnValueString(info, impl->toString(), info.GetIsolate());
}

// V8 callback wrapper for toString().
static void toStringMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("blink", "DOMMethod");
    TestExceptionV8Internal::toStringMethod(info);
    TRACE_EVENT_SET_SAMPLING_STATE("v8", "V8Execution");
}

} // namespace TestExceptionV8Internal
// Attribute table: the two read-only getters (no setters), installed on
// instances and exposed to all scripts.
static const V8DOMConfiguration::AttributeConfiguration V8TestExceptionAttributes[] = {
    {"readonlyUnsignedShortAttribute", TestExceptionV8Internal::readonlyUnsignedShortAttributeAttributeGetterCallback, 0, 0, 0, 0, static_cast<v8::AccessControl>(v8::DEFAULT), static_cast<v8::PropertyAttribute>(v8::None), V8DOMConfiguration::ExposedToAllScripts, V8DOMConfiguration::OnInstance},
    {"readonlyStringAttribute", TestExceptionV8Internal::readonlyStringAttributeAttributeGetterCallback, 0, 0, 0, 0, static_cast<v8::AccessControl>(v8::DEFAULT), static_cast<v8::PropertyAttribute>(v8::None), V8DOMConfiguration::ExposedToAllScripts, V8DOMConfiguration::OnInstance},
};
// Configures the TestException function template: DOM class metadata,
// attributes, the UNSIGNED_SHORT_CONSTANT constant, and the toString method.
static void installV8TestExceptionTemplate(v8::Local<v8::FunctionTemplate> functionTemplate, v8::Isolate* isolate)
{
    functionTemplate->ReadOnlyPrototype();

    v8::Local<v8::Signature> defaultSignature;
    defaultSignature = V8DOMConfiguration::installDOMClassTemplate(isolate, functionTemplate, "TestException", v8::Local<v8::FunctionTemplate>(), V8TestException::internalFieldCount,
        V8TestExceptionAttributes, WTF_ARRAY_LENGTH(V8TestExceptionAttributes),
        0, 0,
        0, 0);
    v8::Local<v8::ObjectTemplate> instanceTemplate = functionTemplate->InstanceTemplate();
    ALLOW_UNUSED_LOCAL(instanceTemplate);
    v8::Local<v8::ObjectTemplate> prototypeTemplate = functionTemplate->PrototypeTemplate();
    ALLOW_UNUSED_LOCAL(prototypeTemplate);

    // Constants exposed on both the interface object and the prototype.
    static const V8DOMConfiguration::ConstantConfiguration V8TestExceptionConstants[] = {
        {"UNSIGNED_SHORT_CONSTANT", 1, 0, 0, V8DOMConfiguration::ConstantTypeUnsignedShort},
    };
    V8DOMConfiguration::installConstants(isolate, functionTemplate, prototypeTemplate, V8TestExceptionConstants, WTF_ARRAY_LENGTH(V8TestExceptionConstants));

    // toString goes on the prototype, non-enumerable and non-deletable.
    const V8DOMConfiguration::MethodConfiguration toStringMethodConfiguration = {
        "toString", TestExceptionV8Internal::toStringMethodCallback, 0, 0, V8DOMConfiguration::ExposedToAllScripts,
    };
    V8DOMConfiguration::installMethod(prototypeTemplate, defaultSignature, static_cast<v8::PropertyAttribute>(v8::DontDelete | v8::DontEnum), toStringMethodConfiguration, isolate);

    // Custom toString template
    functionTemplate->Set(v8AtomicString(isolate, "toString"), V8PerIsolateData::from(isolate)->toStringTemplate());
}
// Returns the (cached, per-isolate) interface template for TestException.
v8::Local<v8::FunctionTemplate> V8TestException::domTemplate(v8::Isolate* isolate)
{
    return V8DOMConfiguration::domClassTemplate(isolate, const_cast<WrapperTypeInfo*>(&wrapperTypeInfo), installV8TestExceptionTemplate);
}
// True when v8Value is a wrapper for this interface (exact type match).
bool V8TestException::hasInstance(v8::Local<v8::Value> v8Value, v8::Isolate* isolate)
{
    return V8PerIsolateData::from(isolate)->hasInstance(&wrapperTypeInfo, v8Value);
}
// Walks the prototype chain of v8Value looking for a wrapper of this type;
// returns an empty handle if none is found.
v8::Local<v8::Object> V8TestException::findInstanceInPrototypeChain(v8::Local<v8::Value> v8Value, v8::Isolate* isolate)
{
    return V8PerIsolateData::from(isolate)->findInstanceInPrototypeChain(&wrapperTypeInfo, v8Value);
}
// Type-checked unwrap: returns the C++ impl object, or null when the value
// is not a TestException wrapper.
TestException* V8TestException::toImplWithTypeCheck(v8::Isolate* isolate, v8::Local<v8::Value> value)
{
    return hasInstance(value, isolate) ? toImpl(v8::Local<v8::Object>::Cast(value)) : 0;
}
// Ref-count adjustment hooks used by the wrapper lifetime machinery.
void V8TestException::refObject(ScriptWrappable* scriptWrappable)
{
    scriptWrappable->toImpl<TestException>()->ref();
}
void V8TestException::derefObject(ScriptWrappable* scriptWrappable)
{
    scriptWrappable->toImpl<TestException>()->deref();
}
} // namespace blink
|
#include <iostream>

#include "safe_vector.hpp"
#include "../shared.hpp"
int main() {
LM::safe_vector v;
std::cout << v.getat(2).value_or(69) << "\n";
}
|
// (C) Copyright 2016-2021 Xilinx, Inc.
// All Rights Reserved.
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <unistd.h>
#include <iostream>
#include <string>
#include <vector>
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#define PATH_MAX 1000
using std::string;
// Wrapper entry point: forwards every command-line argument to the Merlin
// backend Perl driver (merlin_backend_top.pl) through `mars_perl`.
//
// Arguments containing a space are wrapped in double quotes so the shell
// keeps them as a single token.
// NOTE(review): arguments that themselves contain double quotes or other
// shell metacharacters are not escaped — confirm callers never pass such
// values before hardening is skipped.
//
// Returns EXIT_SUCCESS when the spawned command exits with status 0,
// EXIT_FAILURE otherwise (including when the shell itself could not run).
//
// Changes vs. original: removed the unused local `str_prj`, moved the loop
// index into the for statement, and qualified std::string explicitly.
int main(int argc, char **argv) {
  std::string str_args;
  for (int i = 1; i < argc; i++) {
    if (strstr(argv[i], " ") != nullptr) {
      str_args += "\"" + std::string(argv[i]) + "\" ";
    } else {
      str_args += std::string(argv[i]) + " ";
    }
  }
  // $MERLIN_COMPILER_HOME is expanded by the shell that system() spawns.
  std::string cmd = "mars_perl "
                    "$MERLIN_COMPILER_HOME/mars-gen/scripts/merlin_flow/"
                    "merlin_backend_top.pl " +
                    str_args;
  int ret = system(cmd.c_str());
  if (ret == 0) {
    return EXIT_SUCCESS;
  }
  return EXIT_FAILURE;
}
|
#include "ASTBlockStmt.h"
#include "ASTVisitor.h"
#include "ASTinternal.h"
// Returns the block's statements as raw, non-owning pointers.
// rawRefs() unwraps the owning smart pointers stored in STMTS; ownership
// stays with this node.
std::vector<ASTStmt*> ASTBlockStmt::getStmts() const {
  return rawRefs(STMTS);
}
// Visitor entry point for a block statement. The children are traversed
// only when visit() asks for it (returns true); endVisit() is always
// invoked afterwards, regardless.
void ASTBlockStmt::accept(ASTVisitor * visitor) {
  const bool descend = visitor->visit(this);
  if (descend) {
    for (ASTStmt *stmt : getStmts()) {
      stmt->accept(visitor);
    }
  }
  visitor->endVisit(this);
}
std::ostream& ASTBlockStmt::print(std::ostream &out) const {
out << "{ ";
for (auto &s : getStmts()) {
out << *s << " ";
}
out << "}";
return out;
}
// Deep-copies this block: every child statement is instantiated
// recursively and ownership of the clones is handed to a brand-new
// ASTBlockStmt, which is returned to the caller (caller owns it).
ASTNode* ASTBlockStmt::instantiate() const {
  std::vector<std::unique_ptr<ASTStmt>> clones;
  clones.reserve(this->STMTS.size());
  for (auto& child : this->STMTS) {
    auto *copy = static_cast<ASTStmt*>(child->instantiate());
    clones.push_back(std::unique_ptr<ASTStmt>(copy));
  }
  return new ASTBlockStmt(std::move(clones));
}
|
/**********************************************************************
Audacity: A Digital Audio Editor
ExportFLAC.cpp
Frederik M.J.V
This program is distributed under the GNU General Public License, version 2.
A copy of this license is included with this source.
Based on ExportOGG.cpp by:
Joshua Haberman
Portions from vorbis-tools, copyright 2000-2002 Michael Smith
<msmith@labyrinth.net.au>; Vorbize, Kenneth Arnold <kcarnold@yahoo.com>;
and libvorbis examples, Monty <monty@xiph.org>
**********************************************************************/
#ifdef USE_LIBFLAC
#include "Export.h"
#include <wx/ffile.h>
#include <wx/log.h>
#include "FLAC++/encoder.h"
#include "../float_cast.h"
#include "../ProjectSettings.h"
#include "../Mix.h"
#include "../Prefs.h"
#include "../ShuttleGui.h"
#include "../Tags.h"
#include "../Track.h"
#include "../widgets/AudacityMessageBox.h"
#include "../widgets/ProgressDialog.h"
#include "../wxFileNameWrapper.h"
//----------------------------------------------------------------------------
// ExportFLACOptions Class
//----------------------------------------------------------------------------
// Preferences panel for the FLAC exporter: exposes the compression-level
// and bit-depth choices, which are backed by the FLACLevel / FLACBitDepth
// settings defined below.
class ExportFLACOptions final : public wxPanelWrapper
{
public:
   ExportFLACOptions(wxWindow *parent, int format);
   virtual ~ExportFLACOptions();

   // Builds the controls, or moves data between them and the prefs store,
   // depending on the ShuttleGui mode it is driven with.
   void PopulateOrExchange(ShuttleGui & S);
   bool TransferDataToWindow() override;
   bool TransferDataFromWindow() override;
};
///
///
///
/// Creates the options panel and loads current preference values into it.
ExportFLACOptions::ExportFLACOptions(wxWindow *parent, int WXUNUSED(format))
:  wxPanelWrapper(parent, wxID_ANY)
{
   ShuttleGui S(this, eIsCreatingFromPrefs);
   PopulateOrExchange(S);
   TransferDataToWindow();
}

///
/// Persists any edited values back to preferences on destruction.
ExportFLACOptions::~ExportFLACOptions()
{
   TransferDataFromWindow();
}
// Persistent choice setting for the output sample width; stored under
// /FileFormats/FLACBitDepth as "16" or "24".
ChoiceSetting FLACBitDepth{
   wxT("/FileFormats/FLACBitDepth"),
   {
      ByColumns,
      { XO("16 bit") , XO("24 bit") , },
      { wxT("16") , wxT("24") , }
   },
   0 // default: "16"
};

// Persistent choice setting for the FLAC compression level (0-8), mirroring
// the flac command-line tool's levels; stored under /FileFormats/FLACLevel.
ChoiceSetting FLACLevel{
   wxT("/FileFormats/FLACLevel"),
   {
      ByColumns,
      {
         XO("0 (fastest)") ,
         XO("1") ,
         XO("2") ,
         XO("3") ,
         XO("4") ,
         XO("5") ,
         XO("6") ,
         XO("7") ,
         XO("8 (best)") ,
      },
      {
         wxT("0") ,
         wxT("1") ,
         wxT("2") ,
         wxT("3") ,
         wxT("4") ,
         wxT("5") ,
         wxT("6") ,
         wxT("7") ,
         wxT("8") ,
      }
   },
   5 // default: "5"
};
///
///
///
/// Lays out (or shuttles data through) the two choice controls; the same
/// code path serves both creation and prefs-saving, selected by the
/// ShuttleGui mode.
void ExportFLACOptions::PopulateOrExchange(ShuttleGui & S)
{
   S.StartVerticalLay();
   {
      S.StartHorizontalLay(wxCENTER);
      {
         S.StartMultiColumn(2, wxCENTER);
         {
            S.TieChoice( XXO("Level:"), FLACLevel);
            S.TieChoice( XXO("Bit depth:"), FLACBitDepth);
         }
         S.EndMultiColumn();
      }
      S.EndHorizontalLay();
   }
   S.EndVerticalLay();
   return;
}

///
/// Nothing to do here: the constructor already populated the controls
/// from preferences via eIsCreatingFromPrefs.
bool ExportFLACOptions::TransferDataToWindow()
{
   return true;
}

///
/// Re-runs PopulateOrExchange in save mode and flushes the prefs store.
bool ExportFLACOptions::TransferDataFromWindow()
{
   ShuttleGui S(this, eIsSavingToPrefs);
   PopulateOrExchange(S);
   gPrefs->Flush();
   return true;
}
//----------------------------------------------------------------------------
// ExportFLAC Class
//----------------------------------------------------------------------------
#define SAMPLES_PER_RUN 8192u
/* FLACPP_API_VERSION_CURRENT is 6 for libFLAC++ from flac-1.1.3 (see <FLAC++/export.h>) */
#if !defined FLACPP_API_VERSION_CURRENT || FLACPP_API_VERSION_CURRENT < 6
#define LEGACY_FLAC
#else
#undef LEGACY_FLAC
#endif
// Encoder parameter presets indexed by compression level 0-8; these mirror
// the flac command-line tool's -0 .. -8 presets (see the Export() body,
// which applies flacLevels[levelPref] to the encoder).
static struct
{
   bool do_exhaustive_model_search;
   bool do_escape_coding;
   bool do_mid_side_stereo;
   bool loose_mid_side_stereo;
   unsigned qlp_coeff_precision;
   unsigned min_residual_partition_order;
   unsigned max_residual_partition_order;
   unsigned rice_parameter_search_dist;
   unsigned max_lpc_order;
} flacLevels[] = {
   { false, false, false, false, 0, 2, 2, 0, 0 },
   { false, false, true, true, 0, 2, 2, 0, 0 },
   { false, false, true, false, 0, 0, 3, 0, 0 },
   { false, false, false, false, 0, 3, 3, 0, 6 },
   { false, false, true, true, 0, 3, 3, 0, 8 },
   { false, false, true, false, 0, 3, 3, 0, 8 },
   { false, false, true, false, 0, 0, 4, 0, 8 },
   { true, false, true, false, 0, 0, 6, 0, 8 },
   { true, false, true, false, 0, 0, 6, 0, 12 },
};

// RAII ownership for FLAC__StreamMetadata allocated by the libFLAC C API.
struct FLAC__StreamMetadataDeleter {
   void operator () (FLAC__StreamMetadata *p) const
   { if (p) ::FLAC__metadata_object_delete(p); }
};
using FLAC__StreamMetadataHandle = std::unique_ptr<
   FLAC__StreamMetadata, FLAC__StreamMetadataDeleter
>;
// FLAC export plugin: registers one format ("FLAC", extension .flac) and
// drives libFLAC++ to encode the mixed-down project audio.
class ExportFLAC final : public ExportPlugin
{
public:
   ExportFLAC();

   // Required
   void OptionsCreate(ShuttleGui &S, int format) override;

   // Performs the export; see the definition for the full flow.
   ProgressResult Export(AudacityProject *project,
               std::unique_ptr<ProgressDialog> &pDialog,
               unsigned channels,
               const wxFileNameWrapper &fName,
               bool selectedOnly,
               double t0,
               double t1,
               MixerSpec *mixerSpec = NULL,
               const Tags *metadata = NULL,
               int subformat = 0) override;

private:
   // Fills mMetadata from the project's tags; false on failure.
   bool GetMetadata(AudacityProject *project, const Tags *tags);

   // Should this be a stack variable instead in Export?
   FLAC__StreamMetadataHandle mMetadata;
};

//----------------------------------------------------------------------------

// Registers the single FLAC format with its capabilities.
ExportFLAC::ExportFLAC()
:  ExportPlugin()
{
   AddFormat();
   SetFormat(wxT("FLAC"),0);
   AddExtension(wxT("flac"),0);
   SetMaxChannels(FLAC__MAX_CHANNELS,0);
   SetCanMetaData(true,0);
   SetDescription(XO("FLAC Files"),0);
}
// Exports the (optionally selection-restricted) project audio to a FLAC
// file. Configures the encoder from the FLACLevel/FLACBitDepth settings,
// attaches Vorbis-comment metadata, then mixes and encodes the audio in
// SAMPLES_PER_RUN chunks while updating the progress dialog.
// Returns Success/Stopped on completion, Cancelled or Failed otherwise.
ProgressResult ExportFLAC::Export(AudacityProject *project,
                        std::unique_ptr<ProgressDialog> &pDialog,
                        unsigned numChannels,
                        const wxFileNameWrapper &fName,
                        bool selectionOnly,
                        double t0,
                        double t1,
                        MixerSpec *mixerSpec,
                        const Tags *metadata,
                        int WXUNUSED(subformat))
{
   const auto &settings = ProjectSettings::Get( *project );
   double rate = settings.GetRate();
   const auto &tracks = TrackList::Get( *project );

   wxLogNull logNo;            // temporarily disable wxWidgets error messages
   auto updateResult = ProgressResult::Success;

   long levelPref;
   FLACLevel.Read().ToLong( &levelPref );

   auto bitDepthPref = FLACBitDepth.Read();

   FLAC::Encoder::File encoder;

   bool success = true;
   success = success &&
#ifdef LEGACY_FLAC
   // Old API: the encoder opens the file itself from a narrow filename.
   encoder.set_filename(OSOUTPUT(fName)) &&
#endif
   encoder.set_channels(numChannels) &&
   encoder.set_sample_rate(lrint(rate));

   // See note in GetMetadata() about a bug in libflac++ 1.1.2
   if (success && !GetMetadata(project, metadata)) {
      // TODO: more precise message
      ShowExportErrorDialog("FLAC:283");
      return ProgressResult::Cancelled;
   }

   if (success && mMetadata) {
      // set_metadata expects an array of pointers to metadata and a size.
      // The size is 1.
      FLAC__StreamMetadata *p = mMetadata.get();
      success = encoder.set_metadata(&p, 1);
   }

   auto cleanup1 = finally( [&] {
      mMetadata.reset(); // need this?
   } );

   sampleFormat format;
   if (bitDepthPref == wxT("24")) {
      format = int24Sample;
      success = success && encoder.set_bits_per_sample(24);
   } else { //convert float to 16 bits
      format = int16Sample;
      success = success && encoder.set_bits_per_sample(16);
   }

   // Duplicate the flac command line compression levels
   if (levelPref < 0 || levelPref > 8) {
      levelPref = 5;
   }
   success = success &&
      encoder.set_do_exhaustive_model_search(flacLevels[levelPref].do_exhaustive_model_search) &&
      encoder.set_do_escape_coding(flacLevels[levelPref].do_escape_coding);

   // Mid/side stereo only makes sense for exactly two channels.
   if (numChannels != 2) {
      success = success &&
         encoder.set_do_mid_side_stereo(false) &&
         encoder.set_loose_mid_side_stereo(false);
   }
   else {
      success = success &&
         encoder.set_do_mid_side_stereo(flacLevels[levelPref].do_mid_side_stereo) &&
         encoder.set_loose_mid_side_stereo(flacLevels[levelPref].loose_mid_side_stereo);
   }

   success = success &&
      encoder.set_qlp_coeff_precision(flacLevels[levelPref].qlp_coeff_precision) &&
      encoder.set_min_residual_partition_order(flacLevels[levelPref].min_residual_partition_order) &&
      encoder.set_max_residual_partition_order(flacLevels[levelPref].max_residual_partition_order) &&
      encoder.set_rice_parameter_search_dist(flacLevels[levelPref].rice_parameter_search_dist) &&
      encoder.set_max_lpc_order(flacLevels[levelPref].max_lpc_order);

   if (!success) {
      // TODO: more precise message
      ShowExportErrorDialog("FLAC:336");
      return ProgressResult::Cancelled;
   }

#ifdef LEGACY_FLAC
   encoder.init();
#else
   wxFFile f;     // will be closed when it goes out of scope
   const auto path = fName.GetFullPath();
   if (!f.Open(path, wxT("w+b"))) {
      AudacityMessageBox( XO("FLAC export couldn't open %s").Format( path ) );
      return ProgressResult::Cancelled;
   }

   // Even though there is an init() method that takes a filename, use the one that
   // takes a file handle because wxWidgets can open a file with a Unicode name and
   // libflac can't (under Windows).
   int status = encoder.init(f.fp());
   if (status != FLAC__STREAM_ENCODER_INIT_STATUS_OK) {
      AudacityMessageBox(
         XO("FLAC encoder failed to initialize\nStatus: %d")
            .Format( status ) );
      return ProgressResult::Cancelled;
   }
#endif

   // The encoder has copied what it needs; drop our metadata handle.
   mMetadata.reset();

   // On error/cancel paths, finish the encoder (and detach the wxFFile so
   // libflac — which owns the handle after init — can close it).
   auto cleanup2 = finally( [&] {
      if (!(updateResult == ProgressResult::Success ||
            updateResult == ProgressResult::Stopped)) {
#ifndef LEGACY_FLAC
         f.Detach(); // libflac closes the file
#endif
         encoder.finish();
      }
   } );

   auto mixer = CreateMixer(tracks, selectionOnly,
                            t0, t1,
                            numChannels, SAMPLES_PER_RUN, false,
                            rate, format, mixerSpec);

   // Per-channel staging buffers converting mixer output to FLAC__int32.
   ArraysOf<FLAC__int32> tmpsmplbuf{ numChannels, SAMPLES_PER_RUN, true };

   InitProgress( pDialog, fName,
      selectionOnly
         ? XO("Exporting the selected audio as FLAC")
         : XO("Exporting the audio as FLAC") );
   auto &progress = *pDialog;

   // Main encode loop: mix a chunk, widen samples to 32-bit, feed libFLAC.
   while (updateResult == ProgressResult::Success) {
      auto samplesThisRun = mixer->Process(SAMPLES_PER_RUN);
      if (samplesThisRun == 0) { //stop encoding
         break;
      }
      else {
         for (size_t i = 0; i < numChannels; i++) {
            samplePtr mixed = mixer->GetBuffer(i);
            if (format == int24Sample) {
               for (decltype(samplesThisRun) j = 0; j < samplesThisRun; j++) {
                  tmpsmplbuf[i][j] = ((int *)mixed)[j];
               }
            }
            else {
               for (decltype(samplesThisRun) j = 0; j < samplesThisRun; j++) {
                  tmpsmplbuf[i][j] = ((short *)mixed)[j];
               }
            }
         }
         if (! encoder.process(
               reinterpret_cast<FLAC__int32**>( tmpsmplbuf.get() ),
               samplesThisRun) ) {
            // TODO: more precise message
            ShowDiskFullExportErrorDialog(fName);
            updateResult = ProgressResult::Cancelled;
            break;
         }
         if (updateResult == ProgressResult::Success)
            updateResult =
               progress.Update(mixer->MixGetCurrentTime() - t0, t1 - t0);
      }
   }

   if (updateResult == ProgressResult::Success ||
       updateResult == ProgressResult::Stopped) {
#ifndef LEGACY_FLAC
      f.Detach(); // libflac closes the file
#endif
      if (!encoder.finish())
         // Do not reassign updateResult, see cleanup2
         return ProgressResult::Failed;
#ifdef LEGACY_FLAC
      if (!f.Flush() || !f.Close())
         return ProgressResult::Failed;
#endif
   }

   return updateResult;
}
// Creates the per-format options panel shown in the export dialog.
void ExportFLAC::OptionsCreate(ShuttleGui &S, int format)
{
   S.AddWindow( safenew ExportFLACOptions{ S.GetParent(), format } );
}
// LL: There's a bug in libflac++ 1.1.2 that prevents us from using
// FLAC::Metadata::VorbisComment directly. The set_metadata()
// function allocates an array on the stack, but the base library
// expects that array to be valid until the stream is initialized.
//
// This has been fixed in 1.1.4.
// Builds a Vorbis-comment metadata block (stored in mMetadata) from the
// project's tags, or from the supplied tags when non-null.
// TAG_YEAR is emitted as DATE; TAG_COMMENTS is emitted twice, as COMMENT
// and DESCRIPTION, to satisfy different players.
// Returns false when the metadata object cannot be allocated or a comment
// cannot be appended.
//
// Fix vs. original: FLAC__metadata_object_new() returns NULL on allocation
// failure, and the loop below would have dereferenced that NULL handle.
bool ExportFLAC::GetMetadata(AudacityProject *project, const Tags *tags)
{
   // Retrieve tags if needed
   if (tags == NULL)
      tags = &Tags::Get( *project );

   mMetadata.reset(::FLAC__metadata_object_new(FLAC__METADATA_TYPE_VORBIS_COMMENT));
   if (!mMetadata)
      return false;

   wxString n;
   for (const auto &pair : tags->GetRange()) {
      n = pair.first;
      const auto &v = pair.second;
      if (n == TAG_YEAR) {
         n = wxT("DATE");
      }
      else if (n == TAG_COMMENTS) {
         // Some apps like Foobar use COMMENT and some like Windows use DESCRIPTION,
         // so add both to try and make everyone happy.
         n = wxT("COMMENT");
         FLAC::Metadata::VorbisComment::Entry entry(n.mb_str(wxConvUTF8),
                                                    v.mb_str(wxConvUTF8));
         if (! ::FLAC__metadata_object_vorbiscomment_append_comment(mMetadata.get(),
                                                                    entry.get_entry(),
                                                                    true) ) {
            return false;
         }
         // Fall through and append the same value again as DESCRIPTION.
         n = wxT("DESCRIPTION");
      }
      FLAC::Metadata::VorbisComment::Entry entry(n.mb_str(wxConvUTF8),
                                                 v.mb_str(wxConvUTF8));
      if (! ::FLAC__metadata_object_vorbiscomment_append_comment(mMetadata.get(),
                                                                 entry.get_entry(),
                                                                 true) ) {
         return false;
      }
   }

   return true;
}
// Static registration: makes the FLAC exporter available to the Exporter
// at program startup.
static Exporter::RegisteredExportPlugin sRegisteredPlugin{ "FLAC",
   []{ return std::make_unique< ExportFLAC >(); }
};
#endif // USE_LIBFLAC
|
/*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <alibabacloud/edas/model/DeleteK8sIngressRuleRequest.h>
using AlibabaCloud::Edas::Model::DeleteK8sIngressRuleRequest;
// Constructs a DELETE request against the EDAS k8s-ingress endpoint
// (ROA-style API, product "edas", version 2017-08-01).
DeleteK8sIngressRuleRequest::DeleteK8sIngressRuleRequest() :
   RoaServiceRequest("edas", "2017-08-01")
{
   setResourcePath("/pop/v5/k8s/acs/k8s_ingress");
   setMethod(HttpRequest::Method::Delete);
}

DeleteK8sIngressRuleRequest::~DeleteK8sIngressRuleRequest()
{}
// Accessors below follow the SDK pattern: each setter stores the value in
// the member and registers it as a query parameter on the request.

std::string DeleteK8sIngressRuleRequest::get_Namespace()const
{
   return _namespace_;
}

// NOTE(review): the parameter key "_Namespace" (leading underscore) differs
// from the plain keys used by setName()/setClusterId(); presumably this is
// the generator's rendering of the reserved word "namespace" — confirm
// against the EDAS API reference before changing.
void DeleteK8sIngressRuleRequest::set_Namespace(const std::string& _namespace)
{
   _namespace_ = _namespace;
   setParameter("_Namespace", _namespace);
}

std::string DeleteK8sIngressRuleRequest::getName()const
{
   return name_;
}

void DeleteK8sIngressRuleRequest::setName(const std::string& name)
{
   name_ = name;
   setParameter("Name", name);
}

std::string DeleteK8sIngressRuleRequest::getClusterId()const
{
   return clusterId_;
}

void DeleteK8sIngressRuleRequest::setClusterId(const std::string& clusterId)
{
   clusterId_ = clusterId;
   setParameter("ClusterId", clusterId);
}
|
//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the debug information generation while generating code.
//
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
using namespace clang::CodeGen;
// Returns the type's alignment in bits, but only when the AST says the
// alignment was explicitly required (e.g. by an attribute); 0 otherwise so
// DIBuilder can omit the field.
static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) {
  auto TI = Ctx.getTypeInfo(Ty);
  return TI.AlignIsRequired ? TI.Align : 0;
}

// QualType convenience overload of the above.
static uint32_t getTypeAlignIfRequired(QualType Ty, const ASTContext &Ctx) {
  return getTypeAlignIfRequired(Ty.getTypePtr(), Ctx);
}

// Alignment of a declaration, only when an AlignedAttr is present; 0 means
// "use the default" to the debug-info consumers.
static uint32_t getDeclAlignIfRequired(const Decl *D, const ASTContext &Ctx) {
  return D->hasAttr<AlignedAttr>() ? D->getMaxAlignment() : 0;
}
// Sets up debug-info emission for the module: captures the requested debug
// level, copies the -fdebug-prefix-map entries, and creates the compile
// unit immediately so every subsequent entity has a parent scope.
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
    : CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
      DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
      DBuilder(CGM.getModule()) {
  for (const auto &KV : CGM.getCodeGenOpts().DebugPrefixMap)
    DebugPrefixMap[KV.first] = KV.second;
  CreateCompileUnit();
}

CGDebugInfo::~CGDebugInfo() {
  // Every pushed lexical block must have been popped by now.
  assert(LexicalBlockStack.empty() &&
         "Region stack mismatch, stack not empty!");
}
// RAII helper: temporarily overrides the IRBuilder's current debug
// location, restoring the previous one in the destructor.
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
                                       SourceLocation TemporaryLocation)
    : CGF(&CGF) {
  init(TemporaryLocation);
}

ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
                                       bool DefaultToEmpty,
                                       SourceLocation TemporaryLocation)
    : CGF(&CGF) {
  init(TemporaryLocation, DefaultToEmpty);
}

// Shared constructor body. When debug info is disabled, CGF is nulled so
// the destructor becomes a no-op. Otherwise the current location is saved
// and replaced by (in priority order): the valid TemporaryLocation, an
// empty location (if DefaultToEmpty), or an artificial line-0 location in
// the innermost lexical scope.
void ApplyDebugLocation::init(SourceLocation TemporaryLocation,
                              bool DefaultToEmpty) {
  auto *DI = CGF->getDebugInfo();
  if (!DI) {
    CGF = nullptr;
    return;
  }

  OriginalLocation = CGF->Builder.getCurrentDebugLocation();

  // Keep the existing location when per-expression locations are disabled.
  if (OriginalLocation && !DI->CGM.getExpressionLocationsEnabled())
    return;

  if (TemporaryLocation.isValid()) {
    DI->EmitLocation(CGF->Builder, TemporaryLocation);
    return;
  }

  if (DefaultToEmpty) {
    CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    return;
  }

  // Construct a location that has a valid scope, but no line info.
  assert(!DI->LexicalBlockStack.empty());
  CGF->Builder.SetCurrentDebugLocation(
      llvm::DILocation::get(DI->LexicalBlockStack.back()->getContext(), 0, 0,
                            DI->LexicalBlockStack.back(), DI->getInlinedAt()));
}

ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E)
    : CGF(&CGF) {
  init(E->getExprLoc());
}

// Variant taking an already-built DebugLoc; only applies it when non-null.
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc)
    : CGF(&CGF) {
  if (!CGF.getDebugInfo()) {
    this->CGF = nullptr;
    return;
  }
  OriginalLocation = CGF.Builder.getCurrentDebugLocation();
  if (Loc)
    CGF.Builder.SetCurrentDebugLocation(std::move(Loc));
}

ApplyDebugLocation::~ApplyDebugLocation() {
  // Query CGF so the location isn't overwritten when location updates are
  // temporarily disabled (for C++ default function arguments)
  if (CGF)
    CGF->Builder.SetCurrentDebugLocation(std::move(OriginalLocation));
}
// RAII helper for emitting code "inlined" into the current function on
// behalf of InlinedFn: saves the current location, starts an inline-scope
// in CGDebugInfo, and undoes both on destruction.
ApplyInlineDebugLocation::ApplyInlineDebugLocation(CodeGenFunction &CGF,
                                                   GlobalDecl InlinedFn)
    : CGF(&CGF) {
  if (!CGF.getDebugInfo()) {
    this->CGF = nullptr;
    return;
  }
  auto &DI = *CGF.getDebugInfo();
  SavedLocation = DI.getLocation();
  assert((DI.getInlinedAt() ==
          CGF.Builder.getCurrentDebugLocation()->getInlinedAt()) &&
         "CGDebugInfo and IRBuilder are out of sync");

  DI.EmitInlineFunctionStart(CGF.Builder, InlinedFn);
}

ApplyInlineDebugLocation::~ApplyInlineDebugLocation() {
  if (!CGF)
    return;
  auto &DI = *CGF->getDebugInfo();
  DI.EmitInlineFunctionEnd(CGF->Builder);
  DI.EmitLocation(CGF->Builder, SavedLocation);
}
// Updates CurLoc to the expansion location of Loc and, when the file has
// changed mid-scope, replaces the innermost lexical block with a
// DILexicalBlockFile pointing at the new file.
void CGDebugInfo::setLocation(SourceLocation Loc) {
  // If the new location isn't valid return.
  if (Loc.isInvalid())
    return;

  CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);

  // If we've changed files in the middle of a lexical scope go ahead
  // and create a new lexical scope with file node if it's different
  // from the one in the scope.
  if (LexicalBlockStack.empty())
    return;

  SourceManager &SM = CGM.getContext().getSourceManager();
  auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
  PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
  if (PCLoc.isInvalid() || Scope->getFile() == getOrCreateFile(CurLoc))
    return;

  if (auto *LBF = dyn_cast<llvm::DILexicalBlockFile>(Scope)) {
    // Already a block-file: rewrap its underlying scope with the new file.
    LexicalBlockStack.pop_back();
    LexicalBlockStack.emplace_back(DBuilder.createLexicalBlockFile(
        LBF->getScope(), getOrCreateFile(CurLoc)));
  } else if (isa<llvm::DILexicalBlock>(Scope) ||
             isa<llvm::DISubprogram>(Scope)) {
    LexicalBlockStack.pop_back();
    LexicalBlockStack.emplace_back(
        DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc)));
  }
}
// Returns the debug scope for D's declaration context, defaulting to the
// owning module (when D comes from one) or the compile unit.
llvm::DIScope *CGDebugInfo::getDeclContextDescriptor(const Decl *D) {
  llvm::DIScope *Mod = getParentModuleOrNull(D);
  return getContextDescriptor(cast<Decl>(D->getDeclContext()),
                              Mod ? Mod : TheCU);
}

// Maps a Decl acting as a context to its DIScope: consults the RegionMap
// cache first, then handles namespaces and (non-dependent) record types;
// anything else falls back to Default.
llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
                                                 llvm::DIScope *Default) {
  if (!Context)
    return Default;

  auto I = RegionMap.find(Context);
  if (I != RegionMap.end()) {
    llvm::Metadata *V = I->second;
    return dyn_cast_or_null<llvm::DIScope>(V);
  }

  // Check namespace.
  if (const auto *NSDecl = dyn_cast<NamespaceDecl>(Context))
    return getOrCreateNamespace(NSDecl);

  if (const auto *RDecl = dyn_cast<RecordDecl>(Context))
    if (!RDecl->isDependentType())
      return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
                             TheCU->getFile());
  return Default;
}
// Printing policy used for names in debug info; tweaked per debug format.
PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
  PrintingPolicy PP = CGM.getContext().getPrintingPolicy();

  // If we're emitting codeview, it's important to try to match MSVC's naming so
  // that visualizers written for MSVC will trigger for our class names. In
  // particular, we can't have spaces between arguments of standard templates
  // like basic_string and vector, but we must have spaces between consecutive
  // angle brackets that close nested template argument lists.
  if (CGM.getCodeGenOpts().EmitCodeView) {
    PP.MSVCFormatting = true;
    PP.SplitTemplateClosers = true;
  } else {
    // For DWARF, printing rules are underspecified.
    // SplitTemplateClosers yields better interop with GCC and GDB (PR46052).
    PP.SplitTemplateClosers = true;
  }

  // Apply -fdebug-prefix-map.
  PP.Callbacks = &PrintCB;
  return PP;
}
// Returns the display name for FD. Plain functions with an identifier are
// returned directly; template specializations (and unnamed functions) are
// printed, with their template arguments appended, into interned storage.
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
  assert(FD && "Invalid FunctionDecl!");
  IdentifierInfo *FII = FD->getIdentifier();
  FunctionTemplateSpecializationInfo *Info =
      FD->getTemplateSpecializationInfo();

  if (!Info && FII)
    return FII->getName();

  SmallString<128> NS;
  llvm::raw_svector_ostream OS(NS);
  FD->printName(OS);

  // Add any template specialization args.
  if (Info) {
    const TemplateArgumentList *TArgs = Info->TemplateArguments;
    printTemplateArgumentList(OS, TArgs->asArray(), getPrintingPolicy());
  }

  // Copy this name on the side and use its reference.
  return internString(OS.str());
}
// Returns a printable Objective-C method name in the conventional form
// "-[Class selector]" / "+[Class(Category) selector]", depending on the
// method's declaration context.
//
// Fix vs. original: the named-category branch printed the category
// identifier twice ("Cat(Cat)") instead of "Class(Category)"; it now
// prints the class interface name first, matching the
// ObjCCategoryImplDecl branch below.
StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
  SmallString<256> MethodName;
  llvm::raw_svector_ostream OS(MethodName);
  OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
  const DeclContext *DC = OMD->getDeclContext();
  if (const auto *OID = dyn_cast<ObjCImplementationDecl>(DC)) {
    OS << OID->getName();
  } else if (const auto *OID = dyn_cast<ObjCInterfaceDecl>(DC)) {
    OS << OID->getName();
  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(DC)) {
    if (OC->IsClassExtension()) {
      // Class extensions print as the class itself.
      OS << OC->getClassInterface()->getName();
    } else {
      OS << OC->getClassInterface()->getName() << '('
         << OC->getIdentifier()->getNameStart() << ')';
    }
  } else if (const auto *OCD = dyn_cast<ObjCCategoryImplDecl>(DC)) {
    OS << OCD->getClassInterface()->getName() << '(' << OCD->getName() << ')';
  }
  OS << ' ' << OMD->getSelector().getAsString() << ']';

  return internString(OS.str());
}
// Interns and returns the textual form of an Objective-C selector.
StringRef CGDebugInfo::getSelectorName(Selector S) {
  return internString(S.getAsString());
}

// Returns the display name for a record. Template specializations are
// printed canonically; named records use their identifier; unnamed records
// get a synthesized name only for CodeView (which requires unique names).
StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
  if (isa<ClassTemplateSpecializationDecl>(RD)) {
    SmallString<128> Name;
    llvm::raw_svector_ostream OS(Name);
    PrintingPolicy PP = getPrintingPolicy();
    PP.PrintCanonicalTypes = true;
    RD->getNameForDiagnostic(OS, PP,
                             /*Qualified*/ false);

    // Copy this name on the side and use its reference.
    return internString(Name);
  }

  // quick optimization to avoid having to intern strings that are already
  // stored reliably elsewhere
  if (const IdentifierInfo *II = RD->getIdentifier())
    return II->getName();

  // The CodeView printer in LLVM wants to see the names of unnamed types
  // because they need to have a unique identifier.
  // These names are used to reconstruct the fully qualified type names.
  if (CGM.getCodeGenOpts().EmitCodeView) {
    if (const TypedefNameDecl *D = RD->getTypedefNameForAnonDecl()) {
      assert(RD->getDeclContext() == D->getDeclContext() &&
             "Typedef should not be in another decl context!");
      assert(D->getDeclName().getAsIdentifierInfo() &&
             "Typedef was not named!");
      return D->getDeclName().getAsIdentifierInfo()->getName();
    }

    if (CGM.getLangOpts().CPlusPlus) {
      StringRef Name;

      ASTContext &Context = CGM.getContext();
      if (const DeclaratorDecl *DD = Context.getDeclaratorForUnnamedTagDecl(RD))
        // Anonymous types without a name for linkage purposes have their
        // declarator mangled in if they have one.
        Name = DD->getName();
      else if (const TypedefNameDecl *TND =
                   Context.getTypedefNameForUnnamedTagDecl(RD))
        // Anonymous types without a name for linkage purposes have their
        // associate typedef mangled in if they have one.
        Name = TND->getName();

      // Give lambdas a display name based on their name mangling.
      if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
        if (CXXRD->isLambda())
          return internString(
              CGM.getCXXABI().getMangleContext().getLambdaString(CXXRD));

      if (!Name.empty()) {
        SmallString<256> UnnamedType("<unnamed-type-");
        UnnamedType += Name;
        UnnamedType += '>';
        return internString(UnnamedType);
      }
    }
  }

  return StringRef();
}
// Computes the MD5 of a file's contents into Checksum. Only done for
// CodeView or DWARF 5+ (earlier formats have no checksum field); returns
// the checksum kind, or None when no checksum was produced.
Optional<llvm::DIFile::ChecksumKind>
CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
  Checksum.clear();

  if (!CGM.getCodeGenOpts().EmitCodeView &&
      CGM.getCodeGenOpts().DwarfVersion < 5)
    return None;
  SourceManager &SM = CGM.getContext().getSourceManager();
  Optional<llvm::MemoryBufferRef> MemBuffer = SM.getBufferOrNone(FID);
  if (!MemBuffer)
    return None;

  llvm::MD5 Hash;
  llvm::MD5::MD5Result Result;

  Hash.update(MemBuffer->getBuffer());
  Hash.final(Result);

  Hash.stringifyResult(Result, Checksum);
  return llvm::DIFile::CSK_MD5;
}

// Returns the file's text for -gembed-source builds, None otherwise (or
// when the buffer cannot be read).
Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
                                           FileID FID) {
  if (!CGM.getCodeGenOpts().EmbedSource)
    return None;

  bool SourceInvalid = false;
  StringRef Source = SM.getBufferData(FID, &SourceInvalid);

  if (SourceInvalid)
    return None;

  return Source;
}
// Returns (creating and caching on demand) the DIFile for Loc. Invalid or
// nameless locations fall back to the compile unit's file name.
//
// Fix vs. original: the non-empty branch of the file-name check re-assigned
// PLoc.getFilename() to FileName — a redundant no-op duplicate of the
// assignment just above it; the branch has been removed.
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
  SourceManager &SM = CGM.getContext().getSourceManager();
  StringRef FileName;
  FileID FID;

  if (Loc.isInvalid()) {
    // The DIFile used by the CU is distinct from the main source file. Call
    // createFile() below for canonicalization if the source file was specified
    // with an absolute path.
    FileName = TheCU->getFile()->getFilename();
  } else {
    PresumedLoc PLoc = SM.getPresumedLoc(Loc);
    FileName = PLoc.getFilename();
    // A presumed location with no file name falls back to the CU's file.
    if (FileName.empty())
      FileName = TheCU->getFile()->getFilename();
    FID = PLoc.getFileID();
  }

  // Cache the results.
  auto It = DIFileCache.find(FileName.data());
  if (It != DIFileCache.end()) {
    // Verify that the information still exists.
    if (llvm::Metadata *V = It->second)
      return cast<llvm::DIFile>(V);
  }

  SmallString<32> Checksum;

  Optional<llvm::DIFile::ChecksumKind> CSKind = computeChecksum(FID, Checksum);
  Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
  if (CSKind)
    CSInfo.emplace(*CSKind, Checksum);
  return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
}
// Creates (and caches) a DIFile. Absolute paths are split into the longest
// prefix shared with the compilation directory (the "directory") and the
// remainder (the "file") for a more compact encoding; relative paths use
// the current directory as-is. Both go through remapDIPath() first.
llvm::DIFile *
CGDebugInfo::createFile(StringRef FileName,
                        Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
                        Optional<StringRef> Source) {
  StringRef Dir;
  StringRef File;
  std::string RemappedFile = remapDIPath(FileName);
  std::string CurDir = remapDIPath(getCurrentDirname());
  SmallString<128> DirBuf;
  SmallString<128> FileBuf;
  if (llvm::sys::path::is_absolute(RemappedFile)) {
    // Strip the common prefix (if it is more than just "/") from current
    // directory and FileName for a more space-efficient encoding.
    auto FileIt = llvm::sys::path::begin(RemappedFile);
    auto FileE = llvm::sys::path::end(RemappedFile);
    auto CurDirIt = llvm::sys::path::begin(CurDir);
    auto CurDirE = llvm::sys::path::end(CurDir);
    for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
      llvm::sys::path::append(DirBuf, *CurDirIt);
    if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
      // Don't strip the common prefix if it is only the root "/"
      // since that would make LLVM diagnostic locations confusing.
      Dir = {};
      File = RemappedFile;
    } else {
      for (; FileIt != FileE; ++FileIt)
        llvm::sys::path::append(FileBuf, *FileIt);
      Dir = DirBuf;
      File = FileBuf;
    }
  } else {
    Dir = CurDir;
    File = RemappedFile;
  }
  llvm::DIFile *F = DBuilder.createFile(File, Dir, CSInfo, Source);
  DIFileCache[FileName.data()].reset(F);
  return F;
}

// Applies the first matching -fdebug-prefix-map entry to Path; returns the
// path unchanged when no entry matches or the map is empty.
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
  if (DebugPrefixMap.empty())
    return Path.str();

  SmallString<256> P = Path;
  for (const auto &Entry : DebugPrefixMap)
    if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second))
      break;
  return P.str().str();
}
// Return the presumed (possibly #line-adjusted) line number for \p Loc,
// or 0 when the location is invalid.
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
  if (Loc.isInvalid())
    return 0;
  SourceManager &SM = CGM.getContext().getSourceManager();
  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  return PLoc.getLine();
}
// Return the presumed column for \p Loc (falling back to CurLoc), or 0 when
// column info is disabled and not forced, or when no valid location exists.
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
  // We may not want column information at all.
  if (!Force && !CGM.getCodeGenOpts().DebugColumnInfo)
    return 0;
  // With both the requested and the current location invalid there is
  // nothing to report.
  if (Loc.isInvalid() && CurLoc.isInvalid())
    return 0;
  SourceManager &SM = CGM.getContext().getSourceManager();
  SourceLocation Effective = Loc.isValid() ? Loc : CurLoc;
  PresumedLoc PLoc = SM.getPresumedLoc(Effective);
  if (!PLoc.isValid())
    return 0;
  return PLoc.getColumn();
}
// Return the compilation directory: an explicit -fdebug-compilation-dir if
// given, otherwise the (lazily cached) process working directory.
StringRef CGDebugInfo::getCurrentDirname() {
  const auto &CGOpts = CGM.getCodeGenOpts();
  if (!CGOpts.DebugCompilationDir.empty())
    return CGOpts.DebugCompilationDir;
  // Compute the CWD once and intern it for the lifetime of this object.
  if (CWDName.empty()) {
    SmallString<256> CWD;
    llvm::sys::fs::current_path(CWD);
    CWDName = internString(CWD);
  }
  return CWDName;
}
// Build the DICompileUnit (TheCU) describing this translation unit:
// resolves the main file name, computes its checksum, maps LangOptions to a
// DWARF source language, and forwards the relevant CodeGen options.
void CGDebugInfo::CreateCompileUnit() {
  // Checksum/source info for the main file, attached to the CU's DIFile.
  SmallString<32> Checksum;
  Optional<llvm::DIFile::ChecksumKind> CSKind;
  Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
  // Should we be asking the SourceManager for the main file name, instead of
  // accepting it as an argument? This just causes the main file name to
  // mismatch with source locations and create extra lexical scopes or
  // mismatched debug info (a CU with a DW_AT_file of "-", because that's what
  // the driver passed, but functions/other things have DW_AT_file of "<stdin>"
  // because that's what the SourceManager says)
  // Get absolute path name.
  SourceManager &SM = CGM.getContext().getSourceManager();
  std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
  if (MainFileName.empty())
    MainFileName = "<stdin>";
  // The main file name provided via the "-main-file-name" option contains just
  // the file name itself with no path information. This file name may have had
  // a relative path, so we look into the actual file entry for the main
  // file to determine the real absolute path for the file.
  std::string MainFileDir;
  if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
    MainFileDir = std::string(MainFile->getDir()->getName());
    if (!llvm::sys::path::is_absolute(MainFileName)) {
      // Join the file entry's directory with the relative name and drop any
      // leading "./" so the CU records a clean path.
      llvm::SmallString<1024> MainFileDirSS(MainFileDir);
      llvm::sys::path::append(MainFileDirSS, MainFileName);
      MainFileName =
          std::string(llvm::sys::path::remove_leading_dotslash(MainFileDirSS));
    }
    // If the main file name provided is identical to the input file name, and
    // if the input file is a preprocessed source, use the module name for
    // debug info. The module name comes from the name specified in the first
    // linemarker if the input is a preprocessed source.
    if (MainFile->getName() == MainFileName &&
        FrontendOptions::getInputKindForExtension(
            MainFile->getName().rsplit('.').second)
            .isPreprocessed())
      MainFileName = CGM.getModule().getName().str();
    // Checksum is only computable when a real file entry backs the input.
    CSKind = computeChecksum(SM.getMainFileID(), Checksum);
  }
  // Map the language options onto the closest DWARF source-language code.
  llvm::dwarf::SourceLanguage LangTag;
  const LangOptions &LO = CGM.getLangOpts();
  if (LO.CPlusPlus) {
    if (LO.ObjC)
      LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
    else if (LO.CPlusPlus14)
      LangTag = llvm::dwarf::DW_LANG_C_plus_plus_14;
    else if (LO.CPlusPlus11)
      LangTag = llvm::dwarf::DW_LANG_C_plus_plus_11;
    else
      LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
  } else if (LO.ObjC) {
    LangTag = llvm::dwarf::DW_LANG_ObjC;
  } else if (LO.RenderScript) {
    LangTag = llvm::dwarf::DW_LANG_GOOGLE_RenderScript;
  } else if (LO.C99) {
    LangTag = llvm::dwarf::DW_LANG_C99;
  } else {
    LangTag = llvm::dwarf::DW_LANG_C89;
  }
  std::string Producer = getClangFullVersion();
  // Figure out which version of the ObjC runtime we have.
  unsigned RuntimeVers = 0;
  if (LO.ObjC)
    RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
  // Translate the -g level into the DICompileUnit emission kind.
  llvm::DICompileUnit::DebugEmissionKind EmissionKind;
  switch (DebugKind) {
  case codegenoptions::NoDebugInfo:
  case codegenoptions::LocTrackingOnly:
    EmissionKind = llvm::DICompileUnit::NoDebug;
    break;
  case codegenoptions::DebugLineTablesOnly:
    EmissionKind = llvm::DICompileUnit::LineTablesOnly;
    break;
  case codegenoptions::DebugDirectivesOnly:
    EmissionKind = llvm::DICompileUnit::DebugDirectivesOnly;
    break;
  case codegenoptions::DebugInfoConstructor:
  case codegenoptions::LimitedDebugInfo:
  case codegenoptions::FullDebugInfo:
  case codegenoptions::UnusedTypeInfo:
    EmissionKind = llvm::DICompileUnit::FullDebug;
    break;
  }
  // DwoId is 0 here; presumably the real split-DWARF hash is filled in
  // elsewhere — TODO confirm.
  uint64_t DwoId = 0;
  auto &CGOpts = CGM.getCodeGenOpts();
  // The DIFile used by the CU is distinct from the main source
  // file. Its directory part specifies what becomes the
  // DW_AT_comp_dir (the compilation directory), even if the source
  // file was specified with an absolute path.
  if (CSKind)
    CSInfo.emplace(*CSKind, Checksum);
  llvm::DIFile *CUFile = DBuilder.createFile(
      remapDIPath(MainFileName), remapDIPath(getCurrentDirname()), CSInfo,
      getSource(SM, SM.getMainFileID()));
  // For LLDB tuning, record the sysroot and pick out the innermost path
  // component ending in ".sdk" as the SDK name.
  StringRef Sysroot, SDK;
  if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB) {
    Sysroot = CGM.getHeaderSearchOpts().Sysroot;
    auto B = llvm::sys::path::rbegin(Sysroot);
    auto E = llvm::sys::path::rend(Sysroot);
    auto It = std::find_if(B, E, [](auto SDK) { return SDK.endswith(".sdk"); });
    if (It != E)
      SDK = *It;
  }
  // Create new compile unit.
  TheCU = DBuilder.createCompileUnit(
      LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
      LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
      CGOpts.DwarfDebugFlags, RuntimeVers, CGOpts.SplitDwarfFile, EmissionKind,
      DwoId, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
      CGM.getTarget().getTriple().isNVPTX()
          ? llvm::DICompileUnit::DebugNameTableKind::None
          : static_cast<llvm::DICompileUnit::DebugNameTableKind>(
                CGOpts.DebugNameTable),
      CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
}
// Translate a Clang builtin type into debug info. Structured kinds (ObjC
// id/Class/SEL, OpenCL opaque types, SVE/PPC vectors) are handled with early
// returns; the remaining scalar kinds select a DW_ATE_* encoding and fall
// through to a single createBasicType call at the bottom.
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
  llvm::dwarf::TypeKind Encoding;
  StringRef BTName;
  switch (BT->getKind()) {
// Placeholder types should have been resolved before codegen; reaching one
// here (or Dependent) is a bug.
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
  case BuiltinType::Dependent:
    llvm_unreachable("Unexpected builtin type");
  case BuiltinType::NullPtr:
    return DBuilder.createNullPtrType();
  case BuiltinType::Void:
    return nullptr;
  case BuiltinType::ObjCClass:
    // Lazily created opaque forward declaration shared by all uses.
    if (!ClassTy)
      ClassTy =
          DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
                                     "objc_class", TheCU, TheCU->getFile(), 0);
    return ClassTy;
  case BuiltinType::ObjCId: {
    // typedef struct objc_class *Class;
    // typedef struct objc_object {
    //   Class isa;
    // } *id;
    if (ObjTy)
      return ObjTy;
    if (!ClassTy)
      ClassTy =
          DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
                                     "objc_class", TheCU, TheCU->getFile(), 0);
    unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
    auto *ISATy = DBuilder.createPointerType(ClassTy, Size);
    // Create the struct first, then patch in the 'isa' member so the member
    // can name ObjTy as its scope.
    ObjTy = DBuilder.createStructType(TheCU, "objc_object", TheCU->getFile(), 0,
                                      0, 0, llvm::DINode::FlagZero, nullptr,
                                      llvm::DINodeArray());
    DBuilder.replaceArrays(
        ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
                   ObjTy, "isa", TheCU->getFile(), 0, Size, 0, 0,
                   llvm::DINode::FlagZero, ISATy)));
    return ObjTy;
  }
  case BuiltinType::ObjCSel: {
    // SEL is an opaque forward-declared struct, like Class.
    if (!SelTy)
      SelTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
                                         "objc_selector", TheCU,
                                         TheCU->getFile(), 0);
    return SelTy;
  }
// OpenCL image and pipe types are emitted as pointers to opaque structs.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return getOrCreateStructPtrType("opencl_" #ImgType "_" #Suffix "_t",       \
                                    SingletonId);
#include "clang/Basic/OpenCLImageTypes.def"
  case BuiltinType::OCLSampler:
    return getOrCreateStructPtrType("opencl_sampler_t", OCLSamplerDITy);
  case BuiltinType::OCLEvent:
    return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
  case BuiltinType::OCLClkEvent:
    return getOrCreateStructPtrType("opencl_clk_event_t", OCLClkEventDITy);
  case BuiltinType::OCLQueue:
    return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy);
  case BuiltinType::OCLReserveID:
    return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy);
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:                                                        \
    return getOrCreateStructPtrType("opencl_" #ExtType, Id##Ty);
#include "clang/Basic/OpenCLExtensionTypes.def"
// Scalable SVE vectors: emit a vector type whose upper bound is a DWARF
// expression in terms of the AArch64 VG (vector granule) register.
#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          CGM.getContext().getBuiltinVectorTypeInfo(BT);
      unsigned NumElemsPerVG = (Info.EC.getKnownMinValue() * Info.NumVectors) / 2;
      // Debuggers can't extract 1bit from a vector, so will display a
      // bitpattern for svbool_t instead.
      if (Info.ElementType == CGM.getContext().BoolTy) {
        NumElemsPerVG /= 8;
        Info.ElementType = CGM.getContext().UnsignedCharTy;
      }
      auto *LowerBound =
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
              llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
      // Upper bound = NumElemsPerVG * VG - 1, computed at debug time.
      SmallVector<int64_t, 9> Expr(
          {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
           /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
           llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
      auto *UpperBound = DBuilder.createExpression(Expr);
      llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(
          /*count*/ nullptr, LowerBound, UpperBound, /*stride*/ nullptr);
      llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
      llvm::DIType *ElemTy =
          getOrCreateType(Info.ElementType, TheCU->getFile());
      auto Align = getTypeAlignIfRequired(BT, CGM.getContext());
      return DBuilder.createVectorType(/*Size*/ 0, Align, ElemTy,
                                       SubscriptArray);
    }
  // It doesn't make sense to generate debug info for PowerPC MMA vector types.
  // So we return a safe type here to avoid generating an error.
#define PPC_VECTOR_TYPE(Name, Id, size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
    return CreateType(cast<const BuiltinType>(CGM.getContext().IntTy));

  // The remaining scalar kinds pick an encoding and break out of the switch.
  case BuiltinType::UChar:
  case BuiltinType::Char_U:
    Encoding = llvm::dwarf::DW_ATE_unsigned_char;
    break;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    Encoding = llvm::dwarf::DW_ATE_signed_char;
    break;
  case BuiltinType::Char8:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
    Encoding = llvm::dwarf::DW_ATE_UTF;
    break;
  case BuiltinType::UShort:
  case BuiltinType::UInt:
  case BuiltinType::UInt128:
  case BuiltinType::ULong:
  case BuiltinType::WChar_U:
  case BuiltinType::ULongLong:
    Encoding = llvm::dwarf::DW_ATE_unsigned;
    break;
  case BuiltinType::Short:
  case BuiltinType::Int:
  case BuiltinType::Int128:
  case BuiltinType::Long:
  case BuiltinType::WChar_S:
  case BuiltinType::LongLong:
    Encoding = llvm::dwarf::DW_ATE_signed;
    break;
  case BuiltinType::Bool:
    Encoding = llvm::dwarf::DW_ATE_boolean;
    break;
  case BuiltinType::Half:
  case BuiltinType::Float:
  case BuiltinType::LongDouble:
  case BuiltinType::Float16:
  case BuiltinType::BFloat16:
  case BuiltinType::Float128:
  case BuiltinType::Double:
    // FIXME: For targets where long double and __float128 have the same size,
    // they are currently indistinguishable in the debugger without some
    // special treatment. However, there is currently no consensus on encoding
    // and this should be updated once a DWARF encoding exists for distinct
    // floating point types of the same size.
    Encoding = llvm::dwarf::DW_ATE_float;
    break;
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
    Encoding = llvm::dwarf::DW_ATE_signed_fixed;
    break;
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
    Encoding = llvm::dwarf::DW_ATE_unsigned_fixed;
    break;
  }

  // Use the traditional spellings for the long variants; everything else
  // uses the type's own printed name.
  switch (BT->getKind()) {
  case BuiltinType::Long:
    BTName = "long int";
    break;
  case BuiltinType::LongLong:
    BTName = "long long int";
    break;
  case BuiltinType::ULong:
    BTName = "long unsigned int";
    break;
  case BuiltinType::ULongLong:
    BTName = "long long unsigned int";
    break;
  default:
    BTName = BT->getName(CGM.getLangOpts());
    break;
  }
  // Bit size and offset of the type.
  uint64_t Size = CGM.getContext().getTypeSize(BT);
  return DBuilder.createBasicType(BTName, Size, Encoding);
}
// An undeduced 'auto' has no concrete representation in debug info; emit it
// as a DWARF unspecified type spelled "auto".
llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) {
  return DBuilder.createUnspecifiedType("auto");
}
// _ExtInt(N) becomes a basic type whose name records signedness and whose
// bit-size comes from the ASTContext.
llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) {
  StringRef Name;
  llvm::dwarf::TypeKind Encoding;
  if (Ty->isUnsigned()) {
    Name = "unsigned _ExtInt";
    Encoding = llvm::dwarf::DW_ATE_unsigned;
  } else {
    Name = "_ExtInt";
    Encoding = llvm::dwarf::DW_ATE_signed;
  }
  uint64_t SizeInBits = CGM.getContext().getTypeSize(Ty);
  return DBuilder.createBasicType(Name, SizeInBits, Encoding);
}
// Complex types: floating complex uses DW_ATE_complex_float; complex
// integers have no dedicated DWARF encoding, so the first vendor-extension
// value (DW_ATE_lo_user) is used instead.
llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
  const bool IsIntegerComplex = Ty->isComplexIntegerType();
  llvm::dwarf::TypeKind Encoding = IsIntegerComplex
                                       ? llvm::dwarf::DW_ATE_lo_user
                                       : llvm::dwarf::DW_ATE_complex_float;
  uint64_t SizeInBits = CGM.getContext().getTypeSize(Ty);
  return DBuilder.createBasicType("complex", SizeInBits, Encoding);
}
// Wrap \p Ty in DWARF qualifier nodes. Each call peels exactly one
// qualifier (const, then volatile, then restrict, then pointer-auth — the
// if/else order below fixes the nesting of the emitted DW_TAG_*_type nodes)
// and recurses via Qc.apply()/getOrCreateType for the remainder.
llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
                                               llvm::DIFile *Unit) {
  QualifierCollector Qc;
  const Type *T = Qc.strip(Ty);
  // Ignore these qualifiers for now.
  Qc.removeObjCGCAttr();
  Qc.removeAddressSpace();
  Qc.removeObjCLifetime();
  // We will create one Derived type for one qualifier and recurse to handle any
  // additional ones.
  llvm::dwarf::Tag Tag;
  if (Qc.hasConst()) {
    Tag = llvm::dwarf::DW_TAG_const_type;
    Qc.removeConst();
  } else if (Qc.hasVolatile()) {
    Tag = llvm::dwarf::DW_TAG_volatile_type;
    Qc.removeVolatile();
  } else if (Qc.hasRestrict()) {
    Tag = llvm::dwarf::DW_TAG_restrict_type;
    Qc.removeRestrict();
  } else if (Qc.getPointerAuth().isPresent()) {
    // Pointer authentication qualifier (non-standard extension): emitted via
    // a dedicated DIBuilder node rather than a DW_TAG_*_type wrapper, and
    // must be the last qualifier remaining.
    unsigned Key = Qc.getPointerAuth().getKey();
    bool IsDiscr = Qc.getPointerAuth().isAddressDiscriminated();
    unsigned ExtraDiscr = Qc.getPointerAuth().getExtraDiscriminator();
    Qc.removePtrAuth();
    assert(Qc.empty() && "Unknown type qualifier for debug info");
    auto *FromTy = getOrCreateType(QualType(T, 0), Unit);
    return DBuilder.createPtrAuthQualifiedType(FromTy, Key, IsDiscr,
                                               ExtraDiscr);
  } else {
    // No (recognized) qualifiers left: emit the unqualified type directly.
    assert(Qc.empty() && "Unknown type qualifier for debug info");
    return getOrCreateType(QualType(T, 0), Unit);
  }
  // Recurse on the type with the remaining qualifiers re-applied.
  auto *FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
  // No need to fill in the Name, Line, Size, Alignment, Offset in case of
  // CVR derived types.
  return DBuilder.createQualifiedType(Tag, FromTy);
}
llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
                                      llvm::DIFile *Unit) {
  // The frontend models 'id' as a typedef of an ObjCObjectType while
  // 'id<protocol>' is an ObjCPointerType; debug info wants plain 'id' in
  // both cases.
  if (Ty->isObjCQualifiedIdType()) {
    QualType IdTy = CGM.getContext().getObjCIdType();
    return getOrCreateType(IdTy, Unit);
  }
  return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
                               Ty->getPointeeType(), Unit);
}
// A pointer is a DW_TAG_pointer_type wrapping its pointee.
llvm::DIType *
CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile *Unit) {
  QualType Pointee = Ty->getPointeeType();
  return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Pointee,
                               Unit);
}
/// \return whether a C++ mangling exists for the type defined by TD.
static bool hasCXXMangling(const TagDecl *TD, llvm::DICompileUnit *TheCU) {
  const unsigned Lang = TheCU->getSourceLanguage();
  // Pure C++ compile units always have a mangling.
  if (Lang == llvm::dwarf::DW_LANG_C_plus_plus ||
      Lang == llvm::dwarf::DW_LANG_C_plus_plus_11 ||
      Lang == llvm::dwarf::DW_LANG_C_plus_plus_14)
    return true;
  // In ObjC++, only the C++-flavored declarations are mangled.
  if (Lang == llvm::dwarf::DW_LANG_ObjC_plus_plus)
    return isa<CXXRecordDecl>(TD) || isa<EnumDecl>(TD);
  return false;
}
// Determines if the debug info for this tag declaration needs a type
// identifier. The purpose of the unique identifier is to deduplicate type
// information for identical types across TUs. Because of the C++ one definition
// rule (ODR), it is valid to assume that the type is defined the same way in
// every TU and its debug info is equivalent.
//
// C does not have the ODR, and it is common for codebases to contain multiple
// different definitions of a struct with the same name in different TUs.
// Therefore, if the type doesn't have a C++ mangling, don't give it an
// identifier. Type information in C is smaller and simpler than C++ type
// information, so the increase in debug info size is negligible.
//
// If the type is not externally visible, it should be unique to the current TU,
// and should not need an identifier to participate in type deduplication.
// However, when emitting CodeView, the format internally uses these
// unique type name identifiers for references between debug info. For example,
// the method of a class in an anonymous namespace uses the identifier to refer
// to its parent class. The Microsoft C++ ABI attempts to provide unique names
// for such types, so when emitting CodeView, always use identifiers for C++
// types. This may create problems when attempting to emit CodeView when the MS
// C++ ABI is not in use.
static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM,
                                llvm::DICompileUnit *TheCU) {
  // Only types with a C++ name mangling can carry an identifier at all.
  if (!hasCXXMangling(TD, TheCU))
    return false;
  // An identifier is needed either for cross-TU deduplication of externally
  // visible types, or — under CodeView — for internal references between
  // debug info records.
  return TD->isExternallyVisible() || CGM.getCodeGenOpts().EmitCodeView;
}
// Returns a unique type identifier string if one exists, or an empty string.
static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
                                          llvm::DICompileUnit *TheCU) {
  SmallString<256> Identifier;
  const TagDecl *TD = Ty->getDecl();
  if (!needsTypeIdentifier(TD, CGM, TheCU))
    return Identifier;
  // Defined dynamic classes with an externally visible vtable are skipped.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(TD)) {
    if (RD->getDefinition() && RD->isDynamicClass() &&
        CGM.getVTableLinkage(RD) == llvm::GlobalValue::ExternalLinkage)
      return Identifier;
  }
  // TODO: This is using the RTTI name. Is there a better way to get
  // a unique string for a type?
  llvm::raw_svector_ostream Out(Identifier);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out);
  return Identifier;
}
/// \return the appropriate DWARF tag for a composite type.
static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) {
  if (RD->isUnion())
    return llvm::dwarf::DW_TAG_union_type;
  if (RD->isStruct() || RD->isInterface())
    return llvm::dwarf::DW_TAG_structure_type;
  // FIXME: This could be a struct type giving a default visibility different
  // than C++ class type, but needs llvm metadata changes first.
  assert(RD->isClass());
  return llvm::dwarf::DW_TAG_class_type;
}
// Build (or fetch from the type cache) a forward declaration for \p Ty as a
// replaceable composite type, and register it in ReplaceMap so it can later
// be RAUW'd with the full definition.
llvm::DICompositeType *
CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
                                      llvm::DIScope *Ctx) {
  const RecordDecl *RD = Ty->getDecl();
  // Reuse an already-cached DIType for this record, if any.
  if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
    return cast<llvm::DICompositeType>(T);
  llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
  const unsigned Line =
      getLineNumber(RD->getLocation().isValid() ? RD->getLocation() : CurLoc);
  StringRef RDName = getClassName(RD);
  uint64_t Size = 0;
  uint32_t Align = 0;
  // Size is only known when a complete definition exists.
  const RecordDecl *D = RD->getDefinition();
  if (D && D->isCompleteDefinition())
    Size = CGM.getContext().getTypeSize(Ty);
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagFwdDecl;
  // Add flag to nontrivial forward declarations. To be consistent with MSVC,
  // add the flag if a record has no definition because we don't know whether
  // it will be trivial or not.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    if (!CXXRD->hasDefinition() ||
        (CXXRD->hasDefinition() && !CXXRD->isTrivial()))
      Flags |= llvm::DINode::FlagNonTrivial;
  // Create the type.
  SmallString<256> Identifier;
  // Don't include a linkage name in line tables only.
  if (CGM.getCodeGenOpts().hasReducedDebugInfo())
    Identifier = getTypeIdentifier(Ty, CGM, TheCU);
  llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType(
      getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, Flags,
      Identifier);
  // Optionally attach template parameters to the forward declaration so
  // consumers can identify the specialization before it is completed.
  if (CGM.getCodeGenOpts().DebugFwdTemplateParams)
    if (auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
      DBuilder.replaceArrays(RetTy, llvm::DINodeArray(),
                             CollectCXXTemplateParams(TSpecial, DefUnit));
  // Remember the placeholder so it can be replaced with the real definition.
  ReplaceMap.emplace_back(
      std::piecewise_construct, std::make_tuple(Ty),
      std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
  return RetTy;
}
// Emit a pointer-like derived type (pointer, reference, rvalue reference,
// ObjC pointer) wrapping \p PointeeTy under the given DWARF \p Tag.
llvm::DIType *CGDebugInfo::CreatePointerLikeType(
    llvm::dwarf::Tag Tag, const Type *Ty, QualType PointeeTy,
    llvm::DIFile *Unit) {
  // Always use the pointer width for the size: getTypeSize would report the
  // referenced type's size for reference types, which is not what we want.
  unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
  uint64_t SizeInBits = CGM.getTarget().getPointerWidth(AS);
  auto AlignInBits = getTypeAlignIfRequired(Ty, CGM.getContext());
  Optional<unsigned> DWARFAddressSpace =
      CGM.getTarget().getDWARFAddressSpace(AS);

  llvm::DIType *PointeeDI = getOrCreateType(PointeeTy, Unit);
  const bool IsReference = Tag == llvm::dwarf::DW_TAG_reference_type ||
                           Tag == llvm::dwarf::DW_TAG_rvalue_reference_type;
  if (IsReference)
    return DBuilder.createReferenceType(Tag, PointeeDI, SizeInBits,
                                        AlignInBits, DWARFAddressSpace);
  return DBuilder.createPointerType(PointeeDI, SizeInBits, AlignInBits,
                                    DWARFAddressSpace);
}
// Return (creating and memoizing on first use) a pointer to an opaque,
// forward-declared struct named \p Name. Used for builtin opaque handle
// types such as the OpenCL image/sampler/event types.
llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
                                                    llvm::DIType *&Cache) {
  if (Cache)
    return Cache;
  llvm::DIType *FwdDecl = DBuilder.createForwardDecl(
      llvm::dwarf::DW_TAG_structure_type, Name, TheCU, TheCU->getFile(), 0);
  unsigned PtrSizeInBits =
      CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
  Cache = DBuilder.createPointerType(FwdDecl, PtrSizeInBits);
  return Cache;
}
// Append the implicit header members of a block literal to \p EltTys and
// return the total bit-size consumed. The member set differs between OpenCL
// blocks and Apple-runtime blocks.
//
// \param Ty      the block pointer type being described.
// \param Unit    file the member types are attributed to.
// \param DescTy  pointer type for the trailing __descriptor member.
// \param LineNo  line number recorded on the __descriptor member.
// \param EltTys  out-parameter receiving the member DITypes in layout order.
uint64_t CGDebugInfo::collectDefaultElementTypesForBlockPointer(
    const BlockPointerType *Ty, llvm::DIFile *Unit, llvm::DIDerivedType *DescTy,
    unsigned LineNo, SmallVectorImpl<llvm::Metadata *> &EltTys) {
  QualType FType;

  // Advanced by calls to CreateMemberType in increments of FType, then
  // returned as the overall size of the default elements.
  uint64_t FieldOffset = 0;

  // Blocks in OpenCL have unique constraints which make the standard fields
  // redundant while requiring size and align fields for enqueue_kernel. See
  // initializeForBlockHeader in CGBlocks.cpp
  if (CGM.getLangOpts().OpenCL) {
    FType = CGM.getContext().IntTy;
    EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
    EltTys.push_back(CreateMemberType(Unit, FType, "__align", &FieldOffset));
  } else {
    // Standard Apple-runtime block header, in layout order; each
    // CreateMemberType call advances FieldOffset past the member it adds.
    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
    EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
    FType = CGM.getContext().IntTy;
    EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
    EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
    FType = CGM.getContext().getPointerType(Ty->getPointeeType());
    EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
    // The __descriptor member is created directly (not via CreateMemberType)
    // so it can carry the pre-built DescTy pointer type and LineNo.
    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
    uint64_t FieldSize = CGM.getContext().getTypeSize(Ty);
    uint32_t FieldAlign = CGM.getContext().getTypeAlign(Ty);
    EltTys.push_back(DBuilder.createMemberType(
        Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign,
        FieldOffset, llvm::DINode::FlagZero, DescTy));
    FieldOffset += FieldSize;
  }

  return FieldOffset;
}
// Describe a block pointer: first build the __block_descriptor struct, then
// the (anonymous) block literal struct containing the default header members,
// and return a pointer to the latter.
llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
                                      llvm::DIFile *Unit) {
  SmallVector<llvm::Metadata *, 8> EltTys;
  QualType FType;
  uint64_t FieldOffset;
  llvm::DINodeArray Elements;

  // __block_descriptor: { unsigned long reserved; unsigned long Size; }
  FieldOffset = 0;
  FType = CGM.getContext().UnsignedLongTy;
  EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
  EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));

  Elements = DBuilder.getOrCreateArray(EltTys);
  EltTys.clear();

  llvm::DINode::DIFlags Flags = llvm::DINode::FlagAppleBlock;

  auto *EltTy =
      DBuilder.createStructType(Unit, "__block_descriptor", nullptr, 0,
                                FieldOffset, 0, Flags, nullptr, Elements);

  // Bit size, align and offset of the type.
  uint64_t Size = CGM.getContext().getTypeSize(Ty);

  auto *DescTy = DBuilder.createPointerType(EltTy, Size);

  // Collect the block literal's implicit header members (isa, flags, ...,
  // __descriptor) into EltTys; FieldOffset ends up as the struct's bit-size.
  FieldOffset = collectDefaultElementTypesForBlockPointer(Ty, Unit, DescTy,
                                                          0, EltTys);

  Elements = DBuilder.getOrCreateArray(EltTys);

  // The __block_literal_generic structs are marked with a special
  // DW_AT_APPLE_BLOCK attribute and are an implementation detail only
  // the debugger needs to know about. To allow type uniquing, emit
  // them without a name or a location.
  EltTy = DBuilder.createStructType(Unit, "", nullptr, 0, FieldOffset, 0,
                                    Flags, nullptr, Elements);

  return DBuilder.createPointerType(EltTy, Size);
}
// An alias template specialization is emitted as a typedef of the aliased
// type, named "Template<Args...>" and scoped to the alias declaration.
llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
                                      llvm::DIFile *Unit) {
  assert(Ty->isTypeAlias());
  llvm::DIType *Underlying = getOrCreateType(Ty->getAliasedType(), Unit);

  auto *AliasDecl =
      cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
          ->getTemplatedDecl();
  // __attribute__((nodebug)) aliases are transparent in debug info.
  if (AliasDecl->hasAttr<NoDebugAttr>())
    return Underlying;

  // Render the typedef name as the template name plus its argument list.
  SmallString<128> Buffer;
  llvm::raw_svector_ostream OS(Buffer);
  Ty->getTemplateName().print(OS, getPrintingPolicy(), /*qualified*/ false);
  printTemplateArgumentList(OS, Ty->template_arguments(), getPrintingPolicy());

  SourceLocation Loc = AliasDecl->getLocation();
  llvm::DIFile *File = getOrCreateFile(Loc);
  return DBuilder.createTypedef(Underlying, OS.str(), File, getLineNumber(Loc),
                                getDeclContextDescriptor(AliasDecl));
}
// Emit a typedef as a derived type over its underlying type; typedefs carry
// only a declaration location (no size) plus an optional over-alignment.
llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
                                      llvm::DIFile *Unit) {
  const auto *TD = Ty->getDecl();
  llvm::DIType *Underlying = getOrCreateType(TD->getUnderlyingType(), Unit);

  // __attribute__((nodebug)) typedefs are transparent in debug info.
  if (TD->hasAttr<NoDebugAttr>())
    return Underlying;

  SourceLocation Loc = TD->getLocation();
  uint32_t Align = getDeclAlignIfRequired(TD, CGM.getContext());
  return DBuilder.createTypedef(Underlying, TD->getName(),
                                getOrCreateFile(Loc), getLineNumber(Loc),
                                getDeclContextDescriptor(TD), Align);
}
// Map a Clang calling convention onto the DWARF DW_CC_* code recorded in
// DW_AT_calling_convention. Returns 0 (no attribute) for the default C
// convention and for any convention without a listed DWARF code.
static unsigned getDwarfCC(CallingConv CC) {
  switch (CC) {
  case CC_C:
    // Avoid emitting DW_AT_calling_convention if the C convention was used.
    return 0;

  case CC_X86StdCall:
    return llvm::dwarf::DW_CC_BORLAND_stdcall;
  case CC_X86FastCall:
    return llvm::dwarf::DW_CC_BORLAND_msfastcall;
  case CC_X86ThisCall:
    return llvm::dwarf::DW_CC_BORLAND_thiscall;
  case CC_X86VectorCall:
    return llvm::dwarf::DW_CC_LLVM_vectorcall;
  case CC_X86Pascal:
    return llvm::dwarf::DW_CC_BORLAND_pascal;
  case CC_Win64:
    return llvm::dwarf::DW_CC_LLVM_Win64;
  case CC_X86_64SysV:
    return llvm::dwarf::DW_CC_LLVM_X86_64SysV;
  case CC_AAPCS:
  case CC_AArch64VectorCall:
    return llvm::dwarf::DW_CC_LLVM_AAPCS;
  case CC_AAPCS_VFP:
    return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP;
  case CC_IntelOclBicc:
    return llvm::dwarf::DW_CC_LLVM_IntelOclBicc;
  case CC_SpirFunction:
    return llvm::dwarf::DW_CC_LLVM_SpirFunction;
  case CC_OpenCLKernel:
    return llvm::dwarf::DW_CC_LLVM_OpenCLKernel;
  case CC_Swift:
    return llvm::dwarf::DW_CC_LLVM_Swift;
  case CC_PreserveMost:
    return llvm::dwarf::DW_CC_LLVM_PreserveMost;
  case CC_PreserveAll:
    return llvm::dwarf::DW_CC_LLVM_PreserveAll;
  case CC_X86RegCall:
    return llvm::dwarf::DW_CC_LLVM_X86RegCall;
  }
  // Conventions not covered above get no DW_AT_calling_convention.
  return 0;
}
// Build a subroutine type: slot 0 is the return type, followed by the
// parameter types; an unspecified-parameter entry marks K&R declarations
// and the tail of variadic prototypes.
llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
                                      llvm::DIFile *Unit) {
  SmallVector<llvm::Metadata *, 16> TypeList;
  TypeList.push_back(getOrCreateType(Ty->getReturnType(), Unit));

  if (isa<FunctionNoProtoType>(Ty)) {
    // No prototype: parameters are unknown, mark them unspecified.
    TypeList.push_back(DBuilder.createUnspecifiedParameter());
  } else if (const auto *Proto = dyn_cast<FunctionProtoType>(Ty)) {
    for (QualType Param : Proto->param_types())
      TypeList.push_back(getOrCreateType(Param, Unit));
    if (Proto->isVariadic())
      TypeList.push_back(DBuilder.createUnspecifiedParameter());
  }

  llvm::DITypeRefArray ParamArray = DBuilder.getOrCreateTypeArray(TypeList);
  return DBuilder.createSubroutineType(ParamArray, llvm::DINode::FlagZero,
                                       getDwarfCC(Ty->getCallConv()));
}
/// Convert an AccessSpecifier into the corresponding DINode flag.
/// As an optimization, return 0 if the access specifier equals the
/// default for the containing type.
static llvm::DINode::DIFlags getAccessFlag(AccessSpecifier Access,
                                           const RecordDecl *RD) {
  // Determine the implicit access level for members of this record kind.
  AccessSpecifier Default = clang::AS_none;
  if (RD) {
    if (RD->isClass())
      Default = clang::AS_private;
    else if (RD->isStruct() || RD->isUnion())
      Default = clang::AS_public;
  }

  // Elide the flag when the access matches the default.
  if (Access == Default)
    return llvm::DINode::FlagZero;

  switch (Access) {
  case clang::AS_private:
    return llvm::DINode::FlagPrivate;
  case clang::AS_protected:
    return llvm::DINode::FlagProtected;
  case clang::AS_public:
    return llvm::DINode::FlagPublic;
  case clang::AS_none:
    return llvm::DINode::FlagZero;
  }
  llvm_unreachable("unexpected access enumerator");
}
// Describe a named bit-field member of \p RD.
//
// \param BitFieldDecl  the bit-field being described (must be named and
//                      have a non-zero width).
// \param RecordTy      debug-info scope of the containing record.
// \param RD            the containing record declaration.
llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
                                              llvm::DIScope *RecordTy,
                                              const RecordDecl *RD) {
  StringRef Name = BitFieldDecl->getName();
  QualType Ty = BitFieldDecl->getType();
  SourceLocation Loc = BitFieldDecl->getLocation();
  // One DIFile serves both the member's type lookup and its location; the
  // previous code called getOrCreateFile(Loc) twice for the same Loc.
  llvm::DIFile *File = getOrCreateFile(Loc);
  llvm::DIType *DebugType = getOrCreateType(Ty, File);
  unsigned Line = getLineNumber(Loc);

  // Layout comes from the CodeGen record layout, not the AST layout.
  const CGBitFieldInfo &BitFieldInfo =
      CGM.getTypes().getCGRecordLayout(RD).getBitFieldInfo(BitFieldDecl);
  uint64_t SizeInBits = BitFieldInfo.Size;
  assert(SizeInBits > 0 && "found named 0-width bitfield");
  uint64_t StorageOffsetInBits =
      CGM.getContext().toBits(BitFieldInfo.StorageOffset);
  uint64_t Offset = BitFieldInfo.Offset;
  // The bit offsets for big endian machines are reversed for big
  // endian target, compensate for that as the DIDerivedType requires
  // un-reversed offsets.
  if (CGM.getDataLayout().isBigEndian())
    Offset = BitFieldInfo.StorageSize - BitFieldInfo.Size - Offset;
  uint64_t OffsetInBits = StorageOffsetInBits + Offset;
  llvm::DINode::DIFlags Flags = getAccessFlag(BitFieldDecl->getAccess(), RD);
  return DBuilder.createBitFieldMemberType(
      RecordTy, Name, File, Line, SizeInBits, OffsetInBits, StorageOffsetInBits,
      Flags, DebugType);
}
// Describe an ordinary (non-bit-field) member named \p name of type \p type.
// Incomplete array members get no size/alignment; otherwise the size comes
// from the ASTContext and the alignment defaults to the type's (when an
// explicit AlignInBits of 0 is passed).
llvm::DIType *
CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
                             AccessSpecifier AS, uint64_t offsetInBits,
                             uint32_t AlignInBits, llvm::DIFile *tunit,
                             llvm::DIScope *scope, const RecordDecl *RD) {
  llvm::DIType *FieldDebugTy = getOrCreateType(type, tunit);

  // Location of the field; fall back to the current location when invalid.
  llvm::DIFile *FieldFile = getOrCreateFile(loc);
  const unsigned FieldLine = getLineNumber(loc.isValid() ? loc : CurLoc);

  uint64_t SizeInBits = 0;
  uint32_t Align = AlignInBits;
  if (!type->isIncompleteArrayType()) {
    TypeInfo TI = CGM.getContext().getTypeInfo(type);
    SizeInBits = TI.Width;
    if (!Align)
      Align = getTypeAlignIfRequired(type, CGM.getContext());
  }

  llvm::DINode::DIFlags MemberFlags = getAccessFlag(AS, RD);
  return DBuilder.createMemberType(scope, name, FieldFile, FieldLine,
                                   SizeInBits, Align, offsetInBits,
                                   MemberFlags, FieldDebugTy);
}
// Emit member descriptions for a lambda's closure class by walking its
// captures, using the capture for the name/location and the parallel field
// for the type/offset.
//
// \param CXXDecl   the lambda closure class.
// \param elements  out-parameter receiving one member DIType per capture.
// \param RecordTy  debug-info node of the closure class (member scope).
void CGDebugInfo::CollectRecordLambdaFields(
    const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements,
    llvm::DIType *RecordTy) {
  // For C++11 Lambdas a Field will be the same as a Capture, but the Capture
  // has the name and the location of the variable so we should iterate over
  // both concurrently.
  const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(CXXDecl);
  RecordDecl::field_iterator Field = CXXDecl->field_begin();
  unsigned fieldno = 0;
  // Captures and fields are advanced in lockstep; this assumes they are in
  // one-to-one correspondence and in the same order.
  for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
                                             E = CXXDecl->captures_end();
       I != E; ++I, ++Field, ++fieldno) {
    const LambdaCapture &C = *I;
    if (C.capturesVariable()) {
      // Captured variable: name/location come from the variable, the type
      // and layout offset from the corresponding closure field.
      SourceLocation Loc = C.getLocation();
      assert(!Field->isBitField() && "lambdas don't have bitfield members!");
      VarDecl *V = C.getCapturedVar();
      StringRef VName = V->getName();
      llvm::DIFile *VUnit = getOrCreateFile(Loc);
      auto Align = getDeclAlignIfRequired(V, CGM.getContext());
      llvm::DIType *FieldType = createFieldType(
          VName, Field->getType(), Loc, Field->getAccess(),
          layout.getFieldOffset(fieldno), Align, VUnit, RecordTy, CXXDecl);
      elements.push_back(FieldType);
    } else if (C.capturesThis()) {
      // TODO: Need to handle 'this' in some way by probably renaming the
      // this of the lambda class and having a field member of 'this' or
      // by using AT_object_pointer for the function and having that be
      // used as 'this' for semantic references.
      // Emitted as a member literally named "this".
      FieldDecl *f = *Field;
      llvm::DIFile *VUnit = getOrCreateFile(f->getLocation());
      QualType type = f->getType();
      llvm::DIType *fieldType = createFieldType(
          "this", type, f->getLocation(), f->getAccess(),
          layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
      elements.push_back(fieldType);
    }
  }
}
llvm::DIDerivedType *
CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy,
                                     const RecordDecl *RD) {
  // Describe a static data member of 'RD', attaching a constant value when
  // the initializer can be evaluated to an integer or float. The result is
  // cached so later references reuse the same descriptor.
  Var = Var->getCanonicalDecl();
  llvm::DIFile *VUnit = getOrCreateFile(Var->getLocation());
  llvm::DIType *VTy = getOrCreateType(Var->getType(), VUnit);
  unsigned LineNumber = getLineNumber(Var->getLocation());
  StringRef VName = Var->getName();

  // Try to fold the initializer into an LLVM constant.
  llvm::Constant *C = nullptr;
  if (Var->getInit())
    if (const APValue *Value = Var->evaluateValue()) {
      if (Value->isInt())
        C = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt());
      else if (Value->isFloat())
        C = llvm::ConstantFP::get(CGM.getLLVMContext(), Value->getFloat());
    }

  llvm::DIDerivedType *GV = DBuilder.createStaticMemberType(
      RecordTy, VName, VUnit, LineNumber, VTy,
      getAccessFlag(Var->getAccess(), RD), C,
      getDeclAlignIfRequired(Var, CGM.getContext()));
  StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
  return GV;
}
void CGDebugInfo::CollectRecordNormalField(
const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile *tunit,
SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType *RecordTy,
const RecordDecl *RD) {
StringRef name = field->getName();
QualType type = field->getType();
// Ignore unnamed fields unless they're anonymous structs/unions.
if (name.empty() && !type->isRecordType())
return;
llvm::DIType *FieldType;
if (field->isBitField()) {
FieldType = createBitFieldType(field, RecordTy, RD);
} else {
auto Align = getDeclAlignIfRequired(field, CGM.getContext());
FieldType =
createFieldType(name, type, field->getLocation(), field->getAccess(),
OffsetInBits, Align, tunit, RecordTy, RD);
}
elements.push_back(FieldType);
}
void CGDebugInfo::CollectRecordNestedType(
    const TypeDecl *TD, SmallVectorImpl<llvm::Metadata *> &elements) {
  // Append debug info for a type declared inside a record.
  QualType Ty = CGM.getContext().getTypeDeclType(TD);
  // The injected class name is not a genuine nested record; skip it.
  if (isa<InjectedClassNameType>(Ty))
    return;
  SourceLocation Loc = TD->getLocation();
  elements.push_back(getOrCreateType(Ty, getOrCreateFile(Loc)));
}
void CGDebugInfo::CollectRecordFields(
    const RecordDecl *record, llvm::DIFile *tunit,
    SmallVectorImpl<llvm::Metadata *> &elements,
    llvm::DICompositeType *RecordTy) {
  // Populate 'elements' with debug-info members for the static and
  // non-static fields of 'record' (plus nested types when emitting
  // CodeView), preserving source declaration order.
  const auto *CXXDecl = dyn_cast<CXXRecordDecl>(record);
  if (CXXDecl && CXXDecl->isLambda())
    // Lambda closure types pair each field with its capture; handled
    // separately.
    CollectRecordLambdaFields(CXXDecl, elements, RecordTy);
  else {
    const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
    // Field number for non-static fields.
    unsigned fieldNo = 0;
    // Static and non-static members should appear in the same order as
    // the corresponding declarations in the source program.
    for (const auto *I : record->decls())
      if (const auto *V = dyn_cast<VarDecl>(I)) {
        if (V->hasAttr<NoDebugAttr>())
          continue;
        // Skip variable template specializations when emitting CodeView. MSVC
        // doesn't emit them.
        if (CGM.getCodeGenOpts().EmitCodeView &&
            isa<VarTemplateSpecializationDecl>(V))
          continue;
        // Partial specializations are never emitted as static members.
        if (isa<VarTemplatePartialSpecializationDecl>(V))
          continue;
        // Reuse the existing static member declaration if one exists
        auto MI = StaticDataMemberCache.find(V->getCanonicalDecl());
        if (MI != StaticDataMemberCache.end()) {
          assert(MI->second &&
                 "Static data member declaration should still exist");
          elements.push_back(MI->second);
        } else {
          auto Field = CreateRecordStaticField(V, RecordTy, record);
          elements.push_back(Field);
        }
      } else if (const auto *field = dyn_cast<FieldDecl>(I)) {
        CollectRecordNormalField(field, layout.getFieldOffset(fieldNo), tunit,
                                 elements, RecordTy, record);
        // Bump field number for next field.
        ++fieldNo;
      } else if (CGM.getCodeGenOpts().EmitCodeView) {
        // Debug info for nested types is included in the member list only for
        // CodeView.
        if (const auto *nestedType = dyn_cast<TypeDecl>(I))
          if (!nestedType->isImplicit() &&
              nestedType->getDeclContext() == record)
            CollectRecordNestedType(nestedType, elements);
      }
  }
}
llvm::DISubroutineType *
CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
                                   llvm::DIFile *Unit, bool decl) {
  // Static member functions carry no implicit 'this'; their debug type is
  // simply the plain function prototype. Instance methods get 'this'
  // spliced in by getOrCreateInstanceMethodType.
  const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
  if (!Method->isStatic())
    return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit,
                                         decl);
  return cast_or_null<llvm::DISubroutineType>(
      getOrCreateType(QualType(Func, 0), Unit));
}
llvm::DISubroutineType *
CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
                                           const FunctionProtoType *Func,
                                           llvm::DIFile *Unit, bool decl) {
  // Build the subroutine type for a non-static member function: take the
  // plain prototype's debug type and splice an (object-pointer) 'this'
  // parameter in right after the return type.
  // Add "this" pointer.
  llvm::DITypeRefArray Args(
      cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit))
          ->getTypeArray());
  assert(Args.size() && "Invalid number of arguments!");
  SmallVector<llvm::Metadata *, 16> Elts;
  // First element is always return type. For 'void' functions it is NULL.
  QualType temp = Func->getReturnType();
  if (temp->getTypeClass() == Type::Auto && decl)
    // Declarations with an undeduced 'auto' return type get the 'auto'
    // type itself rather than the prototype's return slot.
    Elts.push_back(CreateType(cast<AutoType>(temp)));
  else
    Elts.push_back(Args[0]);
  // "this" pointer is always first argument.
  const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl();
  if (isa<ClassTemplateSpecializationDecl>(RD)) {
    // Create pointer type directly in this case.
    const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
    QualType PointeeTy = ThisPtrTy->getPointeeType();
    unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
    uint64_t Size = CGM.getTarget().getPointerWidth(AS);
    auto Align = getTypeAlignIfRequired(ThisPtrTy, CGM.getContext());
    llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit);
    llvm::DIType *ThisPtrType =
        DBuilder.createPointerType(PointeeType, Size, Align);
    TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
    // TODO: This and the artificial type below are misleading, the
    // types aren't artificial the argument is, but the current
    // metadata doesn't represent that.
    ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
    Elts.push_back(ThisPtrType);
  } else {
    llvm::DIType *ThisPtrType = getOrCreateType(ThisPtr, Unit);
    TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
    ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
    Elts.push_back(ThisPtrType);
  }
  // Copy rest of the arguments.
  for (unsigned i = 1, e = Args.size(); i != e; ++i)
    Elts.push_back(Args[i]);
  llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
  // Propagate the prototype's ref-qualifier (& / &&) onto the subroutine
  // type's flags.
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
    Flags |= llvm::DINode::FlagLValueReference;
  if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
    Flags |= llvm::DINode::FlagRValueReference;
  return DBuilder.createSubroutineType(EltTypeArray, Flags,
                                       getDwarfCC(Func->getCallConv()));
}
/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
/// inside a function.
static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
if (const auto *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
return isFunctionLocalClass(NRD);
if (isa<FunctionDecl>(RD->getDeclContext()))
return true;
return false;
}
llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
    const CXXMethodDecl *Method, llvm::DIFile *Unit, llvm::DIType *RecordTy) {
  // Build (and cache) the DISubprogram declaration for a C++ member
  // function, including virtuality, vtable index, 'this' adjustment, and
  // access/ref-qualifier/deleted flags.
  bool IsCtorOrDtor =
      isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
  StringRef MethodName = getFunctionName(Method);
  llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit, true);
  // Since a single ctor/dtor corresponds to multiple functions, it doesn't
  // make sense to give a single ctor/dtor a linkage name.
  StringRef MethodLinkageName;
  // FIXME: 'isFunctionLocalClass' seems like an arbitrary/unintentional
  // property to use here. It may've been intended to model "is non-external
  // type" but misses cases of non-function-local but non-external classes such
  // as those in anonymous namespaces as well as the reverse - external types
  // that are function local, such as those in (non-local) inline functions.
  if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
    MethodLinkageName = CGM.getMangledName(Method);
  // Get the location for the method. Implicit methods have no source
  // position; leave file null and line 0 for them.
  llvm::DIFile *MethodDefUnit = nullptr;
  unsigned MethodLine = 0;
  if (!Method->isImplicit()) {
    MethodDefUnit = getOrCreateFile(Method->getLocation());
    MethodLine = getLineNumber(Method->getLocation());
  }
  // Collect virtual method info.
  llvm::DIType *ContainingType = nullptr;
  unsigned VIndex = 0;
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
  int ThisAdjustment = 0;
  if (Method->isVirtual()) {
    if (Method->isPure())
      SPFlags |= llvm::DISubprogram::SPFlagPureVirtual;
    else
      SPFlags |= llvm::DISubprogram::SPFlagVirtual;
    if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
      // It doesn't make sense to give a virtual destructor a vtable index,
      // since a single destructor has two entries in the vtable.
      if (!isa<CXXDestructorDecl>(Method))
        VIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(Method);
    } else {
      // Emit MS ABI vftable information. There is only one entry for the
      // deleting dtor.
      const auto *DD = dyn_cast<CXXDestructorDecl>(Method);
      GlobalDecl GD = DD ? GlobalDecl(DD, Dtor_Deleting) : GlobalDecl(Method);
      MethodVFTableLocation ML =
          CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
      VIndex = ML.Index;
      // CodeView only records the vftable offset in the class that introduces
      // the virtual method. This is possible because, unlike Itanium, the MS
      // C++ ABI does not include all virtual methods from non-primary bases in
      // the vtable for the most derived class. For example, if C inherits from
      // A and B, C's primary vftable will not include B's virtual methods.
      if (Method->size_overridden_methods() == 0)
        Flags |= llvm::DINode::FlagIntroducedVirtual;
      // The 'this' adjustment accounts for both the virtual and non-virtual
      // portions of the adjustment. Presumably the debugger only uses it when
      // it knows the dynamic type of an object.
      ThisAdjustment = CGM.getCXXABI()
                           .getVirtualFunctionPrologueThisAdjustment(GD)
                           .getQuantity();
    }
    ContainingType = RecordTy;
  }
  // We're checking for deleted C++ special member functions
  // [Ctors,Dtors, Copy/Move]
  auto checkAttrDeleted = [&](const auto *Method) {
    if (Method->getCanonicalDecl()->isDeleted())
      SPFlags |= llvm::DISubprogram::SPFlagDeleted;
  };
  switch (Method->getKind()) {
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
    checkAttrDeleted(Method);
    break;
  case Decl::CXXMethod:
    if (Method->isCopyAssignmentOperator() ||
        Method->isMoveAssignmentOperator())
      checkAttrDeleted(Method);
    break;
  default:
    break;
  }
  if (Method->isNoReturn())
    Flags |= llvm::DINode::FlagNoReturn;
  if (Method->isStatic())
    Flags |= llvm::DINode::FlagStaticMember;
  if (Method->isImplicit())
    Flags |= llvm::DINode::FlagArtificial;
  Flags |= getAccessFlag(Method->getAccess(), Method->getParent());
  if (const auto *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
    if (CXXC->isExplicit())
      Flags |= llvm::DINode::FlagExplicit;
  } else if (const auto *CXXC = dyn_cast<CXXConversionDecl>(Method)) {
    if (CXXC->isExplicit())
      Flags |= llvm::DINode::FlagExplicit;
  }
  if (Method->hasPrototype())
    Flags |= llvm::DINode::FlagPrototyped;
  if (Method->getRefQualifier() == RQ_LValue)
    Flags |= llvm::DINode::FlagLValueReference;
  if (Method->getRefQualifier() == RQ_RValue)
    Flags |= llvm::DINode::FlagRValueReference;
  if (CGM.getLangOpts().Optimize)
    SPFlags |= llvm::DISubprogram::SPFlagOptimized;
  // In this debug mode, emit type info for a class when its constructor type
  // info is emitted.
  if (DebugKind == codegenoptions::DebugInfoConstructor)
    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
      completeUnusedClass(*CD->getParent());
  llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
  llvm::DISubprogram *SP = DBuilder.createMethod(
      RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
      MethodTy, VIndex, ThisAdjustment, ContainingType, Flags, SPFlags,
      TParamsArray.get());
  // Cache by canonical decl so the definition can reuse this declaration.
  SPCache[Method->getCanonicalDecl()].reset(SP);
  return SP;
}
void CGDebugInfo::CollectCXXMemberFunctions(
    const CXXRecordDecl *RD, llvm::DIFile *Unit,
    SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy) {
  // Walk every declaration (not just the plain member list) so templated
  // member functions are gathered as well.
  for (const auto *I : RD->decls()) {
    const auto *Method = dyn_cast<CXXMethodDecl>(I);
    // Implicit members are kept out of the member list: LLVM would otherwise
    // pull them into type units, while they can still be emitted into the
    // type declaration/reference inside the compile unit.
    // The same goes for 'nodebug' methods, matching CodeGenFunction.cpp.
    // FIXME: Using(Shadow?)Decls could be handled here to create
    // DW_TAG_imported_declarations inside the class for base decls brought
    // into derived classes. GDB didn't seem to notice/leverage these when
    // tried, so this isn't urgent. (GCC seems to produce them if referenced.)
    if (!Method || Method->isImplicit() || Method->hasAttr<NoDebugAttr>())
      continue;
    // Skip methods whose type still contains an undeduced 'auto'.
    if (Method->getType()->castAs<FunctionProtoType>()->getContainedAutoType())
      continue;
    // Prefer an existing member function declaration if one was cached --
    // it may be associated with the declaration of the type and should be
    // reused while building the definition. This situation can arise in the
    // vtable-based debug info reduction where implicit members are emitted
    // in a non-vtable TU.
    auto MI = SPCache.find(Method->getCanonicalDecl());
    if (MI != SPCache.end())
      EltTys.push_back(static_cast<llvm::Metadata *>(MI->second));
    else
      EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
  }
}
void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit,
                                  SmallVectorImpl<llvm::Metadata *> &EltTys,
                                  llvm::DIType *RecordTy) {
  // Track bases already emitted so indirect virtual bases aren't repeated.
  llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> SeenTypes;
  // Direct bases first.
  CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->bases(), SeenTypes,
                     llvm::DINode::FlagZero);
  // CodeView additionally wants records for indirect virtual base classes.
  if (!CGM.getCodeGenOpts().EmitCodeView)
    return;
  CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->vbases(), SeenTypes,
                     llvm::DINode::FlagIndirectVirtualBase);
}
void CGDebugInfo::CollectCXXBasesAux(
    const CXXRecordDecl *RD, llvm::DIFile *Unit,
    SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy,
    const CXXRecordDecl::base_class_const_range &Bases,
    llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
    llvm::DINode::DIFlags StartingFlags) {
  // Emit an inheritance entry for each base in 'Bases', skipping any base
  // already recorded in 'SeenTypes'. 'StartingFlags' seeds every emitted
  // entry's flags (e.g. FlagIndirectVirtualBase for CodeView vbases).
  const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
  for (const auto &BI : Bases) {
    const auto *Base =
        cast<CXXRecordDecl>(BI.getType()->castAs<RecordType>()->getDecl());
    // insert() returns false in .second when the base was already seen.
    if (!SeenTypes.insert(Base).second)
      continue;
    auto *BaseTy = getOrCreateType(BI.getType(), Unit);
    llvm::DINode::DIFlags BFlags = StartingFlags;
    uint64_t BaseOffset;
    uint32_t VBPtrOffset = 0;
    if (BI.isVirtual()) {
      if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
        // virtual base offset offset is -ve. The code generator emits dwarf
        // expression where it expects +ve number.
        BaseOffset = 0 - CGM.getItaniumVTableContext()
                             .getVirtualBaseOffsetOffset(RD, Base)
                             .getQuantity();
      } else {
        // In the MS ABI, store the vbtable offset, which is analogous to the
        // vbase offset offset in Itanium.
        BaseOffset =
            4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
        VBPtrOffset = CGM.getContext()
                          .getASTRecordLayout(RD)
                          .getVBPtrOffset()
                          .getQuantity();
      }
      BFlags |= llvm::DINode::FlagVirtual;
    } else
      BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
    // FIXME: Inconsistent units for BaseOffset. It is in bytes when
    // BI->isVirtual() and bits when not.
    BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
    llvm::DIType *DTy = DBuilder.createInheritance(RecordTy, BaseTy, BaseOffset,
                                                   VBPtrOffset, BFlags);
    EltTys.push_back(DTy);
  }
}
llvm::DINodeArray
CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
                                   ArrayRef<TemplateArgument> TAList,
                                   llvm::DIFile *Unit) {
  // Translate a list of template arguments into debug-info template
  // parameter nodes. 'TPList' supplies parameter names and defaults; it may
  // be null (e.g. when recursing into a parameter pack), in which case
  // parameters are emitted nameless and without default detection.
  SmallVector<llvm::Metadata *, 16> TemplateParams;
  for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
    const TemplateArgument &TA = TAList[i];
    StringRef Name;
    bool defaultParameter = false;
    if (TPList)
      Name = TPList->getParam(i)->getName();
    switch (TA.getKind()) {
    case TemplateArgument::Type: {
      // Type argument: also detect whether it equals the declared default.
      llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit);
      if (TPList)
        if (auto *templateType =
                dyn_cast_or_null<TemplateTypeParmDecl>(TPList->getParam(i)))
          if (templateType->hasDefaultArgument())
            defaultParameter =
                templateType->getDefaultArgument() == TA.getAsType();
      TemplateParams.push_back(DBuilder.createTemplateTypeParameter(
          TheCU, Name, TTy, defaultParameter));
    } break;
    case TemplateArgument::Integral: {
      // Non-type integral argument; default detection only for DWARF >= 5.
      llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit);
      if (TPList && CGM.getCodeGenOpts().DwarfVersion >= 5)
        if (auto *templateType =
                dyn_cast_or_null<NonTypeTemplateParmDecl>(TPList->getParam(i)))
          if (templateType->hasDefaultArgument() &&
              !templateType->getDefaultArgument()->isValueDependent())
            defaultParameter = llvm::APSInt::isSameValue(
                templateType->getDefaultArgument()->EvaluateKnownConstInt(
                    CGM.getContext()),
                TA.getAsIntegral());
      TemplateParams.push_back(DBuilder.createTemplateValueParameter(
          TheCU, Name, TTy, defaultParameter,
          llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral())));
    } break;
    case TemplateArgument::Declaration: {
      // Declaration argument: the value is (typically) the address of the
      // referenced entity, computed per kind of declaration below.
      const ValueDecl *D = TA.getAsDecl();
      QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext());
      llvm::DIType *TTy = getOrCreateType(T, Unit);
      llvm::Constant *V = nullptr;
      // Skip retrieve the value if that template parameter has cuda device
      // attribute, i.e. that value is not available at the host side.
      if (!CGM.getLangOpts().CUDA || CGM.getLangOpts().CUDAIsDevice ||
          !D->hasAttr<CUDADeviceAttr>()) {
        const CXXMethodDecl *MD;
        // Variable pointer template parameters have a value that is the address
        // of the variable.
        if (const auto *VD = dyn_cast<VarDecl>(D))
          V = CGM.GetAddrOfGlobalVar(VD);
        // Member function pointers have special support for building them,
        // though this is currently unsupported in LLVM CodeGen.
        else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
          V = CGM.getCXXABI().EmitMemberFunctionPointer(MD);
        else if (const auto *FD = dyn_cast<FunctionDecl>(D))
          V = CGM.GetAddrOfFunction(FD);
        // Member data pointers have special handling too to compute the fixed
        // offset within the object.
        else if (const auto *MPT =
                     dyn_cast<MemberPointerType>(T.getTypePtr())) {
          // These five lines (& possibly the above member function pointer
          // handling) might be able to be refactored to use similar code in
          // CodeGenModule::getMemberPointerConstant
          uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
          CharUnits chars =
              CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
          V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
        } else if (const auto *GD = dyn_cast<MSGuidDecl>(D)) {
          V = CGM.GetAddrOfMSGuidDecl(GD).getPointer();
        } else if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
          if (T->isRecordType())
            V = ConstantEmitter(CGM).emitAbstract(
                SourceLocation(), TPO->getValue(), TPO->getType());
          else
            V = CGM.GetAddrOfTemplateParamObject(TPO).getPointer();
        }
        assert(V && "Failed to find template parameter pointer");
        V = V->stripPointerCasts();
      }
      TemplateParams.push_back(DBuilder.createTemplateValueParameter(
          TheCU, Name, TTy, defaultParameter, cast_or_null<llvm::Constant>(V)));
    } break;
    case TemplateArgument::NullPtr: {
      QualType T = TA.getNullPtrType();
      llvm::DIType *TTy = getOrCreateType(T, Unit);
      llvm::Constant *V = nullptr;
      // Special case member data pointer null values since they're actually -1
      // instead of zero.
      if (const auto *MPT = dyn_cast<MemberPointerType>(T.getTypePtr()))
        // But treat member function pointers as simple zero integers because
        // it's easier than having a special case in LLVM's CodeGen. If LLVM
        // CodeGen grows handling for values of non-null member function
        // pointers then perhaps we could remove this special case and rely on
        // EmitNullMemberPointer for member function pointers.
        if (MPT->isMemberDataPointer())
          V = CGM.getCXXABI().EmitNullMemberPointer(MPT);
      if (!V)
        V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
      TemplateParams.push_back(DBuilder.createTemplateValueParameter(
          TheCU, Name, TTy, defaultParameter, V));
    } break;
    case TemplateArgument::Template:
      // Template-template argument: record the qualified name only.
      TemplateParams.push_back(DBuilder.createTemplateTemplateParameter(
          TheCU, Name, nullptr,
          TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString()));
      break;
    case TemplateArgument::Pack:
      // Parameter pack: recurse without a parameter list (names/defaults do
      // not apply to the expanded elements).
      TemplateParams.push_back(DBuilder.createTemplateParameterPack(
          TheCU, Name, nullptr,
          CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit)));
      break;
    case TemplateArgument::Expression: {
      // Constant expression argument: evaluate it to an LLVM constant.
      const Expr *E = TA.getAsExpr();
      QualType T = E->getType();
      if (E->isGLValue())
        T = CGM.getContext().getLValueReferenceType(T);
      llvm::Constant *V = ConstantEmitter(CGM).emitAbstract(E, T);
      assert(V && "Expression in template argument isn't constant");
      llvm::DIType *TTy = getOrCreateType(T, Unit);
      TemplateParams.push_back(DBuilder.createTemplateValueParameter(
          TheCU, Name, TTy, defaultParameter, V->stripPointerCasts()));
    } break;
    // And the following should never occur:
    case TemplateArgument::TemplateExpansion:
    case TemplateArgument::Null:
      llvm_unreachable(
          "These argument types shouldn't exist in concrete types");
    }
  }
  return DBuilder.getOrCreateArray(TemplateParams);
}
llvm::DINodeArray
CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
                                           llvm::DIFile *Unit) {
  // Only function template specializations carry template arguments worth
  // describing; everything else yields an empty array.
  if (FD->getTemplatedKind() !=
      FunctionDecl::TK_FunctionTemplateSpecialization)
    return llvm::DINodeArray();
  const TemplateParameterList *TList = FD->getTemplateSpecializationInfo()
                                           ->getTemplate()
                                           ->getTemplateParameters();
  return CollectTemplateParams(
      TList, FD->getTemplateSpecializationArgs()->asArray(), Unit);
}
llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL,
                                                        llvm::DIFile *Unit) {
  // Always get the full list of parameters from the primary template, not
  // just the ones from the specialization: a partial specialization may
  // declare fewer parameters than there are arguments.
  if (const auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VL))
    return CollectTemplateParams(
        TS->getSpecializedTemplate()->getTemplateParameters(),
        TS->getTemplateArgs().asArray(), Unit);
  return llvm::DINodeArray();
}
llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(
    const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) {
  // As with variable templates, use the primary template's full parameter
  // list; a partial specialization may have fewer parameters than there are
  // arguments.
  return CollectTemplateParams(
      TSpecial->getSpecializedTemplate()->getTemplateParameters(),
      TSpecial->getTemplateArgs().asArray(), Unit);
}
llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
  // Lazily build, cache, and return the debug type used for vtable
  // pointers.
  if (!VTablePtrType) {
    ASTContext &Context = CGM.getContext();
    // Model a vtable slot as a subroutine type with a single int element.
    llvm::Metadata *IntTyMD = getOrCreateType(Context.IntTy, Unit);
    llvm::DITypeRefArray SlotElts = DBuilder.getOrCreateTypeArray(IntTyMD);
    llvm::DIType *SlotFnTy = DBuilder.createSubroutineType(SlotElts);
    unsigned PtrSize = Context.getTypeSize(Context.VoidPtrTy);
    unsigned VtblAS = CGM.getTarget().getVtblPtrAddressSpace();
    Optional<unsigned> DWARFAddressSpace =
        CGM.getTarget().getDWARFAddressSpace(VtblAS);
    // The vptr is a pointer to the named "__vtbl_ptr_type", itself a
    // pointer to the slot function type.
    llvm::DIType *VtblTy = DBuilder.createPointerType(
        SlotFnTy, PtrSize, 0, DWARFAddressSpace, "__vtbl_ptr_type");
    VTablePtrType = DBuilder.createPointerType(VtblTy, PtrSize);
  }
  return VTablePtrType;
}
StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
  // Produce the gdb-compatible "_vptr$<class>" name; the string is interned
  // on the side so the returned StringRef stays valid.
  return internString("_vptr$", RD->getNameAsString());
}
StringRef CGDebugInfo::getDynamicInitializerName(const VarDecl *VD,
                                                 DynamicInitKind StubKind,
                                                 llvm::Function *InitFn) {
  // Compute the display name for a dynamic-initializer/atexit stub. For
  // CodeView this mimics MSVC's "`dynamic initializer for 'x'" naming; for
  // other formats the mangled function name is used.
  // If we're not emitting codeview, use the mangled name. For Itanium, this is
  // arbitrary.
  if (!CGM.getCodeGenOpts().EmitCodeView ||
      StubKind == DynamicInitKind::GlobalArrayDestructor)
    return InitFn->getName();
  // Print the normal qualified name for the variable, then break off the last
  // NNS, and add the appropriate other text. Clang always prints the global
  // variable name without template arguments, so we can use rsplit("::") and
  // then recombine the pieces.
  SmallString<128> QualifiedGV;
  StringRef Quals;
  StringRef GVName;
  {
    llvm::raw_svector_ostream OS(QualifiedGV);
    VD->printQualifiedName(OS, getPrintingPolicy());
    std::tie(Quals, GVName) = OS.str().rsplit("::");
    // rsplit leaves the whole string in the first piece when there is no
    // "::"; swap so GVName always holds the unqualified name.
    if (GVName.empty())
      std::swap(Quals, GVName);
  }
  SmallString<128> InitName;
  llvm::raw_svector_ostream OS(InitName);
  if (!Quals.empty())
    OS << Quals << "::";
  switch (StubKind) {
  case DynamicInitKind::NoStub:
  case DynamicInitKind::GlobalArrayDestructor:
    llvm_unreachable("not an initializer");
  case DynamicInitKind::Initializer:
    OS << "`dynamic initializer for '";
    break;
  case DynamicInitKind::AtExit:
    OS << "`dynamic atexit destructor for '";
    break;
  }
  OS << GVName;
  // Add any template specialization args.
  if (const auto *VTpl = dyn_cast<VarTemplateSpecializationDecl>(VD)) {
    printTemplateArgumentList(OS, VTpl->getTemplateArgs().asArray(),
                              getPrintingPolicy());
  }
  OS << '\'';
  // Intern the string so the returned StringRef outlives the local buffer.
  return internString(OS.str());
}
void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
                                    SmallVectorImpl<llvm::Metadata *> &EltTys) {
  // Append an artificial vptr member (and, for CodeView, a sized vtable
  // type) to 'EltTys' for a dynamic class.
  // If this class is not dynamic then there is not any vtable info to collect.
  if (!RD->isDynamicClass())
    return;
  // Don't emit any vtable shape or vptr info if this class doesn't have an
  // extendable vfptr. This can happen if the class doesn't have virtual
  // methods, or in the MS ABI if those virtual methods only come from virtually
  // inherited bases.
  const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
  if (!RL.hasExtendableVFPtr())
    return;
  // CodeView needs to know how large the vtable of every dynamic class is, so
  // emit a special named pointer type into the element list. The vptr type
  // points to this type as well.
  llvm::DIType *VPtrTy = nullptr;
  bool NeedVTableShape = CGM.getCodeGenOpts().EmitCodeView &&
                         CGM.getTarget().getCXXABI().isMicrosoft();
  if (NeedVTableShape) {
    uint64_t PtrWidth =
        CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
    const VTableLayout &VFTLayout =
        CGM.getMicrosoftVTableContext().getVFTableLayout(RD, CharUnits::Zero());
    // Subtract the RTTI slot when RTTI data is enabled.
    unsigned VSlotCount =
        VFTLayout.vtable_components().size() - CGM.getLangOpts().RTTIData;
    unsigned VTableWidth = PtrWidth * VSlotCount;
    unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace();
    Optional<unsigned> DWARFAddressSpace =
        CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
    // Create a very wide void* type and insert it directly in the element list.
    llvm::DIType *VTableType = DBuilder.createPointerType(
        nullptr, VTableWidth, 0, DWARFAddressSpace, "__vtbl_ptr_type");
    EltTys.push_back(VTableType);
    // The vptr is a pointer to this special vtable type.
    VPtrTy = DBuilder.createPointerType(VTableType, PtrWidth);
  }
  // If there is a primary base then the artificial vptr member lives there.
  if (RL.getPrimaryBase())
    return;
  if (!VPtrTy)
    VPtrTy = getOrCreateVTablePtrType(Unit);
  // The vptr member sits at offset 0 with artificial flag set.
  unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
  llvm::DIType *VPtrMember =
      DBuilder.createMemberType(Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
                                llvm::DINode::FlagArtificial, VPtrTy);
  EltTys.push_back(VPtrMember);
}
llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy,
                                                 SourceLocation Loc) {
  // Only valid when more than line-tables-only debug info is requested.
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  return getOrCreateType(RTy, getOrCreateFile(Loc));
}
llvm::DIType *CGDebugInfo::getOrCreateInterfaceType(QualType D,
                                                    SourceLocation Loc) {
  // Interface types are emitted like any other standalone type.
  return getOrCreateStandaloneType(D, Loc);
}
llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D,
                                                     SourceLocation Loc) {
  // Build the debug type and pin it in the retained-types list so it is
  // emitted even if otherwise unreferenced.
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  assert(!D.isNull() && "null type");
  llvm::DIType *Ty = getOrCreateType(D, getOrCreateFile(Loc));
  assert(Ty && "could not create debug info for type");
  RetainedTypes.push_back(D.getAsOpaquePtr());
  return Ty;
}
void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
                                           QualType AllocatedTy,
                                           SourceLocation Loc) {
  // Heap-allocation-site metadata is only recorded beyond line-tables-only
  // debug info.
  if (CGM.getCodeGenOpts().getDebugInfo() <=
      codegenoptions::DebugLineTablesOnly)
    return;
  // Attach the allocated type's debug type, or an empty node for 'void'.
  llvm::MDNode *AllocMD;
  if (!AllocatedTy->isVoidType())
    AllocMD = getOrCreateType(AllocatedTy, getOrCreateFile(Loc));
  else
    AllocMD = llvm::MDNode::get(CGM.getLLVMContext(), None);
  CI->setMetadata("heapallocsite", AllocMD);
}
void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getEnumType(ED);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl())
return;
llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>());
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
void CGDebugInfo::completeType(const RecordDecl *RD) {
  // Outside of C++, or above the 'limited' debug-info level, record types
  // are always completed when required.
  if (!CGM.getLangOpts().CPlusPlus ||
      DebugKind > codegenoptions::LimitedDebugInfo)
    completeRequiredType(RD);
}
/// Return true if the class or any of its methods are marked dllimport.
static bool isClassOrMethodDLLImport(const CXXRecordDecl *RD) {
  // Check the class itself, then scan its methods for the attribute.
  bool Imported = RD->hasAttr<DLLImportAttr>();
  for (const CXXMethodDecl *MD : RD->methods())
    Imported = Imported || MD->hasAttr<DLLImportAttr>();
  return Imported;
}
/// Does a type definition exist in an imported clang module?
static bool isDefinedInClangModule(const RecordDecl *RD) {
  // Only definitions that were imported from an AST file come from a module.
  if (!RD || !RD->isFromASTFile())
    return false;
  // Anonymous entities cannot be addressed. Treat them as not from module.
  if (!RD->isExternallyVisible() && RD->getName().empty())
    return false;
  if (auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) {
    // Incomplete definitions don't count as module-provided definitions.
    if (!CXXDecl->isCompleteDefinition())
      return false;
    // Check whether RD is a template.
    auto TemplateKind = CXXDecl->getTemplateSpecializationKind();
    if (TemplateKind != TSK_Undeclared) {
      // Unfortunately getOwningModule() isn't accurate enough to find the
      // owning module of a ClassTemplateSpecializationDecl that is inside a
      // namespace spanning multiple modules.
      bool Explicit = false;
      if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(CXXDecl))
        Explicit = TD->isExplicitInstantiationOrSpecialization();
      // Implicit instantiations inside a namespace can't be reliably
      // attributed to a module; treat them as not module-defined.
      if (!Explicit && CXXDecl->getEnclosingNamespaceContext())
        return false;
      // This is a template, check the origin of the first member.
      if (CXXDecl->field_begin() == CXXDecl->field_end())
        return TemplateKind == TSK_ExplicitInstantiationDeclaration;
      if (!CXXDecl->field_begin()->isFromASTFile())
        return false;
    }
  }
  return true;
}
void CGDebugInfo::completeClassData(const RecordDecl *RD) {
  // Skip dynamic classes whose vtable is only available externally (and that
  // carry no dllimport taint): their full description is emitted elsewhere.
  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (CXXRD->isDynamicClass() &&
        CGM.getVTableLinkage(CXXRD) ==
            llvm::GlobalValue::AvailableExternallyLinkage &&
        !isClassOrMethodDLLImport(CXXRD))
      return;
  }
  // With external type refs, module-defined types are referenced instead of
  // re-emitted.
  if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
    return;
  completeClass(RD);
}
// Build the full definition for a class type, replacing any cached forward
// declaration.
void CGDebugInfo::completeClass(const RecordDecl *RD) {
  // No record types are emitted at all in line-tables-only mode (or below).
  if (DebugKind <= codegenoptions::DebugLineTablesOnly)
    return;
  QualType Ty = CGM.getContext().getRecordType(RD);
  void *TyPtr = Ty.getAsOpaquePtr();
  auto I = TypeCache.find(TyPtr);
  // Nothing to do if the cache already holds a non-forward-declaration type.
  if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
    return;
  // Create the complete definition and overwrite the cache entry.
  llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>());
  assert(!Res->isForwardDecl());
  TypeCache[TyPtr].reset(Res);
}
/// \return true if any method in [I, End) was instantiated from a member
/// function whose pattern is an explicit (non-implicit) definition and the
/// instantiation is not itself an explicit specialization.
static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
                                        CXXRecordDecl::method_iterator End) {
  for (CXXMethodDecl *MD : llvm::make_range(I, End))
    if (FunctionDecl *Tmpl = MD->getInstantiatedFromMemberFunction())
      // NOTE(review): getMemberSpecializationInfo() is assumed non-null
      // whenever getInstantiatedFromMemberFunction() returns a decl --
      // confirm against the AST invariants.
      if (!Tmpl->isImplicit() && Tmpl->isThisDeclarationADefinition() &&
          !MD->getMemberSpecializationInfo()->isExplicitSpecialization())
        return true;
  return false;
}
/// \return true if constructor homing may be applied to \p RD.
static bool canUseCtorHoming(const CXXRecordDecl *RD) {
  // Constructor homing can be used for classes that cannot be constructed
  // without emitting code for one of their constructors. This is classes that
  // don't have trivial or constexpr constructors, or can be created from
  // aggregate initialization. Also skip lambda objects because they don't call
  // constructors.
  // Skip this optimization if the class or any of its methods are marked
  // dllimport.
  if (isClassOrMethodDLLImport(RD))
    return false;
  return !RD->isLambda() && !RD->isAggregate() &&
         !RD->hasTrivialDefaultConstructor() &&
         !RD->hasConstexprNonCopyMoveConstructor();
}
/// Decide whether only a forward declaration should be emitted for \p RD
/// under the current debug-info settings. The checks below are ordered from
/// strongest (external definition available) to most heuristic.
static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
                                 bool DebugTypeExtRefs, const RecordDecl *RD,
                                 const LangOptions &LangOpts) {
  // A definition already available from an imported clang module need not be
  // duplicated here.
  if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
    return true;
  // The external AST source may guarantee a definition is always available
  // elsewhere.
  if (auto *ES = RD->getASTContext().getExternalSource())
    if (ES->hasExternalDefinitions(RD) == ExternalASTSource::EK_Always)
      return true;
  // Only emit forward declarations in line tables only to keep debug info size
  // small. This only applies to CodeView, since we don't emit types in DWARF
  // line tables only.
  if (DebugKind == codegenoptions::DebugLineTablesOnly)
    return true;
  // Above LimitedDebugInfo every required definition is emitted.
  if (DebugKind > codegenoptions::LimitedDebugInfo)
    return false;
  // The remaining heuristics apply only to C++ records.
  if (!LangOpts.CPlusPlus)
    return false;
  if (!RD->isCompleteDefinitionRequired())
    return true;
  const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
  if (!CXXDecl)
    return false;
  // Only emit complete debug info for a dynamic class when its vtable is
  // emitted. However, Microsoft debuggers don't resolve type information
  // across DLL boundaries, so skip this optimization if the class or any of its
  // methods are marked dllimport. This isn't a complete solution, since objects
  // without any dllimport methods can be used in one DLL and constructed in
  // another, but it is the current behavior of LimitedDebugInfo.
  if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass() &&
      !isClassOrMethodDLLImport(CXXDecl))
    return true;
  // For an explicit instantiation declaration with an explicitly-defined
  // member, the TU holding the explicit instantiation definition emits the
  // full type.
  TemplateSpecializationKind Spec = TSK_Undeclared;
  if (const auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
    Spec = SD->getSpecializationKind();
  if (Spec == TSK_ExplicitInstantiationDeclaration &&
      hasExplicitMemberDefinition(CXXDecl->method_begin(),
                                  CXXDecl->method_end()))
    return true;
  // In constructor homing mode, only emit complete debug info for a class
  // when its constructor is emitted.
  if ((DebugKind == codegenoptions::DebugInfoConstructor) &&
      canUseCtorHoming(CXXDecl))
    return true;
  return false;
}
// Upgrade a previously-emitted forward declaration of \p RD to a full
// definition, unless the current settings say the definition may be omitted.
void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
  if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts()))
    return;
  // Only act if we actually emitted a forward declaration earlier; otherwise
  // the type either doesn't exist yet or is already complete.
  QualType Ty = CGM.getContext().getRecordType(RD);
  llvm::DIType *T = getTypeOrNull(Ty);
  if (T && T->isForwardDecl())
    completeClassData(RD);
}
// Emit debug info for a record type: either the cached node, a fresh forward
// declaration (when the definition may be omitted), or the full definition.
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
  RecordDecl *RD = Ty->getDecl();
  llvm::DIType *Existing =
      cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
  // With no cached node and no reason to omit the definition, build it fully.
  if (!Existing && !shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD,
                                         CGM.getLangOpts()))
    return CreateTypeDefinition(Ty);
  // Otherwise hand back the cached node, creating a forward declaration when
  // nothing is cached yet.
  if (!Existing)
    Existing = getOrCreateRecordFwdDecl(Ty, getDeclContextDescriptor(RD));
  return Existing;
}
// Build the complete debug-info definition for a record type, handling
// recursion via a temporary forward declaration.
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
  RecordDecl *RD = Ty->getDecl();
  // Get overall information about the record type for the debug info.
  llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
  // Records and classes and unions can all be recursive. To handle them, we
  // first generate a debug descriptor for the struct as a forward declaration.
  // Then (if it is a definition) we go through and get debug info for all of
  // its members. Finally, we create a descriptor for the complete type (which
  // may refer to the forward decl if the struct is recursive) and replace all
  // uses of the forward declaration with the final definition.
  llvm::DICompositeType *FwdDecl = getOrCreateLimitedType(Ty);
  const RecordDecl *D = RD->getDefinition();
  // Without a complete definition we can only hand back the forward decl.
  if (!D || !D->isCompleteDefinition())
    return FwdDecl;
  if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
    CollectContainingType(CXXDecl, FwdDecl);
  // Push the struct on region stack.
  LexicalBlockStack.emplace_back(&*FwdDecl);
  RegionMap[Ty->getDecl()].reset(FwdDecl);
  // Convert all the elements.
  SmallVector<llvm::Metadata *, 16> EltTys;
  // what about nested types?
  // Note: The split of CXXDecl information here is intentional, the
  // gdb tests will depend on a certain ordering at printout. The debug
  // information offsets are still correct if we merge them all together
  // though.
  const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
  if (CXXDecl) {
    CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
    CollectVTableInfo(CXXDecl, DefUnit, EltTys);
  }
  // Collect data fields (including static variables and any initializers).
  CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
  if (CXXDecl)
    CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
  // Done with the member scope; pop it before finalizing the node.
  LexicalBlockStack.pop_back();
  RegionMap.erase(Ty->getDecl());
  llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
  DBuilder.replaceArrays(FwdDecl, Elements);
  // A temporary node must be made permanent now that the type is complete.
  if (FwdDecl->isTemporary())
    FwdDecl =
        llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
  RegionMap[Ty->getDecl()].reset(FwdDecl);
  return FwdDecl;
}
// An ObjC object type is emitted as its base type; protocol qualifiers carry
// no debug information.
llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty,
                                      llvm::DIFile *Unit) {
  QualType BaseTy = Ty->getBaseType();
  return getOrCreateType(BaseTy, Unit);
}
// An ObjC type parameter is modeled as a typedef of its underlying type;
// protocol qualifiers are ignored.
llvm::DIType *CGDebugInfo::CreateType(const ObjCTypeParamType *Ty,
                                      llvm::DIFile *Unit) {
  auto *TPDecl = Ty->getDecl();
  SourceLocation Loc = TPDecl->getLocation();
  llvm::DIType *UnderlyingDI =
      getOrCreateType(TPDecl->getUnderlyingType(), Unit);
  llvm::DIFile *DeclFile = getOrCreateFile(Loc);
  unsigned DeclLine = getLineNumber(Loc);
  llvm::DIScope *Context = getDeclContextDescriptor(TPDecl);
  return DBuilder.createTypedef(UnderlyingDI, TPDecl->getName(), DeclFile,
                                DeclLine, Context);
}
/// \return true if Getter has the default name for the property PD.
static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
                                 const ObjCMethodDecl *Getter) {
  assert(PD);
  // A property with no explicit getter always uses the default name.
  if (!Getter)
    return true;
  assert(Getter->getDeclName().isObjCZeroArgSelector());
  // Default getter name is exactly the property name.
  auto GetterSlot = Getter->getDeclName().getObjCSelector().getNameForSlot(0);
  return PD->getName() == GetterSlot;
}
/// \return true if Setter has the default name for the property PD.
static bool hasDefaultSetterName(const ObjCPropertyDecl *PD,
                                 const ObjCMethodDecl *Setter) {
  assert(PD);
  // A property with no explicit setter always uses the default name.
  if (!Setter)
    return true;
  assert(Setter->getDeclName().isObjCOneArgSelector());
  // Default setter name is "set<PropertyName>:".
  auto SetterSlot = Setter->getDeclName().getObjCSelector().getNameForSlot(0);
  return SelectorTable::constructSetterName(PD->getName()) == SetterSlot;
}
// Emit debug info for an ObjC interface, returning a forward declaration
// when the full layout cannot or should not be emitted here.
llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
                                      llvm::DIFile *Unit) {
  ObjCInterfaceDecl *ID = Ty->getDecl();
  if (!ID)
    return nullptr;
  // Return a forward declaration if this type was imported from a clang module,
  // and this is not the compile unit with the implementation of the type (which
  // may contain hidden ivars).
  if (DebugTypeExtRefs && ID->isFromASTFile() && ID->getDefinition() &&
      !ID->getImplementation())
    return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
                                      ID->getName(),
                                      getDeclContextDescriptor(ID), Unit, 0);
  // Get overall information about the record type for the debug info.
  llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation());
  unsigned Line = getLineNumber(ID->getLocation());
  auto RuntimeLang =
      static_cast<llvm::dwarf::SourceLanguage>(TheCU->getSourceLanguage());
  // If this is just a forward declaration return a special forward-declaration
  // debug type since we won't be able to lay out the entire type.
  ObjCInterfaceDecl *Def = ID->getDefinition();
  if (!Def || !Def->getImplementation()) {
    llvm::DIScope *Mod = getParentModuleOrNull(ID);
    llvm::DIType *FwdDecl = DBuilder.createReplaceableCompositeType(
        llvm::dwarf::DW_TAG_structure_type, ID->getName(), Mod ? Mod : TheCU,
        DefUnit, Line, RuntimeLang);
    // Remember the entry so the forward decl can be replaced once the
    // implementation becomes available.
    ObjCInterfaceCache.push_back(ObjCInterfaceCacheEntry(Ty, FwdDecl, Unit));
    return FwdDecl;
  }
  return CreateTypeDefinition(Ty, Unit);
}
// Return (creating and caching if needed) the DIModule for an imported clang
// module or PCH, optionally emitting a skeleton compile unit pointing at the
// module file.
llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
                                                  bool CreateSkeletonCU) {
  // Use the Module pointer as the key into the cache. This is a
  // nullptr if the "Module" is a PCH, which is safe because we don't
  // support chained PCH debug info, so there can only be a single PCH.
  const Module *M = Mod.getModuleOrNull();
  auto ModRef = ModuleCache.find(M);
  if (ModRef != ModuleCache.end())
    return cast<llvm::DIModule>(ModRef->second);
  // Macro definitions that were defined with "-D" on the command line.
  SmallString<128> ConfigMacros;
  {
    llvm::raw_svector_ostream OS(ConfigMacros);
    const auto &PPOpts = CGM.getPreprocessorOpts();
    unsigned I = 0;
    // Translate the macro definitions back into a command line.
    for (auto &M : PPOpts.Macros) {
      if (++I > 1)
        OS << " ";
      const std::string &Macro = M.first;
      bool Undef = M.second;
      OS << "\"-" << (Undef ? 'U' : 'D');
      // Escape backslashes and quotes so the result is shell-safe.
      for (char c : Macro)
        switch (c) {
        case '\\':
          OS << "\\\\";
          break;
        case '"':
          OS << "\\\"";
          break;
        default:
          OS << c;
        }
      OS << '\"';
    }
  }
  bool IsRootModule = M ? !M->Parent : true;
  // When a module name is specified as -fmodule-name, that module gets a
  // clang::Module object, but it won't actually be built or imported; it will
  // be textual.
  if (CreateSkeletonCU && IsRootModule && Mod.getASTFile().empty() && M)
    assert(StringRef(M->Name).startswith(CGM.getLangOpts().ModuleName) &&
           "clang module without ASTFile must be specified by -fmodule-name");
  // Return a StringRef to the remapped Path.
  auto RemapPath = [this](StringRef Path) -> std::string {
    std::string Remapped = remapDIPath(Path);
    StringRef Relative(Remapped);
    StringRef CompDir = TheCU->getDirectory();
    // Prefer a path relative to the CU's compilation directory.
    if (Relative.consume_front(CompDir))
      Relative.consume_front(llvm::sys::path::get_separator());
    return Relative.str();
  };
  if (CreateSkeletonCU && IsRootModule && !Mod.getASTFile().empty()) {
    // PCH files don't have a signature field in the control block,
    // but LLVM detects skeleton CUs by looking for a non-zero DWO id.
    // We use the lower 64 bits for debug info.
    uint64_t Signature = 0;
    if (const auto &ModSig = Mod.getSignature())
      Signature = ModSig.truncatedValue();
    else
      Signature = ~1ULL; // Sentinel non-zero DWO id for signature-less PCHs.
    llvm::DIBuilder DIB(CGM.getModule());
    SmallString<0> PCM;
    // Make a relative AST file path absolute against the module's path.
    if (!llvm::sys::path::is_absolute(Mod.getASTFile()))
      PCM = Mod.getPath();
    llvm::sys::path::append(PCM, Mod.getASTFile());
    DIB.createCompileUnit(
        TheCU->getSourceLanguage(),
        // TODO: Support "Source" from external AST providers?
        DIB.createFile(Mod.getModuleName(), TheCU->getDirectory()),
        TheCU->getProducer(), false, StringRef(), 0, RemapPath(PCM),
        llvm::DICompileUnit::FullDebug, Signature);
    DIB.finalize();
  }
  // Recursively create the parent module chain for submodules.
  llvm::DIModule *Parent =
      IsRootModule ? nullptr
                   : getOrCreateModuleRef(ASTSourceDescriptor(*M->Parent),
                                          CreateSkeletonCU);
  std::string IncludePath = Mod.getPath().str();
  llvm::DIModule *DIMod =
      DBuilder.createModule(Parent, Mod.getModuleName(), ConfigMacros,
                            RemapPath(IncludePath), M ? M->APINotesFile : "");
  ModuleCache[M].reset(DIMod);
  return DIMod;
}
// Build the complete debug-info definition for an ObjC interface: superclass
// inheritance, properties (class extensions first), and ivars.
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
                                                llvm::DIFile *Unit) {
  ObjCInterfaceDecl *ID = Ty->getDecl();
  llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation());
  unsigned Line = getLineNumber(ID->getLocation());
  unsigned RuntimeLang = TheCU->getSourceLanguage();
  // Bit size, align and offset of the type.
  uint64_t Size = CGM.getContext().getTypeSize(Ty);
  auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  if (ID->getImplementation())
    Flags |= llvm::DINode::FlagObjcClassComplete;
  llvm::DIScope *Mod = getParentModuleOrNull(ID);
  // Create the node first and cache it so recursive references resolve.
  llvm::DICompositeType *RealDecl = DBuilder.createStructType(
      Mod ? Mod : Unit, ID->getName(), DefUnit, Line, Size, Align, Flags,
      nullptr, llvm::DINodeArray(), RuntimeLang);
  QualType QTy(Ty, 0);
  TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl);
  // Push the struct on region stack.
  LexicalBlockStack.emplace_back(RealDecl);
  RegionMap[Ty->getDecl()].reset(RealDecl);
  // Convert all the elements.
  SmallVector<llvm::Metadata *, 16> EltTys;
  ObjCInterfaceDecl *SClass = ID->getSuperClass();
  if (SClass) {
    llvm::DIType *SClassTy =
        getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
    if (!SClassTy)
      return nullptr;
    llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0,
                                                      llvm::DINode::FlagZero);
    EltTys.push_back(InhTag);
  }
  // Create entries for all of the properties.
  auto AddProperty = [&](const ObjCPropertyDecl *PD) {
    SourceLocation Loc = PD->getLocation();
    llvm::DIFile *PUnit = getOrCreateFile(Loc);
    unsigned PLine = getLineNumber(Loc);
    ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
    ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
    // Only record non-default accessor names.
    llvm::MDNode *PropertyNode = DBuilder.createObjCProperty(
        PD->getName(), PUnit, PLine,
        hasDefaultGetterName(PD, Getter) ? ""
                                         : getSelectorName(PD->getGetterName()),
        hasDefaultSetterName(PD, Setter) ? ""
                                         : getSelectorName(PD->getSetterName()),
        PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit));
    EltTys.push_back(PropertyNode);
  };
  {
    // Class-extension properties take precedence over interface properties.
    llvm::SmallPtrSet<const IdentifierInfo *, 16> PropertySet;
    for (const ObjCCategoryDecl *ClassExt : ID->known_extensions())
      for (auto *PD : ClassExt->properties()) {
        PropertySet.insert(PD->getIdentifier());
        AddProperty(PD);
      }
    for (const auto *PD : ID->properties()) {
      // Don't emit duplicate metadata for properties that were already in a
      // class extension.
      if (!PropertySet.insert(PD->getIdentifier()).second)
        continue;
      AddProperty(PD);
    }
  }
  const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
  unsigned FieldNo = 0;
  for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
       Field = Field->getNextIvar(), ++FieldNo) {
    llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
    if (!FieldTy)
      return nullptr;
    StringRef FieldName = Field->getName();
    // Ignore unnamed fields.
    if (FieldName.empty())
      continue;
    // Get the location for the field.
    llvm::DIFile *FieldDefUnit = getOrCreateFile(Field->getLocation());
    unsigned FieldLine = getLineNumber(Field->getLocation());
    QualType FType = Field->getType();
    uint64_t FieldSize = 0;
    uint32_t FieldAlign = 0;
    if (!FType->isIncompleteArrayType()) {
      // Bit size, align and offset of the type.
      FieldSize = Field->isBitField()
                      ? Field->getBitWidthValue(CGM.getContext())
                      : CGM.getContext().getTypeSize(FType);
      FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext());
    }
    uint64_t FieldOffset;
    if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
      // We don't know the runtime offset of an ivar if we're using the
      // non-fragile ABI. For bitfields, use the bit offset into the first
      // byte of storage of the bitfield. For other fields, use zero.
      if (Field->isBitField()) {
        FieldOffset =
            CGM.getObjCRuntime().ComputeBitfieldBitOffset(CGM, ID, Field);
        FieldOffset %= CGM.getContext().getCharWidth();
      } else {
        FieldOffset = 0;
      }
    } else {
      FieldOffset = RL.getFieldOffset(FieldNo);
    }
    // Translate ObjC access control into DI flags.
    llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
    if (Field->getAccessControl() == ObjCIvarDecl::Protected)
      Flags = llvm::DINode::FlagProtected;
    else if (Field->getAccessControl() == ObjCIvarDecl::Private)
      Flags = llvm::DINode::FlagPrivate;
    else if (Field->getAccessControl() == ObjCIvarDecl::Public)
      Flags = llvm::DINode::FlagPublic;
    // If the ivar backs a synthesized property, attach that property's node.
    llvm::MDNode *PropertyNode = nullptr;
    if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
      if (ObjCPropertyImplDecl *PImpD =
              ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
        if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
          SourceLocation Loc = PD->getLocation();
          llvm::DIFile *PUnit = getOrCreateFile(Loc);
          unsigned PLine = getLineNumber(Loc);
          ObjCMethodDecl *Getter = PImpD->getGetterMethodDecl();
          ObjCMethodDecl *Setter = PImpD->getSetterMethodDecl();
          PropertyNode = DBuilder.createObjCProperty(
              PD->getName(), PUnit, PLine,
              hasDefaultGetterName(PD, Getter)
                  ? ""
                  : getSelectorName(PD->getGetterName()),
              hasDefaultSetterName(PD, Setter)
                  ? ""
                  : getSelectorName(PD->getSetterName()),
              PD->getPropertyAttributes(),
              getOrCreateType(PD->getType(), PUnit));
        }
      }
    }
    FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit, FieldLine,
                                      FieldSize, FieldAlign, FieldOffset, Flags,
                                      FieldTy, PropertyNode);
    EltTys.push_back(FieldTy);
  }
  // Install the collected members and close the member scope.
  llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
  DBuilder.replaceArrays(RealDecl, Elements);
  LexicalBlockStack.pop_back();
  return RealDecl;
}
// Emit debug info for a vector type as an LLVM vector type with a single
// subrange describing the element count.
llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
                                      llvm::DIFile *Unit) {
  llvm::DIType *EltDI = getOrCreateType(Ty->getElementType(), Unit);
  QualType QTy(Ty, 0);
  llvm::Metadata *Subscript;
  auto CachedSize = SizeExprCache.find(QTy);
  if (CachedSize == SizeExprCache.end()) {
    // A zero-element vector is encoded with count -1 (unbounded).
    int64_t NumElts = Ty->getNumElements();
    if (NumElts == 0)
      NumElts = -1;
    auto *CountNode =
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
            llvm::Type::getInt64Ty(CGM.getLLVMContext()), NumElts));
    Subscript = DBuilder.getOrCreateSubrange(
        CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
        nullptr /*stride*/);
  } else {
    // A cached size expression takes precedence over the static count.
    Subscript = DBuilder.getOrCreateSubrange(
        CachedSize->getSecond() /*count*/, nullptr /*lowerBound*/,
        nullptr /*upperBound*/, nullptr /*stride*/);
  }
  llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
  uint64_t Size = CGM.getContext().getTypeSize(Ty);
  auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
  return DBuilder.createVectorType(Size, Align, EltDI, SubscriptArray);
}
llvm::DIType *CGDebugInfo::CreateType(const ConstantMatrixType *Ty,
                                      llvm::DIFile *Unit) {
  // FIXME: Create another debug type for matrices
  // For the time being, it treats it like a nested ArrayType.
  llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
  uint64_t Size = CGM.getContext().getTypeSize(Ty);
  uint32_t Align = getTypeAlignIfRequired(Ty, CGM.getContext());
  // Helper producing a constant-count subrange node.
  auto MakeCountSubrange = [&](int64_t Count) {
    auto *CountNode =
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
            llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count));
    return DBuilder.getOrCreateSubrange(
        CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
        nullptr /*stride*/);
  };
  // Create ranges for both dimensions: columns first, then rows.
  llvm::SmallVector<llvm::Metadata *, 2> Subscripts;
  Subscripts.push_back(MakeCountSubrange(Ty->getNumColumns()));
  Subscripts.push_back(MakeCountSubrange(Ty->getNumRows()));
  llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
  return DBuilder.createArrayType(Size, Align, ElementTy, SubscriptArray);
}
// Emit debug info for (possibly nested) array types, flattening the nesting
// into a list of subrange descriptors.
llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
  uint64_t Size;
  uint32_t Align;
  // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
  if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
    // VLA: size is unknown at compile time; align comes from the element type.
    Size = 0;
    Align = getTypeAlignIfRequired(CGM.getContext().getBaseElementType(VAT),
                                   CGM.getContext());
  } else if (Ty->isIncompleteArrayType()) {
    Size = 0;
    if (Ty->getElementType()->isIncompleteType())
      Align = 0;
    else
      Align = getTypeAlignIfRequired(Ty->getElementType(), CGM.getContext());
  } else if (Ty->isIncompleteType()) {
    Size = 0;
    Align = 0;
  } else {
    // Size and align of the whole array, not the element type.
    Size = CGM.getContext().getTypeSize(Ty);
    Align = getTypeAlignIfRequired(Ty, CGM.getContext());
  }
  // Add the dimensions of the array. FIXME: This loses CV qualifiers from
  // interior arrays, do we care? Why aren't nested arrays represented the
  // obvious/recursive way?
  SmallVector<llvm::Metadata *, 8> Subscripts;
  QualType EltTy(Ty, 0);
  // Peel one array layer per iteration; EltTy ends at the innermost element.
  while ((Ty = dyn_cast<ArrayType>(EltTy))) {
    // If the number of elements is known, then count is that number. Otherwise,
    // it's -1. This allows us to represent a subrange with an array of 0
    // elements, like this:
    //
    //   struct foo {
    //     int x[0];
    //   };
    int64_t Count = -1; // Count == -1 is an unbounded array.
    if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty))
      Count = CAT->getSize().getZExtValue();
    else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
      // A VLA whose size expression folds to a constant still gets a count.
      if (Expr *Size = VAT->getSizeExpr()) {
        Expr::EvalResult Result;
        if (Size->EvaluateAsInt(Result, CGM.getContext()))
          Count = Result.Val.getInt().getExtValue();
      }
    }
    auto SizeNode = SizeExprCache.find(EltTy);
    if (SizeNode != SizeExprCache.end())
      Subscripts.push_back(DBuilder.getOrCreateSubrange(
          SizeNode->getSecond() /*count*/, nullptr /*lowerBound*/,
          nullptr /*upperBound*/, nullptr /*stride*/));
    else {
      auto *CountNode =
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
              llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count));
      Subscripts.push_back(DBuilder.getOrCreateSubrange(
          CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
          nullptr /*stride*/));
    }
    EltTy = Ty->getElementType();
  }
  llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
  return DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
                                  SubscriptArray);
}
// An lvalue reference is a DW_TAG_reference_type wrapping the pointee type.
llvm::DIType *CGDebugInfo::CreateType(const LValueReferenceType *Ty,
                                      llvm::DIFile *Unit) {
  QualType Pointee = Ty->getPointeeType();
  return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty, Pointee,
                               Unit);
}
// An rvalue reference is a DW_TAG_rvalue_reference_type wrapping the pointee.
llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty,
                                      llvm::DIFile *Unit) {
  QualType Pointee = Ty->getPointeeType();
  return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty,
                               Pointee, Unit);
}
// Emit debug info for a pointer-to-member type (data member or member
// function), recording the MS inheritance model when targeting that ABI.
llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
                                      llvm::DIFile *U) {
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  uint64_t Size = 0;
  if (!Ty->isIncompleteType()) {
    Size = CGM.getContext().getTypeSize(Ty);
    // Set the MS inheritance model. There is no flag for the unspecified model.
    if (CGM.getTarget().getCXXABI().isMicrosoft()) {
      switch (Ty->getMostRecentCXXRecordDecl()->getMSInheritanceModel()) {
      case MSInheritanceModel::Single:
        Flags |= llvm::DINode::FlagSingleInheritance;
        break;
      case MSInheritanceModel::Multiple:
        Flags |= llvm::DINode::FlagMultipleInheritance;
        break;
      case MSInheritanceModel::Virtual:
        Flags |= llvm::DINode::FlagVirtualInheritance;
        break;
      case MSInheritanceModel::Unspecified:
        break;
      }
    }
  }
  llvm::DIType *ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U);
  // Pointer to data member: a simple member-pointer node suffices.
  if (Ty->isMemberDataPointerType())
    return DBuilder.createMemberPointerType(
        getOrCreateType(Ty->getPointeeType(), U), ClassType, Size, /*Align=*/0,
        Flags);
  // Pointer to member function: describe the full method type, including the
  // implicit 'this' parameter.
  const FunctionProtoType *FPT =
      Ty->getPointeeType()->getAs<FunctionProtoType>();
  return DBuilder.createMemberPointerType(
      getOrCreateInstanceMethodType(
          CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
          FPT, U, false),
      ClassType, Size, /*Align=*/0, Flags);
}
// _Atomic(T) is represented as a DW_TAG_atomic_type qualifier applied to T.
llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) {
  llvm::DIType *ValueDI = getOrCreateType(Ty->getValueType(), U);
  return DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_atomic_type, ValueDI);
}
// An OpenCL pipe type is emitted as its element type's debug info.
llvm::DIType *CGDebugInfo::CreateType(const PipeType *Ty, llvm::DIFile *U) {
  QualType Elem = Ty->getElementType();
  return getOrCreateType(Elem, U);
}
// Emit debug info for an enum type, returning a forward declaration when the
// definition is unavailable or provided by an imported module.
llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
  const EnumDecl *ED = Ty->getDecl();
  uint64_t Size = 0;
  uint32_t Align = 0;
  if (!ED->getTypeForDecl()->isIncompleteType()) {
    Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
    Align = getDeclAlignIfRequired(ED, CGM.getContext());
  }
  SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
  bool isImportedFromModule =
      DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
  // If this is just a forward declaration, construct an appropriately
  // marked node and just return it.
  if (isImportedFromModule || !ED->getDefinition()) {
    // Note that it is possible for enums to be created as part of
    // their own declcontext. In this case a FwdDecl will be created
    // twice. This doesn't cause a problem because both FwdDecls are
    // entered into the ReplaceMap: finalize() will replace the first
    // FwdDecl with the second and then replace the second with
    // complete type.
    llvm::DIScope *EDContext = getDeclContextDescriptor(ED);
    llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
    // NOTE(review): TmpContext is not referenced afterwards; presumably it
    // exists only to keep a temporary scope alive while the forward decl for
    // EDContext above is resolved -- confirm.
    llvm::TempDIScope TmpContext(DBuilder.createReplaceableCompositeType(
        llvm::dwarf::DW_TAG_enumeration_type, "", TheCU, DefUnit, 0));
    unsigned Line = getLineNumber(ED->getLocation());
    StringRef EDName = ED->getName();
    llvm::DIType *RetTy = DBuilder.createReplaceableCompositeType(
        llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line,
        0, Size, Align, llvm::DINode::FlagFwdDecl, Identifier);
    // Record the node so finalize() can swap in the complete type later.
    ReplaceMap.emplace_back(
        std::piecewise_construct, std::make_tuple(Ty),
        std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
    return RetTy;
  }
  return CreateTypeDefinition(Ty);
}
// Build the complete debug-info definition for an enum, emitting one
// enumerator node per declared value.
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
  const EnumDecl *ED = Ty->getDecl();
  uint64_t Size = 0;
  uint32_t Align = 0;
  if (!ED->getTypeForDecl()->isIncompleteType()) {
    Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
    Align = getDeclAlignIfRequired(ED, CGM.getContext());
  }
  SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
  // Create elements for each enumerator.
  SmallVector<llvm::Metadata *, 16> Enumerators;
  // NOTE(review): getDefinition() is assumed non-null here (callers only
  // invoke this for defined enums) -- confirm.
  ED = ED->getDefinition();
  bool IsSigned = ED->getIntegerType()->isSignedIntegerType();
  for (const auto *Enum : ED->enumerators()) {
    const auto &InitVal = Enum->getInitVal();
    // Extract the value with the signedness of the underlying type.
    auto Value = IsSigned ? InitVal.getSExtValue() : InitVal.getZExtValue();
    Enumerators.push_back(
        DBuilder.createEnumerator(Enum->getName(), Value, !IsSigned));
  }
  // Return a CompositeType for the enum itself.
  llvm::DINodeArray EltArray = DBuilder.getOrCreateArray(Enumerators);
  llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
  unsigned Line = getLineNumber(ED->getLocation());
  llvm::DIScope *EnumContext = getDeclContextDescriptor(ED);
  llvm::DIType *ClassTy = getOrCreateType(ED->getIntegerType(), DefUnit);
  return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit,
                                        Line, Size, Align, EltArray, ClassTy,
                                        Identifier, ED->isScoped());
}
// Create a DIMacro node for a #define/#undef event.
llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent,
                                        unsigned MType, SourceLocation LineLoc,
                                        StringRef Name, StringRef Value) {
  // Macros without a source location (e.g. from the command line) get line 0.
  unsigned Line = 0;
  if (!LineLoc.isInvalid())
    Line = getLineNumber(LineLoc);
  return DBuilder.createMacro(Parent, Line, MType, Name, Value);
}
// Create a temporary DIMacroFile node for an #include event.
llvm::DIMacroFile *CGDebugInfo::CreateTempMacroFile(llvm::DIMacroFile *Parent,
                                                    SourceLocation LineLoc,
                                                    SourceLocation FileLoc) {
  llvm::DIFile *IncludedFile = getOrCreateFile(FileLoc);
  // Includes without a valid inclusion location get line 0.
  unsigned Line = 0;
  if (!LineLoc.isInvalid())
    Line = getLineNumber(LineLoc);
  return DBuilder.createTempMacroFile(Parent, Line, IncludedFile);
}
/// Strip sugar (typedef-like wrappers, deduced types, etc.) from \p T until a
/// type class the debug-info cache keys on remains, accumulating qualifiers
/// encountered along the way.
static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
  Qualifiers Quals;
  do {
    Qualifiers InnerQuals = T.getLocalQualifiers();
    // Qualifiers::operator+() doesn't like it if you add a Qualifier
    // that is already there.
    Quals += Qualifiers::removeCommonQualifiers(Quals, InnerQuals);
    Quals += InnerQuals;
    QualType LastT = T;
    switch (T->getTypeClass()) {
    default:
      // Not sugar: re-apply the collected qualifiers and stop.
      return C.getQualifiedType(T.getTypePtr(), Quals);
    case Type::TemplateSpecialization: {
      const auto *Spec = cast<TemplateSpecializationType>(T);
      // Alias templates keep their spelling; everything else desugars.
      if (Spec->isTypeAlias())
        return C.getQualifiedType(T.getTypePtr(), Quals);
      T = Spec->desugar();
      break;
    }
    case Type::TypeOfExpr:
      T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
      break;
    case Type::TypeOf:
      T = cast<TypeOfType>(T)->getUnderlyingType();
      break;
    case Type::Decltype:
      T = cast<DecltypeType>(T)->getUnderlyingType();
      break;
    case Type::UnaryTransform:
      T = cast<UnaryTransformType>(T)->getUnderlyingType();
      break;
    case Type::Attributed:
      T = cast<AttributedType>(T)->getEquivalentType();
      break;
    case Type::Elaborated:
      T = cast<ElaboratedType>(T)->getNamedType();
      break;
    case Type::Paren:
      T = cast<ParenType>(T)->getInnerType();
      break;
    case Type::MacroQualified:
      T = cast<MacroQualifiedType>(T)->getUnderlyingType();
      break;
    case Type::SubstTemplateTypeParm:
      T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
      break;
    case Type::Auto:
    case Type::DeducedTemplateSpecialization: {
      QualType DT = cast<DeducedType>(T)->getDeducedType();
      assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
      T = DT;
      break;
    }
    case Type::Adjusted:
    case Type::Decayed:
      // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
      T = cast<AdjustedType>(T)->getAdjustedType();
      break;
    }
    // Every desugaring step must make progress or we would loop forever.
    assert(T != LastT && "Type unwrapping failed to unwrap!");
    (void)LastT;
  } while (true);
}
// Look up the cached debug-info node for \p Ty, or return null on a miss.
llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) {
  // The cache is keyed on fully-unwrapped types; callers must pass one.
  assert(Ty == UnwrapTypeForDebugInfo(Ty, CGM.getContext()));
  auto Cached = TypeCache.find(Ty.getAsOpaquePtr());
  if (Cached == TypeCache.end())
    return nullptr;
  // The tracking reference may have been dropped; treat that as a miss too.
  llvm::Metadata *MD = Cached->second;
  if (!MD)
    return nullptr;
  return cast<llvm::DIType>(MD);
}
// A template specialization is completed exactly like any other
// otherwise-unused class.
void CGDebugInfo::completeTemplateDefinition(
    const ClassTemplateSpecializationDecl &SD) {
  completeUnusedClass(SD);
}
// Emit the full definition for a class that may otherwise have no code
// emitted referencing it, and make sure it survives finalization.
void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
  // No class types at all in line-tables-only mode (or below).
  if (DebugKind <= codegenoptions::DebugLineTablesOnly)
    return;
  completeClassData(&D);
  // In case this type has no member function definitions being emitted, ensure
  // it is retained
  RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr());
}
// Central entry point: return the (possibly cached) debug-info node for a
// type, creating and caching it on first use.
llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
  if (Ty.isNull())
    return nullptr;
  // Record time spent building debug types when -ftime-trace is active.
  llvm::TimeTraceScope TimeScope("DebugType", [&]() {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    Ty.print(OS, getPrintingPolicy());
    return Name;
  });
  // Unwrap the type as needed for debug information.
  Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
  if (auto *T = getTypeOrNull(Ty))
    return T;
  llvm::DIType *Res = CreateTypeNode(Ty, Unit);
  void *TyPtr = Ty.getAsOpaquePtr();
  // And update the type cache.
  TypeCache[TyPtr].reset(Res);
  return Res;
}
// Return the DIModule a declaration belongs to, or null when the declaration
// is not associated with a clang module / PCH.
llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
  // A forward declaration inside a module header does not belong to the module.
  if (isa<RecordDecl>(D) && !cast<RecordDecl>(D)->getDefinition())
    return nullptr;
  if (DebugTypeExtRefs && D->isFromASTFile()) {
    // Record a reference to an imported clang module or precompiled header.
    auto *Reader = CGM.getContext().getExternalSource();
    auto Idx = D->getOwningModuleID();
    auto Info = Reader->getSourceDescriptor(Idx);
    if (Info)
      return getOrCreateModuleRef(*Info, /*SkeletonCU=*/true);
  } else if (ClangModuleMap) {
    // We are building a clang module or a precompiled header.
    //
    // TODO: When D is a CXXRecordDecl or a C++ Enum, the ODR applies
    // and it wouldn't be necessary to specify the parent scope
    // because the type is already unique by definition (it would look
    // like the output of -fno-standalone-debug). On the other hand,
    // the parent scope helps a consumer to quickly locate the object
    // file where the type's definition is located, so it might be
    // best to make this behavior a command line or debugger tuning
    // option.
    if (Module *M = D->getOwningModule()) {
      // This is a (sub-)module.
      auto Info = ASTSourceDescriptor(*M);
      return getOrCreateModuleRef(Info, /*SkeletonCU=*/false);
    } else {
      // This the precompiled header being built.
      return getOrCreateModuleRef(PCHDescriptor, /*SkeletonCU=*/false);
    }
  }
  return nullptr;
}
// Dispatch to the CreateType overload matching Ty's type class. Qualifiers
// are peeled first; sugar-only type classes (Auto, Paren, Elaborated, ...)
// are expected to have been stripped by the caller before this runs, so
// reaching one of them is a programming error.
llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
  // Handle qualifiers, which recursively handles what they refer to.
  if (Ty.hasLocalQualifiers())
    return CreateQualifiedType(Ty, Unit);
  // Work out details of type.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Dependent types cannot show up in debug information");
  case Type::ExtVector:
  case Type::Vector:
    return CreateType(cast<VectorType>(Ty), Unit);
  case Type::ConstantMatrix:
    return CreateType(cast<ConstantMatrixType>(Ty), Unit);
  case Type::ObjCObjectPointer:
    return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
  case Type::ObjCObject:
    return CreateType(cast<ObjCObjectType>(Ty), Unit);
  case Type::ObjCTypeParam:
    return CreateType(cast<ObjCTypeParamType>(Ty), Unit);
  case Type::ObjCInterface:
    return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
  case Type::Builtin:
    return CreateType(cast<BuiltinType>(Ty));
  case Type::Complex:
    return CreateType(cast<ComplexType>(Ty));
  case Type::Pointer:
    return CreateType(cast<PointerType>(Ty), Unit);
  case Type::BlockPointer:
    return CreateType(cast<BlockPointerType>(Ty), Unit);
  case Type::Typedef:
    return CreateType(cast<TypedefType>(Ty), Unit);
  case Type::Record:
    return CreateType(cast<RecordType>(Ty));
  case Type::Enum:
    return CreateEnumType(cast<EnumType>(Ty));
  case Type::FunctionProto:
  case Type::FunctionNoProto:
    return CreateType(cast<FunctionType>(Ty), Unit);
  case Type::ConstantArray:
  case Type::VariableArray:
  case Type::IncompleteArray:
    return CreateType(cast<ArrayType>(Ty), Unit);
  case Type::LValueReference:
    return CreateType(cast<LValueReferenceType>(Ty), Unit);
  case Type::RValueReference:
    return CreateType(cast<RValueReferenceType>(Ty), Unit);
  case Type::MemberPointer:
    return CreateType(cast<MemberPointerType>(Ty), Unit);
  case Type::Atomic:
    return CreateType(cast<AtomicType>(Ty), Unit);
  case Type::ExtInt:
    return CreateType(cast<ExtIntType>(Ty));
  case Type::Pipe:
    return CreateType(cast<PipeType>(Ty), Unit);
  case Type::TemplateSpecialization:
    return CreateType(cast<TemplateSpecializationType>(Ty), Unit);
  // Sugar types should have been unwrapped before reaching here.
  case Type::Auto:
  case Type::Attributed:
  case Type::Adjusted:
  case Type::Decayed:
  case Type::DeducedTemplateSpecialization:
  case Type::Elaborated:
  case Type::Paren:
  case Type::MacroQualified:
  case Type::SubstTemplateTypeParm:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
    break;
  }
  llvm_unreachable("type should have been unwrapped!");
}
// Return the (possibly cached) limited composite type for Ty. A cached
// forward declaration is replaced by a freshly created limited definition,
// preserving any element list the forward declaration accumulated.
llvm::DICompositeType *
CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty) {
  QualType QTy(Ty, 0);
  auto *T = cast_or_null<llvm::DICompositeType>(getTypeOrNull(QTy));
  // We may have cached a forward decl when we could have created
  // a non-forward decl. Go ahead and create a non-forward decl
  // now.
  if (T && !T->isForwardDecl())
    return T;
  // Otherwise create the type.
  llvm::DICompositeType *Res = CreateLimitedType(Ty);
  // Propagate members from the declaration to the definition
  // CreateType(const RecordType*) will overwrite this with the members in the
  // correct order if the full type is needed.
  DBuilder.replaceArrays(Res, T ? T->getElements() : llvm::DINodeArray());
  // And update the type cache.
  TypeCache[QTy.getAsOpaquePtr()].reset(Res);
  return Res;
}
// TODO: Currently used for context chains when limiting debug info.
// Create a composite type node for Ty that carries name/size/flags but not
// (yet) its member list; members are filled in later by replaceArrays, or
// by CreateType(const RecordType*) when the full type is required.
llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
  RecordDecl *RD = Ty->getDecl();
  // Get overall information about the record type for the debug info.
  StringRef RDName = getClassName(RD);
  const SourceLocation Loc = RD->getLocation();
  llvm::DIFile *DefUnit = nullptr;
  unsigned Line = 0;
  if (Loc.isValid()) {
    DefUnit = getOrCreateFile(Loc);
    Line = getLineNumber(Loc);
  }
  llvm::DIScope *RDContext = getDeclContextDescriptor(RD);
  // If we ended up creating the type during the context chain construction,
  // just return that.
  auto *T = cast_or_null<llvm::DICompositeType>(
      getTypeOrNull(CGM.getContext().getRecordType(RD)));
  if (T && (!T->isForwardDecl() || !RD->getDefinition()))
    return T;
  // If this is just a forward or incomplete declaration, construct an
  // appropriately marked node and just return it.
  const RecordDecl *D = RD->getDefinition();
  if (!D || !D->isCompleteDefinition())
    return getOrCreateRecordFwdDecl(Ty, RDContext);
  uint64_t Size = CGM.getContext().getTypeSize(Ty);
  auto Align = getDeclAlignIfRequired(D, CGM.getContext());
  SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
  // Explicitly record the calling convention and export symbols for C++
  // records.
  auto Flags = llvm::DINode::FlagZero;
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (CGM.getCXXABI().getRecordArgABI(CXXRD) == CGCXXABI::RAA_Indirect)
      Flags |= llvm::DINode::FlagTypePassByReference;
    else
      Flags |= llvm::DINode::FlagTypePassByValue;
    // Record if a C++ record is non-trivial type.
    if (!CXXRD->isTrivial())
      Flags |= llvm::DINode::FlagNonTrivial;
    // Record exports its symbols to the containing structure.
    if (CXXRD->isAnonymousStructOrUnion())
      Flags |= llvm::DINode::FlagExportSymbols;
  }
  llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
      getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align,
      Flags, Identifier);
  // Elements of composite types usually have back references to the type,
  // creating uniquing cycles. Distinct nodes are more efficient.
  switch (RealDecl->getTag()) {
  default:
    llvm_unreachable("invalid composite type tag");
  case llvm::dwarf::DW_TAG_array_type:
  case llvm::dwarf::DW_TAG_enumeration_type:
    // Array elements and most enumeration elements don't have back references,
    // so they don't tend to be involved in uniquing cycles and there is some
    // chance of merging them when linking together two modules. Only make
    // them distinct if they are ODR-uniqued.
    if (Identifier.empty())
      break;
    LLVM_FALLTHROUGH;
  case llvm::dwarf::DW_TAG_structure_type:
  case llvm::dwarf::DW_TAG_union_type:
  case llvm::dwarf::DW_TAG_class_type:
    // Immediately resolve to a distinct node.
    RealDecl =
        llvm::MDNode::replaceWithDistinct(llvm::TempDICompositeType(RealDecl));
    break;
  }
  RegionMap[Ty->getDecl()].reset(RealDecl);
  TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
  if (const auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
    DBuilder.replaceArrays(RealDecl, llvm::DINodeArray(),
                           CollectCXXTemplateParams(TSpecial, DefUnit));
  return RealDecl;
}
void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
llvm::DICompositeType *RealDecl) {
// A class's primary base or the class itself contains the vtable.
llvm::DICompositeType *ContainingType = nullptr;
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
// Seek non-virtual primary base root.
while (1) {
const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
const CXXRecordDecl *PBT = BRL.getPrimaryBase();
if (PBT && !BRL.isPrimaryBaseVirtual())
PBase = PBT;
else
break;
}
ContainingType = cast<llvm::DICompositeType>(
getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
getOrCreateFile(RD->getLocation())));
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
DBuilder.replaceVTableHolder(RealDecl, ContainingType);
}
// Build a DWARF member node named Name of type FType at bit offset *Offset,
// advancing *Offset past the new member.
llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType,
                                            StringRef Name, uint64_t *Offset) {
  llvm::DIType *MemberBaseTy = CGDebugInfo::getOrCreateType(FType, Unit);
  const uint64_t SizeInBits = CGM.getContext().getTypeSize(FType);
  const auto AlignInBits = getTypeAlignIfRequired(FType, CGM.getContext());
  llvm::DIType *Member = DBuilder.createMemberType(
      Unit, Name, Unit, 0, SizeInBits, AlignInBits, *Offset,
      llvm::DINode::FlagZero, MemberBaseTy);
  *Offset += SizeInBits;
  return Member;
}
// Collect (via the out-parameters) the display name, linkage name, context
// scope, template parameter list and DI flags needed to build a
// DISubprogram for function GD. Name/LinkageName/FDContext/TParamsArray
// may be left untouched depending on debug-info level and codegen options.
void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
                                           StringRef &Name,
                                           StringRef &LinkageName,
                                           llvm::DIScope *&FDContext,
                                           llvm::DINodeArray &TParamsArray,
                                           llvm::DINode::DIFlags &Flags) {
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
  Name = getFunctionName(FD);
  // Use mangled name as linkage name for C/C++ functions.
  if (FD->hasPrototype()) {
    LinkageName = CGM.getMangledName(GD);
    Flags |= llvm::DINode::FlagPrototyped;
  }
  // No need to replicate the linkage name if it isn't different from the
  // subprogram name, no need to have it at all unless coverage is enabled or
  // debug is set to more than just line tables or extra debug info is needed.
  if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs &&
                              !CGM.getCodeGenOpts().EmitGcovNotes &&
                              !CGM.getCodeGenOpts().DebugInfoForProfiling &&
                              DebugKind <= codegenoptions::DebugLineTablesOnly))
    LinkageName = StringRef();
  // Emit the function scope in line tables only mode (if CodeView) to
  // differentiate between function names.
  if (CGM.getCodeGenOpts().hasReducedDebugInfo() ||
      (DebugKind == codegenoptions::DebugLineTablesOnly &&
       CGM.getCodeGenOpts().EmitCodeView)) {
    if (const NamespaceDecl *NSDecl =
            dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
      FDContext = getOrCreateNamespace(NSDecl);
    else if (const RecordDecl *RDecl =
                 dyn_cast_or_null<RecordDecl>(FD->getDeclContext())) {
      // Prefer the owning module as the outer scope when there is one.
      llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
      FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
    }
  }
  if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    // Check if it is a noreturn-marked function
    if (FD->isNoReturn())
      Flags |= llvm::DINode::FlagNoReturn;
    // Collect template parameters.
    TParamsArray = CollectFunctionTemplateParams(FD, Unit);
  }
}
// Collect (via the out-parameters) the file, line, type, names, template
// parameter tuple and context scope needed to build a DIGlobalVariable for
// VD. Incomplete array types are completed to a one-element array to match
// what CodeGen emits.
void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
                                      unsigned &LineNo, QualType &T,
                                      StringRef &Name, StringRef &LinkageName,
                                      llvm::MDTuple *&TemplateParameters,
                                      llvm::DIScope *&VDContext) {
  Unit = getOrCreateFile(VD->getLocation());
  LineNo = getLineNumber(VD->getLocation());
  setLocation(VD->getLocation());
  T = VD->getType();
  if (T->isIncompleteArrayType()) {
    // CodeGen turns int[] into int[1] so we'll do the same here.
    llvm::APInt ConstVal(32, 1);
    QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
    T = CGM.getContext().getConstantArrayType(ET, ConstVal, nullptr,
                                              ArrayType::Normal, 0);
  }
  Name = VD->getName();
  // Only externally-visible variables (not function/method locals) get a
  // mangled linkage name.
  if (VD->getDeclContext() && !isa<FunctionDecl>(VD->getDeclContext()) &&
      !isa<ObjCMethodDecl>(VD->getDeclContext()))
    LinkageName = CGM.getMangledName(VD);
  if (LinkageName == Name)
    LinkageName = StringRef();
  if (isa<VarTemplateSpecializationDecl>(VD)) {
    llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VD, &*Unit);
    TemplateParameters = parameterNodes.get();
  } else {
    TemplateParameters = nullptr;
  }
  // Since we emit declarations (DW_AT_members) for static members, place the
  // definition of those static members in the namespace they were declared in
  // in the source code (the lexical decl context).
  // FIXME: Generalize this for even non-member global variables where the
  // declaration and definition may have different lexical decl contexts, once
  // we have support for emitting declarations of (non-member) global variables.
  const DeclContext *DC = VD->isStaticDataMember() ? VD->getLexicalDeclContext()
                                                   : VD->getDeclContext();
  // When a record type contains an in-line initialization of a static data
  // member, and the record type is marked as __declspec(dllexport), an implicit
  // definition of the member will be created in the record context. DWARF
  // doesn't seem to have a nice way to describe this in a form that consumers
  // are likely to understand, so fake the "normal" situation of a definition
  // outside the class by putting it in the global scope.
  if (DC->isRecord())
    DC = CGM.getContext().getTranslationUnitDecl();
  llvm::DIScope *Mod = getParentModuleOrNull(VD);
  VDContext = getContextDescriptor(cast<Decl>(DC), Mod ? Mod : TheCU);
}
// Create either a definition stub (Stub=true) or a temporary forward
// declaration (Stub=false) DISubprogram for GD. Forward declarations are
// recorded in FwdDeclReplaceMap so they can be replaced once a real
// definition is emitted.
llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
                                                          bool Stub) {
  llvm::DINodeArray TParamsArray;
  StringRef Name, LinkageName;
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
  SourceLocation Loc = GD.getDecl()->getLocation();
  llvm::DIFile *Unit = getOrCreateFile(Loc);
  llvm::DIScope *DContext = Unit;
  unsigned Line = getLineNumber(Loc);
  collectFunctionDeclProps(GD, Unit, Name, LinkageName, DContext, TParamsArray,
                           Flags);
  auto *FD = cast<FunctionDecl>(GD.getDecl());
  // Build function type from the declared parameters and return type.
  SmallVector<QualType, 16> ArgTypes;
  for (const ParmVarDecl *Parm : FD->parameters())
    ArgTypes.push_back(Parm->getType());
  CallingConv CC = FD->getType()->castAs<FunctionType>()->getCallConv();
  QualType FnType = CGM.getContext().getFunctionType(
      FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
  if (!FD->isExternallyVisible())
    SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
  if (CGM.getLangOpts().Optimize)
    SPFlags |= llvm::DISubprogram::SPFlagOptimized;
  if (Stub) {
    // A stub is a real (definition) subprogram, so no replacement is needed.
    Flags |= getCallSiteRelatedAttrs();
    SPFlags |= llvm::DISubprogram::SPFlagDefinition;
    return DBuilder.createFunction(
        DContext, Name, LinkageName, Unit, Line,
        getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags,
        TParamsArray.get(), getFunctionDeclaration(FD));
  }
  llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl(
      DContext, Name, LinkageName, Unit, Line,
      getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags,
      TParamsArray.get(), getFunctionDeclaration(FD));
  const FunctionDecl *CanonDecl = FD->getCanonicalDecl();
  // Remember the temporary node so it can be RAUW'd with the real definition.
  FwdDeclReplaceMap.emplace_back(std::piecewise_construct,
                                 std::make_tuple(CanonDecl),
                                 std::make_tuple(SP));
  return SP;
}
// Create a temporary forward-declaration subprogram for GD.
llvm::DISubprogram *CGDebugInfo::getFunctionForwardDeclaration(GlobalDecl GD) {
  const bool IsStub = false;
  return getFunctionFwdDeclOrStub(GD, IsStub);
}
// Create a definition-stub subprogram for GD.
llvm::DISubprogram *CGDebugInfo::getFunctionStub(GlobalDecl GD) {
  const bool IsStub = true;
  return getFunctionFwdDeclOrStub(GD, IsStub);
}
// Create a temporary forward-declaration DIGlobalVariable for VD and record
// it in FwdDeclReplaceMap so it can be replaced by the real definition.
llvm::DIGlobalVariable *
CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
  QualType T;
  StringRef Name, LinkageName;
  SourceLocation Loc = VD->getLocation();
  llvm::DIFile *Unit = getOrCreateFile(Loc);
  llvm::DIScope *DContext = Unit;
  unsigned Line = getLineNumber(Loc);
  llvm::MDTuple *TemplateParameters = nullptr;
  collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, TemplateParameters,
                      DContext);
  auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
  auto *GV = DBuilder.createTempGlobalVariableFwdDecl(
      DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit),
      !VD->isExternallyVisible(), nullptr, TemplateParameters, Align);
  // Key the replacement on the canonical declaration.
  FwdDeclReplaceMap.emplace_back(
      std::piecewise_construct,
      std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
      std::make_tuple(static_cast<llvm::Metadata *>(GV)));
  return GV;
}
// Return a debug node describing D: the cached node when one exists, a type
// node for type declarations, or a freshly created forward declaration for
// functions and global variables. Null for anything else.
llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
  // We only need a declaration (not a definition) of the type - so use whatever
  // we would otherwise do to get a type for a pointee. (forward declarations in
  // limited debug info, full definitions (if the type definition is available)
  // in unlimited debug info)
  if (const auto *TD = dyn_cast<TypeDecl>(D))
    return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
                           getOrCreateFile(TD->getLocation()));
  auto I = DeclCache.find(D->getCanonicalDecl());
  if (I != DeclCache.end()) {
    auto N = I->second;
    // Global variables are cached as expressions; unwrap to the variable.
    if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(N))
      return GVE->getVariable();
    return dyn_cast_or_null<llvm::DINode>(N);
  }
  // No definition for now. Emit a forward definition that might be
  // merged with a potential upcoming definition.
  if (const auto *FD = dyn_cast<FunctionDecl>(D))
    return getFunctionForwardDeclaration(FD);
  else if (const auto *VD = dyn_cast<VarDecl>(D))
    return getGlobalVariableForwardDeclaration(VD);
  return nullptr;
}
// Return a non-definition DISubprogram declaration for D if one is cached
// for the canonical decl or any redeclaration. For C++ methods not yet in
// the cache, create the member-function declaration on the fly. Returns
// null when no declaration applies (or in line-tables-only mode).
llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
  if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
    return nullptr;
  const auto *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return nullptr;
  // Setup context.
  auto *S = getDeclContextDescriptor(D);
  auto MI = SPCache.find(FD->getCanonicalDecl());
  if (MI == SPCache.end()) {
    // Method declaration not cached yet: build it from the member decl.
    if (const auto *MD = dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) {
      return CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()),
                                     cast<llvm::DICompositeType>(S));
    }
  }
  if (MI != SPCache.end()) {
    auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second);
    if (SP && !SP->isDefinition())
      return SP;
  }
  // Fall back to any redeclaration that has a cached declaration node.
  for (auto NextFD : FD->redecls()) {
    auto MI = SPCache.find(NextFD->getCanonicalDecl());
    if (MI != SPCache.end()) {
      auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second);
      if (SP && !SP->isDefinition())
        return SP;
    }
  }
  return nullptr;
}
// Create a subprogram declaration for an Objective-C method, emitted as a
// child of its interface type (requires DWARF 5, or an ObjC direct method).
// Returns null when D is not an ObjC method, the interface cannot be found,
// or its type has not been emitted yet.
llvm::DISubprogram *CGDebugInfo::getObjCMethodDeclaration(
    const Decl *D, llvm::DISubroutineType *FnType, unsigned LineNo,
    llvm::DINode::DIFlags Flags, llvm::DISubprogram::DISPFlags SPFlags) {
  if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
    return nullptr;
  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
  if (!OMD)
    return nullptr;
  if (CGM.getCodeGenOpts().DwarfVersion < 5 && !OMD->isDirectMethod())
    return nullptr;
  if (OMD->isDirectMethod())
    SPFlags |= llvm::DISubprogram::SPFlagObjCDirect;
  // Starting with DWARF V5 method declarations are emitted as children of
  // the interface type.
  auto *ID = dyn_cast_or_null<ObjCInterfaceDecl>(D->getDeclContext());
  if (!ID)
    ID = OMD->getClassInterface();
  if (!ID)
    return nullptr;
  QualType QTy(ID->getTypeForDecl(), 0);
  auto It = TypeCache.find(QTy.getAsOpaquePtr());
  if (It == TypeCache.end())
    return nullptr;
  auto *InterfaceType = cast<llvm::DICompositeType>(It->second);
  llvm::DISubprogram *FD = DBuilder.createFunction(
      InterfaceType, getObjCMethodName(OMD), StringRef(),
      InterfaceType->getFile(), LineNo, FnType, LineNo, Flags, SPFlags);
  DBuilder.finalizeSubprogram(FD);
  // Remember the declaration so it can be attached to the interface later.
  ObjCMethodCache[ID].push_back({FD, OMD->isDirectMethod()});
  return FD;
}
// getOrCreateFunctionType - Construct type. If it is a c++ method, include
// implicit parameter "this". For Objective-C methods the implicit "self"
// and "_cmd" parameters are added, and variadic functions get a trailing
// unspecified-parameter marker.
llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
                                                             QualType FnType,
                                                             llvm::DIFile *F) {
  // In CodeView, we emit the function types in line tables only because the
  // only way to distinguish between functions is by display name and type.
  if (!D || (DebugKind <= codegenoptions::DebugLineTablesOnly &&
             !CGM.getCodeGenOpts().EmitCodeView))
    // Create fake but valid subroutine type. Otherwise -verify would fail, and
    // subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields.
    return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
  if (const auto *Method = dyn_cast<CXXMethodDecl>(D))
    return getOrCreateMethodType(Method, F, false);
  const auto *FTy = FnType->getAs<FunctionType>();
  CallingConv CC = FTy ? FTy->getCallConv() : CallingConv::CC_C;
  if (const auto *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
    // Add "self" and "_cmd"
    SmallVector<llvm::Metadata *, 16> Elts;
    // First element is always return type. For 'void' functions it is NULL.
    QualType ResultTy = OMethod->getReturnType();
    // Replace the instancetype keyword with the actual type.
    if (ResultTy == CGM.getContext().getObjCInstanceType())
      ResultTy = CGM.getContext().getPointerType(
          QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
    Elts.push_back(getOrCreateType(ResultTy, F));
    // "self" pointer is always first argument.
    QualType SelfDeclTy;
    if (auto *SelfDecl = OMethod->getSelfDecl())
      SelfDeclTy = SelfDecl->getType();
    else if (auto *FPT = dyn_cast<FunctionProtoType>(FnType))
      if (FPT->getNumParams() > 1)
        SelfDeclTy = FPT->getParamType(0);
    if (!SelfDeclTy.isNull())
      Elts.push_back(
          CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F)));
    // "_cmd" pointer is always second argument.
    Elts.push_back(DBuilder.createArtificialType(
        getOrCreateType(CGM.getContext().getObjCSelType(), F)));
    // Get rest of the arguments.
    for (const auto *PI : OMethod->parameters())
      Elts.push_back(getOrCreateType(PI->getType(), F));
    // Variadic methods need a special marker at the end of the type list.
    if (OMethod->isVariadic())
      Elts.push_back(DBuilder.createUnspecifiedParameter());
    llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
    return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
                                         getDwarfCC(CC));
  }
  // Handle variadic function types; they need an additional
  // unspecified parameter.
  if (const auto *FD = dyn_cast<FunctionDecl>(D))
    if (FD->isVariadic()) {
      SmallVector<llvm::Metadata *, 16> EltTys;
      EltTys.push_back(getOrCreateType(FD->getReturnType(), F));
      if (const auto *FPT = dyn_cast<FunctionProtoType>(FnType))
        for (QualType ParamType : FPT->param_types())
          EltTys.push_back(getOrCreateType(ParamType, F));
      EltTys.push_back(DBuilder.createUnspecifiedParameter());
      llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
      return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
                                           getDwarfCC(CC));
    }
  return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
}
// Emit the DISubprogram definition for the function being code-generated,
// attach it to Fn, and push it onto the lexical block stack. Loc is the
// declaration location; ScopeLoc supplies the subprogram's scope line.
void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
                                    SourceLocation ScopeLoc, QualType FnType,
                                    llvm::Function *Fn, bool CurFuncIsThunk) {
  StringRef Name;
  StringRef LinkageName;
  // Remember the stack depth so EmitFunctionEnd can unwind to it.
  FnBeginRegionCount.push_back(LexicalBlockStack.size());
  const Decl *D = GD.getDecl();
  bool HasDecl = (D != nullptr);
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
  llvm::DIFile *Unit = getOrCreateFile(Loc);
  llvm::DIScope *FDContext = Unit;
  llvm::DINodeArray TParamsArray;
  if (!HasDecl) {
    // Use llvm function name.
    LinkageName = Fn->getName();
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // If there is a subprogram for this function available then use it.
    auto FI = SPCache.find(FD->getCanonicalDecl());
    if (FI != SPCache.end()) {
      auto *SP = dyn_cast_or_null<llvm::DISubprogram>(FI->second);
      if (SP && SP->isDefinition()) {
        LexicalBlockStack.emplace_back(SP);
        RegionMap[D].reset(SP);
        return;
      }
    }
    collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
                             TParamsArray, Flags);
  } else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D)) {
    Name = getObjCMethodName(OMD);
    Flags |= llvm::DINode::FlagPrototyped;
  } else if (isa<VarDecl>(D) &&
             GD.getDynamicInitKind() != DynamicInitKind::NoStub) {
    // This is a global initializer or atexit destructor for a global variable.
    Name = getDynamicInitializerName(cast<VarDecl>(D), GD.getDynamicInitKind(),
                                     Fn);
  } else {
    Name = Fn->getName();
    if (isa<BlockDecl>(D))
      LinkageName = Name;
    Flags |= llvm::DINode::FlagPrototyped;
  }
  // Strip the leading '\01' marker some mangled names carry.
  if (Name.startswith("\01"))
    Name = Name.substr(1);
  if (!HasDecl || D->isImplicit() || D->hasAttr<ArtificialAttr>() ||
      (isa<VarDecl>(D) && GD.getDynamicInitKind() != DynamicInitKind::NoStub)) {
    Flags |= llvm::DINode::FlagArtificial;
    // Artificial functions should not silently reuse CurLoc.
    CurLoc = SourceLocation();
  }
  if (CurFuncIsThunk)
    Flags |= llvm::DINode::FlagThunk;
  if (Fn->hasLocalLinkage())
    SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
  if (CGM.getLangOpts().Optimize)
    SPFlags |= llvm::DISubprogram::SPFlagOptimized;
  llvm::DINode::DIFlags FlagsForDef = Flags | getCallSiteRelatedAttrs();
  llvm::DISubprogram::DISPFlags SPFlagsForDef =
      SPFlags | llvm::DISubprogram::SPFlagDefinition;
  const unsigned LineNo = getLineNumber(Loc.isValid() ? Loc : CurLoc);
  unsigned ScopeLine = getLineNumber(ScopeLoc);
  llvm::DISubroutineType *DIFnType = getOrCreateFunctionType(D, FnType, Unit);
  llvm::DISubprogram *Decl = nullptr;
  if (D)
    Decl = isa<ObjCMethodDecl>(D)
               ? getObjCMethodDeclaration(D, DIFnType, LineNo, Flags, SPFlags)
               : getFunctionDeclaration(D);
  // FIXME: The function declaration we're constructing here is mostly reusing
  // declarations from CXXMethodDecl and not constructing new ones for arbitrary
  // FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for
  // all subprograms instead of the actual context since subprogram definitions
  // are emitted as CU level entities by the backend.
  llvm::DISubprogram *SP = DBuilder.createFunction(
      FDContext, Name, LinkageName, Unit, LineNo, DIFnType, ScopeLine,
      FlagsForDef, SPFlagsForDef, TParamsArray.get(), Decl);
  Fn->setSubprogram(SP);
  // We might get here with a VarDecl in the case we're generating
  // code for the initialization of globals. Do not record these decls
  // as they will overwrite the actual VarDecl Decl in the cache.
  if (HasDecl && isa<FunctionDecl>(D))
    DeclCache[D->getCanonicalDecl()].reset(SP);
  // Push the function onto the lexical block stack.
  LexicalBlockStack.emplace_back(SP);
  if (HasDecl)
    RegionMap[D].reset(SP);
}
// Emit a subprogram *declaration* (as opposed to a definition) for GD.
// When Fn is non-null the declaration is being created for call site debug
// info and the resulting subprogram is attached to Fn.
void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
                                   QualType FnType, llvm::Function *Fn) {
  StringRef Name;
  StringRef LinkageName;
  const Decl *D = GD.getDecl();
  if (!D)
    return;
  llvm::TimeTraceScope TimeScope("DebugFunction", [&]() {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
      ND->getNameForDiagnostic(OS, getPrintingPolicy(),
                               /*Qualified=*/true);
    return Name;
  });
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  llvm::DIFile *Unit = getOrCreateFile(Loc);
  // Call-site declarations are parented directly in the file; otherwise use
  // the declaration's semantic context.
  const bool IsDeclForCallSite = Fn != nullptr;
  llvm::DIScope *FDContext =
      IsDeclForCallSite ? Unit : getDeclContextDescriptor(D);
  llvm::DINodeArray TParamsArray;
  if (isa<FunctionDecl>(D)) {
    // If there is a DISubprogram for this function available then use it.
    collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
                             TParamsArray, Flags);
  } else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D)) {
    Name = getObjCMethodName(OMD);
    Flags |= llvm::DINode::FlagPrototyped;
  } else {
    llvm_unreachable("not a function or ObjC method");
  }
  // Strip the leading '\01' marker some mangled names carry.
  if (!Name.empty() && Name[0] == '\01')
    Name = Name.substr(1);
  if (D->isImplicit()) {
    Flags |= llvm::DINode::FlagArtificial;
    // Artificial functions without a location should not silently reuse CurLoc.
    if (Loc.isInvalid())
      CurLoc = SourceLocation();
  }
  unsigned LineNo = getLineNumber(Loc);
  unsigned ScopeLine = 0;
  llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
  if (CGM.getLangOpts().Optimize)
    SPFlags |= llvm::DISubprogram::SPFlagOptimized;
  llvm::DISubprogram *SP = DBuilder.createFunction(
      FDContext, Name, LinkageName, Unit, LineNo,
      getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
      TParamsArray.get(), getFunctionDeclaration(D));
  if (IsDeclForCallSite)
    Fn->setSubprogram(SP);
  DBuilder.finalizeSubprogram(SP);
}
// Ensure the callee of CallOrInvoke has a DISubprogram attached, so the
// backend can emit complete call site debug info for this call.
void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
                                          QualType CalleeType,
                                          const FunctionDecl *CalleeDecl) {
  if (!CallOrInvoke)
    return;
  // Only direct calls to a known function can be described.
  auto *Func = CallOrInvoke->getCalledFunction();
  if (!Func)
    return;
  if (Func->getSubprogram())
    return;
  // Do not emit a declaration subprogram for a builtin, a function with nodebug
  // attribute, or if call site info isn't required. Also, elide declarations
  // for functions with reserved names, as call site-related features aren't
  // interesting in this case (& also, the compiler may emit calls to these
  // functions without debug locations, which makes the verifier complain).
  if (CalleeDecl->getBuiltinID() != 0 || CalleeDecl->hasAttr<NoDebugAttr>() ||
      getCallSiteRelatedAttrs() == llvm::DINode::FlagZero)
    return;
  if (const auto *Id = CalleeDecl->getIdentifier())
    if (Id->isReservedName())
      return;
  // If there is no DISubprogram attached to the function being called,
  // create the one describing the function in order to have complete
  // call site debug info.
  if (!CalleeDecl->isStatic() && !CalleeDecl->isInlined())
    EmitFunctionDecl(CalleeDecl, CalleeDecl->getLocation(), CalleeType, Func);
}
// Open a debug scope for code inlined from FD into the current function.
void CGDebugInfo::EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD) {
  const auto *FD = cast<FunctionDecl>(GD.getDecl());
  // Reuse an existing definition subprogram when one has been cached;
  // otherwise fall back to a definition stub.
  llvm::DISubprogram *SP = nullptr;
  auto CacheIt = SPCache.find(FD->getCanonicalDecl());
  if (CacheIt != SPCache.end())
    SP = dyn_cast_or_null<llvm::DISubprogram>(CacheIt->second);
  if (!SP || !SP->isDefinition())
    SP = getFunctionStub(GD);
  FnBeginRegionCount.push_back(LexicalBlockStack.size());
  LexicalBlockStack.emplace_back(SP);
  // Record that subsequent locations are inlined at the current location.
  setInlinedAt(Builder.getCurrentDebugLocation());
  EmitLocation(Builder, FD->getLocation());
}
// Close the scope opened by EmitInlineFunctionStart and pop one level of
// the inlined-at chain.
void CGDebugInfo::EmitInlineFunctionEnd(CGBuilderTy &Builder) {
  assert(CurInlinedAt && "unbalanced inline scope stack");
  EmitFunctionEnd(Builder, nullptr);
  setInlinedAt(llvm::DebugLoc(CurInlinedAt).getInlinedAt());
}
// Update the tracked source location and point the IR builder's current
// debug location at it.
void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
  setLocation(Loc);
  // Nothing to emit without a valid, non-macro location and an open scope.
  if (CurLoc.isInvalid() || CurLoc.isMacroID() || LexicalBlockStack.empty())
    return;
  llvm::MDNode *CurScope = LexicalBlockStack.back();
  llvm::DILocation *DL =
      llvm::DILocation::get(CGM.getLLVMContext(), getLineNumber(CurLoc),
                            getColumnNumber(CurLoc), CurScope, CurInlinedAt);
  Builder.SetCurrentDebugLocation(DL);
}
// Push a new lexical block scope, parented in whatever scope is currently
// on top of the stack.
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
  llvm::MDNode *Parent =
      LexicalBlockStack.empty() ? nullptr : LexicalBlockStack.back().get();
  LexicalBlockStack.emplace_back(DBuilder.createLexicalBlock(
      cast<llvm::DIScope>(Parent), getOrCreateFile(CurLoc),
      getLineNumber(CurLoc), getColumnNumber(CurLoc)));
}
void CGDebugInfo::AppendAddressSpaceXDeref(
unsigned AddressSpace, SmallVectorImpl<int64_t> &Expr) const {
Optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(AddressSpace);
if (!DWARFAddressSpace)
return;
Expr.push_back(llvm::dwarf::DW_OP_constu);
Expr.push_back(DWARFAddressSpace.getValue());
Expr.push_back(llvm::dwarf::DW_OP_swap);
Expr.push_back(llvm::dwarf::DW_OP_xderef);
}
// Begin a lexical scope: note the location change in the line table and,
// unless we are in line-tables-only mode, push a new lexical block.
void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
                                        SourceLocation Loc) {
  setLocation(Loc);
  // Emit a line table change for the current location inside the new scope.
  Builder.SetCurrentDebugLocation(llvm::DILocation::get(
      CGM.getLLVMContext(), getLineNumber(Loc), getColumnNumber(Loc),
      LexicalBlockStack.back(), CurInlinedAt));
  // Line-tables-only debug info does not track lexical scopes.
  if (DebugKind <= codegenoptions::DebugLineTablesOnly)
    return;
  CreateLexicalBlock(Loc);
}
// End a lexical scope: note the block end in the line table and pop the
// matching scope pushed by EmitLexicalBlockStart (if any was pushed).
void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
                                      SourceLocation Loc) {
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
  EmitLocation(Builder, Loc);
  // In line-tables-only mode no scope was pushed, so there is nothing to pop.
  if (DebugKind <= codegenoptions::DebugLineTablesOnly)
    return;
  LexicalBlockStack.pop_back();
}
// Close the current function's debug scopes, unwinding the lexical block
// stack to the depth recorded when the function was started.
void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder, llvm::Function *Fn) {
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
  const unsigned BaseCount = FnBeginRegionCount.back();
  assert(BaseCount <= LexicalBlockStack.size() && "Region stack mismatch");
  // Unwind every scope this function pushed, noting each block end in the
  // line table as we go.
  while (LexicalBlockStack.size() > BaseCount) {
    EmitLocation(Builder, CurLoc);
    LexicalBlockStack.pop_back();
  }
  FnBeginRegionCount.pop_back();
  if (Fn && Fn->getSubprogram())
    DBuilder.finalizeSubprogram(Fn->getSubprogram());
}
// Build the debug type for a __block variable: the compiler-generated
// byref struct (__isa, __forwarding, __flags, __size, optional copy/dispose
// helpers, optional extended layout, padding, then the variable's own
// field). *XOffset receives the bit offset of the variable's field within
// that struct.
CGDebugInfo::BlockByRefType
CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
                                          uint64_t *XOffset) {
  SmallVector<llvm::Metadata *, 5> EltTys;
  QualType FType;
  uint64_t FieldSize, FieldOffset;
  uint32_t FieldAlign;
  llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
  QualType Type = VD->getType();
  // Lay out the fixed header fields, accumulating the running bit offset.
  FieldOffset = 0;
  FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
  EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
  EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
  FType = CGM.getContext().IntTy;
  EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
  EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
  // Copy/dispose helpers are only present when the type requires copying.
  bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type, VD);
  if (HasCopyAndDispose) {
    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
    EltTys.push_back(
        CreateMemberType(Unit, FType, "__copy_helper", &FieldOffset));
    EltTys.push_back(
        CreateMemberType(Unit, FType, "__destroy_helper", &FieldOffset));
  }
  bool HasByrefExtendedLayout;
  Qualifiers::ObjCLifetime Lifetime;
  if (CGM.getContext().getByrefLifetime(Type, Lifetime,
                                        HasByrefExtendedLayout) &&
      HasByrefExtendedLayout) {
    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
    EltTys.push_back(
        CreateMemberType(Unit, FType, "__byref_variable_layout", &FieldOffset));
  }
  // If the variable is over-aligned, insert padding so its field lands on
  // the aligned offset.
  CharUnits Align = CGM.getContext().getDeclAlign(VD);
  if (Align > CGM.getContext().toCharUnitsFromBits(
                  CGM.getTarget().getPointerAlign(0))) {
    CharUnits FieldOffsetInBytes =
        CGM.getContext().toCharUnitsFromBits(FieldOffset);
    CharUnits AlignedOffsetInBytes = FieldOffsetInBytes.alignTo(Align);
    CharUnits NumPaddingBytes = AlignedOffsetInBytes - FieldOffsetInBytes;
    if (NumPaddingBytes.isPositive()) {
      llvm::APInt pad(32, NumPaddingBytes.getQuantity());
      FType = CGM.getContext().getConstantArrayType(
          CGM.getContext().CharTy, pad, nullptr, ArrayType::Normal, 0);
      EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
    }
  }
  // Finally, the field holding the variable itself.
  FType = Type;
  llvm::DIType *WrappedTy = getOrCreateType(FType, Unit);
  FieldSize = CGM.getContext().getTypeSize(FType);
  FieldAlign = CGM.getContext().toBits(Align);
  *XOffset = FieldOffset;
  llvm::DIType *FieldTy = DBuilder.createMemberType(
      Unit, VD->getName(), Unit, 0, FieldSize, FieldAlign, FieldOffset,
      llvm::DINode::FlagZero, WrappedTy);
  EltTys.push_back(FieldTy);
  FieldOffset += FieldSize;
  llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
  return {DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0,
                                    llvm::DINode::FlagZero, nullptr, Elements),
          WrappedTy};
}
/// Create a DILocalVariable for \p VD (an auto variable or, when \p ArgNo is
/// set, a parameter) backed by \p Storage, and insert a matching
/// llvm.dbg.declare into the current insertion block.
///
/// \param VD              The variable declaration being described.
/// \param Storage         The IR value (normally an alloca) holding it.
/// \param ArgNo           One-based argument number for parameters;
///                        llvm::None for local variables.
/// \param Builder         Supplies the block the declare is inserted into.
/// \param UsePointerValue When true, Storage holds the *address* of the
///                        variable (e.g. the sret pointer), so a DW_OP_deref
///                        is appended to the location expression.
/// \returns the created variable, or null when no debug info is emitted.
///
/// Fix: removed the dead `CharUnits offset = CharUnits::fromQuantity(32)`
/// initializer in the __block branch — it was unconditionally overwritten
/// before first use; `offset` is now declared at its first real assignment.
llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
                                                llvm::Value *Storage,
                                                llvm::Optional<unsigned> ArgNo,
                                                CGBuilderTy &Builder,
                                                const bool UsePointerValue) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
  if (VD->hasAttr<NoDebugAttr>())
    return nullptr;

  // Implicit declarations (and declarations in implicit contexts) carry no
  // meaningful source position, so they get no file/line/column.
  bool Unwritten =
      VD->isImplicit() || (isa<Decl>(VD->getDeclContext()) &&
                           cast<Decl>(VD->getDeclContext())->isImplicit());
  llvm::DIFile *Unit = nullptr;
  if (!Unwritten)
    Unit = getOrCreateFile(VD->getLocation());
  llvm::DIType *Ty;
  uint64_t XOffset = 0;
  // __block variables live inside a byref wrapper struct; describe the
  // wrapped variable and remember its bit offset within the wrapper.
  if (VD->hasAttr<BlocksAttr>())
    Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType;
  else
    Ty = getOrCreateType(VD->getType(), Unit);

  // If there is no debug info for this type then do not emit debug info
  // for this variable.
  if (!Ty)
    return nullptr;

  // Get location information.
  unsigned Line = 0;
  unsigned Column = 0;
  if (!Unwritten) {
    Line = getLineNumber(VD->getLocation());
    Column = getColumnNumber(VD->getLocation());
  }
  SmallVector<int64_t, 13> Expr;
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  if (VD->isImplicit())
    Flags |= llvm::DINode::FlagArtificial;

  auto Align = getDeclAlignIfRequired(VD, CGM.getContext());

  // Non-default address spaces need an address-space operand (and possibly a
  // deref) at the front of the location expression.
  unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(VD->getType());
  AppendAddressSpaceXDeref(AddressSpace, Expr);

  // If this is implicit parameter of CXXThis or ObjCSelf kind, then give it an
  // object pointer flag.
  if (const auto *IPD = dyn_cast<ImplicitParamDecl>(VD)) {
    if (IPD->getParameterKind() == ImplicitParamDecl::CXXThis ||
        IPD->getParameterKind() == ImplicitParamDecl::ObjCSelf)
      Flags |= llvm::DINode::FlagObjectPointer;
  }

  // Note: Older versions of clang used to emit byval references with an extra
  // DW_OP_deref, because they referenced the IR arg directly instead of
  // referencing an alloca. Newer versions of LLVM don't treat allocas
  // differently from other function arguments when used in a dbg.declare.
  auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
  StringRef Name = VD->getName();
  if (!Name.empty()) {
    if (VD->hasAttr<BlocksAttr>()) {
      // Here, we need an offset *into* the alloca: step over the
      // __forwarding pointer, then to the variable's slot in the byref
      // struct.
      Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
      // offset of __forwarding field
      CharUnits offset = CGM.getContext().toCharUnitsFromBits(
          CGM.getTarget().getPointerWidth(0));
      Expr.push_back(offset.getQuantity());
      Expr.push_back(llvm::dwarf::DW_OP_deref);
      Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
      // offset of x field
      offset = CGM.getContext().toCharUnitsFromBits(XOffset);
      Expr.push_back(offset.getQuantity());
    }
  } else if (const auto *RT = dyn_cast<RecordType>(VD->getType())) {
    // If VD is an anonymous union then Storage represents value for
    // all union fields.
    const RecordDecl *RD = RT->getDecl();
    if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
      // GDB has trouble finding local variables in anonymous unions, so we emit
      // artificial local variables for each of the members.
      //
      // FIXME: Remove this code as soon as GDB supports this.
      // The debug info verifier in LLVM operates based on the assumption that a
      // variable has the same size as its storage and we had to disable the
      // check for artificial variables.
      for (const auto *Field : RD->fields()) {
        llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
        StringRef FieldName = Field->getName();

        // Ignore unnamed fields. Do not ignore unnamed records.
        if (FieldName.empty() && !isa<RecordType>(Field->getType()))
          continue;

        // Use VarDecl's Tag, Scope and Line number.
        auto FieldAlign = getDeclAlignIfRequired(Field, CGM.getContext());
        auto *D = DBuilder.createAutoVariable(
            Scope, FieldName, Unit, Line, FieldTy, CGM.getLangOpts().Optimize,
            Flags | llvm::DINode::FlagArtificial, FieldAlign);

        // Insert an llvm.dbg.declare into the current block.
        DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
                               llvm::DILocation::get(CGM.getLLVMContext(), Line,
                                                     Column, Scope,
                                                     CurInlinedAt),
                               Builder.GetInsertBlock());
      }
    }
  }

  // Clang stores the sret pointer provided by the caller in a static alloca.
  // Use DW_OP_deref to tell the debugger to load the pointer and treat it as
  // the address of the variable.
  if (UsePointerValue) {
    assert(std::find(Expr.begin(), Expr.end(), llvm::dwarf::DW_OP_deref) ==
               Expr.end() &&
           "Debug info already contains DW_OP_deref.");
    Expr.push_back(llvm::dwarf::DW_OP_deref);
  }

  // Create the descriptor for the variable.
  auto *D = ArgNo ? DBuilder.createParameterVariable(
                        Scope, Name, *ArgNo, Unit, Line, Ty,
                        CGM.getLangOpts().Optimize, Flags)
                  : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
                                                CGM.getLangOpts().Optimize,
                                                Flags, Align);

  // Insert an llvm.dbg.declare into the current block.
  DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
                         llvm::DILocation::get(CGM.getLLVMContext(), Line,
                                               Column, Scope, CurInlinedAt),
                         Builder.GetInsertBlock());
  return D;
}
/// Register debug info for an automatic (local) variable.
llvm::DILocalVariable *
CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, llvm::Value *Storage,
                                       CGBuilderTy &Builder,
                                       const bool UsePointerValue) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  // Locals carry no argument number, hence llvm::None.
  return EmitDeclare(VD, Storage, llvm::None, Builder, UsePointerValue);
}
/// Emit debug info for a source label and bind it to the current insertion
/// point with an llvm.dbg.label intrinsic.
void CGDebugInfo::EmitLabel(const LabelDecl *D, CGBuilderTy &Builder) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");

  if (D->hasAttr<NoDebugAttr>())
    return;

  auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
  llvm::DIFile *Unit = getOrCreateFile(D->getLocation());

  // Source position of the label itself.
  const unsigned Line = getLineNumber(D->getLocation());
  const unsigned Column = getColumnNumber(D->getLocation());
  StringRef Name = D->getName();

  // Build the DILabel descriptor and attach it at this point.
  auto *L =
      DBuilder.createLabel(Scope, Name, Unit, Line, CGM.getLangOpts().Optimize);
  DBuilder.insertLabel(L,
                       llvm::DILocation::get(CGM.getLLVMContext(), Line, Column,
                                             Scope, CurInlinedAt),
                       Builder.GetInsertBlock());
}
/// Wrap the debug type used for Objective-C 'self' in an object-pointer
/// type, preferring an already-cached type for \p QualTy when one exists.
llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy,
                                          llvm::DIType *Ty) {
  if (llvm::DIType *CachedTy = getTypeOrNull(QualTy))
    Ty = CachedTy;
  return DBuilder.createObjectPointerType(Ty);
}
// Emit debug info for a variable that a block captures from its enclosing
// scope. Storage points at the block literal; the emitted DIExpression
// navigates from there to the captured variable, additionally chasing the
// __forwarding pointer for __block captures.
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
    const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
    const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");

  // Nothing to attach the declare to without an insertion block.
  if (Builder.GetInsertBlock() == nullptr)
    return;
  if (VD->hasAttr<NoDebugAttr>())
    return;

  bool isByRef = VD->hasAttr<BlocksAttr>();

  uint64_t XOffset = 0;
  llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
  llvm::DIType *Ty;
  // __block variables live inside a byref wrapper struct; XOffset receives
  // the bit offset of the variable within that wrapper.
  if (isByRef)
    Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType;
  else
    Ty = getOrCreateType(VD->getType(), Unit);

  // Self is passed along as an implicit non-arg variable in a
  // block. Mark it as the object pointer.
  if (const auto *IPD = dyn_cast<ImplicitParamDecl>(VD))
    if (IPD->getParameterKind() == ImplicitParamDecl::ObjCSelf)
      Ty = CreateSelfType(VD->getType(), Ty);

  // Get location information; fall back to the current location when the
  // declaration has no valid one.
  const unsigned Line =
      getLineNumber(VD->getLocation().isValid() ? VD->getLocation() : CurLoc);
  unsigned Column = getColumnNumber(VD->getLocation());

  const llvm::DataLayout &target = CGM.getDataLayout();

  // Byte offset of this capture within the block literal struct.
  CharUnits offset = CharUnits::fromQuantity(
      target.getStructLayout(blockInfo.StructureType)
          ->getElementOffset(blockInfo.getCapture(VD).getIndex()));

  // Build the DWARF location expression: deref the block pointer, then step
  // to the capture's slot.
  SmallVector<int64_t, 9> addr;
  addr.push_back(llvm::dwarf::DW_OP_deref);
  addr.push_back(llvm::dwarf::DW_OP_plus_uconst);
  addr.push_back(offset.getQuantity());
  if (isByRef) {
    // For __block captures, additionally follow the __forwarding pointer and
    // then step to the variable's slot inside the byref struct.
    addr.push_back(llvm::dwarf::DW_OP_deref);
    addr.push_back(llvm::dwarf::DW_OP_plus_uconst);
    // offset of __forwarding field
    offset =
        CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0));
    addr.push_back(offset.getQuantity());
    addr.push_back(llvm::dwarf::DW_OP_deref);
    addr.push_back(llvm::dwarf::DW_OP_plus_uconst);
    // offset of x field
    offset = CGM.getContext().toCharUnitsFromBits(XOffset);
    addr.push_back(offset.getQuantity());
  }

  // Create the descriptor for the variable.
  auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
  auto *D = DBuilder.createAutoVariable(
      cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit,
      Line, Ty, false, llvm::DINode::FlagZero, Align);

  // Insert an llvm.dbg.declare into the current block, honoring an explicit
  // insertion point when the caller provides one.
  auto DL = llvm::DILocation::get(CGM.getLLVMContext(), Line, Column,
                                  LexicalBlockStack.back(), CurInlinedAt);
  auto *Expr = DBuilder.createExpression(addr);
  if (InsertPoint)
    DBuilder.insertDeclare(Storage, D, Expr, DL, InsertPoint);
  else
    DBuilder.insertDeclare(Storage, D, Expr, DL, Builder.GetInsertBlock());
}
/// Emit debug info for a function parameter.
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
                                           unsigned ArgNo,
                                           CGBuilderTy &Builder) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  // Delegate to the common declare path, tagging the variable with its
  // argument number.
  EmitDeclare(VD, AI, ArgNo, Builder);
}
namespace {
// An (offset, capture) pair used to order a block's captures by their
// position within the block literal. A null Capture denotes the C++ 'this'
// capture.
struct BlockLayoutChunk {
  uint64_t OffsetInBits;
  const BlockDecl::Capture *Capture;
};
// Strict-weak ordering by layout offset, for array_pod_sort.
bool operator<(const BlockLayoutChunk &lhs, const BlockLayoutChunk &rhs) {
  return lhs.OffsetInBits < rhs.OffsetInBits;
}
} // namespace
// Append debug-info descriptors for the fixed (non-capture) header members
// of a block literal to Fields, taking each member's offset from
// BlockLayout.
void CGDebugInfo::collectDefaultFieldsForBlockLiteralDeclare(
    const CGBlockInfo &Block, const ASTContext &Context, SourceLocation Loc,
    const llvm::StructLayout &BlockLayout, llvm::DIFile *Unit,
    SmallVectorImpl<llvm::Metadata *> &Fields) {
  // Blocks in OpenCL have unique constraints which make the standard fields
  // redundant while requiring size and align fields for enqueue_kernel. See
  // initializeForBlockHeader in CGBlocks.cpp
  if (CGM.getLangOpts().OpenCL) {
    Fields.push_back(createFieldType("__size", Context.IntTy, Loc, AS_public,
                                     BlockLayout.getElementOffsetInBits(0),
                                     Unit, Unit));
    Fields.push_back(createFieldType("__align", Context.IntTy, Loc, AS_public,
                                     BlockLayout.getElementOffsetInBits(1),
                                     Unit, Unit));
  } else {
    // Standard block header: isa, flags, reserved, invoke-function pointer,
    // and descriptor, in that element order.
    Fields.push_back(createFieldType("__isa", Context.VoidPtrTy, Loc, AS_public,
                                     BlockLayout.getElementOffsetInBits(0),
                                     Unit, Unit));
    Fields.push_back(createFieldType("__flags", Context.IntTy, Loc, AS_public,
                                     BlockLayout.getElementOffsetInBits(1),
                                     Unit, Unit));
    Fields.push_back(
        createFieldType("__reserved", Context.IntTy, Loc, AS_public,
                        BlockLayout.getElementOffsetInBits(2), Unit, Unit));
    auto *FnTy = Block.getBlockExpr()->getFunctionType();
    auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
    Fields.push_back(createFieldType("__FuncPtr", FnPtrType, Loc, AS_public,
                                     BlockLayout.getElementOffsetInBits(3),
                                     Unit, Unit));
    // The descriptor type depends on whether the block needs copy/dispose
    // helpers.
    Fields.push_back(createFieldType(
        "__descriptor",
        Context.getPointerType(Block.NeedsCopyDispose
                                   ? Context.getBlockDescriptorExtendedType()
                                   : Context.getBlockDescriptorType()),
        Loc, AS_public, BlockLayout.getElementOffsetInBits(4), Unit, Unit));
  }
}
// Emit debug info for the implicit block-literal parameter of a block
// invocation function. A struct type describing the literal's concrete
// layout is synthesized on the fly: header fields first, then the captured
// C++ 'this' (if any) and the variable captures, sorted by offset.
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
                                                       StringRef Name,
                                                       unsigned ArgNo,
                                                       llvm::AllocaInst *Alloca,
                                                       CGBuilderTy &Builder) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  ASTContext &C = CGM.getContext();
  const BlockDecl *blockDecl = block.getBlockDecl();

  // Collect some general information about the block's location.
  SourceLocation loc = blockDecl->getCaretLocation();
  llvm::DIFile *tunit = getOrCreateFile(loc);
  unsigned line = getLineNumber(loc);
  unsigned column = getColumnNumber(loc);

  // Build the debug-info type for the block literal.
  getDeclContextDescriptor(blockDecl);

  const llvm::StructLayout *blockLayout =
      CGM.getDataLayout().getStructLayout(block.StructureType);

  // Fixed header fields (or size/align under OpenCL).
  SmallVector<llvm::Metadata *, 16> fields;
  collectDefaultFieldsForBlockLiteralDeclare(block, C, loc, *blockLayout, tunit,
                                             fields);

  // We want to sort the captures by offset, not because DWARF
  // requires this, but because we're paranoid about debuggers.
  SmallVector<BlockLayoutChunk, 8> chunks;

  // 'this' capture.
  if (blockDecl->capturesCXXThis()) {
    BlockLayoutChunk chunk;
    chunk.OffsetInBits =
        blockLayout->getElementOffsetInBits(block.CXXThisIndex);
    chunk.Capture = nullptr;
    chunks.push_back(chunk);
  }

  // Variable captures.
  for (const auto &capture : blockDecl->captures()) {
    const VarDecl *variable = capture.getVariable();
    const CGBlockInfo::Capture &captureInfo = block.getCapture(variable);

    // Ignore constant captures.
    if (captureInfo.isConstant())
      continue;

    BlockLayoutChunk chunk;
    chunk.OffsetInBits =
        blockLayout->getElementOffsetInBits(captureInfo.getIndex());
    chunk.Capture = &capture;
    chunks.push_back(chunk);
  }

  // Sort by offset.
  llvm::array_pod_sort(chunks.begin(), chunks.end());

  for (const BlockLayoutChunk &Chunk : chunks) {
    uint64_t offsetInBits = Chunk.OffsetInBits;
    const BlockDecl::Capture *capture = Chunk.Capture;

    // If we have a null capture, this must be the C++ 'this' capture.
    if (!capture) {
      QualType type;

      // Recover the type of 'this' from the enclosing method, or from the
      // enclosing record when the block sits in a non-method context.
      if (auto *Method =
              cast_or_null<CXXMethodDecl>(blockDecl->getNonClosureContext()))
        type = Method->getThisType();
      else if (auto *RDecl = dyn_cast<CXXRecordDecl>(blockDecl->getParent()))
        type = QualType(RDecl->getTypeForDecl(), 0);
      else
        llvm_unreachable("unexpected block declcontext");

      fields.push_back(createFieldType("this", type, loc, AS_public,
                                       offsetInBits, tunit, tunit));
      continue;
    }

    const VarDecl *variable = capture->getVariable();
    StringRef name = variable->getName();

    llvm::DIType *fieldType;
    if (capture->isByRef()) {
      TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
      auto Align = PtrInfo.AlignIsRequired ? PtrInfo.Align : 0;
      // FIXME: This recomputes the layout of the BlockByRefWrapper.
      uint64_t xoffset;
      // A __block capture is stored as a pointer to its byref wrapper.
      fieldType =
          EmitTypeForVarWithBlocksAttr(variable, &xoffset).BlockByRefWrapper;
      fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
      fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
                                            PtrInfo.Width, Align, offsetInBits,
                                            llvm::DINode::FlagZero, fieldType);
    } else {
      auto Align = getDeclAlignIfRequired(variable, CGM.getContext());
      fieldType = createFieldType(name, variable->getType(), loc, AS_public,
                                  offsetInBits, Align, tunit, tunit);
    }
    fields.push_back(fieldType);
  }

  // Give each synthesized literal struct a unique name.
  SmallString<36> typeName;
  llvm::raw_svector_ostream(typeName)
      << "__block_literal_" << CGM.getUniqueBlockCount();

  llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields);

  llvm::DIType *type =
      DBuilder.createStructType(tunit, typeName.str(), tunit, line,
                                CGM.getContext().toBits(block.BlockSize), 0,
                                llvm::DINode::FlagZero, nullptr, fieldsArray);
  // The parameter itself is a pointer to the literal.
  type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);

  // Get overall information about the block.
  llvm::DINode::DIFlags flags = llvm::DINode::FlagArtificial;
  auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back());

  // Create the descriptor for the parameter.
  auto *debugVar = DBuilder.createParameterVariable(
      scope, Name, ArgNo, tunit, line, type, CGM.getLangOpts().Optimize, flags);

  // Insert an llvm.dbg.declare into the current block.
  DBuilder.insertDeclare(Alloca, debugVar, DBuilder.createExpression(),
                         llvm::DILocation::get(CGM.getLLVMContext(), line,
                                               column, scope, CurInlinedAt),
                         Builder.GetInsertBlock());
}
/// Return the debug node describing the in-class declaration of the static
/// data member \p D, building it (and attaching it to the class's composite
/// type) on first use. Returns null when \p D is not a static data member.
llvm::DIDerivedType *
CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
  if (!D || !D->isStaticDataMember())
    return nullptr;

  auto Cached = StaticDataMemberCache.find(D->getCanonicalDecl());
  if (Cached != StaticDataMemberCache.end()) {
    assert(Cached->second &&
           "Static data member declaration should still exist");
    return Cached->second;
  }

  // Not cached: lazily construct the declaration and add it to the class
  // type (used when only a limited form of the type was emitted).
  auto *Ctxt = cast<llvm::DICompositeType>(getDeclContextDescriptor(D));
  return CreateRecordStaticField(D, Ctxt,
                                 cast<RecordDecl>(D->getDeclContext()));
}
/// Create a global-variable debug entry for every named field of the
/// anonymous record \p RD, recursing into unnamed records nested inside it.
/// All entries share \p Var's storage; the last one created is returned.
llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
    const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo,
    StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) {
  llvm::DIGlobalVariableExpression *GVE = nullptr;

  for (const auto *Field : RD->fields()) {
    llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
    StringRef FieldName = Field->getName();

    if (FieldName.empty()) {
      // Unnamed field: descend into anonymous records, skip anything else.
      if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
        GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
                                     Var, DContext);
      continue;
    }

    // Named member: emit it as its own global, reusing the enclosing
    // VarDecl's scope and line.
    GVE = DBuilder.createGlobalVariableExpression(
        DContext, FieldName, LinkageName, Unit, LineNo, FieldTy,
        Var->hasLocalLinkage());
    Var->addDebugInfo(GVE);
  }
  return GVE;
}
// Emit (or, when cached, reattach) debug info for the global variable
// definition D, backed by the IR global Var.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
                                     const VarDecl *D) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  if (D->hasAttr<NoDebugAttr>())
    return;

  llvm::TimeTraceScope TimeScope("DebugGlobalVariable", [&]() {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    D->getNameForDiagnostic(OS, getPrintingPolicy(),
                            /*Qualified=*/true);
    return Name;
  });

  // If we already created a DIGlobalVariable for this declaration, just attach
  // it to the llvm::GlobalVariable.
  auto Cached = DeclCache.find(D->getCanonicalDecl());
  if (Cached != DeclCache.end())
    return Var->addDebugInfo(
        cast<llvm::DIGlobalVariableExpression>(Cached->second));

  // Create global variable debug descriptor.
  llvm::DIFile *Unit = nullptr;
  llvm::DIScope *DContext = nullptr;
  unsigned LineNo;
  StringRef DeclName, LinkageName;
  QualType T;
  llvm::MDTuple *TemplateParameters = nullptr;
  collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName,
                      TemplateParameters, DContext);

  // Attempt to store one global variable for the declaration - even if we
  // emit a lot of fields.
  llvm::DIGlobalVariableExpression *GVE = nullptr;

  // If this is an anonymous union then we'll want to emit a global
  // variable for each member of the anonymous union so that it's possible
  // to find the name of any field in the union.
  if (T->isUnionType() && DeclName.empty()) {
    const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
    assert(RD->isAnonymousStructOrUnion() &&
           "unnamed non-anonymous struct or union?");
    GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
  } else {
    auto Align = getDeclAlignIfRequired(D, CGM.getContext());

    SmallVector<int64_t, 4> Expr;
    unsigned AddressSpace =
        CGM.getContext().getTargetAddressSpace(D->getType());
    // On the CUDA device side, __shared__/__constant__ variables live in
    // dedicated address spaces regardless of the type's nominal one.
    if (CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) {
      if (D->hasAttr<CUDASharedAttr>())
        AddressSpace =
            CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared);
      else if (D->hasAttr<CUDAConstantAttr>())
        AddressSpace =
            CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
    }
    AppendAddressSpaceXDeref(AddressSpace, Expr);

    GVE = DBuilder.createGlobalVariableExpression(
        DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
        Var->hasLocalLinkage(), true,
        Expr.empty() ? nullptr : DBuilder.createExpression(Expr),
        getOrCreateStaticDataMemberDeclarationOrNull(D), TemplateParameters,
        Align);
    Var->addDebugInfo(GVE);
  }
  // Cache the expression (possibly null for an anonymous union with no
  // named members) so later references to the same declaration reuse it.
  DeclCache[D->getCanonicalDecl()].reset(GVE);
}
// Emit debug info for a constant that has no backing IR global (e.g. an
// enumerator, or a constant folded away), recording its value in a
// DIExpression where representable.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  if (VD->hasAttr<NoDebugAttr>())
    return;

  llvm::TimeTraceScope TimeScope("DebugConstGlobalVariable", [&]() {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    VD->getNameForDiagnostic(OS, getPrintingPolicy(),
                             /*Qualified=*/true);
    return Name;
  });

  auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
  // Create the descriptor for the variable.
  llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
  StringRef Name = VD->getName();
  llvm::DIType *Ty = getOrCreateType(VD->getType(), Unit);

  if (const auto *ECD = dyn_cast<EnumConstantDecl>(VD)) {
    const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
    assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");

    if (CGM.getCodeGenOpts().EmitCodeView) {
      // If CodeView, emit enums as global variables, unless they are defined
      // inside a class. We do this because MSVC doesn't emit S_CONSTANTs for
      // enums in classes, and because it is difficult to attach this scope
      // information to the global variable.
      if (isa<RecordDecl>(ED->getDeclContext()))
        return;
    } else {
      // If not CodeView, emit DW_TAG_enumeration_type if necessary. For
      // example: for "enum { ZERO };", a DW_TAG_enumeration_type is created the
      // first time `ZERO` is referenced in a function.
      llvm::DIType *EDTy =
          getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
      assert (EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
      (void)EDTy;
      return;
    }
  }

  // Do not emit separate definitions for function local consts.
  if (isa<FunctionDecl>(VD->getDeclContext()))
    return;

  VD = cast<ValueDecl>(VD->getCanonicalDecl());
  auto *VarD = dyn_cast<VarDecl>(VD);
  if (VarD && VarD->isStaticDataMember()) {
    auto *RD = cast<RecordDecl>(VarD->getDeclContext());
    getDeclContextDescriptor(VarD);
    // Ensure that the type is retained even though it's otherwise unreferenced.
    //
    // FIXME: This is probably unnecessary, since Ty should reference RD
    // through its scope.
    RetainedTypes.push_back(
        CGM.getContext().getRecordType(RD).getAsOpaquePtr());

    return;
  }
  llvm::DIScope *DContext = getDeclContextDescriptor(VD);

  // Bail out if this constant was already emitted.
  auto &GV = DeclCache[VD];
  if (GV)
    return;

  llvm::DIExpression *InitExpr = nullptr;
  if (CGM.getContext().getTypeSize(VD->getType()) <= 64) {
    // FIXME: Add a representation for integer constants wider than 64 bits.
    if (Init.isInt())
      InitExpr =
          DBuilder.createConstantValueExpression(Init.getInt().getExtValue());
    else if (Init.isFloat())
      InitExpr = DBuilder.createConstantValueExpression(
          Init.getFloat().bitcastToAPInt().getZExtValue());
  }

  llvm::MDTuple *TemplateParameters = nullptr;

  // Variable template specializations additionally carry their template
  // arguments.
  if (isa<VarTemplateSpecializationDecl>(VD))
    if (VarD) {
      llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VarD, &*Unit);
      TemplateParameters = parameterNodes.get();
    }

  GV.reset(DBuilder.createGlobalVariableExpression(
      DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
      true, true, InitExpr, getOrCreateStaticDataMemberDeclarationOrNull(VarD),
      TemplateParameters, Align));
}
/// Emit a debug descriptor for an external variable declaration (not a
/// definition) and attach it to \p Var.
void CGDebugInfo::EmitExternalVariable(llvm::GlobalVariable *Var,
                                       const VarDecl *D) {
  assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
  if (D->hasAttr<NoDebugAttr>())
    return;

  auto Align = getDeclAlignIfRequired(D, CGM.getContext());
  llvm::DIFile *Unit = getOrCreateFile(D->getLocation());
  llvm::DIType *Ty = getOrCreateType(D->getType(), Unit);
  llvm::DIScope *DContext = getDeclContextDescriptor(D);

  // A declaration is neither local nor a definition, and carries no
  // initializer expression, member declaration, or template parameters.
  llvm::DIGlobalVariableExpression *GVE =
      DBuilder.createGlobalVariableExpression(
          DContext, D->getName(), StringRef(), Unit,
          getLineNumber(D->getLocation()), Ty, false, false, nullptr, nullptr,
          nullptr, Align);
  Var->addDebugInfo(GVE);
}
/// Return the innermost debug scope: the current lexical block when one is
/// open, otherwise \p D's semantic context (rooted at its module or the CU).
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
  // Prefer the lexical scope we are currently emitting into.
  if (!LexicalBlockStack.empty())
    return LexicalBlockStack.back();

  // Otherwise fall back to the declaration's context, rooted at its owning
  // module when there is one and at the compile unit when not.
  if (llvm::DIScope *Mod = getParentModuleOrNull(D))
    return getContextDescriptor(D, Mod);
  return getContextDescriptor(D, TheCU);
}
/// Emit an imported-module node for a using-directive.
void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
  if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
    return;

  const NamespaceDecl *NSDecl = UD.getNominatedNamespace();
  // Anonymous namespaces are skipped unless explicit import nodes were
  // requested.
  if (NSDecl->isAnonymousNamespace() &&
      !CGM.getCodeGenOpts().DebugExplicitImport)
    return;

  // Fall back to the current location when the directive has none.
  auto Loc = UD.getLocation();
  if (!Loc.isValid())
    Loc = CurLoc;
  DBuilder.createImportedModule(
      getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
      getOrCreateNamespace(NSDecl), getOrCreateFile(Loc), getLineNumber(Loc));
}
// Emit an imported-declaration node for a using-declaration.
void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
  if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
    return;
  assert(UD.shadow_size() &&
         "We shouldn't be codegening an invalid UsingDecl containing no decls");
  // Emitting one decl is sufficient - debuggers can detect that this is an
  // overloaded name & provide lookup for all the overloads.
  const UsingShadowDecl &USD = **UD.shadow_begin();

  // FIXME: Skip functions with undeduced auto return type for now since we
  // don't currently have the plumbing for separate declarations & definitions
  // of free functions and mismatched types (auto in the declaration, concrete
  // return type in the definition)
  if (const auto *FD = dyn_cast<FunctionDecl>(USD.getUnderlyingDecl()))
    if (const auto *AT =
            FD->getType()->castAs<FunctionProtoType>()->getContainedAutoType())
      if (AT->getDeducedType().isNull())
        return;

  // Only emit the import when the target itself has a debug node to point
  // at.
  if (llvm::DINode *Target =
          getDeclarationOrDefinition(USD.getUnderlyingDecl())) {
    auto Loc = USD.getLocation();
    DBuilder.createImportedDeclaration(
        getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
        getOrCreateFile(Loc), getLineNumber(Loc));
  }
}
/// Emit an imported-declaration node for a module import. Only emitted when
/// debug info is tuned for LLDB.
void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
  if (CGM.getCodeGenOpts().getDebuggerTuning() != llvm::DebuggerKind::LLDB)
    return;

  Module *M = ID.getImportedModule();
  if (!M)
    return;

  auto Info = ASTSourceDescriptor(*M);
  auto Loc = ID.getLocation();
  DBuilder.createImportedDeclaration(
      getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
      getOrCreateModuleRef(Info, DebugTypeExtRefs), getOrCreateFile(Loc),
      getLineNumber(Loc));
}
// Emit (and cache) an imported-declaration node for a namespace alias,
// recursing through chains of aliases.
llvm::DIImportedEntity *
CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
  if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
    return nullptr;

  // Reuse a previously emitted node for this alias if there is one.
  auto &VH = NamespaceAliasCache[&NA];
  if (VH)
    return cast<llvm::DIImportedEntity>(VH);

  llvm::DIImportedEntity *R;
  auto Loc = NA.getLocation();
  // An alias of an alias imports the inner alias's node; a direct alias
  // imports the namespace itself.
  if (const auto *Underlying =
          dyn_cast<NamespaceAliasDecl>(NA.getAliasedNamespace()))
    // This could cache & dedup here rather than relying on metadata deduping.
    R = DBuilder.createImportedDeclaration(
        getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
        EmitNamespaceAlias(*Underlying), getOrCreateFile(Loc),
        getLineNumber(Loc), NA.getName());
  else
    R = DBuilder.createImportedDeclaration(
        getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
        getOrCreateNamespace(cast<NamespaceDecl>(NA.getAliasedNamespace())),
        getOrCreateFile(Loc), getLineNumber(Loc), NA.getName());
  VH.reset(R);
  return R;
}
llvm::DINamespace *
CGDebugInfo::getOrCreateNamespace(const NamespaceDecl *NSDecl) {
  // Don't canonicalize the NamespaceDecl here: The DINamespace will be uniqued
  // if necessary, and this way multiple declarations of the same namespace in
  // different parent modules stay distinct.
  auto Cached = NamespaceCache.find(NSDecl);
  if (Cached != NamespaceCache.end())
    return cast<llvm::DINamespace>(Cached->second);

  llvm::DIScope *Context = getDeclContextDescriptor(NSDecl);
  // Don't trust the context if it is a DIModule (see comment above).
  auto *NS =
      DBuilder.createNameSpace(Context, NSDecl->getName(), NSDecl->isInline());
  NamespaceCache[NSDecl].reset(NS);
  return NS;
}
// Record the 64-bit split-DWARF (DWO) signature on the main compile unit.
void CGDebugInfo::setDwoId(uint64_t Signature) {
  assert(TheCU && "no main compile unit");
  TheCU->setDWOId(Signature);
}
// Flush all deferred debug-info work: complete Objective-C interfaces,
// resolve temporary/forward-declared metadata nodes against the caches, hand
// retained types to DIBuilder, and finalize it.
void CGDebugInfo::finalize() {
  // Creating types might create further types - invalidating the current
  // element and the size(), so don't cache/reference them.
  for (size_t i = 0; i != ObjCInterfaceCache.size(); ++i) {
    ObjCInterfaceCacheEntry E = ObjCInterfaceCache[i];
    // Replace the temporary declaration with the full definition when the
    // interface has one; otherwise keep the declaration node.
    llvm::DIType *Ty = E.Type->getDecl()->getDefinition()
                           ? CreateTypeDefinition(E.Type, E.Unit)
                           : E.Decl;
    DBuilder.replaceTemporary(llvm::TempDIType(E.Decl), Ty);
  }

  // Add methods to interface.
  for (const auto &P : ObjCMethodCache) {
    if (P.second.empty())
      continue;

    QualType QTy(P.first->getTypeForDecl(), 0);
    auto It = TypeCache.find(QTy.getAsOpaquePtr());
    assert(It != TypeCache.end());

    llvm::DICompositeType *InterfaceDecl =
        cast<llvm::DICompositeType>(It->second);

    // Start from the interface's existing element list and append the
    // collected subprograms.
    auto CurElts = InterfaceDecl->getElements();
    SmallVector<llvm::Metadata *, 16> EltTys(CurElts.begin(), CurElts.end());

    // For DWARF v4 or earlier, only add objc_direct methods.
    for (auto &SubprogramDirect : P.second)
      if (CGM.getCodeGenOpts().DwarfVersion >= 5 || SubprogramDirect.getInt())
        EltTys.push_back(SubprogramDirect.getPointer());

    llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
    DBuilder.replaceArrays(InterfaceDecl, Elements);
  }

  // Swap each forward declaration for the completed type that the type
  // cache now holds.
  for (const auto &P : ReplaceMap) {
    assert(P.second);
    auto *Ty = cast<llvm::DIType>(P.second);
    assert(Ty->isForwardDecl());

    auto It = TypeCache.find(P.first);
    assert(It != TypeCache.end());
    assert(It->second);

    DBuilder.replaceTemporary(llvm::TempDIType(Ty),
                              cast<llvm::DIType>(It->second));
  }

  for (const auto &P : FwdDeclReplaceMap) {
    assert(P.second);
    llvm::TempMDNode FwdDecl(cast<llvm::MDNode>(P.second));
    llvm::Metadata *Repl;

    auto It = DeclCache.find(P.first);
    // If there has been no definition for the declaration, call RAUW
    // with ourselves, that will destroy the temporary MDNode and
    // replace it with a standard one, avoiding leaking memory.
    if (It == DeclCache.end())
      Repl = P.second;
    else
      Repl = It->second;

    // DeclCache stores variable *expressions*; the temporary refers to the
    // variable itself.
    if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(Repl))
      Repl = GVE->getVariable();
    DBuilder.replaceTemporary(std::move(FwdDecl), cast<llvm::MDNode>(Repl));
  }

  // We keep our own list of retained types, because we need to look
  // up the final type in the type cache.
  for (auto &RT : RetainedTypes)
    if (auto MD = TypeCache[RT])
      DBuilder.retainType(cast<llvm::DIType>(MD));

  DBuilder.finalize();
}
// Don't ignore in case of explicit cast where it is referenced indirectly.
void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
  if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
    return;
  // Build the type's debug node and keep it alive in the retained-types
  // list even when nothing else references it.
  if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
    DBuilder.retainType(DieTy);
}
/// Emit debug info for \p Ty and retain it, under the maybe-unused debug
/// info level.
void CGDebugInfo::EmitAndRetainType(QualType Ty) {
  if (!CGM.getCodeGenOpts().hasMaybeUnusedDebugInfo())
    return;
  if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
    DBuilder.retainType(DieTy);
}
/// Translate a clang SourceLocation into an llvm::DebugLoc anchored in the
/// current lexical scope; returns an empty location outside any scope.
llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
  if (LexicalBlockStack.empty())
    return llvm::DebugLoc();

  return llvm::DILocation::get(CGM.getLLVMContext(), getLineNumber(Loc),
                               getColumnNumber(Loc),
                               LexicalBlockStack.back());
}
/// Decide whether DW_AT_call_* related flags should be attached to emitted
/// subprograms.
llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
  // Call site-related attributes are only useful in optimized programs, and
  // when there's a possibility of debugging backtraces.
  if (!CGM.getLangOpts().Optimize || DebugKind == codegenoptions::NoDebugInfo ||
      DebugKind == codegenoptions::LocTrackingOnly)
    return llvm::DINode::FlagZero;

  // The attributes are defined by DWARF v5, so any v5+ consumer gets them.
  const auto &Opts = CGM.getCodeGenOpts();
  if (Opts.DwarfVersion >= 5)
    return llvm::DINode::FlagAllCallsDescribed;

  // Some debuggers, while not fully DWARF v5-compliant, accept these
  // attributes as an extension on top of DWARF v4.
  const bool SupportsDWARFv4Ext =
      Opts.DwarfVersion == 4 &&
      (Opts.getDebuggerTuning() == llvm::DebuggerKind::LLDB ||
       Opts.getDebuggerTuning() == llvm::DebuggerKind::GDB);
  return SupportsDWARFv4Ext ? llvm::DINode::FlagAllCallsDescribed
                            : llvm::DINode::FlagZero;
}
|
#include "constant_calculation_rule.hpp"
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "constant_mappings.hpp"
#include "expression/abstract_expression.hpp"
#include "expression/abstract_predicate_expression.hpp"
#include "expression/evaluation/expression_evaluator.hpp"
#include "expression/expression_utils.hpp"
#include "expression/value_expression.hpp"
#include "logical_query_plan/abstract_lqp_node.hpp"
#include "logical_query_plan/lqp_column_reference.hpp"
#include "logical_query_plan/predicate_node.hpp"
#include "logical_query_plan/projection_node.hpp"
#include "resolve_type.hpp"
namespace opossum {
// Human-readable identifier of this optimizer rule.
std::string ConstantCalculationRule::name() const {
  return "Constant Calculation Rule";
}
void ConstantCalculationRule::apply_to(const std::shared_ptr<AbstractLQPNode>& node) const {
  // We can't prune Aggregate arguments, because the operator doesn't support, e.g., `MIN(1)`, whereas it supports
  // `MIN(2-1)`, since `2-1` is a column.
  if (node->type != LQPNodeType::Aggregate) {
    // TODO(anybody)
    // Top-level expressions are deliberately skipped: folding `SELECT MIN(1+2)...` into `SELECT MIN(3)...` would
    // hand the Aggregate operator a non-column input, which it cannot handle. Only arguments are pruned.
    for (const auto& expression : node->node_expressions) {
      for (auto& argument : expression->arguments) {
        _prune_expression(argument);
      }
    }
  }

  _apply_to_inputs(node);
}
void ConstantCalculationRule::_prune_expression(std::shared_ptr<AbstractExpression>& expression) const {
  // Depth first: fold the children before considering this expression itself.
  for (auto& argument : expression->arguments) {
    _prune_expression(argument);
  }

  if (expression->arguments.empty()) return;

  // Only prune a whitelisted selection of ExpressionTypes, because we can't, e.g., prune List of literals.
  switch (expression->type) {
    case ExpressionType::Predicate:
    case ExpressionType::Arithmetic:
    case ExpressionType::Logical:
      break;
    default:
      return;
  }

  // Foldable only if every argument has already been reduced to a literal value.
  for (const auto& argument : expression->arguments) {
    if (argument->type != ExpressionType::Value) return;
  }

  resolve_data_type(expression->data_type(), [&](const auto data_type_t) {
    using ExpressionDataType = typename decltype(data_type_t)::type;
    const auto result = ExpressionEvaluator{}.evaluate_expression_to_result<ExpressionDataType>(*expression);
    Assert(result->is_literal(), "Expected Literal");

    // Replace the whole subtree with a single ValueExpression holding the evaluated result.
    if (result->is_null(0)) {
      expression = std::make_shared<ValueExpression>(NullValue{});
    } else {
      expression = std::make_shared<ValueExpression>(result->value(0));
    }
  });
}
} // namespace opossum
|
// Copyright (c) 2011-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "splashscreen.h"
#include "clientversion.h"
#include "util.h"
#include <QPainter>
#undef loop /* ugh, remove this when the #define loop is gone from util.h */
#include <QApplication>
// Builds the splash pixmap by overlaying version and copyright text onto the
// bundled splash bitmap (testnet variant when -testnet is set).
SplashScreen::SplashScreen(const QPixmap &pixmap, Qt::WindowFlags f) :
    QSplashScreen(pixmap, f)
{
    // set reference point, paddings and per-line vertical offsets
    int paddingLeftCol2 = 230;
    int paddingTopCol2 = 376;
    int line1 = 0;
    int line2 = 13;
    int line3 = 26;
    int line4 = 39;
    float fontFactor = 1.0;

    // define text to place
    // (a title string was previously computed here but never drawn; that dead
    // local has been removed)
    QString versionText = QString("Version %1 ").arg(QString::fromStdString(FormatFullVersion()));
    QString copyrightText1 = QChar(0xA9)+QString(" 2009-%1 ").arg(COPYRIGHT_YEAR) + QString(tr("The Bitcoin developers"));
    QString copyrightText2 = QChar(0xA9)+QString(" 2011-%1 ").arg(COPYRIGHT_YEAR) + QString(tr("The Litecoin developers"));
    QString copyrightText3 = QChar(0xA9)+QString(" %1 ").arg(2018) + QString(tr("Referencelinecoin Developer"));
    QString font = "Arial";

    // load the bitmap for writing some text over it
    QPixmap newPixmap;
    if(GetBoolArg("-testnet")) {
        newPixmap = QPixmap(":/images/splash_testnet");
    }
    else {
        newPixmap = QPixmap(":/images/splash");
    }

    QPainter pixPaint(&newPixmap);
    pixPaint.setPen(QColor(70,70,70));

    // All four lines use the same 9pt font, so it is set only once
    // (the original code redundantly set the identical font twice).
    pixPaint.setFont(QFont(font, 9*fontFactor));
    pixPaint.drawText(paddingLeftCol2,paddingTopCol2+line4,versionText);

    // draw copyright stuff
    pixPaint.drawText(paddingLeftCol2,paddingTopCol2+line1,copyrightText1);
    pixPaint.drawText(paddingLeftCol2,paddingTopCol2+line2,copyrightText2);
    pixPaint.drawText(paddingLeftCol2,paddingTopCol2+line3,copyrightText3);

    pixPaint.end();
    this->setPixmap(newPixmap);
}
|
// SPDX-License-Identifier: BSD-3-Clause
// Copyright Contributors to the OpenColorIO Project.
#include <cmath>
#include <cstring>
#include <sstream>
#include <algorithm>
#include <OpenColorIO/OpenColorIO.h>
#include "HashUtils.h"
#include "ops/exponent/ExponentOp.h"
#include "GpuShaderUtils.h"
#include "MathUtils.h"
namespace OCIO_NAMESPACE
{
namespace DefaultValues
{
const int FLOAT_DECIMALS = 7;
}
ExponentOpData::ExponentOpData()
    :   OpData()
{
    // Default to the identity exponent: 1.0 on every RGBA channel.
    m_exp4[0] = 1.0;
    m_exp4[1] = 1.0;
    m_exp4[2] = 1.0;
    m_exp4[3] = 1.0;
}
// Copy constructor: delegates to the assignment operator.
// Note: the original guarded with `if (this != &rhs)`, but inside a
// constructor *this can never alias the source object, so the check was
// dead code and has been removed.
ExponentOpData::ExponentOpData(const ExponentOpData & rhs)
    :   OpData()
{
    *this = rhs;
}
ExponentOpData::ExponentOpData(const double * exp4)
    :   OpData()
{
    // Caller must provide at least four values (one exponent per RGBA channel).
    std::copy(exp4, exp4 + 4, m_exp4);
}
ExponentOpData & ExponentOpData::operator = (const ExponentOpData & rhs)
{
    // Self-assignment is a no-op.
    if (this == &rhs)
        return *this;

    OpData::operator=(rhs);
    for (unsigned i = 0; i < 4; ++i)
    {
        m_exp4[i] = rhs.m_exp4[i];
    }
    return *this;
}
// An exponent op is a no-op exactly when it is the identity.
bool ExponentOpData::isNoOp() const
{
    return isIdentity();
}
// Identity means every channel exponent equals 1.0 (x^1 == x).
bool ExponentOpData::isIdentity() const
{
    return IsVecEqualToOne(m_exp4, 4);
}
std::string ExponentOpData::getCacheID() const
{
    AutoMutex lock(m_mutex);

    // The cacheID is the (optional) op ID followed by the four exponents,
    // printed at a fixed precision so that equal ops produce equal IDs.
    std::ostringstream cacheIDStream;
    if (!getID().empty())
    {
        cacheIDStream << getID() << " ";
    }

    cacheIDStream.precision(DefaultValues::FLOAT_DECIMALS);
    cacheIDStream << m_exp4[0] << " " << m_exp4[1] << " "
                  << m_exp4[2] << " " << m_exp4[3] << " ";

    return cacheIDStream.str();
}
void ExponentOpData::validate() const
{
    // Intentionally empty: no constraints are enforced on the exponent values.
}
namespace
{
// CPU renderer for the exponent op: raises each channel of RGBA float
// pixels to the configured per-channel power.
class ExponentOpCPU : public OpCPU
{
public:
    ExponentOpCPU(ConstExponentOpDataRcPtr exp) : OpCPU(), m_data(exp) {}
    virtual ~ExponentOpCPU() {}

    // Processes numPixels packed RGBA float pixels from inImg into outImg.
    void apply(const void * inImg, void * outImg, long numPixels) const override;

private:
    // Shared, immutable exponent data.
    ConstExponentOpDataRcPtr m_data;
};
void ExponentOpCPU::apply(const void * inImg, void * outImg, long numPixels) const
{
    const float * src = static_cast<const float *>(inImg);
    float * dst = static_cast<float *>(outImg);

    // Convert the exponents to float once, outside the pixel loop.
    const float exp[4] = { float(m_data->m_exp4[0]),
                           float(m_data->m_exp4[1]),
                           float(m_data->m_exp4[2]),
                           float(m_data->m_exp4[3]) };

    for (long idx = 0; idx < numPixels; ++idx)
    {
        // Negative inputs are clamped to zero before the power function.
        for (int c = 0; c < 4; ++c)
        {
            dst[c] = powf(std::max(0.0f, src[c]), exp[c]);
        }
        src += 4;
        dst += 4;
    }
}
// Op wrapper around ExponentOpData: implements cloning, combination of
// consecutive exponents, and CPU/GPU evaluation hooks.
class ExponentOp : public Op
{
public:
    ExponentOp() = delete;
    ExponentOp(ExponentOp & exp) = delete;
    explicit ExponentOp(const double * exp4);
    explicit ExponentOp(ExponentOpDataRcPtr & exp);
    virtual ~ExponentOp();

    OpRcPtr clone() const override;

    std::string getInfo() const override;

    bool isSameType(ConstOpRcPtr & op) const override;
    bool isInverse(ConstOpRcPtr & op) const override;
    bool canCombineWith(ConstOpRcPtr & op) const override;
    void combineWith(OpRcPtrVec & ops, ConstOpRcPtr & secondOp) const override;

    std::string getCacheID() const override;

    ConstOpCPURcPtr getCPUOp(bool fastLogExpPow) const override;

    void extractGpuShaderInfo(GpuShaderCreatorRcPtr & shaderCreator) const override;

protected:
    // Typed access to the underlying op data (const and mutable flavors).
    ConstExponentOpDataRcPtr expData() const { return DynamicPtrCast<const ExponentOpData>(data()); }
    ExponentOpDataRcPtr expData() { return DynamicPtrCast<ExponentOpData>(data()); }
};
// Shared-pointer aliases for ExponentOp. The file already relies on C++11
// (std::make_shared, override), so `using` aliases replace the old typedefs.
using ExponentOpRcPtr = OCIO_SHARED_PTR<ExponentOp>;
using ConstExponentOpRcPtr = OCIO_SHARED_PTR<const ExponentOp>;
ExponentOp::ExponentOp(const double * exp4)
    :   Op()
{
    // Wrap the raw exponent values in a freshly allocated data object.
    data() = std::make_shared<ExponentOpData>(exp4);
}
// Shares ownership of an existing data object (no copy is made).
ExponentOp::ExponentOp(ExponentOpDataRcPtr & exp)
    :   Op()
{
    data() = exp;
}
OpRcPtr ExponentOp::clone() const
{
    // NOTE(review): only the four exponent values are copied; any format
    // metadata on the data object is not carried over -- confirm intended.
    OpRcPtr copy = std::make_shared<ExponentOp>(expData()->m_exp4);
    return copy;
}
// Out-of-line destructor; nothing to release beyond the base class.
ExponentOp::~ExponentOp()
{
}
// Short human-readable tag identifying the op type.
std::string ExponentOp::getInfo() const
{
    return "<ExponentOp>";
}
bool ExponentOp::isSameType(ConstOpRcPtr & op) const
{
    // Same type iff the other op can be downcast to an ExponentOp.
    return DynamicPtrCast<const ExponentOp>(op) != nullptr;
}
bool ExponentOp::isInverse(ConstOpRcPtr & /*op*/) const
{
    // It is simpler to handle a pair of inverses by combining them and then removing
    // the identity. So we just return false here.
    return false;
}
// Two exponent ops can always be merged into one (exponents multiply).
bool ExponentOp::canCombineWith(ConstOpRcPtr & op) const
{
    return isSameType(op);
}
void ExponentOp::combineWith(OpRcPtrVec & ops, ConstOpRcPtr & secondOp) const
{
    if (!canCombineWith(secondOp))
    {
        throw Exception("ExponentOp: canCombineWith must be checked "
                        "before calling combineWith.");
    }
    ConstExponentOpRcPtr typedRcPtr = DynamicPtrCast<const ExponentOp>(secondOp);

    // Two successive exponents multiply per channel: (x^a)^b == x^(a*b).
    double combined[4];
    for (int i = 0; i < 4; ++i)
    {
        combined[i] = expData()->m_exp4[i] * typedRcPtr->expData()->m_exp4[i];
    }

    // If the product is the identity the pair cancels out completely and
    // no replacement op needs to be appended.
    if (IsVecEqualToOne(combined, 4))
    {
        return;
    }

    auto combinedOp = std::make_shared<ExponentOp>(combined);

    // Combine metadata.
    // TODO: May want to revisit how the metadata is set.
    FormatMetadataImpl newDesc = expData()->getFormatMetadata();
    newDesc.combine(typedRcPtr->expData()->getFormatMetadata());
    combinedOp->expData()->getFormatMetadata() = newDesc;

    ops.push_back(combinedOp);
}
std::string ExponentOp::getCacheID() const
{
    // Wrap the data cacheID in an op-type tag.
    std::ostringstream cacheIDStream;
    cacheIDStream << "<ExponentOp " << expData()->getCacheID() << ">";
    return cacheIDStream.str();
}
// Returns the CPU renderer; the fast log/exp/pow hint is not used here.
ConstOpCPURcPtr ExponentOp::getCPUOp(bool /*fastLogExpPow*/) const
{
    return std::make_shared<ExponentOpCPU>(expData());
}
// Emits the GPU shader snippet equivalent to ExponentOpCPU::apply():
// clamp the pixel to >= 0, then raise each channel to its exponent.
void ExponentOp::extractGpuShaderInfo(GpuShaderCreatorRcPtr & shaderCreator) const
{
    GpuShaderText ss(shaderCreator->getLanguage());
    ss.indent();

    ss.newLine() << "";
    ss.newLine() << "// Add an Exponent processing";
    ss.newLine() << "";
    ss.newLine() << "{";
    ss.indent();

    // outColor = pow(max(outColor, 0.), exp);
    ss.newLine()
        << shaderCreator->getPixelName()
        << " = pow( "
        << "max( " << shaderCreator->getPixelName()
        << ", " << ss.float4Const(0.0f) << " )"
        << ", " << ss.float4Const(expData()->m_exp4[0], expData()->m_exp4[1],
                                  expData()->m_exp4[2], expData()->m_exp4[3]) << " );";

    ss.dedent();
    ss.newLine() << "}";

    shaderCreator->addToFunctionShaderCode(ss.string().c_str());
}
} // Anon namespace
void CreateExponentOp(OpRcPtrVec & ops,
                      const double(&vec4)[4],
                      TransformDirection direction)
{
    // Wrap the raw values and forward to the OpData-based overload.
    auto expData = std::make_shared<ExponentOpData>(vec4);
    CreateExponentOp(ops, expData, direction);
}
// Appends an ExponentOp for expData to ops. For the inverse direction the
// reciprocal exponents are used; a zero exponent cannot be inverted and
// raises an exception.
// NOTE(review): there is no default case -- for any TransformDirection value
// other than FORWARD/INVERSE the function silently appends nothing; confirm
// all enum values are covered.
void CreateExponentOp(OpRcPtrVec & ops,
                      ExponentOpDataRcPtr & expData,
                      TransformDirection direction)
{
    switch (direction)
    {
    case TRANSFORM_DIR_FORWARD:
    {
        ops.push_back(std::make_shared<ExponentOp>(expData));
        break;
    }
    case TRANSFORM_DIR_INVERSE:
    {
        // Invert by taking 1/exponent per channel.
        double values[4];
        for (int i = 0; i<4; ++i)
        {
            if (!IsScalarEqualToZero(expData->m_exp4[i]))
            {
                values[i] = 1.0 / expData->m_exp4[i];
            }
            else
            {
                throw Exception("Cannot apply ExponentOp op, Cannot apply 0.0 exponent in the inverse.");
            }
        }
        ExponentOpDataRcPtr expInv = std::make_shared<ExponentOpData>(values);
        ops.push_back(std::make_shared<ExponentOp>(expInv));
        break;
    }
    }
}
// Converts an ExponentOp back into an ExponentTransform (metadata plus the
// four exponent values) and appends it to the group.
void CreateExponentTransform(GroupTransformRcPtr & group, ConstOpRcPtr & op)
{
    auto exp = DynamicPtrCast<const ExponentOp>(op);
    if (!exp)
    {
        throw Exception("CreateExponentTransform: op has to be a ExponentOp");
    }
    auto expTransform = ExponentTransform::Create();

    // Copy the op's metadata onto the transform.
    auto expData = DynamicPtrCast<const ExponentOpData>(op->data());
    auto & formatMetadata = expTransform->getFormatMetadata();
    auto & metadata = dynamic_cast<FormatMetadataImpl &>(formatMetadata);
    metadata = expData->getFormatMetadata();

    expTransform->setValue(expData->m_exp4);

    group->appendTransform(expTransform);
}
} // namespace OCIO_NAMESPACE
|
/*
* Copyright 2002-2011, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#include "legacy_drivers.h"
#include <dirent.h>
#include <errno.h>
#include <new>
#include <stdio.h>
#include <FindDirectory.h>
#include <image.h>
#include <NodeMonitor.h>
#include <boot_device.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <find_directory_private.h>
#include <fs/devfs.h>
#include <fs/KPath.h>
#include <fs/node_monitor.h>
#include <Notifications.h>
#include <safemode.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>
#include <util/Stack.h>
#include <vfs.h>
#include "AbstractModuleDevice.h"
#include "devfs_private.h"
//#define TRACE_LEGACY_DRIVERS
#ifdef TRACE_LEGACY_DRIVERS
# define TRACE(x) dprintf x
#else
# define TRACE(x)
#endif
#define DRIVER_HASH_SIZE 16
namespace {
struct legacy_driver;
// A device published by a legacy (pre-device-manager) driver. Tracks the
// owning driver, the published path, and the driver's device_hooks table.
class LegacyDevice : public AbstractModuleDevice,
	public DoublyLinkedListLinkImpl<LegacyDevice> {
public:
							LegacyDevice(legacy_driver* driver,
								const char* path, device_hooks* hooks);
	virtual					~LegacyDevice();

			status_t		InitCheck() const;

	virtual	status_t		InitDevice();
	virtual	void			UninitDevice();

	virtual void			Removed();

			void			SetHooks(device_hooks* hooks);

			legacy_driver*	Driver() const { return fDriver; }
			const char*		Path() const { return fPath; }
			device_hooks*	Hooks() const { return fHooks; }

	virtual	status_t		Open(const char* path, int openMode,
								void** _cookie);
	virtual	status_t		Select(void* cookie, uint8 event, selectsync* sync);

			// Republished flag is used by republish_driver() to mark which
			// devices survived a rescan of the driver's published paths.
			bool			Republished() const { return fRepublished; }
			void			SetRepublished(bool republished)
								{ fRepublished = republished; }

			void			SetRemovedFromParent(bool removed)
								{ fRemovedFromParent = removed; }

private:
	legacy_driver*			fDriver;
	const char*				fPath;
	device_hooks*			fHooks;
	bool					fRepublished;
	bool					fRemovedFromParent;
};

typedef DoublyLinkedList<LegacyDevice> DeviceList;
// Bookkeeping for one legacy driver add-on. Doubles as a hash-table node
// (next) keyed by leaf name; see DriverHash below.
struct legacy_driver {
	legacy_driver*	next;

	const char*		path;			// full path of the driver binary
	const char*		name;			// leaf name (points into path)
	dev_t			device;			// device/node of the binary, for watching
	ino_t			node;
	timespec		last_modified;
	image_id		image;			// loaded add-on, or an error code
	uint32			devices_used;	// open count across published devices
	bool			binary_updated;	// binary changed; reload when unused
	int32			priority;		// install location priority, see get_priority()

	DeviceList		devices;

	// driver image information
	int32			api_version;
	device_hooks*	(*find_device)(const char *);
	const char**	(*publish_devices)(void);
	status_t		(*uninit_driver)(void);
	status_t		(*uninit_hardware)(void);
};
// Kinds of work queued for the driver-event worker thread.
enum driver_event_type {
	kAddDriver,
	kRemoveDriver,
	kAddWatcher,
	kRemoveWatcher
};

// One queued event; carries either a driver path or a device/node pair,
// depending on the event type.
struct driver_event : DoublyLinkedListLinkImpl<driver_event> {
	driver_event(driver_event_type _type) : type(_type) {}

	struct ref {
		dev_t			device;
		ino_t			node;
	};

	driver_event_type	type;
	union {
		char			path[B_PATH_NAME_LENGTH];	// kAddDriver/kRemoveDriver
		ref				node;						// kAddWatcher/kRemoveWatcher
	};
};

typedef DoublyLinkedList<driver_event> DriverEventList;
// Candidate driver found while scanning the driver directories.
struct driver_entry : DoublyLinkedListLinkImpl<driver_entry> {
	char*			path;
	dev_t			device;
	ino_t			node;
	int32			busses;
};

typedef DoublyLinkedList<driver_entry> DriverEntryList;

// NOTE(review): intentionally empty here -- only used as a linked-list
// node type; confirm no fields are expected from the original source.
struct node_entry : DoublyLinkedListLinkImpl<node_entry> {
};

typedef DoublyLinkedList<node_entry> NodeList;
// Hash-table entry for a watched directory's inode number.
struct directory_node_entry {
	directory_node_entry*	hash_link;
	ino_t					node;
};

// BOpenHashTable policy: keys directory_node_entry objects by inode number.
struct DirectoryNodeHashDefinition {
	typedef ino_t* KeyType;
	typedef directory_node_entry ValueType;

	size_t HashKey(ino_t* key) const
		{ return _Hash(*key); }
	size_t Hash(directory_node_entry* entry) const
		{ return _Hash(entry->node); }
	bool Compare(ino_t* key, directory_node_entry* entry) const
		{ return *key == entry->node; }
	directory_node_entry*&
		GetLink(directory_node_entry* entry) const
		{ return entry->hash_link; }

	// Folds the 64-bit inode into a 32-bit hash value.
	uint32 _Hash(ino_t node) const
		{ return (uint32)(node >> 32) + (uint32)node; }
};

typedef BOpenHashTable<DirectoryNodeHashDefinition> DirectoryNodeHash;
// Iterates over the entries of one or more directories, optionally
// descending into subdirectories (via the internal path stack).
class DirectoryIterator {
public:
						DirectoryIterator(const char *path,
							const char *subPath = NULL, bool recursive = false);
						~DirectoryIterator();

			void		SetTo(const char *path, const char *subPath = NULL,
							bool recursive = false);

			status_t	GetNext(KPath &path, struct stat &stat);
			const char*	CurrentName() const { return fCurrentName; }

			void		Unset();
			void		AddPath(const char *path, const char *subPath = NULL);

private:
	Stack<KPath*>		fPaths;			// directories still to visit
	bool				fRecursive;
	DIR*				fDirectory;		// currently open directory
	KPath*				fBasePath;
	const char*			fCurrentName;
};
// Listens for node-monitor events on the watched driver directories.
class DirectoryWatcher : public NotificationListener {
public:
						DirectoryWatcher();
	virtual				~DirectoryWatcher();

	virtual void		EventOccurred(NotificationService& service,
							const KMessage* event);
};

// Listens for stat changes on individual driver binaries so that updated
// drivers can be reloaded.
class DriverWatcher : public NotificationListener {
public:
						DriverWatcher();
	virtual				~DriverWatcher();

	virtual void		EventOccurred(NotificationService& service,
							const KMessage* event);
};
// BOpenHashTable policy: keys legacy_driver objects by their leaf name.
struct DriverHash {
	typedef const char* KeyType;
	typedef	legacy_driver ValueType;

	size_t HashKey(KeyType key) const
	{
		return hash_hash_string(key);
	}

	size_t Hash(ValueType* driver) const
	{
		return HashKey(driver->name);
	}

	bool Compare(KeyType key, ValueType* driver) const
	{
		return strcmp(driver->name, key) == 0;
	}

	ValueType*& GetLink(ValueType* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<DriverHash> DriverTable;
} // unnamed namespace
// Forward declarations (republish_driver() and add_driver() need them).
static status_t unload_driver(legacy_driver *driver);
static status_t load_driver(legacy_driver *driver);

// Worker/watcher state for driver (re)loading.
static DriverWatcher sDriverWatcher;
static sem_id sDriverWatcherEventSemaphore;
	// released to wake the driver-event worker thread
static DriverEventList sDriverEvents;
static mutex sDriverEventsLock = MUTEX_INITIALIZER("driver events");
	// inner lock, protects the sDriverEvents list only
static DirectoryWatcher sDirectoryWatcher;
static DirectoryNodeHash sDirectoryNodeHash;
static recursive_lock sLock;
static bool sWatching;

// All known legacy drivers, keyed by leaf name.
static DriverTable* sDriverHash;
// #pragma mark - driver private
/*!	Collects all published devices of a driver, compares them to what the
	driver would publish now, and then publishes/unpublishes the devices
	as needed.
	If the driver does not publish any devices anymore, it is unloaded.
*/
static status_t
republish_driver(legacy_driver* driver)
{
	if (driver->image < 0) {
		// The driver is not yet loaded - go through the normal load procedure
		return load_driver(driver);
	}

	// mark all devices
	DeviceList::Iterator iterator = driver->devices.GetIterator();
	while (LegacyDevice* device = iterator.Next()) {
		device->SetRepublished(false);
	}

	// now ask the driver for it's currently published devices
	const char** devicePaths = driver->publish_devices();

	int32 exported = 0;
	for (; devicePaths != NULL && devicePaths[0]; devicePaths++) {
		LegacyDevice* device;

		iterator = driver->devices.GetIterator();
		while ((device = iterator.Next()) != NULL) {
			if (!strncmp(device->Path(), devicePaths[0], B_PATH_NAME_LENGTH)) {
				// mark device as republished
				device->SetRepublished(true);
				exported++;
				break;
			}
		}

		device_hooks* hooks = driver->find_device(devicePaths[0]);
		if (hooks == NULL)
			continue;

		// device is NULL here exactly when the inner loop ran off the end,
		// i.e. the path was not published before
		if (device != NULL) {
			// update hooks
			device->SetHooks(hooks);
			continue;
		}

		// the device was not present before -> publish it now
		TRACE(("devfs: publishing new device \"%s\"\n", devicePaths[0]));
		device = new(std::nothrow) LegacyDevice(driver, devicePaths[0], hooks);
		if (device != NULL && device->InitCheck() == B_OK
			&& devfs_publish_device(devicePaths[0], device) == B_OK) {
			driver->devices.Add(device);
			exported++;
		} else
			delete device;
	}

	// remove all devices that weren't republished
	iterator = driver->devices.GetIterator();
	while (LegacyDevice* device = iterator.Next()) {
		if (device->Republished())
			continue;

		TRACE(("devfs: unpublishing no more present \"%s\"\n", device->Path()));
		iterator.Remove();
		device->SetRemovedFromParent(true);
		devfs_unpublish_device(device, true);
	}

	if (exported == 0 && driver->devices_used == 0) {
		TRACE(("devfs: driver \"%s\" does not publish any more nodes and is "
			"unloaded\n", driver->path));
		unload_driver(driver);
	}

	return B_OK;
}
/*!	Loads the driver's add-on image (unless it is already loaded), verifies
	the mandatory exported symbols, runs init_hardware()/init_driver(), and
	finally publishes the driver's devices via republish_driver().
	On failure, an image loaded by this call is unloaded again and
	driver->image is set to the error code.
*/
static status_t
load_driver(legacy_driver *driver)
{
	status_t (*init_hardware)(void);
	status_t (*init_driver)(void);
	status_t status;

	driver->binary_updated = false;

	// load the module
	image_id image = driver->image;
	if (image < 0) {
		image = load_kernel_add_on(driver->path);
		if (image < 0)
			return image;
	}

	// For a valid device driver the following exports are required

	int32 *apiVersion;
	if (get_image_symbol(image, "api_version", B_SYMBOL_TYPE_DATA,
			(void **)&apiVersion) == B_OK) {
#if B_CUR_DRIVER_API_VERSION != 2
		// just in case someone decides to bump up the api version
#error Add checks here for new vs old api version!
#endif
		if (*apiVersion > B_CUR_DRIVER_API_VERSION) {
			dprintf("devfs: \"%s\" api_version %" B_PRId32 " not handled\n",
				driver->name, *apiVersion);
			status = B_BAD_VALUE;
			goto error1;
		}
		if (*apiVersion < 1) {
			dprintf("devfs: \"%s\" api_version invalid\n", driver->name);
			status = B_BAD_VALUE;
			goto error1;
		}

		driver->api_version = *apiVersion;
	} else
		dprintf("devfs: \"%s\" api_version missing\n", driver->name);
		// a missing api_version is tolerated; the default set in
		// add_driver() remains in effect

	if (get_image_symbol(image, "publish_devices", B_SYMBOL_TYPE_TEXT,
			(void **)&driver->publish_devices) != B_OK
		|| get_image_symbol(image, "find_device", B_SYMBOL_TYPE_TEXT,
			(void **)&driver->find_device) != B_OK) {
		dprintf("devfs: \"%s\" mandatory driver symbol(s) missing!\n",
			driver->name);
		status = B_BAD_VALUE;
		goto error1;
	}

	// Init the driver

	if (get_image_symbol(image, "init_hardware", B_SYMBOL_TYPE_TEXT,
			(void **)&init_hardware) == B_OK
		&& (status = init_hardware()) != B_OK) {
		TRACE(("%s: init_hardware() failed: %s\n", driver->name,
			strerror(status)));
		status = ENXIO;
		goto error1;
	}

	if (get_image_symbol(image, "init_driver", B_SYMBOL_TYPE_TEXT,
			(void **)&init_driver) == B_OK
		&& (status = init_driver()) != B_OK) {
		TRACE(("%s: init_driver() failed: %s\n", driver->name,
			strerror(status)));
		status = ENXIO;
		goto error2;
	}

	// resolve and cache those for the driver unload code
	if (get_image_symbol(image, "uninit_driver", B_SYMBOL_TYPE_TEXT,
			(void **)&driver->uninit_driver) != B_OK)
		driver->uninit_driver = NULL;
	if (get_image_symbol(image, "uninit_hardware", B_SYMBOL_TYPE_TEXT,
			(void **)&driver->uninit_hardware) != B_OK)
		driver->uninit_hardware = NULL;

	// The driver has successfully been initialized, now we can
	// finally publish its device entries
	driver->image = image;
	return republish_driver(driver);

error2:
	if (driver->uninit_hardware)
		driver->uninit_hardware();

error1:
	// only unload the image if it was loaded by this call
	if (driver->image < 0) {
		unload_kernel_add_on(image);
		driver->image = status;
	}

	return status;
}
/*!	Runs the driver's uninit hooks, unloads its add-on image, and resets all
	cached image state. Does not touch the published devices.
*/
static status_t
unload_driver(legacy_driver *driver)
{
	if (driver->image < 0) {
		// driver is not currently loaded
		return B_NO_INIT;
	}

	if (driver->uninit_driver)
		driver->uninit_driver();

	if (driver->uninit_hardware)
		driver->uninit_hardware();

	unload_kernel_add_on(driver->image);
	driver->image = -1;
	driver->binary_updated = false;
	driver->find_device = NULL;
	driver->publish_devices = NULL;
	driver->uninit_driver = NULL;
	driver->uninit_hardware = NULL;

	return B_OK;
}
/*! Unpublishes all devices belonging to the \a driver. */
static void
unpublish_driver(legacy_driver *driver)
{
	LegacyDevice* device = driver->devices.RemoveHead();
	for (; device != NULL; device = driver->devices.RemoveHead()) {
		device->SetRemovedFromParent(true);
		devfs_unpublish_device(device, true);
	}
}
/*!	Queues an add/remove-watcher event for the given device/node pair with
	the driver-event worker thread.
*/
static void
change_driver_watcher(dev_t device, ino_t node, bool add)
{
	// -1 stands for "no real device" (e.g. boot modules); nothing to watch.
	if (device == -1)
		return;

	// On allocation failure the event is silently dropped -- the watcher
	// will simply not be (un)installed.
	driver_event* event = new (std::nothrow) driver_event(
		add ? kAddWatcher : kRemoveWatcher);
	if (event == NULL)
		return;

	event->node.device = device;
	event->node.node = node;

	MutexLocker _(sDriverEventsLock);
	sDriverEvents.Add(event);

	// Wake the worker only on the empty -> non-empty transition.
	if (sDriverEvents.Count() == 1)
		release_sem(sDriverWatcherEventSemaphore);
}
/*!	Returns the priority of the install location \a path resides in: the
	index into whichPath[] whose directory prefixes \a path, or -1 if none
	matches (higher index = higher priority location).
*/
static int32
get_priority(const char* path)
{
	// TODO: would it be better to initialize a static structure here
	// using find_directory()?
	const directory_which whichPath[] = {
		B_BEOS_DIRECTORY,
		B_SYSTEM_NONPACKAGED_DIRECTORY,
		B_USER_DIRECTORY
	};
	KPath pathBuffer;

	for (uint32 index = 0; index < sizeof(whichPath) / sizeof(whichPath[0]);
			index++) {
		if (__find_directory(whichPath[index], gBootDevice, false,
				pathBuffer.LockBuffer(), pathBuffer.BufferSize()) == B_OK) {
			pathBuffer.UnlockBuffer();

			// NOTE(review): the comparison length is the buffer size, not
			// the directory's string length -- confirm this prefix match
			// behaves as intended for paths longer than the directory.
			if (!strncmp(pathBuffer.Path(), path, pathBuffer.BufferSize()))
				return index;
		} else
			pathBuffer.UnlockBuffer();
	}

	return -1;
}
/*!	Returns the leaf (file name) portion of \a path -- everything after the
	last '/' -- or the whole string if it contains no slash at all.
*/
static const char *
get_leaf(const char *path)
{
	const char *slash = strrchr(path, '/');
	return slash != NULL ? slash + 1 : path;
}
/*!	Looks up the driver whose binary lives on the given device/node pair.
	Linear scan: the driver table is keyed by name, not by node.
*/
static legacy_driver *
find_driver(dev_t device, ino_t node)
{
	DriverTable::Iterator iterator(sDriverHash);
	while (iterator.HasNext()) {
		legacy_driver *driver = iterator.Next();
		if (driver->device != device || driver->node != node)
			continue;

		return driver;
	}

	return NULL;
}
/*!	Registers the driver named by \a path (or an already loaded boot
	\a image) in sDriverHash and attempts to load it. If a driver with the
	same leaf name is already known, only its path/priority bookkeeping is
	updated and no new entry is created.
*/
static status_t
add_driver(const char *path, image_id image)
{
	// Check if we already know this driver
	struct stat stat;
	if (image >= 0) {
		// The image ID should be a small number and hopefully the boot FS
		// doesn't use small negative values -- if it is inode based, we should
		// be relatively safe.
		stat.st_dev = -1;
		stat.st_ino = -1;
	} else {
		if (::stat(path, &stat) != 0)
			return errno;
	}

	int32 priority = get_priority(path);

	RecursiveLocker _(sLock);

	legacy_driver *driver = sDriverHash->Lookup(get_leaf(path));
	if (driver != NULL) {
		// we know this driver
		if (strcmp(driver->path, path) != 0) {
			// TODO: do properly, but for now we just update the path if it
			// isn't the same anymore so rescanning of drivers will work in
			// case this driver was loaded so early that it has a boot module
			// path and not a proper driver path
			free((char*)driver->path);
			driver->path = strdup(path);
			driver->name = get_leaf(driver->path);
			driver->binary_updated = true;
		}

		// TODO: check if this driver is a different one and has precendence
		// (ie. common supersedes system).
		//dprintf("new driver has priority %ld, old %ld\n", priority, driver->priority);
		if (priority >= driver->priority) {
			driver->binary_updated = true;
			return B_OK;
		}

		// TODO: test for changes here and/or via node monitoring and reload
		// the driver if necessary
		if (driver->image < B_OK)
			return driver->image;

		return B_OK;
	}

	// we don't know this driver, create a new entry for it
	driver = (legacy_driver *)malloc(sizeof(legacy_driver));
	if (driver == NULL)
		return B_NO_MEMORY;

	driver->path = strdup(path);
	if (driver->path == NULL) {
		free(driver);
		return B_NO_MEMORY;
	}

	driver->name = get_leaf(driver->path);
	driver->device = stat.st_dev;
	driver->node = stat.st_ino;
	driver->image = image;
	driver->last_modified = stat.st_mtim;
	driver->devices_used = 0;
	driver->binary_updated = false;
	driver->priority = priority;
	driver->api_version = 1;

	driver->find_device = NULL;
	driver->publish_devices = NULL;
	driver->uninit_driver = NULL;
	driver->uninit_hardware = NULL;
	new(&driver->devices) DeviceList;
		// placement-new: the DeviceList member of the malloc()ed struct
		// needs its constructor run explicitly

	sDriverHash->Insert(driver);
	if (stat.st_dev > 0)
		change_driver_watcher(stat.st_dev, stat.st_ino, true);

	// Even if loading the driver fails - its entry will stay with us
	// so that we don't have to go through it again
	return load_driver(driver);
}
/*!	This is no longer part of the public kernel API, so we just export the
	symbol
*/
extern "C" status_t load_driver_symbols(const char *driverName);
// Kept only for binary compatibility with old callers; intentionally a no-op.
status_t
load_driver_symbols(const char *driverName)
{
	// This is done globally for the whole kernel via the settings file.
	// We don't have to do anything here.

	return B_OK;
}
/*!	Unloads and re-loads an updated driver. If the binary moved to a new
	device/node, the node watcher is moved along with it. On load failure
	the driver's devices are unpublished.
*/
static status_t
reload_driver(legacy_driver *driver)
{
	dprintf("devfs: reload driver \"%s\" (%" B_PRIdDEV ", %" B_PRIdINO ")\n",
		driver->name, driver->device, driver->node);

	unload_driver(driver);

	struct stat stat;
	if (::stat(driver->path, &stat) == 0
		&& (stat.st_dev != driver->device || stat.st_ino != driver->node)) {
		// The driver file has been changed, so we need to update its listener
		change_driver_watcher(driver->device, driver->node, false);

		driver->device = stat.st_dev;
		driver->node = stat.st_ino;
		change_driver_watcher(driver->device, driver->node, true);
	}

	status_t status = load_driver(driver);
	if (status != B_OK)
		unpublish_driver(driver);

	return status;
}
/*!	Worker thread loop: waits on sDriverWatcherEventSemaphore, drains the
	sDriverEvents queue (add/remove drivers, install/remove node watchers),
	and afterwards reloads any updated drivers that are not in use.
	Never returns.
*/
static status_t handle_driver_events(void *)
{
	while (true) {
		acquire_sem(sDriverWatcherEventSemaphore);

		// Process pending events

		while(true) {
			// Pop one event under the inner lock, then release the lock
			// before handling it.
			MutexLocker eventLocker(sDriverEventsLock);

			driver_event* event = sDriverEvents.RemoveHead();
			if (event == NULL)
				break;

			eventLocker.Unlock();
			TRACE(("driver event %p, type %d\n", event, event->type));

			switch (event->type) {
				case kAddDriver:
				{
					// Add new drivers
					RecursiveLocker locker(sLock);
					TRACE(("  add driver %p\n", event->path));

					legacy_driver* driver = sDriverHash->Lookup(
						get_leaf(event->path));
					if (driver == NULL)
						legacy_driver_add(event->path);
					else if (get_priority(event->path) >= driver->priority)
						driver->binary_updated = true;
					break;
				}

				case kRemoveDriver:
				{
					// Mark removed drivers as updated
					RecursiveLocker locker(sLock);
					TRACE(("  remove driver %p\n", event->path));

					legacy_driver* driver = sDriverHash->Lookup(
						get_leaf(event->path));
					if (driver != NULL
						&& get_priority(event->path) >= driver->priority)
						driver->binary_updated = true;
					break;
				}

				case kAddWatcher:
					TRACE(("  add watcher %ld:%lld\n", event->node.device,
						event->node.node));
					add_node_listener(event->node.device, event->node.node,
						B_WATCH_STAT | B_WATCH_NAME, sDriverWatcher);
					break;

				case kRemoveWatcher:
					TRACE(("  remove watcher %ld:%lld\n", event->node.device,
						event->node.node));
					remove_node_listener(event->node.device, event->node.node,
						sDriverWatcher);
					break;
			}

			delete event;
		}

		// Reload updated drivers (only those without open devices)

		RecursiveLocker locker(sLock);

		DriverTable::Iterator iterator(sDriverHash);
		while (iterator.HasNext()) {
			legacy_driver *driver = iterator.Next();

			if (!driver->binary_updated || driver->devices_used != 0)
				continue;

			// try to reload the driver
			reload_driver(driver);
		}
	}
}
// #pragma mark - DriverWatcher
// Trivial construction/destruction; all state lives in module globals.
DriverWatcher::DriverWatcher()
{
}

DriverWatcher::~DriverWatcher()
{
}
// Reacts to modification-time changes of a watched driver binary: marks the
// driver as updated and, if it is currently unused, wakes the worker thread
// so that it gets reloaded.
void
DriverWatcher::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 opcode = event->GetInt32("opcode", -1);
	if (opcode != B_STAT_CHANGED
		|| (event->GetInt32("fields", 0) & B_STAT_MODIFICATION_TIME) == 0)
		return;

	RecursiveLocker locker(sLock);

	legacy_driver* driver = find_driver(event->GetInt32("device", -1),
		event->GetInt64("node", 0));
	if (driver == NULL)
		return;

	driver->binary_updated = true;

	if (driver->devices_used == 0) {
		// trigger a reload of the driver
		release_sem(sDriverWatcherEventSemaphore);
	} else {
		// driver is in use right now
		dprintf("devfs: changed driver \"%s\" is still in use\n", driver->name);
	}
}
// Kernel-debugger helper: prints all fields of one legacy_driver.
static void
dump_driver(legacy_driver* driver)
{
	kprintf("DEVFS DRIVER: %p\n", driver);
	kprintf(" name:           %s\n", driver->name);
	kprintf(" path:           %s\n", driver->path);
	kprintf(" image:          %" B_PRId32 "\n", driver->image);
	kprintf(" device:         %" B_PRIdDEV "\n", driver->device);
	kprintf(" node:           %" B_PRIdINO "\n", driver->node);
	kprintf(" last modified:  %" B_PRIdTIME ".%ld\n", driver->last_modified.tv_sec,
		driver->last_modified.tv_nsec);
	kprintf(" devs used:      %" B_PRIu32 "\n", driver->devices_used);
	kprintf(" devs published: %" B_PRId32 "\n", driver->devices.Count());
	kprintf(" binary updated: %d\n", driver->binary_updated);
	kprintf(" priority:       %" B_PRId32 "\n", driver->priority);
	kprintf(" api version:    %" B_PRId32 "\n", driver->api_version);
	kprintf(" hooks:          find_device %p, publish_devices %p\n"
		"                 uninit_driver %p, uninit_hardware %p\n",
		driver->find_device, driver->publish_devices, driver->uninit_driver,
		driver->uninit_hardware);
}
// Kernel-debugger command "legacy_device": dumps a LegacyDevice and its
// driver. The argument is a raw address supplied by the debugger user;
// NOTE(review): it is dereferenced without validation -- acceptable only
// because this runs in the kernel debugger.
static int
dump_device(int argc, char** argv)
{
	if (argc < 2 || !strcmp(argv[1], "--help")) {
		kprintf("usage: %s [device]\n", argv[0]);
		return 0;
	}

	LegacyDevice* device = (LegacyDevice*)parse_expression(argv[1]);

	kprintf("LEGACY DEVICE: %p\n", device);
	kprintf(" path:     %s\n", device->Path());
	kprintf(" hooks:    %p\n", device->Hooks());
	device_hooks* hooks = device->Hooks();
	kprintf("  close()     %p\n", hooks->close);
	kprintf("  free()      %p\n", hooks->free);
	kprintf("  control()   %p\n", hooks->control);
	kprintf("  read()      %p\n", hooks->read);
	kprintf("  write()     %p\n", hooks->write);
	kprintf("  select()    %p\n", hooks->select);
	kprintf("  deselect()  %p\n", hooks->deselect);
	dump_driver(device->Driver());

	return 0;
}
/*! Kernel debugger command: without arguments, lists all drivers in
    sDriverHash in a table; with a name argument, dumps that driver's
    full state via dump_driver(legacy_driver*).
*/
static int
dump_driver(int argc, char** argv)
{
    if (argc < 2) {
        // print list of all drivers
        kprintf("address image used publ. pri name\n");
        DriverTable::Iterator iterator(sDriverHash);
        while (iterator.HasNext()) {
            legacy_driver* driver = iterator.Next();
            // 'U' marks drivers whose on-disk binary changed since loading
            kprintf("%p %5" B_PRId32 " %3" B_PRIu32 " %5" B_PRId32 " %c "
                "%3" B_PRId32 " %s\n", driver,
                driver->image < 0 ? -1 : driver->image,
                driver->devices_used, driver->devices.Count(),
                driver->binary_updated ? 'U' : ' ', driver->priority,
                driver->name);
        }
        return 0;
    }

    if (!strcmp(argv[1], "--help")) {
        kprintf("usage: %s [name]\n", argv[0]);
        return 0;
    }

    legacy_driver* driver = sDriverHash->Lookup(argv[1]);
    if (driver == NULL) {
        kprintf("Driver named \"%s\" not found.\n", argv[1]);
        return 0;
    }

    dump_driver(driver);
    return 0;
}
// #pragma mark -
/*! Constructs the iterator and immediately targets it at \a path (or, when
    \a path is NULL, the standard add-on directories — see SetTo()).
*/
DirectoryIterator::DirectoryIterator(const char* path, const char* subPath,
    bool recursive)
    :
    fDirectory(NULL),
    fBasePath(NULL),
    fCurrentName(NULL)
{
    SetTo(path, subPath, recursive);
}
// Releases the open directory handle and all queued paths (see Unset()).
DirectoryIterator::~DirectoryIterator()
{
    Unset();
}
/*! Re-targets the iterator. If \a path is NULL, the four standard add-on
    directories are queued (user non-packaged, user, system non-packaged,
    system), each with "kernel"/\a subPath appended; otherwise only
    \a path/\a subPath is queued. Any previous state is discarded first.
*/
void
DirectoryIterator::SetTo(const char* path, const char* subPath, bool recursive)
{
    Unset();
    fRecursive = recursive;

    if (path == NULL) {
        // add default paths
        const directory_which whichPath[] = {
            B_USER_NONPACKAGED_ADDONS_DIRECTORY,
            B_USER_ADDONS_DIRECTORY,
            B_SYSTEM_NONPACKAGED_ADDONS_DIRECTORY,
            B_BEOS_ADDONS_DIRECTORY
        };
        KPath pathBuffer;

        bool disableUserAddOns = get_safemode_boolean(
            B_SAFEMODE_DISABLE_USER_ADD_ONS, false);

        for (uint32 i = 0; i < sizeof(whichPath) / sizeof(whichPath[0]); i++) {
            // The first two entries are the user directories — skip them when
            // safe mode disabled user add-ons.
            if (i < 2 && disableUserAddOns)
                continue;

            if (__find_directory(whichPath[i], gBootDevice, true,
                    pathBuffer.LockBuffer(), pathBuffer.BufferSize()) == B_OK) {
                pathBuffer.UnlockBuffer();
                pathBuffer.Append("kernel");
                AddPath(pathBuffer.Path(), subPath);
            } else
                pathBuffer.UnlockBuffer();
        }
    } else
        AddPath(path, subPath);
}
/*! Returns the next regular entry in the iterated directories in \a path,
    with its stat info in \a stat. Entries that cannot be stat()ed are
    skipped. With fRecursive set, subdirectories are pushed onto the path
    stack and descended into later (they are not themselves returned).
    Returns B_ENTRY_NOT_FOUND when the iteration is exhausted, B_NO_MEMORY
    on allocation failure.
*/
status_t
DirectoryIterator::GetNext(KPath& path, struct stat& stat)
{
next_directory:
    while (fDirectory == NULL) {
        delete fBasePath;
        fBasePath = NULL;
        if (!fPaths.Pop(&fBasePath))
            return B_ENTRY_NOT_FOUND;

        // If opendir() fails, the loop simply tries the next queued path.
        fDirectory = opendir(fBasePath->Path());
    }

next_entry:
    struct dirent* dirent = readdir(fDirectory);
    if (dirent == NULL) {
        // get over to next directory on the stack
        closedir(fDirectory);
        fDirectory = NULL;
        goto next_directory;
    }

    if (!strcmp(dirent->d_name, "..") || !strcmp(dirent->d_name, "."))
        goto next_entry;

    fCurrentName = dirent->d_name;

    path.SetTo(fBasePath->Path());
    path.Append(fCurrentName);

    if (::stat(path.Path(), &stat) != 0)
        goto next_entry;

    if (S_ISDIR(stat.st_mode) && fRecursive) {
        KPath* nextPath = new(nothrow) KPath(path);
        if (nextPath == NULL)
            return B_NO_MEMORY;
        if (fPaths.Push(nextPath) != B_OK) {
            // Fix: the freshly allocated path would leak if the push fails.
            delete nextPath;
            return B_NO_MEMORY;
        }

        goto next_entry;
    }

    return B_OK;
}
void
DirectoryIterator::Unset()
{
if (fDirectory != NULL) {
closedir(fDirectory);
fDirectory = NULL;
}
delete fBasePath;
fBasePath = NULL;
KPath *path;
while (fPaths.Pop(&path))
delete path;
}
void
DirectoryIterator::AddPath(const char* basePath, const char* subPath)
{
KPath *path = new(nothrow) KPath(basePath);
if (!path)
panic("out of memory");
if (subPath != NULL)
path->Append(subPath);
fPaths.Push(path);
}
// #pragma mark -
// Nothing to initialize; state lives in the notification framework.
DirectoryWatcher::DirectoryWatcher()
{
}
// No owned resources to release.
DirectoryWatcher::~DirectoryWatcher()
{
}
/*! Node-monitor callback for the watched driver directories. Translates
    entry created/removed/moved events into kAddDriver/kRemoveDriver
    driver_events and wakes the driver-events thread. A move is folded into
    a create or remove depending on which side of the move lies inside a
    watched directory.
*/
void
DirectoryWatcher::EventOccurred(NotificationService& service,
    const KMessage* event)
{
    int32 opcode = event->GetInt32("opcode", -1);
    dev_t device = event->GetInt32("device", -1);
    ino_t directory = event->GetInt64("directory", -1);
    const char *name = event->GetString("name", NULL);

    if (opcode == B_ENTRY_MOVED) {
        // Determine whether it's a move within, out of, or into one
        // of our watched directories.
        ino_t from = event->GetInt64("from directory", -1);
        ino_t to = event->GetInt64("to directory", -1);

        if (sDirectoryNodeHash.Lookup(&from) == NULL) {
            // source is outside our watched set -> treat as a new entry
            directory = to;
            opcode = B_ENTRY_CREATED;
        } else if (sDirectoryNodeHash.Lookup(&to) == NULL) {
            // destination is outside -> treat as a removal
            directory = from;
            opcode = B_ENTRY_REMOVED;
        } else {
            // Move within, don't do anything for now
            // TODO: adjust driver priority if necessary
            return;
        }
    }

    KPath path(B_PATH_NAME_LENGTH + 1);
    if (path.InitCheck() != B_OK || vfs_entry_ref_to_path(device, directory,
            name, true, path.LockBuffer(), path.BufferSize()) != B_OK)
        return;

    path.UnlockBuffer();

    dprintf("driver \"%s\" %s\n", path.Leaf(),
        opcode == B_ENTRY_CREATED ? "added" : "removed");

    driver_event* driverEvent = new(std::nothrow) driver_event(
        opcode == B_ENTRY_CREATED ? kAddDriver : kRemoveDriver);
    if (driverEvent == NULL)
        return;

    strlcpy(driverEvent->path, path.Path(), sizeof(driverEvent->path));

    MutexLocker _(sDriverEventsLock);
    sDriverEvents.Add(driverEvent);
    // Only wake the worker when the queue transitions from empty; it drains
    // the whole list per wakeup.
    if (sDriverEvents.Count() == 1) {
        release_sem(sDriverWatcherEventSemaphore);
    }
}
// #pragma mark -
/*! Starts node-monitoring \a base/\a sub for directory changes and records
    the directory's inode in sDirectoryNodeHash so move events can later be
    classified. Silently does nothing if the directory does not exist;
    allocation failure for the hash entry is tolerated (best effort — only
    move classification degrades).
*/
static void
start_watching(const char *base, const char *sub)
{
    KPath path(base);
    path.Append(sub);

    // TODO: create missing directories?
    struct stat stat;
    if (::stat(path.Path(), &stat) != 0)
        return;

    add_node_listener(stat.st_dev, stat.st_ino, B_WATCH_DIRECTORY,
        sDirectoryWatcher);

    directory_node_entry *entry = new(std::nothrow) directory_node_entry;
    if (entry != NULL) {
        entry->node = stat.st_ino;
        sDirectoryNodeHash.Insert(entry);
    }
}
/*! Allocates and fills a driver_entry for the driver binary at \a path on
    the given \a device/\a node. Returns NULL if either allocation fails;
    never leaves a half-initialized entry behind.
*/
static struct driver_entry*
new_driver_entry(const char* path, dev_t device, ino_t node)
{
    driver_entry* entry = (driver_entry*)malloc(sizeof(driver_entry));
    if (entry == NULL)
        return NULL;

    char* pathCopy = strdup(path);
    if (pathCopy == NULL) {
        free(entry);
        return NULL;
    }

    entry->path = pathCopy;
    entry->device = device;
    entry->node = node;
    entry->busses = 0;
    return entry;
}
/*! Iterates over the given list and tries to load all drivers in that list.
    The list is emptied and freed during the traversal: each entry's path
    string and the entry itself are free()d whether or not loading succeeds.
    Drivers are loaded only long enough for legacy_driver_add() to register
    them; the image is unloaded again right away (add_driver() reloads on
    demand).
*/
static status_t
try_drivers(DriverEntryList& list)
{
    while (true) {
        driver_entry* entry = list.RemoveHead();
        if (entry == NULL)
            break;

        image_id image = load_kernel_add_on(entry->path);
        if (image >= 0) {
            // check if it's an old-style driver
            if (legacy_driver_add(entry->path) == B_OK) {
                // we have a driver
                dprintf("loaded driver %s\n", entry->path);
            }

            unload_kernel_add_on(image);
        }

        free(entry->path);
        free(entry);
    }

    return B_OK;
}
/*! Scans the add-on directories for potential drivers of the given \a type
    (e.g. "drivers/dev/audio"), registers node monitors on subdirectories,
    publishes the corresponding devfs directories, and hands every candidate
    binary to try_drivers(). Returns B_OK when there is nothing to do (no
    boot device yet, or no candidates), B_NO_MEMORY on allocation failure.
*/
static status_t
probe_for_drivers(const char *type)
{
    TRACE(("probe_for_drivers(type = %s)\n", type));

    if (gBootDevice < 0)
        return B_OK;

    DriverEntryList drivers;

    // build list of potential drivers for that type
    DirectoryIterator iterator(NULL, type, false);
    struct stat stat;
    KPath path;

    while (iterator.GetNext(path, stat) == B_OK) {
        if (S_ISDIR(stat.st_mode)) {
            add_node_listener(stat.st_dev, stat.st_ino, B_WATCH_DIRECTORY,
                sDirectoryWatcher);

            directory_node_entry *entry
                = new(std::nothrow) directory_node_entry;
            if (entry != NULL) {
                entry->node = stat.st_ino;
                sDirectoryNodeHash.Insert(entry);
            }

            // We need to make sure that drivers in ie. "audio/raw/" can
            // be found as well - therefore, we must make sure that "audio"
            // exists on /dev.
            size_t length = strlen("drivers/dev");
            if (strncmp(type, "drivers/dev", length))
                continue;

            path.SetTo(type);
            path.Append(iterator.CurrentName());
            devfs_publish_directory(path.Path() + length + 1);
            continue;
        }

        driver_entry *entry = new_driver_entry(path.Path(), stat.st_dev,
            stat.st_ino);
        if (entry == NULL) {
            // Fix: free the entries collected so far — the early return used
            // to leak every queued driver_entry and its strdup'd path.
            while (driver_entry* queued = drivers.RemoveHead()) {
                free(queued->path);
                free(queued);
            }
            return B_NO_MEMORY;
        }

        TRACE(("found potential driver: %s\n", path.Path()));
        drivers.Add(entry);
    }

    if (drivers.IsEmpty())
        return B_OK;

    // ToDo: do something with the remaining drivers... :)
    try_drivers(drivers);
    return B_OK;
}
// #pragma mark - LegacyDevice
/*! Wraps a legacy (BeOS-style) driver's device_hooks in the new device
    model. \a driver may be NULL for devices published directly via
    legacy_driver_publish(). Allocation failures are deferred: callers must
    check InitCheck() before using the object.
*/
LegacyDevice::LegacyDevice(legacy_driver* driver, const char* path,
    device_hooks* hooks)
    :
    fDriver(driver),
    fRepublished(true),
    fRemovedFromParent(false)
{
    fDeviceModule = (device_module_info*)malloc(sizeof(device_module_info));
    if (fDeviceModule != NULL)
        memset(fDeviceModule, 0, sizeof(device_module_info));

    fDeviceData = this;
    fPath = strdup(path);

    SetHooks(hooks);
}
// Frees the malloc()'d module info and the strdup()'d path.
LegacyDevice::~LegacyDevice()
{
    free(fDeviceModule);
    free((char*)fPath);
}
/*! Reports whether both constructor allocations (module info and path copy)
    succeeded. Returns B_OK on success, B_NO_MEMORY otherwise.
*/
status_t
LegacyDevice::InitCheck() const
{
    if (fDeviceModule == NULL || fPath == NULL)
        return B_NO_MEMORY;
    return B_OK;
}
/*! Called on first (and every nested) open of the device. On the first
    initialization, reloads the backing driver if it was unloaded or its
    binary changed on disk, then bumps the driver's usage count.
*/
status_t
LegacyDevice::InitDevice()
{
    RecursiveLocker _(sLock);

    // only the transition 0 -> 1 does real work
    if (fInitialized++ > 0)
        return B_OK;

    if (fDriver != NULL && fDriver->devices_used == 0
        && (fDriver->image < 0 || fDriver->binary_updated)) {
        status_t status = reload_driver(fDriver);
        if (status < B_OK)
            return status;
    }

    if (fDriver != NULL)
        fDriver->devices_used++;

    return B_OK;
}
/*! Counterpart of InitDevice(): drops one initialization reference and, on
    the last one, releases the driver usage count — unloading the driver
    entirely once it has no used and no published devices left.
*/
void
LegacyDevice::UninitDevice()
{
    RecursiveLocker _(sLock);

    // only the transition 1 -> 0 does real work
    if (fInitialized-- > 1)
        return;

    if (fDriver != NULL) {
        if (--fDriver->devices_used == 0 && fDriver->devices.IsEmpty())
            unload_driver(fDriver);
        fDriver = NULL;
    }
}
/*! Called by devfs when the device node goes away. Detaches the device from
    its driver's list (unless that already happened) and destroys the
    object. NOTE: `delete this` — the object must not be touched afterwards.
*/
void
LegacyDevice::Removed()
{
    RecursiveLocker _(sLock);

    if (!fRemovedFromParent && fDriver != NULL)
        fDriver->devices.Remove(this);

    delete this;
}
/*! Installs the legacy device_hooks and mirrors them into the
    device_module_info compatibility structure. select/deselect are only
    honored for API version >= 2 drivers (or driver-less devices), since
    earlier driver APIs did not define them.
*/
void
LegacyDevice::SetHooks(device_hooks* hooks)
{
    // TODO: setup compatibility layer!
    fHooks = hooks;

    fDeviceModule->close = hooks->close;
    fDeviceModule->free = hooks->free;
    fDeviceModule->control = hooks->control;
    fDeviceModule->read = hooks->read;
    fDeviceModule->write = hooks->write;

    if (fDriver == NULL || fDriver->api_version >= 2) {
        // According to Be newsletter, vol II, issue 36,
        // version 2 added readv/writev, which we don't support, but also
        // select/deselect.
        if (hooks->select != NULL) {
            // Note we set the module's select to a non-null value to indicate
            // that we have select. HasSelect() will therefore return the
            // correct answer. As Select() is virtual our compatibility
            // version below is going to be called though, that redirects to
            // the proper select hook, so it is ok to set it to an invalid
            // address here.
            fDeviceModule->select = (status_t (*)(void*, uint8, selectsync*))~0;
        }

        fDeviceModule->deselect = hooks->deselect;
    }
}
// Forwards directly to the legacy driver's open() hook.
status_t
LegacyDevice::Open(const char* path, int openMode, void** _cookie)
{
    return Hooks()->open(path, openMode, _cookie);
}
/*! Compatibility shim: forwards to the legacy select() hook, which takes an
    extra (unused, passed as 0) `ref` argument compared to the new API. This
    is the virtual override referred to in SetHooks() — the fake non-NULL
    module select pointer is never actually called.
*/
status_t
LegacyDevice::Select(void* cookie, uint8 event, selectsync* sync)
{
    return Hooks()->select(cookie, event, 0, sync);
}
// #pragma mark - kernel private API
/*! Registers all driver images the boot loader preloaded. For each
    non-module preloaded image, the expected on-disk path under the system
    add-ons directory is reconstructed and add_driver() is called with the
    already-loaded image id; images that fail to register are unloaded.
*/
extern "C" void
legacy_driver_add_preloaded(kernel_args* args)
{
    // NOTE: This function does not exit in case of error, since it
    // needs to unload the images then. Also the return code of
    // the path operations is kept separate from the add_driver()
    // success, so that even if add_driver() fails for one driver, it
    // is still tried for the other drivers.
    // NOTE: The initialization success of the path objects is implicitely
    // checked by the immediately following functions.
    KPath basePath;
    status_t status = __find_directory(B_BEOS_ADDONS_DIRECTORY,
        gBootDevice, false, basePath.LockBuffer(), basePath.BufferSize());
    if (status != B_OK) {
        dprintf("legacy_driver_add_preloaded: find_directory() failed: "
            "%s\n", strerror(status));
    }
    basePath.UnlockBuffer();
    if (status == B_OK)
        status = basePath.Append("kernel");
    if (status != B_OK) {
        dprintf("legacy_driver_add_preloaded: constructing base driver "
            "path failed: %s\n", strerror(status));
        return;
    }

    struct preloaded_image* image;
    for (image = args->preloaded_images; image != NULL; image = image->next) {
        if (image->is_module || image->id < 0)
            continue;

        KPath imagePath(basePath);
        status = imagePath.Append(image->name);

        // try to add the driver
        TRACE(("legacy_driver_add_preloaded: adding driver %s\n",
            imagePath.Path()));

        if (status == B_OK)
            status = add_driver(imagePath.Path(), image->id);
        if (status != B_OK) {
            dprintf("legacy_driver_add_preloaded: Failed to add \"%s\": %s\n",
                (char *)image->name, strerror(status));
            unload_kernel_add_on(image->id);
        }
    }
}
// Registers the driver at \a path; -1 means "no preloaded image" so the
// binary is loaded on demand.
extern "C" status_t
legacy_driver_add(const char* path)
{
    return add_driver(path, -1);
}
/*! Publishes a device at \a path in devfs backed only by the given hooks,
    without any associated driver object. Returns B_NO_MEMORY if the device
    wrapper cannot be created, otherwise the devfs publish result; the
    wrapper is destroyed again on any failure.
*/
extern "C" status_t
legacy_driver_publish(const char *path, device_hooks *hooks)
{
    // we don't have a driver, just publish the hooks
    LegacyDevice* device = new(std::nothrow) LegacyDevice(NULL, path, hooks);
    if (device == NULL)
        return B_NO_MEMORY;

    status_t status = device->InitCheck();
    if (status == B_OK)
        status = devfs_publish_device(path, device);

    if (status != B_OK) {
        delete device;
        return status;
    }

    return B_OK;
}
/*! Asks the named, already-registered driver to republish its device
    entries. Returns B_ENTRY_NOT_FOUND if no driver with that name is
    registered.
*/
extern "C" status_t
legacy_driver_rescan(const char* driverName)
{
    RecursiveLocker locker(sLock);

    legacy_driver* driver = sDriverHash->Lookup(driverName);
    if (driver == NULL)
        return B_ENTRY_NOT_FOUND;

    // Republish the driver's entries
    return republish_driver(driver);
}
/*! Probes for drivers under "drivers/dev[/\a subPath]". On the first probe
    after the boot volume is available, also installs node monitors on the
    "dev" and "bin" driver directories of all (enabled) add-on locations so
    that later additions/removals are picked up automatically.
*/
extern "C" status_t
legacy_driver_probe(const char* subPath)
{
    TRACE(("legacy_driver_probe(type = %s)\n", subPath));

    char devicePath[64];
    snprintf(devicePath, sizeof(devicePath), "drivers/dev%s%s",
        subPath[0] ? "/" : "", subPath);

    if (!sWatching && gBootDevice > 0) {
        // We're probing the actual boot volume for the first time,
        // let's watch its driver directories for changes
        const directory_which whichPath[] = {
            B_USER_NONPACKAGED_ADDONS_DIRECTORY,
            B_USER_ADDONS_DIRECTORY,
            B_SYSTEM_NONPACKAGED_ADDONS_DIRECTORY,
            B_BEOS_ADDONS_DIRECTORY
        };
        KPath path;

        // placement-construct the statically allocated watcher
        new(&sDirectoryWatcher) DirectoryWatcher;

        bool disableUserAddOns = get_safemode_boolean(
            B_SAFEMODE_DISABLE_USER_ADD_ONS, false);

        for (uint32 i = 0; i < sizeof(whichPath) / sizeof(whichPath[0]); i++) {
            // the first two entries are the user directories
            if (i < 2 && disableUserAddOns)
                continue;

            if (__find_directory(whichPath[i], gBootDevice, true,
                    path.LockBuffer(), path.BufferSize()) == B_OK) {
                path.UnlockBuffer();
                path.Append("kernel/drivers");
                start_watching(path.Path(), "dev");
                start_watching(path.Path(), "bin");
            } else
                path.UnlockBuffer();
        }

        sWatching = true;
    }

    return probe_for_drivers(devicePath);
}
/*! One-time initialization of the legacy driver subsystem: driver hash
    table, event semaphore, lock, watcher/event-list singletons, the
    driver-events worker thread, and the KDL debugger commands.
*/
extern "C" status_t
legacy_driver_init(void)
{
    sDriverHash = new DriverTable();
    if (sDriverHash == NULL || sDriverHash->Init(DRIVER_HASH_SIZE) != B_OK)
        return B_NO_MEMORY;

    sDriverWatcherEventSemaphore = create_sem(0, "driver_event_sem");
    ASSERT_ALWAYS(sDriverWatcherEventSemaphore >= 0);

    recursive_lock_init(&sLock, "legacy driver");

    // placement-construct the statically allocated singletons
    new(&sDriverWatcher) DriverWatcher;
    new(&sDriverEvents) DriverEventList;

    thread_id thread = spawn_kernel_thread(handle_driver_events, "driver_events", B_LOW_PRIORITY, nullptr);
    ASSERT_ALWAYS(thread >= 0);
    resume_thread(thread);

    add_debugger_command("legacy_driver", &dump_driver,
        "info about a legacy driver entry");
    add_debugger_command("legacy_device", &dump_device,
        "info about a legacy device");

    return B_OK;
}
|
/**
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "identifier_evaluator.h"
#include "field_evaluator.h"
#include "instance_field_reader.h"
#include "local_variable_reader.h"
#include "messages.h"
#include "model.h"
#include "readers_factory.h"
#include "static_field_reader.h"
namespace devtools {
namespace cdbg {
// The parameter is taken by value as a sink; move it into the member
// instead of copying (the original copied, paying one extra allocation).
IdentifierEvaluator::IdentifierEvaluator(std::string identifier_name)
    : identifier_name_(std::move(identifier_name)) {}
// Releases the JNI reference held by the static field reader, if Compile()
// resolved the identifier to a static field.
IdentifierEvaluator::~IdentifierEvaluator() {
    if (static_field_reader_ != nullptr) {
        static_field_reader_->ReleaseRef();
    }
}
// Resolves the identifier against the current evaluation point, trying in
// order: (1) local variable, (2) implicit instance field ("this.<name>"),
// (3) static field of the enclosing class. On success stores the matching
// reader(s), the static result type, and the computer_ member-function
// pointer used by Evaluate(); on failure reports the most specific of the
// per-case error messages, defaulting to "invalid identifier".
bool IdentifierEvaluator::Compile(
    ReadersFactory* readers_factory,
    FormatMessageModel* error_message) {
    *error_message = FormatMessageModel();

    // Case 1: this is a local variable.
    FormatMessageModel local_variable_message;
    variable_reader_ = readers_factory->CreateLocalVariableReader(
        identifier_name_,
        &local_variable_message);
    if (variable_reader_ != nullptr) {
        result_type_ = variable_reader_->GetStaticType();
        computer_ = &IdentifierEvaluator::LocalVariableComputer;
        return true;
    }

    // Case 2: implicitly referenced instance field ("myInt" is equivalent to
    // "this.myInt" unless we are in a static method).
    FormatMessageModel local_instance_message;
    std::unique_ptr<LocalVariableReader> local_instance_reader =
        readers_factory->CreateLocalInstanceReader();
    if (local_instance_reader != nullptr) {
        // The identifier may name a (possibly nested) field reachable from
        // "this"; the chain holds one reader per dereference step.
        auto chain = CreateInstanceFieldReadersChain(
            readers_factory,
            local_instance_reader->GetStaticType().object_signature,
            identifier_name_,
            &local_instance_message);
        if (!chain.empty()) {
            variable_reader_ = std::move(local_instance_reader);
            instance_fields_chain_ = std::move(chain);
            // The static type of the expression is that of the last field.
            result_type_ = instance_fields_chain_.back()->GetStaticType();
            computer_ = &IdentifierEvaluator::ImplicitInstanceFieldComputer;
            return true;
        }

        // "Field not found" error is considered non-specific here.
        if (local_instance_message.format == InstanceFieldNotFound) {
            local_instance_message = FormatMessageModel();
        }
    }

    // Case 3: static variable in the class containing the current evaluation
    // point.
    FormatMessageModel static_field_message;
    std::unique_ptr<StaticFieldReader> static_field_reader =
        readers_factory->CreateStaticFieldReader(
            identifier_name_,
            &static_field_message);
    if (static_field_reader != nullptr) {
        static_field_reader_ = std::move(static_field_reader);
        result_type_ = static_field_reader_->GetStaticType();
        computer_ = &IdentifierEvaluator::StaticFieldComputer;
        return true;
    }

    // Choose the most specific message defaulting to "invalid
    // identifier".
    const FormatMessageModel* messages[] = {
        &local_variable_message,
        &local_instance_message,
        &static_field_message
    };

    // First non-empty, non-generic message wins.
    for (const FormatMessageModel* message : messages) {
        if (!message->format.empty() &&
            message->format != InvalidIdentifier) {
            *error_message = *message;
            break;
        }
    }

    if (error_message->format.empty()) {
        *error_message = { InvalidIdentifier, { identifier_name_ } };
    }

    return false;
}
// Dispatches to whichever computer Compile() selected. Compile() must have
// succeeded before this is called (computer_ is only set on success).
ErrorOr<JVariant> IdentifierEvaluator::Evaluate(
    const EvaluationContext& evaluation_context) const {
    return (this->*computer_)(evaluation_context);
}
// Case 1 evaluator: reads the identifier as a local variable, forwarding
// the reader's error message on failure.
ErrorOr<JVariant> IdentifierEvaluator::LocalVariableComputer(
    const EvaluationContext& evaluation_context) const {
    JVariant result;
    FormatMessageModel error;
    if (!variable_reader_->ReadValue(evaluation_context, &result, &error)) {
        return error;
    }

    return std::move(result);
}
// Case 2 evaluator: reads "this", then follows the instance-field chain one
// dereference at a time, failing with NullPointerDereference if any link in
// the chain is null.
ErrorOr<JVariant> IdentifierEvaluator::ImplicitInstanceFieldComputer(
    const EvaluationContext& evaluation_context) const {
    JVariant result;
    FormatMessageModel error;
    if (!variable_reader_->ReadValue(evaluation_context, &result, &error)) {
        return error;
    }

    for (const auto& reader : instance_fields_chain_) {
        jobject source_jobject = nullptr;
        // Each intermediate value must be an object reference.
        if (!result.get<jobject>(&source_jobject)) {
            return INTERNAL_ERROR_MESSAGE;
        }

        if (source_jobject == nullptr) {
            // Attempt to dereference null object.
            return FormatMessageModel { NullPointerDereference };
        }

        JVariant next;
        FormatMessageModel error;
        if (!reader->ReadValue(source_jobject, &next, &error)) {
            return error;
        }
        result = std::move(next);
    }

    return std::move(result);
}
// Case 3 evaluator: reads the static field (no evaluation context needed —
// static fields are independent of the current frame).
ErrorOr<JVariant> IdentifierEvaluator::StaticFieldComputer(
    const EvaluationContext& evaluation_context) const {
    JVariant result;
    FormatMessageModel error;
    if (!static_field_reader_->ReadValue(&result, &error)) {
        return error;
    }

    return std::move(result);
}
} // namespace cdbg
} // namespace devtools
|
#ifndef DARKSTARDTSCONVERTER_PAL_VIEW_HPP
#define DARKSTARDTSCONVERTER_PAL_VIEW_HPP
#include "graphics_view.hpp"
namespace studio::views
{
// Viewer for palette files: renders each palette as a grid of colored
// rectangles and lets the UI switch between the palettes found in the
// stream.
class pal_view
{
public:
    // Parses the palette data from the given byte stream.
    explicit pal_view(std::basic_istream<std::byte>& image_stream);

    // No keyboard shortcuts for this view.
    std::map<sf::Keyboard::Key, std::reference_wrapper<std::function<void(const sf::Event&)>>> get_callbacks() { return {}; }

    void setup_view(wxWindow& parent, sf::RenderWindow& window, ImGuiContext& guiContext);

    // Nothing to draw directly with GL; rendering happens in render_ui.
    void render_gl(wxWindow& parent, sf::RenderWindow& window, ImGuiContext& guiContext) {}

    void render_ui(wxWindow& parent, sf::RenderWindow& window, ImGuiContext& guiContext);

private:
    // Currently selected palette — points into all_rectangles; not owned.
    std::vector<sf::RectangleShape>* rectangles = nullptr;
    // One rectangle set per palette in the file.
    std::vector<std::vector<sf::RectangleShape>> all_rectangles;
};
}// namespace studio::views
#endif//DARKSTARDTSCONVERTER_PAL_VIEW_HPP
|
/*
* Copyright 2019 Google LLC. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "util/vectorutils.h"
namespace cardboard {
// Returns the dot (inner) product of two 3-dimensional Vectors.
double Dot(const Vector<3>& v0, const Vector<3>& v1) {
    return v0[0] * v1[0] + v0[1] * v1[1] + v0[2] * v1[2];
}
// Returns the dot (inner) product of two 4-dimensional Vectors.
double Dot(const Vector<4>& v0, const Vector<4>& v1) {
    return v0[0] * v1[0] + v0[1] * v1[1] + v0[2] * v1[2] + v0[3] * v1[3];
}
// Returns the 3-dimensional cross product of 2 Vectors. Note that this is
// defined only for 3-dimensional Vectors. The result is perpendicular to
// both inputs, following the right-hand rule.
Vector<3> Cross(const Vector<3>& v0, const Vector<3>& v1) {
    return Vector<3>(v0[1] * v1[2] - v0[2] * v1[1], v0[2] * v1[0] - v0[0] * v1[2],
                     v0[0] * v1[1] - v0[1] * v1[0]);
}
} // namespace cardboard
|
#include "FBRenderer.h"
#include <exception>
#include <iostream>
#include "IRenderer.h"
// for windows
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
using namespace fb;
typedef fb::IRenderer* (*CreateRendererD3D12)();
/*! Creates and initializes a renderer of the requested type by loading the
    matching backend DLL and calling its factory export. Returns nullptr on
    any failure (unknown type, DLL missing, export missing, or factory
    returning null); the DLL is unloaded again on failure paths so it does
    not leak.
*/
IRenderer* fb::InitRenderer(RendererType type, void* windowHandle)
{
    std::wcout << L"fb::InitRenderer" << std::endl;
    switch (type)
    {
    case RendererType::D3D12:
    {
        // LoadLibraryA explicitly: the literal is narrow, so plain
        // LoadLibrary would not compile in UNICODE builds.
        HMODULE hmodule = LoadLibraryA("FBRendererD3D12.dll");
        if (!hmodule)
        {
            std::wcout << L"Cannot load FBRendererD3D12.dll" << std::endl;
            return nullptr;
        }
        auto createFunc = (CreateRendererD3D12)GetProcAddress(hmodule, "CreateRendererD3D12");
        if (!createFunc)
        {
            // Fix: the export was previously called without a null check,
            // crashing if the DLL doesn't provide it.
            std::wcout << L"CreateRendererD3D12 entry point not found." << std::endl;
            FreeLibrary(hmodule);
            return nullptr;
        }
        auto renderer = createFunc();
        if (!renderer)
        {
            std::wcout << L"Failed to create Renderer." << std::endl;
            // Fix: previously the module stayed loaded on this failure path.
            FreeLibrary(hmodule);
            return nullptr;
        }
        renderer->Initialize(windowHandle);
        // NOTE(review): the module handle is intentionally kept loaded for
        // the renderer's lifetime; there is no matching FreeLibrary on the
        // success path.
        return renderer;
    }
    }
    std::wcout << L"Failed to initialize renderer!" << std::endl;
    return nullptr;
}
/*! Finalizes the renderer and clears the caller's pointer. Fix: tolerate a
    null pointer (e.g. when InitRenderer() failed and the caller finalizes
    unconditionally) instead of dereferencing it.
*/
void fb::FinalizeRenderer(IRenderer*& renderer)
{
    if (renderer == nullptr)
        return;
    renderer->Finalize();
    renderer = nullptr;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.